family_directory
assert family_directory("some_directory/Foo.ttf") == "some_directory"
assert family_directory("some_directory/subdir/Foo.ttf") == "some_directory/subdir"
assert family_directory("Foo.ttf") == "." # This is meant to ensure license files
# are correctly detected in the current
# working directory.
def test_check_family_has_license():
""" Check font project has a license. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/family/has_license")
from fontbakery.profiles.googlefonts import licenses
# The lines marked with 'hack' below are meant to
# keep fontbakery's own license file from
# interfering with this test.
detected_licenses = licenses(portable_path("data/test/028/multiple"))
detected_licenses.pop(-1) # hack
assert_results_contain(check([], {"licenses": detected_licenses}),
FAIL, 'multiple',
'with multiple licenses...')
detected_licenses = licenses(portable_path("data/test/028/none"))
detected_licenses.pop(-1) # hack
assert_results_contain(check([], {"licenses": detected_licenses}),
FAIL, 'no-license',
'with no license...')
detected_licenses = licenses(portable_path("data/test/028/pass_ofl"))
detected_licenses.pop(-1) # hack
assert_PASS(check([], {"licenses": detected_licenses}),
'with a single OFL license...')
detected_licenses = licenses(portable_path("data/test/028/pass_apache"))
detected_licenses.pop(-1) # hack
assert_PASS(check([], {"licenses": detected_licenses}),
'with a single Apache license...')
def test_check_license_ofl_copyright():
"""Check license file has good copyright string."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/license/OFL_copyright")
# And Mada has a bad copyright string format:
font = TEST_FILE("mada/Mada-Regular.ttf")
ttFont = TTFont(font)
assert_results_contain(check(ttFont),
FAIL, "bad-format",
"with bad string formatting.")
# so we fix it:
SOME_GOOD_TEXT = "Copyright 2019 The Montserrat Project Authors (https://github.com/julietaula/montserrat)"
assert_PASS(check(ttFont, {"license_contents": SOME_GOOD_TEXT}),
'with good license contents.')
def test_check_license_ofl_body_text():
"""Check OFL.txt contains correct body text."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/license/OFL_body_text")
# Our reference Montserrat family is known to have
# a proper OFL.txt license file.
# NOTE: This is currently considered good
# even though it uses an "http://" URL
font = TEST_FILE("montserrat/Montserrat-Regular.ttf")
ttFont = TTFont(font)
assert_PASS(check(ttFont),
'with a good OFL.txt license with "http://" url.')
# using "https://" is also considered good:
good_license = check["license_contents"].replace("http://", "https://")
assert_PASS(check(ttFont, {'license_contents': good_license}),
'with a good OFL.txt license with "https://" url.')
# modify a tiny bit of the license text, to trigger the FAIL:
bad_license = check["license_contents"].replace("SIL OPEN FONT LICENSE Version 1.1",
"SOMETHING ELSE :-P Version Foo")
assert_results_contain(check(ttFont, {'license_contents': bad_license}),
FAIL, "incorrect-ofl-body-text",
"with incorrect ofl body text")
def test_check_name_license(mada_ttFonts):
""" Check copyright namerecords match license file. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/license")
# Our reference Mada family has its copyright name records properly set
# identifying it as being licensed under the Open Font License
for ttFont in mada_ttFonts:
assert_PASS(check(ttFont),
'with good fonts ...')
for ttFont in mada_ttFonts:
assert_results_contain(check(ttFont, {"license": "LICENSE.txt"}), # Apache
FAIL, 'wrong',
'with wrong entry values ...')
for ttFont in mada_ttFonts:
delete_name_table_id(ttFont, NameID.LICENSE_DESCRIPTION)
assert_results_contain(check(ttFont),
FAIL, 'missing',
'with missing copyright namerecords ...')
# TODO:
# WARN, "http" / "http-in-description"
def NOT_IMPLEMENTED_test_check_name_license_url():
""" License URL matches License text on name table? """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/name/license_url")
# TODO: Implement-me!
#
# code-paths:
# - FAIL, code="ufl"
# - FAIL, code="licensing-inconsistency"
# - FAIL, code="no-license-found"
# - FAIL, code="bad-entries"
# - WARN, code="http-in-description"
# - WARN, code="http"
# - PASS
def test_check_name_description_max_length():
""" Description strings in the name table
must not exceed 200 characters.
"""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/description_max_length")
# Our reference Mada Regular is known to be good here.
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
assert_PASS(check(ttFont),
'with a good font...')
# Here we add strings to NameID.DESCRIPTION with exactly 200 chars,
# so it should still PASS:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.DESCRIPTION:
ttFont['name'].names[i].string = ('a' * 200).encode(name.getEncoding())
assert_PASS(check(ttFont),
'with a 200 char string...')
# And here we make the strings longer than 200 chars
# in order to make the check emit a WARN:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.DESCRIPTION:
ttFont['name'].names[i].string = ('a' * 201).encode(name.getEncoding())
assert_results_contain(check(ttFont),
WARN, 'too-long',
'with a too long description string...')
def test_check_hinting_impact():
""" Show hinting filesize impact. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/hinting_impact")
font = TEST_FILE("mada/Mada-Regular.ttf")
assert_results_contain(check(font),
INFO, 'size-impact',
'this check always emits an INFO result...')
# TODO: test the CFF code-path
def test_check_file_size():
"""Ensure files are not too large."""
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/file_size")
assert_PASS(check(TEST_FILE("mada/Mada-Regular.ttf")))
assert_results_contain(check(TEST_FILE("varfont/inter/Inter[slnt,wght].ttf")),
WARN, 'large-font',
'with quite a big font...')
assert_results_contain(check(TEST_FILE("cjk/SourceHanSans-Regular.otf")),
FAIL, 'massive-font',
'with a very big font...')
def test_check_name_version_format():
""" Version format is correct in 'name' table ? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/version_format")
# Our reference Mada Regular font is good here:
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
# So it must PASS the check:
assert_PASS(check(ttFont),
'with a good font...')
# then we introduce bad strings in all version-string entries:
for i, name in enumerate(ttFont["name"].names):
if name.nameID == NameID.VERSION_STRING:
invalid = "invalid-version-string".encode(name.getEncoding())
ttFont["name"].names[i].string = invalid
assert_results_contain(check(ttFont),
FAIL, 'bad-version-strings',
'with bad version format in name table...')
# and finally we remove all version-string entries:
for i, name in enumerate(ttFont["name"].names):
if name.nameID == NameID.VERSION_STRING:
del ttFont["name"].names[i]
assert_results_contain(check(ttFont),
FAIL, 'no-version-string',
'with font lacking version string entries in name table...')
def NOT_IMPLEMENTED_test_check_old_ttfautohint():
""" Font has old ttfautohint applied? """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/old_ttfautohint")
# TODO: Implement-me!
#
# code-paths:
# - FAIL, code="lacks-version-strings"
# - INFO, code="version-not-detected" "Could not detect which version of ttfautohint
# was used in this font."
# - WARN, code="old-ttfa" "detected an old ttfa version"
# - PASS
# - FAIL, code="parse-error"
@pytest.mark.parametrize("expected_status,expected_keyword,reason,font",[
(FAIL, "lacks-ttfa-params",
'with a font lacking ttfautohint params on its version strings on the name table.',
TEST_FILE("coveredbyyourgrace/CoveredByYourGrace.ttf")),
(SKIP, "not-hinted",
'with a font which appears to our heuristic as not hinted using ttfautohint.',
TEST_FILE("mada/Mada-Regular.ttf")),
(PASS, "ok",
'with a font that has ttfautohint params'
' (-l 6 -r 36 -G 0 -x 10 -H 350 -D latn -f cyrl -w "" -X "")',
TEST_FILE("merriweather/Merriweather-Regular.ttf"))
])
def test_check_has_ttfautohint_params(expected_status, expected_keyword, reason, font):
""" Font has ttfautohint params? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/has_ttfautohint_params")
assert_results_contain(check(font),
expected_status, expected_keyword,
reason)
def test_check_epar():
""" EPAR table present in font? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/epar")
# Our reference Mada Regular lacks an EPAR table:
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
# So it must emit an INFO message inviting the designers
# to learn more about it:
assert_results_contain(check(ttFont),
INFO, 'lacks-EPAR',
'with a font lacking an EPAR table...')
# add a fake EPAR table to validate the PASS code-path:
ttFont["EPAR"] = "foo"
assert_PASS(check(ttFont),
'with a good font...')
def NOT_IMPLEMENTED_test_check_gasp():
""" Is GASP table correctly set? """
# check = CheckTester(googlefonts_profile,
# "com.google.fonts/check/gasp")
# TODO: Implement-me!
#
# code-paths:
# - FAIL, "lacks-gasp" "Font is missing the gasp table."
# - FAIL, "empty" "The gasp table has no values."
# - FAIL, "lacks-ffff-range" "The gasp table does not have a 0xFFFF gasp range."
# - INFO, "ranges" "These are the ppm ranges declared on the gasp table:"
# - WARN, "non-ffff-range" "The gasp table has a range that may be unneccessary."
# - WARN, "unset-flags" "All flags in gasp range 0xFFFF (i.e. all font sizes) must be set to 1"
# - PASS "The gasp table is correctly set."
def test_check_name_familyname_first_char():
""" Make sure family name does not begin with a digit. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/familyname_first_char")
# Our reference Mada Regular is known to be good
ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
# So it must PASS the check:
assert_PASS(check(ttFont),
'with a good font...')
# alter the family-name prepending a digit:
for i, name in enumerate(ttFont["name"].names):
if name.nameID == NameID.FONT_FAMILY_NAME:
ttFont["name"].names[i].string = "1badname".encode(name.getEncoding())
# and make sure the check FAILs:
assert_results_contain(check(ttFont),
FAIL, 'begins-with-digit',
'with a font in which the family name begins with a digit...')
def test_check_name_ascii_only_entries():
""" Are there non-ASCII characters in ASCII-only NAME table entries? """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/name/ascii_only_entries")
# Our reference Merriweather Regular is known to be good
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf"))
# So it must PASS the check:
assert_PASS(check(ttFont),
'with a good font...')
# The OpenType spec requires ASCII for the POSTSCRIPT_NAME (nameID 6).
# For COPYRIGHT_NOTICE (nameID 0) ASCII is required because that
# string should be the same in CFF fonts which also have this
# requirement in the OpenType spec.
# Let's check detection of both. First nameId 6:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.POSTSCRIPT_NAME:
ttFont['name'].names[i].string = "Infração".encode(encoding="utf-8")
assert_results_contain(check(ttFont),
FAIL, 'bad-string',
'with non-ascii on nameID 6 entry (Postscript name)...')
assert_results_contain(check(ttFont),
FAIL, 'non-ascii-strings',
'with non-ascii on nameID 6 entry (Postscript name)...')
# Then reload the good font
ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf"))
# And check detection of a problem on nameId 0:
for i, name in enumerate(ttFont['name'].names):
if name.nameID == NameID.COPYRIGHT_NOTICE:
ttFont['name'].names[i].string = "Infração".encode(encoding="utf-8")
assert_results_contain(check(ttFont),
FAIL, 'bad-string',
'with non-ascii on nameID 0 entry (Copyright notice)...')
assert_results_contain(check(ttFont),
FAIL, 'non-ascii-strings',
'with non-ascii on nameID 0 entry (Copyright notice)...')
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD
from collections import Sequence
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from .utils import check_arrays, array2d, atleast2d_or_csr, safe_asarray
from .utils import warn_if_not_float
from .utils.fixes import unique
from .base import BaseEstimator, TransformerMixin
from .utils.sparsefuncs import inplace_csr_row_normalize_l1
from .utils.sparsefuncs import inplace_csr_row_normalize_l2
from .utils.sparsefuncs import inplace_csr_column_scale
from .utils.sparsefuncs import mean_variance_axis0
__all__ = ['Binarizer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'Normalizer',
'StandardScaler',
'binarize',
'normalize',
'scale']
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
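A minimal illustrative sketch (input values are arbitrary); note how the
zero-variance second column's std is reset to 1.0:
>>> import numpy as np
>>> m, s = _mean_and_std(np.array([[1., 5.], [3., 5.]]))
>>> [float(v) for v in m], [float(v) for v in s]
([2.0, 5.0], [1.0, 1.0])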
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
if isinstance(std_, np.ndarray):
std_[std_ == 0.0] = 1.0
elif std_ == 0.:
std_ = 1.
else:
std_ = None
return mean_, std_
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
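Examples
--------
A minimal illustrative sketch (input values are arbitrary); each column is
centered to zero mean and scaled to unit variance:
>>> import numpy as np
>>> Xs = scale(np.array([[1., 2.], [3., 4.], [5., 6.]]))
>>> np.allclose(Xs.mean(axis=0), 0.0) and np.allclose(Xs.std(axis=0), 1.0)
True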
"""
if sp.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
warn_if_not_float(X, estimator='The scale function')
if not sp.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis0(X)
var[var == 0.0] = 1.0
inplace_csr_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
warn_if_not_float(X, estimator='The scale function')
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
if with_std:
Xr /= std_
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Standardizes features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The standardization is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This standardization is often used as an alternative to zero mean,
unit variance scaling.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default is True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
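Examples
--------
A minimal illustrative sketch with the default ``feature_range=(0, 1)``;
the input values are arbitrary and each column is mapped onto [0, 1]:
>>> import numpy as np
>>> X = np.array([[1., 10.], [2., 20.], [3., 30.]])
>>> np.allclose(MinMaxScaler().fit_transform(X),
...             [[0., 0.], [0.5, 0.5], [1., 1.]])
True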
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
min_ = np.min(X, axis=0)
scale_ = np.max(X, axis=0) - min_
self.scale_ = (feature_range[1] - feature_range[0]) / scale_
self.min_ = feature_range[0] - min_ / scale_
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
X *= self.scale_
X += self.min_
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Attributes
----------
`mean_` : array of floats with shape [n_features]
The mean value for each feature in the training set.
`std_` : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
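Examples
--------
A minimal illustrative sketch (input values are arbitrary):
>>> import numpy as np
>>> X = np.array([[0., 0.], [0., 0.], [1., 1.], [1., 1.]])
>>> scaler = StandardScaler().fit(X)
>>> np.allclose(scaler.mean_, [0.5, 0.5])
True
>>> np.allclose(scaler.transform(X),
...             [[-1., -1.], [-1., -1.], [1., 1.], [1., 1.]])
True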
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_arrays(X, copy=self.copy, sparse_format="csr")[0]
if sp.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
warn_if_not_float(X, estimator=self)
self.mean_ = None
var = mean_variance_axis0(X)[1]
self.std_ = np.sqrt(var)
self.std_[var == 0.0] = 1.0
return self
else:
warn_if_not_float(X, estimator=self)
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
X = check_arrays(X, copy=copy, sparse_format="csr")[0]
if sp.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
warn_if_not_float(X, estimator=self)
inplace_csr_column_scale(X, 1 / self.std_)
else:
warn_if_not_float(X, estimator=self)
if self.with_mean:
    X -= self.mean_
if self.with_std:
    X /= self.std_
return X
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
import numpy as np
import time
import math
import random
import datetime
import os
from utility_functions import *
from options import *
import matplotlib.pyplot as plt
from torch.optim.optimizer import Optimizer, required
from torch.autograd import Variable
from torch import Tensor
from torch.nn import Parameter
from matplotlib.pyplot import cm
from math import pi
from skimage.transform.pyramids import pyramid_reduce
from torch.utils.tensorboard import SummaryWriter
import copy
from pytorch_memlab import LineProfiler, MemReporter, profile, profile_every
import h5py
from datasets import NetworkDataset, LocalDataset
FlowSTSR_folder_path = os.path.dirname(os.path.abspath(__file__))
input_folder = os.path.join(FlowSTSR_folder_path, "InputData")
output_folder = os.path.join(FlowSTSR_folder_path, "Output")
save_folder = os.path.join(FlowSTSR_folder_path, "SavedModels")
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('Conv3d') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('Norm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def reset_grads(model,require_grad):
for p in model.parameters():
p.requires_grad_(require_grad)
return model
def TAD(field, device):
if(field.shape[1] == 2):
tx = spatial_derivative2D(field[:,0:1,:,:], 1, device)
ty = spatial_derivative2D(field[:,1:2,:,:], 0, device)
g = torch.abs(tx + ty)
elif(field.shape[1] == 3):
tx = spatial_derivative2D(field[:,0:1,:,:], 1, device)
ty = spatial_derivative2D(field[:,1:2,:,:], 0, device)
g = torch.abs(tx + ty)
return g
def TAD3D(field, device):
tx = spatial_derivative3D(field[:,0:1,:,:,:], 2, device)
ty = spatial_derivative3D(field[:,1:2,:,:,:], 1, device)
tz = spatial_derivative3D(field[:,2:3,:,:,:], 0, device)
g = torch.abs(tx + ty + tz)
return g
def curl2D(field, device):
dydx = spatial_derivative2D(field[:,1:2], 0, device)
dxdy = spatial_derivative2D(field[:,0:1], 1, device)
output = dydx-dxdy
return output
def curl3D(field, device):
dzdy = spatial_derivative3D_CD(field[:,2:3], 1, device)
dydz = spatial_derivative3D_CD(field[:,1:2], 2, device)
dxdz = spatial_derivative3D_CD(field[:,0:1], 2, device)
dzdx = spatial_derivative3D_CD(field[:,2:3], 0, device)
dydx = spatial_derivative3D_CD(field[:,1:2], 0, device)
dxdy = spatial_derivative3D_CD(field[:,0:1], 1, device)
output = torch.cat((dzdy-dydz,dxdz-dzdx,dydx-dxdy), 1)
return output
def curl3D8(field, device):
dzdy = spatial_derivative3D_CD8(field[:,2:3], 1, device)
dydz = spatial_derivative3D_CD8(field[:,1:2], 2, device)
dxdz = spatial_derivative3D_CD8(field[:,0:1], 2, device)
dzdx = spatial_derivative3D_CD8(field[:,2:3], 0, device)
dydx = spatial_derivative3D_CD8(field[:,1:2], 0, device)
dxdy = spatial_derivative3D_CD8(field[:,0:1], 1, device)
output = torch.cat((dzdy-dydz,dxdz-dzdx,dydx-dxdy), 1)
return output
def TAD3D_CD(field, device):
tx = spatial_derivative3D_CD(field[:,0:1,:,:,:], 0, device)
ty = spatial_derivative3D_CD(field[:,1:2,:,:,:], 1, device)
tz = spatial_derivative3D_CD(field[:,2:3,:,:,:], 2, device)
g = torch.abs(tx + ty + tz)
return g
def TAD3D_CD8(field, device):
tx = spatial_derivative3D_CD8(field[:,0:1,:,:,:], 0, device)
ty = spatial_derivative3D_CD8(field[:,1:2,:,:,:], 1, device)
tz = spatial_derivative3D_CD8(field[:,2:3,:,:,:], 2, device)
g = torch.abs(tx + ty + tz)
return g
def spatial_derivative2D_sobel(field, axis, device):
m = nn.ReplicationPad2d(1)
if(axis == 0):
weights = torch.tensor(
np.array([
[-1/8, 0, 1/8],
[-1/4, 0, 1/4],
[-1/8, 0, 1/8]
]
).astype(np.float32)).to(device)
weights = weights.view(1, 1, 3, 3)
field = m(field)
output = F.conv2d(field, weights)
elif(axis == 1):
weights = torch.tensor(
np.array([
[-1/8, -1/4, -1/8],
[ 0, 0, 0],
[ 1/8, 1/4, 1/8]
]
).astype(np.float32)).to(device)
weights = weights.view(1, 1, 3, 3)
field = m(field)
output = F.conv2d(field, weights)
return output
def spatial_derivative2D(field, axis, device):
m = nn.ReplicationPad2d(1)
if(axis == 0):
weights = torch.tensor(
np.array([
[0, 0, 0],
[-0.5, 0, 0.5],
[0, 0, 0]
]
).astype(np.float32)).to(device)
weights = weights.view(1, 1, 3, 3)
field = m(field)
output = F.conv2d(field, weights)
elif(axis == 1):
weights = torch.tensor(
np.array([
[0, -0.5, 0],
[0, 0, 0],
[0, 0.5, 0]
]
).astype(np.float32)).to(device)
weights = weights.view(1, 1, 3, 3)
field = m(field)
output = F.conv2d(field, weights)
return output
def spatial_derivative3D_CD(field, axis, device):
m = nn.ReplicationPad3d(1)
# the first (a) axis in [a, b, c]
if(axis == 0):
weights = torch.tensor(np.array(
[[[0, 0, 0],
[0, -0.5, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0.5, 0],
[0, 0, 0]]])
.astype(np.float32)).to(device)
elif(axis == 1):
# the second (b) axis in [a, b, c]
weights = torch.tensor(np.array([
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, -0.5, 0],
[0, 0, 0],
[0, 0.5, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]])
.astype(np.float32)).to(device)
elif(axis == 2):
# the third (c) axis in [a, b, c]
weights = torch.tensor(np.array([
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[-0.5, 0, 0.5],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[ 0, 0, 0]]])
.astype(np.float32)).to(device)
weights = weights.view(1, 1, 3, 3, 3)
field = m(field)
output = F.conv3d(field, weights)
return output
def spatial_derivative3D_CD8(field, axis, device):
m = nn.ReplicationPad3d(4)
# the first (a) axis in [a, b, c]
if(axis == 0):
weights = torch.zeros([9, 9, 9], dtype=torch.float32).to(device)
weights[0, 4, 4] = 1/280
weights[1, 4, 4] = -4/105
weights[2, 4, 4] = 1/5
weights[3, 4, 4] = -4/5
weights[4, 4, 4] = 0
weights[5, 4, 4] = 4/5
weights[6, 4, 4] = -1/5
weights[7, 4, 4] = 4/105
weights[8, 4, 4] = -1/280
elif(axis == 1):
# the second (b) axis in [a, b, c]
weights = torch.zeros([9, 9, 9], dtype=torch.float32).to(device)
weights[4, 0, 4] = 1/280
weights[4, 1, 4] = -4/105
weights[4, 2, 4] = 1/5
weights[4, 3, 4] = -4/5
weights[4, 4, 4] = 0
weights[4, 5, 4] = 4/5
weights[4, 6, 4] = -1/5
weights[4, 7, 4] = 4/105
weights[4, 8, 4] = -1/280
elif(axis == 2):
# the third (c) axis in [a, b, c]
weights = torch.zeros([9, 9, 9], dtype=torch.float32).to(device)
weights[4, 4, 0] = 1/280
weights[4, 4, 1] = -4/105
weights[4, 4, 2] = 1/5
weights[4, 4, 3] = -4/5
weights[4, 4, 4] = 0
weights[4, 4, 5] = 4/5
weights[4, 4, 6] = -1/5
weights[4, 4, 7] = 4/105
weights[4, 4, 8] = -1/280
weights = weights.view(1, 1, 9, 9, 9)
field = m(field)
output = F.conv3d(field, weights)
return output
def calc_gradient_penalty(discrim, real_data, fake_data, LAMBDA, device):
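# WGAN-GP style penalty: pick a random point on the line segment between the
# real and fake batches, evaluate the critic there, and penalize the squared
# deviation of the gradient norm from 1, scaled by LAMBDA (Gulrajani et al., 2017).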
#print real_data.size()
alpha = torch.rand(1, 1, device=device)
alpha = alpha.expand(real_data.size())
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
#interpolates = interpolates.to(device)
interpolates = torch.autograd.Variable(interpolates, requires_grad=True)
disc_interpolates = discrim(interpolates)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
#LAMBDA = 1
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
return gradient_penalty
def mag_difference(t1, t2):
mag_1 = torch.zeros(t1.shape).to(t1.device)
mag_2 = torch.zeros(t1.shape).to(t1.device)
for i in range(t1.shape[1]):
mag_1[0, 0] += t1[0, i]**2
mag_2[0, 0] += t2[0, i]**2
mag_1 = torch.sqrt(mag_1[0:, 0:1])
mag_2 = torch.sqrt(mag_2[0:, 0:1])
mag_diff = torch.abs(mag_2-mag_1)
'''
t_1 = t1*(1/torch.norm(t1, dim=1).view(1, 1, t1.shape[2], t1.shape[3]).repeat(1, t1.shape[1], 1, 1))
t_2 = t2*(1/torch.norm(t2, dim=1).view(1, 1, t1.shape[2], t1.shape[3]).repeat(1, t1.shape[1], 1, 1))
c = (t_1* t_2).sum(dim=1)
angle_diff = torch.acos(c)
angle_diff[angle_diff != angle_diff] = 0
angle_diff = angle_diff.unsqueeze(0)
'''
return mag_diff
def reflection_pad2D(frame, padding, device):
frame = F.pad(frame,
[padding, padding, padding, padding])
indices_to_fix = []
for i in range(0, padding):
indices_to_fix.append(i)
for i in range(frame.shape[2] - padding, frame.shape[2]):
indices_to_fix.append(i)
for x in indices_to_fix:
if(x < padding):
correct_x = frame.shape[2] - 2*padding - x
else:
correct_x = x - frame.shape[2] + 2*padding
for y in indices_to_fix:
if(y < padding):
correct_y = frame.shape[3] - 2*padding - y
else:
correct_y = y - frame.shape[3] + 2*padding
frame[:, :, x, y] = frame[:, :, correct_x, correct_y]
return frame
def reflection_pad3D(frame, padding, device):
frame = F.pad(frame,
[padding, padding, padding, padding, padding, padding])
indices_to_fix = []
for i in range(0, padding):
indices_to_fix.append(i)
for i in range(frame.shape[2] - padding, frame.shape[2]):
indices_to_fix.append(i)
for x in indices_to_fix:
if(x < padding):
correct_x = frame.shape[2] - 2*padding - x
else:
correct_x = x - frame.shape[2] + 2*padding
for y in indices_to_fix:
if(y < padding):
correct_y = frame.shape[3] - 2*padding - y
else:
correct_y = y - frame.shape[3] + 2*padding
for z in indices_to_fix:
if(z < padding):
correct_z = frame.shape[4] - 2*padding - z
else:
correct_z = z - frame.shape[4] + 2*padding
frame[:, :, x, y, z] = frame[:, :, correct_x, correct_y, correct_z]
return frame
def laplace_pyramid_downscale2D(frame, level, downscale_per_level, device, periodic=False):
kernel_size = 5
sigma = 2 * (1 / downscale_per_level) / 6
xy_grid = torch.zeros([kernel_size, kernel_size, 2])
for i in range(kernel_size):
for j in range(kernel_size):
xy_grid[i, j, 0] = i
xy_grid[i, j, 1] = j
mean = (kernel_size - 1)/2.
variance = sigma**2.
gaussian_kernel = (1./(2.*math.pi*variance)) *\
torch.exp(
-torch.sum((xy_grid - mean)**2., dim=-1) /\
(2*variance)
)
# Make sure sum of values in gaussian kernel equals 1.
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
# Reshape to 2d depthwise convolutional weight
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size).to(device)
gaussian_kernel = gaussian_kernel.repeat(frame.shape[1], 1, 1, 1)
import numpy as np
import sys
import monai
import ponai
# sys.path.append('/nfs/home/pedro/portio')
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import pandas as pd
import os
import argparse
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from model.model import nnUNet
import random
from model.metric import DiceLoss
import glob
import time
import nibabel as nib
import re
import monai.visualize.img2tensorboard as img2tensorboard
sys.path.append('/nfs/home/pedro/RangerLARS/over9000')
# from over9000 import RangerLars
os.chdir('/nfs/home/pedro/PhysicsPyTorch')
import porchio
from early_stopping import pytorchtools
import runai.hpo
strategy = runai.hpo.Strategy.GridSearch
runai.hpo.init('/nfs/home/pedro/', 'stratification')
# import yaml
# print(f'The pyyaml version is {yaml.__version__}')
class PairwiseMeasures(object):
def __init__(self, seg_img, ref_img,
measures=None, num_neighbors=8, pixdim=(1, 1, 1),
empty=False, list_labels=None):
self.m_dict = {
'ref volume': (self.n_pos_ref, 'Volume (Ref)'),
'seg volume': (self.n_pos_seg, 'Volume (Seg)'),
'ref bg volume': (self.n_neg_ref, 'Volume (Ref bg)'),
'seg bg volume': (self.n_neg_seg, 'Volume (Seg bg)'),
'list_labels': (self.list_labels, 'List Labels Seg'),
'fp': (self.fp, 'FP'),
'fn': (self.fn, 'FN'),
'tp': (self.tp, 'TP'),
'tn': (self.tn, 'TN'),
'n_intersection': (self.n_intersection, 'Intersection'),
'n_union': (self.n_union, 'Union'),
'sensitivity': (self.sensitivity, 'Sens'),
'specificity': (self.specificity, 'Spec'),
'accuracy': (self.accuracy, 'Acc'),
'fpr': (self.false_positive_rate, 'FPR'),
'ppv': (self.positive_predictive_values, 'PPV'),
'npv': (self.negative_predictive_values, 'NPV'),
'dice': (self.dice_score, 'Dice'),
'IoU': (self.intersection_over_union, 'IoU'),
'jaccard': (self.jaccard, 'Jaccard'),
'informedness': (self.informedness, 'Informedness'),
'markedness': (self.markedness, 'Markedness'),
'vol_diff': (self.vol_diff, 'VolDiff'),
'ave_dist': (self.measured_average_distance, 'AveDist'),
'haus_dist': (self.measured_hausdorff_distance, 'HausDist'),
'connected_elements': (self.connected_elements, 'TPc,FPc,FNc'),
'outline_error': (self.outline_error, 'OER,OEFP,OEFN'),
'detection_error': (self.detection_error, 'DE,DEFP,DEFN')
}
self.seg = seg_img
self.ref = ref_img
self.list_labels = list_labels
self.flag_empty = empty
self.measures = measures if measures is not None else self.m_dict
self.neigh = num_neighbors
self.pixdim = pixdim
def check_binary(self):
"""
Checks whether self.seg and self.ref are binary. This is to enable
measurements such as 'false positives', which only have meaning in
the binary case (what is positive/negative for multiple classes?)
"""
is_seg_binary, is_ref_binary = [((x > 0.5) == x).all()
for x in [self.seg, self.ref]]
# if (not is_ref_binary) or (not is_seg_binary):
# raise ValueError("The input segmentation/reference images"
# " must be binary for this function.")
def __FPmap(self):
"""
This function calculates the false positive map from binary
segmentation and reference maps
:return: FP map
"""
self.check_binary()
return np.asarray((self.seg - self.ref) > 0.0, dtype=np.float32)
def __FNmap(self):
"""
This function calculates the false negative map
:return: FN map
"""
self.check_binary()
return np.asarray((self.ref - self.seg) > 0.0, dtype=np.float32)
def __TPmap(self):
"""
This function calculates the true positive map (i.e. how many
reference voxels are positive)
:return: TP map
"""
self.check_binary()
return np.logical_and(self.ref > 0.5, self.seg > 0.5).astype(float)
def __TNmap(self):
"""
This function calculates the true negative map
:return: TN map
"""
self.check_binary()
return np.logical_and(self.ref < 0.5, self.seg < 0.5).astype(float)
def __union_map(self):
"""
This function calculates the union map between segmentation and
reference image
:return: union map
"""
self.check_binary()
return np.logical_or(self.ref, self.seg).astype(float)
def __intersection_map(self):
"""
This function calculates the intersection between segmentation and
reference image
:return: intersection map
"""
self.check_binary()
return np.multiply(self.ref, self.seg)
def n_pos_ref(self):
return np.sum(self.ref)
def n_neg_ref(self):
self.check_binary()
return np.sum(self.ref == 0)
def n_pos_seg(self):
return np.sum(self.seg)
def n_neg_seg(self):
return np.sum(1 - self.seg)
def fp(self):
return np.sum(self.__FPmap())
def fn(self):
return np.sum(self.__FNmap())
def tp(self):
return np.sum(self.__TPmap())
def tn(self):
return np.sum(self.__TNmap())
def n_intersection(self):
return np.sum(self.__intersection_map())
def n_union(self):
return np.sum(self.__union_map())
def sensitivity(self):
return self.tp() / self.n_pos_ref()
def specificity(self):
return self.tn() / self.n_neg_ref()
def accuracy(self):
return (self.tn() + self.tp()) / \
(self.tn() + self.tp() + self.fn() + self.fp())
def false_positive_rate(self):
return self.fp() / self.n_neg_ref()
def positive_predictive_values(self):
if self.flag_empty:
return -1
return self.tp() / (self.tp() + self.fp())
def negative_predictive_values(self):
"""
This function calculates the negative predictive value ratio between
the number of true negatives and the total number of negative elements
:return:
"""
return self.tn() / (self.fn() + self.tn())
def dice_score(self):
"""
This function returns the dice score coefficient between a reference
and segmentation images
:return: dice score
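Example (an illustrative sketch with made-up arrays;
Dice = 2*TP / (|ref| + |seg|)):
>>> import numpy as np
>>> round(PairwiseMeasures(np.array([1, 1, 0, 0]),
...                        np.array([1, 0, 0, 0])).dice_score(), 3)
0.667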
"""
return 2 * self.tp() / np.sum(self.ref + self.seg)
def intersection_over_union(self):
"""
This function returns the intersection over union ratio (the definition
of the Jaccard coefficient)
:return:
"""
return self.n_intersection() / self.n_union()
def jaccard(self):
"""
This function returns the jaccard coefficient (defined as
intersection over union)
:return: jaccard coefficient
"""
return self.intersection_over_union()
def informedness(self):
"""
This function calculates the informedness between the segmentation
and the reference
:return: informedness
"""
return self.sensitivity() + self.specificity() - 1
def markedness(self):
"""
This function calculates the markedness
:return:
"""
return self.positive_predictive_values() + \
self.negative_predictive_values() - 1
def list_labels(self):
if self.list_labels is None:
return ()
return tuple(np.unique(self.list_labels))
def vol_diff(self):
"""
This function calculates the ratio of difference in volume between
the reference and segmentation images.
:return: vol_diff
"""
return np.abs(self.n_pos_ref() - self.n_pos_seg()) / self.n_pos_ref()
# @CacheFunctionOutput
# def _boundaries_dist_mat(self):
# dist = DistanceMetric.get_metric('euclidean')
# border_ref = MorphologyOps(self.ref, self.neigh).border_map()
# border_seg = MorphologyOps(self.seg, self.neigh).border_map()
# coord_ref = np.multiply(np.argwhere(border_ref > 0), self.pixdim)
# coord_seg = np.multiply(np.argwhere(border_seg > 0), self.pixdim)
# pairwise_dist = dist.pairwise(coord_ref, coord_seg)
# return pairwise_dist
def measured_distance(self):
"""
This function calculates the average symmetric distance and the
hausdorff distance between a segmentation and a reference image
:return: hausdorff distance and average symmetric distance
"""
ref_border_dist, seg_border_dist, ref_border, \
seg_border = self.border_distance()
average_distance = (np.sum(ref_border_dist) + np.sum(
seg_border_dist)) / (np.sum(self.ref + self.seg))
hausdorff_distance = np.max(
[np.max(ref_border_dist), np.max(seg_border_dist)])
return hausdorff_distance, average_distance
def measured_average_distance(self):
"""
This function returns only the average distance when calculating the
distances between segmentation and reference
:return:
"""
return self.measured_distance()[1]
def measured_hausdorff_distance(self):
"""
This function returns only the hausdorff distance when calculating the
distances between segmentation and reference
:return:
"""
return self.measured_distance()[0]
# def average_distance(self):
# pairwise_dist = self._boundaries_dist_mat()
# return (np.sum(np.min(pairwise_dist, 0)) + \
# np.sum(np.min(pairwise_dist, 1))) / \
# (np.sum(self.ref + self.seg))
#
# def hausdorff_distance(self):
# pairwise_dist = self._boundaries_dist_mat()
# return np.max((np.max(np.min(pairwise_dist, 0)),
# np.max(np.min(pairwise_dist, 1))))
def connected_elements(self):
"""
This function returns the number of FP FN and TP in terms of
connected components.
:return: Number of true positive connected components, Number of
false positives connected components, Number of false negatives
connected components
"""
blobs_ref, blobs_seg, init = self._connected_components()
list_blobs_ref = range(1, blobs_ref[1])
list_blobs_seg = range(1, blobs_seg[1])
mul_blobs_ref = np.multiply(blobs_ref[0], init)
mul_blobs_seg = np.multiply(blobs_seg[0], init)
list_TP_ref = np.unique(mul_blobs_ref[mul_blobs_ref > 0])
list_TP_seg = np.unique(mul_blobs_seg[mul_blobs_seg > 0])
list_FN = [x for x in list_blobs_ref if x not in list_TP_ref]
list_FP = [x for x in list_blobs_seg if x not in list_TP_seg]
return len(list_TP_ref), len(list_FP), len(list_FN)
def connected_errormaps(self):
"""
This function calculates the error maps from the connected components
:return:
"""
blobs_ref, blobs_seg, init = self._connected_components()
list_blobs_ref = range(1, blobs_ref[1])
list_blobs_seg = range(1, blobs_seg[1])
mul_blobs_ref = np.multiply(blobs_ref[0], init)
mul_blobs_seg = np.multiply(blobs_seg[0], init)
list_TP_ref = np.unique(mul_blobs_ref[mul_blobs_ref > 0])
list_TP_seg = np.unique(mul_blobs_seg[mul_blobs_seg > 0])
list_FN = [x for x in list_blobs_ref if x not in list_TP_ref]
list_FP = [x for x in list_blobs_seg if x not in list_TP_seg]
# print(np.max(blobs_ref),np.max(blobs_seg))
tpc_map = np.zeros_like(blobs_ref[0])
fpc_map = np.zeros_like(blobs_ref[0])
fnc_map = np.zeros_like(blobs_ref[0])
for i in list_TP_ref:
tpc_map[blobs_ref[0] == i] = 1
for i in list_TP_seg:
tpc_map[blobs_seg[0] == i] = 1
for i in list_FN:
fnc_map[blobs_ref[0] == i] = 1
for i in list_FP:
fpc_map[blobs_seg[0] == i] = 1
return tpc_map, fnc_map, fpc_map
def outline_error(self):
"""
This function calculates the outline error as defined in Wack et al.
:return: OER: Outline error ratio, OEFP: number of false positive
outline error voxels, OEFN: number of false negative outline error
elements
"""
TPcMap, _, _ = self.connected_errormaps()
OEFMap = self.ref - np.multiply(TPcMap, self.seg)
unique, counts = np.unique(OEFMap, return_counts=True)
# print(counts)
OEFN = counts[unique == 1]
OEFP = counts[unique == -1]
OEFN = 0 if len(OEFN) == 0 else OEFN[0]
OEFP = 0 if len(OEFP) == 0 else OEFP[0]
OER = 2 * (OEFN + OEFP) / (self.n_pos_seg() + self.n_pos_ref())
return OER, OEFP, OEFN
def detection_error(self):
"""
This function calculates the volume of detection error as defined in
Wack et al.
:return: DE: Total volume of detection error, DEFP: Detection error
false positives, DEFN: Detection error false negatives
"""
TPcMap, FNcMap, FPcMap = self.connected_errormaps()
DEFN = np.sum(FNcMap)
DEFP = np.sum(FPcMap)
return DEFN + DEFP, DEFP, DEFN
def header_str(self):
result_str = [self.m_dict[key][1] for key in self.measures]
result_str = ',' + ','.join(result_str)
return result_str
def to_string(self, fmt='{:.4f}'):
result_str = ""
list_space = ['com_ref', 'com_seg', 'list_labels']
for key in self.measures:
result = self.m_dict[key][0]()
if key in list_space:
result_str += ' '.join(fmt.format(x) for x in result)
# coding: utf-8
"""
zweig
~~~~~
:copyright: 2014 by <NAME>
:license: BSD, see LICENSE.rst for details
"""
from __future__ import unicode_literals
import os
import sys
import ast
from io import StringIO
from contextlib import contextmanager
from itertools import chain
from functools import reduce
__version__ = '0.1.0'
__version_info__ = (0, 1, 0)
PY2 = sys.version_info[0] == 2
def walk_preorder(tree):
"""
Yields the nodes in the `tree` in preorder.
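
A small illustrative sketch (the node class names after the first two
vary with the Python version):

>>> import ast
>>> [type(node).__name__ for node in walk_preorder(ast.parse('x = 1'))][:2]
['Module', 'Assign']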
"""
yield tree
for child in ast.iter_child_nodes(tree):
for descendent in walk_preorder(child):
yield descendent
def to_source(tree):
"""
Returns the Python source code representation of the `tree`.
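
A small illustrative sketch:

>>> import ast
>>> to_source(ast.parse('del x')) == 'del x\n'
True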
"""
writer = _SourceWriter()
writer.visit(tree)
return writer.output.getvalue()
class _SourceWriter(ast.NodeVisitor):
def __init__(self):
self.output = StringIO()
self.indentation_level = 0
self.newline = True
@contextmanager
def indented(self):
self.indentation_level += 1
try:
yield
finally:
self.indentation_level -= 1
def write(self, source):
if self.newline:
self.newline = False
self.write_indentation()
self.output.write(source)
def write_indentation(self):
self.write(' ' * self.indentation_level)
def write_newline(self):
if self.newline:
self.newline = False
self.write('\n')
self.newline = True
def write_line(self, source):
self.write(source)
self.write_newline()
def write_identifier(self, identifier):
if PY2:
self.write(identifier.decode('ascii'))
else:
self.write(identifier)
def write_repr(self, obj):
if PY2:
self.write(repr(obj).decode('ascii'))
else:
self.write(repr(obj))
def writing_comma_separated(self, items):
if items:
for item in items[:-1]:
yield item
self.write(', ')
yield items[-1]
def write_comma_separated_nodes(self, nodes):
for node in self.writing_comma_separated(nodes):
self.visit(node)
@contextmanager
def writing_statement(self):
yield
self.write_newline()
def visit_statements(self, statements):
for statement in statements[:-1]:
self.visit(statement)
if isinstance(statement, (ast.FunctionDef, ast.ClassDef)):
self.write_newline()
self.visit(statements[-1])
def visit_Module(self, node):
self.visit_statements(node.body)
def visit_FunctionDef(self, node):
for decorator in node.decorator_list:
self.write('@')
self.visit(decorator)
self.write_newline()
self.write('def ')
self.write_identifier(node.name)
self.write('(')
self.visit(node.args)
self.write(')')
if not PY2 and node.returns is not None:
self.write(' -> ')
self.visit(node.returns)
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
def visit_ClassDef(self, node):
for decorator in node.decorator_list:
self.write('@')
self.visit(decorator)
self.write_newline()
self.write('class ')
self.write_identifier(node.name)
if (
node.bases or
(not PY2 and (node.keywords or node.starargs or node.kwargs))
):
self.write('(')
self.write_comma_separated_nodes(node.bases)
if not PY2:
if node.keywords:
if node.bases:
self.write(', ')
self.write_comma_separated_nodes(node.keywords)
if node.starargs is not None:
if node.bases or node.keywords:
self.write(', ')
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
if node.bases or node.keywords or node.starargs:
self.write(', ')
self.write('**')
self.visit(node.kwargs)
self.write(')')
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
def visit_Return(self, node):
with self.writing_statement():
self.write('return')
if node.value:
self.write(' ')
self.visit(node.value)
def visit_Delete(self, node):
with self.writing_statement():
self.write('del ')
self.write_comma_separated_nodes(node.targets)
def visit_Assign(self, node):
with self.writing_statement():
for target in node.targets:
self.visit(target)
self.write(' = ')
self.visit(node.value)
def visit_AugAssign(self, node):
with self.writing_statement():
self.visit(node.target)
self.write(' ')
self.visit(node.op)
self.write('= ')
self.visit(node.value)
if PY2:
def visit_Print(self, node):
with self.writing_statement():
self.write('print')
if node.values:
self.write(' ')
self.write_comma_separated_nodes(node.values)
if not node.nl:
self.write(',')
def visit_For(self, node):
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
if node.orelse:
self.write_line('else:')
with self.indented():
self.visit_statements(node.orelse)
def visit_While(self, node):
self.write('while ')
self.visit(node.test)
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
if node.orelse:
self.write_line('else:')
with self.indented():
self.visit_statements(node.orelse)
def visit_If(self, node):
self.write('if ')
self.visit(node.test)
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
if node.orelse:
self.write_line('else:')
with self.indented():
self.visit_statements(node.orelse)
def visit_With(self, node):
self.write('with ')
if PY2:
self.visit(node.context_expr)
if node.optional_vars:
self.write(' as ')
self.visit(node.optional_vars)
else:
self.write_comma_separated_nodes(node.items)
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
def visit_Raise(self, node):
with self.writing_statement():
self.write('raise')
if PY2:
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
else:
if node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
def visit_Try(self, node):
self.write_line('try:')
with self.indented():
self.visit_statements(node.body)
for excepthandler in node.handlers:
self.visit(excepthandler)
if node.orelse:
self.write_line('else:')
with self.indented():
self.visit_statements(node.orelse)
if node.finalbody:
self.write_line('finally:')
with self.indented():
self.visit_statements(node.finalbody)
if PY2:
def visit_TryExcept(self, node):
self.write_line('try:')
with self.indented():
self.visit_statements(node.body)
for excepthandler in node.handlers:
self.visit(excepthandler)
if node.orelse:
self.write_line('else:')
with self.indented():
self.visit_statements(node.orelse)
def visit_TryFinally(self, node):
self.write_line('try:')
with self.indented():
self.visit_statements(node.body)
self.write_line('finally:')
with self.indented():
self.visit_statements(node.finalbody)
def visit_Assert(self, node):
with self.writing_statement():
self.write('assert ')
self.visit(node.test)
if node.msg is not None:
self.write(', ')
self.visit(node.msg)
def visit_Import(self, node):
with self.writing_statement():
self.write('import ')
self.write_comma_separated_nodes(node.names)
def visit_ImportFrom(self, node):
with self.writing_statement():
self.write('from ')
if node.module is None:
self.write('.')
else:
self.write_identifier(node.module)
self.write(' import ')
self.write_comma_separated_nodes(node.names)
def visit_Global(self, node):
with self.writing_statement():
self.write('global ')
for name in self.writing_comma_separated(node.names):
self.write_identifier(name)
def visit_Nonlocal(self, node):
with self.writing_statement():
self.write('nonlocal ')
for name in self.writing_comma_separated(node.names):
self.write_identifier(name)
def visit_Expr(self, node):
with self.writing_statement():
self.visit(node.value)
def visit_Pass(self, node):
self.write_line('pass')
def visit_Break(self, node):
self.write_line('break')
def visit_Continue(self, node):
self.write_line('continue')
def visit_BoolOp(self, node):
def write_value(value):
if _requires_parentheses(node, value):
self.write('(')
self.visit(value)
self.write(')')
else:
self.visit(value)
for value in node.values[:-1]:
write_value(value)
self.visit(node.op)
write_value(node.values[-1])
def visit_BinOp(self, node):
if (
_requires_parentheses(node, node.left) or
PY2 and isinstance(node.left, ast.Num) and node.left.n < 0
):
self.write('(')
self.visit(node.left)
self.write(')')
else:
self.visit(node.left)
self.write(u' ')
self.visit(node.op)
self.write(u' ')
if _requires_parentheses(
ast.Mult() if isinstance(node.op, ast.Pow) else node,
node.right
):
self.write('(')
self.visit(node.right)
self.write(')')
else:
self.visit(node.right)
def visit_UnaryOp(self, node):
self.visit(node.op)
if _requires_parentheses(node, node.operand):
self.write('(')
self.visit(node.operand)
self.write(')')
else:
self.visit(node.operand)
def visit_Lambda(self, node):
self.write('lambda ')
self.visit(node.args)
self.write(': ')
self.visit(node.body)
def visit_IfExp(self, node):
if _requires_parentheses(node, node.body):
self.write('(')
self.visit(node.body)
self.write(')')
else:
self.visit(node.body)
self.write(' if ')
if _requires_parentheses(node, node.test):
self.write('(')
self.visit(node.test)
self.write(')')
else:
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Dict(self, node):
self.write('{')
items = list(zip(node.keys, node.values))
for key, value in self.writing_comma_separated(items):
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_Set(self, node):
self.write('{')
self.write_comma_separated_nodes(node.elts)
self.write('}')
def visit_ListComp(self, node):
self.write('[')
self.visit(node.elt)
for generator in node.generators:
self.visit(generator)
self.write(']')
def visit_SetComp(self, node):
self.write('{')
self.visit(node.elt)
for generator in node.generators:
self.visit(generator)
self.write('}')
def visit_DictComp(self, node):
self.write('{')
self.visit(node.key)
self.write(': ')
self.visit(node.value)
for generator in node.generators:
self.visit(generator)
self.write('}')
def visit_GeneratorExp(self, node):
self.write('(')
self.visit(node.elt)
for generator in node.generators:
self.visit(generator)
self.write(')')
def visit_Yield(self, node):
self.write('yield')
if node.value is not None:
self.write(' ')
self.visit(node.value)
def visit_YieldFrom(self, node):
self.write('yield from ')
self.visit(node.value)
def visit_Compare(self, node):
self.visit(node.left)
for op, comparator in zip(node.ops, node.comparators):
self.write(' ')
self.visit(op)
self.write(' ')
self.visit(comparator)
def visit_Call(self, node):
if _requires_parentheses(node, node.func):
self.write('(')
self.visit(node.func)
self.write(')')
else:
self.visit(node.func)
self.write('(')
self.write_comma_separated_nodes(node.args)
if node.keywords:
if node.args:
self.write(', ')
self.write_comma_separated_nodes(node.keywords)
if node.starargs is not None:
if node.args or node.keywords:
self.write(', ')
self.write('*')
self.visit(node.starargs)
if node.kwargs:
if node.args or node.keywords or node.starargs:
self.write(', ')
self.write('**')
self.visit(node.kwargs)
self.write(')')
if PY2:
def visit_Repr(self, node):
self.write('`')
self.visit(node.value)
self.write('`')
def visit_Num(self, node):
self.write_repr(node.n)
def visit_Str(self, node):
self.write_repr(node.s)
def visit_Bytes(self, node):
self.write_repr(node.s)
def visit_Ellipsis(self, node):
self.write('...')
def visit_Attribute(self, node):
if (
_requires_parentheses(node, node.value) and
not isinstance(node.value, ast.Attribute)
):
self.write('(')
self.visit(node.value)
self.write(')')
else:
self.visit(node.value)
self.write('.')
self.write_identifier(node.attr)
def visit_Subscript(self, node):
if (
_requires_parentheses(node, node.value) and
not isinstance(node.value, ast.Subscript)
):
self.write('(')
self.visit(node.value)
self.write(')')
else:
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Starred(self, node):
self.write('*')
self.visit(node.value)
def visit_Name(self, node):
self.write_identifier(node.id)
def visit_List(self, node):
self.write('[')
self.write_comma_separated_nodes(node.elts)
self.write(']')
def visit_Tuple(self, node):
self.write_comma_separated_nodes(node.elts)
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
if node.lower is None:
self.write(':')
self.visit(node.upper)
if node.step is not None:
if node.lower is None and node.upper is None:
self.write('::')
if node.lower is not None or node.upper is not None:
self.write(':')
self.visit(node.step)
if node.lower is None and node.upper is None and node.step is None:
self.write(':')
def visit_And(self, node):
self.write(' and ')
def visit_Or(self, node):
self.write(' or ')
def visit_Add(self, node):
self.write('+')
def visit_Sub(self, node):
self.write('-')
def visit_Mult(self, node):
self.write('*')
def visit_Div(self, node):
self.write('/')
def visit_Mod(self, node):
self.write('%')
def visit_Pow(self, node):
self.write('**')
def visit_LShift(self, node):
self.write('<<')
def visit_RShift(self, node):
self.write('>>')
def visit_BitOr(self, node):
self.write('|')
def visit_BitXor(self, node):
self.write('^')
def visit_BitAnd(self, node):
self.write('&')
def visit_FloorDiv(self, node):
self.write('//')
def visit_Invert(self, node):
self.write('~')
def visit_Not(self, node):
self.write('not ')
def visit_UAdd(self, node):
self.write('+')
def visit_USub(self, node):
self.write('-')
def visit_Eq(self, node):
self.write('==')
def visit_NotEq(self, node):
self.write('!=')
def visit_Lt(self, node):
self.write('<')
def visit_LtE(self, node):
self.write('<=')
def visit_Gt(self, node):
self.write('>')
def visit_GtE(self, node):
self.write('>=')
def visit_Is(self, node):
self.write('is')
def visit_IsNot(self, node):
self.write('is not')
def visit_In(self, node):
self.write('in')
def visit_NotIn(self, node):
self.write('not in')
def visit_comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
self.write(' if ')
for filter in node.ifs[:-1]:
self.visit(filter)
self.write(' if ')
self.visit(node.ifs[-1])
def visit_ExceptHandler(self, node):
self.write('except')
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.name is not None:
self.write(' as ')
if PY2:
self.visit(node.name)
else:
self.write(node.name)
self.write(':')
self.write_newline()
with self.indented():
self.visit_statements(node.body)
def visit_arguments(self, node):
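# Positional defaults align with the tail of node.args: only the last
# len(node.defaults) arguments carry defaults; the preceding ones do not.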
if node.args:
if node.defaults:
non_defaults = node.args[:-len(node.defaults)]
defaults = node.args[-len(node.defaults):]
else:
non_defaults = node.args
defaults = []
if non_defaults:
self.write_comma_separated_nodes(non_defaults)
if defaults:
if non_defaults:
self.write(', ')
for argument, default in zip(defaults, node.defaults):
self.visit(argument)
self.write('=')
self.visit(default)
if node.vararg:
if node.args:
self.write(', ')
self.write('*')
self.write_identifier(node.vararg)
if not PY2 and node.kwonlyargs:
if not node.vararg:
self.write('*, ')
arguments = list(zip(node.kwonlyargs, node.kw_defaults))
if arguments:
for argument, default in self.writing_comma_separated(arguments):
self.visit(argument)
if
#! /usr/bin/env python3
# Source: centerorbit/cockroach, scripts/release-notes.py
#
# Show a compact release note summary of a range of Git commits.
#
# Example use: release-notes.py --help
#
# Note: the first commit in the range is excluded!
#
# Requires:
# - GitPython https://pypi.python.org/pypi/GitPython/
# - You need to configure your local repo to pull the PR refs from
# GitHub. To do this, add a line like:
# fetch = +refs/pull/*/head:refs/pull/origin/*
# to the GitHub remote section of .git/config.
#
# Disclaimer: this program is provided without warranties of any kind,
# including suitability for any purpose. The author(s) will not be
# responsible if this script eats your left sock.
#
# Known limitations:
#
# - if different people with the same name contribute, this script
# will be confused. (it will merge their work under one entry).
# - the list of aliases below must be manually modified when
# contributors change their git name and/or email address.
#
# Note: there are unit tests in the release-notes subdirectory!
import sys
import itertools
import re
import os
import datetime, time
import subprocess
from git import Repo
from optparse import OptionParser
from git.repo.fun import name_to_object
from git.util import Stats
### Global behavior constants ###
# minimum sha length to disambiguate
shamin = 9
# FIXME(knz): This probably needs to use the .mailmap.
author_aliases = {
'changangela': "<NAME>",
'dianasaur323': "<NAME>",
'kena': "Raphael 'kena' Poss",
'vivekmenezes': "<NAME>",
'RaduBerinde': "<NAME>",
'<NAME>': "<NAME>",
'marc': "<NAME>",
'Lauren': "<NAME>",
'lhirata' : "<NAME>",
'Emmanuel': "<NAME>",
'MBerhault': "<NAME>",
'Nate': "<NAME>",
'a6802739': "Song Hao",
'Abhemailk <EMAIL>': "<NAME>",
'rytaft': "<NAME>",
'songhao': "<NAME>",
'solongordon': "<NAME>",
'tim-o': "<NAME>",
'Amruta': "<NAME>",
'yuzefovich': "<NAME>",
'madhavsuresh': "<NAME>",
'<NAME>': "<NAME>",
}
# FIXME(knz): This too.
crdb_folk = set([
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
])
# Section titles for release notes.
relnotetitles = {
'cli change': "Command-line changes",
'sql change': "SQL language changes",
'admin ui change': "Admin UI changes",
'general change': "General changes",
'build change': "Build changes",
'enterprise change': "Enterprise edition changes",
'backward-incompatible change': "Backward-incompatible changes",
'performance improvement': "Performance improvements",
'bug fix': "Bug fixes",
}
# Order in which to show the sections.
relnote_sec_order = [
'backward-incompatible change',
'general change',
'enterprise change',
'sql change',
'cli change',
'admin ui change',
'bug fix',
'performance improvement',
'build change',
]
# Release note category common misspellings.
cat_misspells = {
'sql' : 'sql change',
'general': 'general change',
'core change': 'general change',
'bugfix': 'bug fix',
'performance change' : 'performance improvement',
'performance' : 'performance improvement',
'ui' : 'admin ui change',
'backwards-incompatible change': 'backward-incompatible change',
'enterprise': 'enterprise change'
}
## Release note format ##
# The following release note formats have been seen in the wild:
#
# Release note (xxx): yyy <- canonical
# Release Notes: None
# Release note (xxx): yyy
# Release note (xxx) : yyy
# Release note: (xxx): yyy
# Release note: xxx: yyy
# Release note: (xxx) yyy
# Release note: yyy (no category)
# Release note (xxx, zzz): yyy
norelnote = re.compile(r'^[rR]elease [nN]otes?: *[Nn]one', flags=re.M)
# Captures :? (xxx) ?: yyy
form1 = r':? *\((?P<cat1>[^)]*)\) *:?'
# Captures : xxx: yyy - this must be careful not to capture too much, we just accept one or two words
form2 = r': *(?P<cat2>[^ ]+(?: +[^ ]+)?) *:'
# Captures : yyy - no category
form3 = r':(?P<cat3>)'
relnote = re.compile(r'(?:^|[\n\r])[rR]elease [nN]otes? *(?:' + form1 + '|' + form2 + '|' + form3 + r') *(?P<note>.*)$', flags=re.S)
coauthor = re.compile(r'^Co-authored-by: (?P<name>[^<]*) <(?P<email>.*)>', flags=re.M)
fixannot = re.compile(r'^([fF]ix(es|ed)?|[cC]lose(d|s)?) #', flags=re.M)
## Merge commit format ##
# The following merge commits have been seen in the wild:
#
# Merge pull request #XXXXX from ... <- GitHub merges
# Merge #XXXXX #XXXXX #XXXXX <- Bors merges
merge_numbers = re.compile(r'^Merge( pull request)?(?P<numbers>( #[0-9]+)+)')
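# A small sketch of how these patterns are meant to be used (the commit text
# below is made up purely for illustration):
#
#   line = "Release note (bug fix): Fixed a crash during range splits."
#   m = relnote.search(line)
#   m.group('cat1')  -> 'bug fix'
#   m.group('note')  -> 'Fixed a crash during range splits.'
#
#   merge_numbers.match("Merge pull request #12345 from foo/bar").group('numbers')
#   -> ' #12345'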
### Initialization / option parsing ###
parser = OptionParser()
parser.add_option("-k", "--sort-key", dest="sort_key", default="title",
help="sort by KEY (pr, title, insertions, deletions, files, sha, date; default: title)", metavar="KEY")
parser.add_option("-r", "--reverse", action="store_true", dest="reverse_sort", default=False,
help="reverse sort")
parser.add_option("-f", "--from", dest="from_commit",
help="list history from COMMIT. Note: the first commit is excluded.", metavar="COMMIT")
parser.add_option("-t", "--until", dest="until_commit", default="HEAD",
help="list history up and until COMMIT (default: HEAD)", metavar="COMMIT")
parser.add_option("-p", "--pull-ref", dest="pull_ref_prefix", default="refs/pull/origin",
help="prefix for pull request refs (default: refs/pull/origin)", metavar="PREFIX")
parser.add_option("--hide-unambiguous-shas", action="store_true", dest="hide_shas", default=False,
help="omit commit SHAs from the release notes and per-contributor sections")
parser.add_option("--hide-per-contributor-section", action="store_true", dest="hide_per_contributor", default=False,
help="omit the per-contributor section")
parser.add_option("--hide-downloads-section", action="store_true", dest="hide_downloads", default=False,
help="omit the email sign-up and downloads section")
parser.add_option("--hide-header", action="store_true", dest="hide_header", default=False,
help="omit the title and date header")
parser.add_option("--exclude-from", dest="exclude_from_commit",
help="exclude history starting after COMMIT. Note: COMMIT itself is excluded.", metavar="COMMIT")
parser.add_option("--exclude-until", dest="exclude_until_commit",
help="exclude history ending at COMMIT", metavar="COMMIT")
parser.add_option("--one-line", dest="one_line", action="store_true", default=False,
help="unwrap release notes on a single line")
(options, args) = parser.parse_args()
sortkey = options.sort_key
revsort = options.reverse_sort
pull_ref_prefix = options.pull_ref_prefix
hideshas = options.hide_shas
hidepercontributor = options.hide_per_contributor
hidedownloads = options.hide_downloads
hideheader = options.hide_header
repo = Repo('.')
heads = repo.heads
def reformat_note(note_lines):
sep = '\n'
if options.one_line:
sep = ' '
return sep.join(note_lines).strip()
# Check that pull_ref_prefix is valid
testrefname = "%s/1" % pull_ref_prefix
try:
repo.commit(testrefname)
except:
print("Unable to find pull request refs at %s." % pull_ref_prefix, file=sys.stderr)
print("Is your repo set up to fetch them? Try adding", file=sys.stderr)
print(" fetch = +refs/pull/*/head:%s/*" % pull_ref_prefix, file=sys.stderr)
print("to the GitHub remote section of .git/config.", file=sys.stderr)
exit(1)
def find_commits(from_commit_ref, until_commit_ref):
try:
firstCommit = repo.commit(from_commit_ref)
except:
print("Unable to find the first commit of the range.", file=sys.stderr)
print("No ref named %s." % from_commit_ref, file=sys.stderr)
exit(1)
try:
commit = repo.commit(until_commit_ref)
except:
print("Unable to find the last commit of the range.", file=sys.stderr)
print("No ref named %s." % until_commit_ref, file=sys.stderr)
exit(1)
return firstCommit, commit
firstCommit, commit = find_commits(options.from_commit, options.until_commit)
if commit == firstCommit:
print("Commit range is empty!", file=sys.stderr)
print(parser.get_usage(), file=sys.stderr)
print("Example use:", file=sys.stderr)
print(" %s --help" % sys.argv[0], file=sys.stderr)
print(" %s --from xxx >output.md" % sys.argv[0], file=sys.stderr)
print(" %s --from xxx --until yyy >output.md" % sys.argv[0], file=sys.stderr)
print("Note: the first commit is excluded. Use e.g.: --from <prev-release-tag> --until <new-release-candidate-sha>", file=sys.stderr)
exit(0)
excludedFirst, excludedLast = None, None
if options.exclude_from_commit or options.exclude_until_commit:
if not options.exclude_from_commit or not options.exclude_until_commit:
print("Both -xf and -xt must be specified, or not at all.")
exit(1)
excludedFirst, excludedLast = find_commits(options.exclude_from_commit, options.exclude_until_commit)
### Reading data from repository ###
def identify_commit(commit):
return '%s ("%s", %s)' % (
commit.hexsha, commit.message.split('\n',1)[0],
datetime.datetime.fromtimestamp(commit.committed_date).ctime())
def check_reachability(firstCommit, commit):
# Is the first commit reachable from the current one?
base = repo.merge_base(firstCommit, commit)
if len(base) == 0:
print("error: %s:%s\nand %s:%s\nhave no common ancestor" % (
options.from_commit, identify_commit(firstCommit),
options.until_commit, identify_commit(commit)), file=sys.stderr)
exit(1)
commonParent = base[0]
if firstCommit != commonParent:
print("warning: %s:%s\nis not an ancestor of %s:%s!" % (
options.from_commit, identify_commit(firstCommit),
options.until_commit, identify_commit(commit)), file=sys.stderr)
print(file=sys.stderr)
ageindays = int((firstCommit.committed_date - commonParent.committed_date)/86400)
prevlen = sum((1 for x in repo.iter_commits(commonParent.hexsha + '...' + firstCommit.hexsha)))
print("The first common ancestor is %s" % identify_commit(commonParent), file=sys.stderr)
print("which is %d commits older than %s:%s\nand %d days older. Using that as origin." %\
(prevlen, options.from_commit, identify_commit(firstCommit), ageindays), file=sys.stderr)
print(file=sys.stderr)
firstCommit = commonParent
return firstCommit, commit
firstCommit, commit = check_reachability(firstCommit, commit)
options.from_commit = firstCommit.hexsha
def extract_release_notes(commit):
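# Walk the commit message line by line and collect (category, note) pairs.
# State: 'innote' is True while the body of a note is being accumulated,
# 'foundnote' records that at least one release-note marker was seen
# (including "Release note: None"), and 'cat' holds the current category.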
msglines = commit.message.split('\n')
curnote = []
innote = False
foundnote = False
cat = None
notes = []
for line in msglines:
m = coauthor.search(line)
if m is not None:
# A Co-authored-by line finishes the parsing of the commit message,
# because such lines only appear at the end.
break
m = fixannot.search(line)
if m is not None:
# Fix/Close etc. Ignore.
continue
m = norelnote.search(line)
if m is not None:
# Release note: None
#
# Remember we found a note (so the commit is not marked as "missing
# a release note"), but we won't collect it.
foundnote = True
continue
m = relnote.search(line)
if m is None:
# Current line does not contain a release note separator.
# If we were already collecting a note, continue collecting it.
if innote:
curnote.append(line)
continue
# We have a release note boundary. If we were collecting a
# note already, complete it.
if innote:
notes.append((cat, reformat_note(curnote)))
curnote = []
innote = False
# Start a new release note.
firstline = m.group('note').strip()
if firstline.lower() == 'none':
# Release note: none - there's no note yet.
continue
foundnote = True
innote = True
# Capitalize the first line.
if firstline != "":
firstline = firstline[0].upper() + firstline[1:]
curnote = [firstline]
cat = m.group('cat1')
if cat is None:
cat = m.group('cat2')
if cat
test_damping_torques = test_rod.damping_torques
# Compare damping forces and torques computed using the in-class functions with the values computed above
assert_allclose(test_damping_forces, damping_forces, atol=Tolerance.atol())
assert_allclose(test_damping_torques, damping_torques, atol=Tolerance.atol())
# alpha is the base angle of the isosceles triangle
@pytest.mark.parametrize("alpha", np.radians([22.5, 30, 45, 60, 70]))
def test_case_bend_straight_rod(self, alpha):
"""
In this test case we initialize a straight rod with 2 elements
and numerically bend the rod. We modify node positions and directors
to make an isosceles triangle. Then we first compute the curvature
between the two elements and the angle between them.
Finally, we compute the bend twist couples and compare them with the
correct solution.
This test function tests
_compute_bending_twist_strains
_compute_internal_torques
only bend_twist_couple terms.
"""
n_elem = 2
initial, test_rod = constructor(n_elem, nu=0.0)
base_length = initial.base_length
# Change the coordinates of nodes, artificially bend the rod.
# /\
# ------ ==> / \
# / \
# Here I chose an isosceles triangle.
length = base_length / n_elem
position = np.zeros((MaxDimension.value(), n_elem + 1))
position[..., 0] = np.array([0.0, 0.0, 0.0])
position[..., 1] = length * np.array([0.0, np.sin(alpha), np.cos(alpha)])
position[..., 2] = length * np.array([0.0, 0.0, 2 * np.cos(alpha)])
test_rod.position_collection = position
# Set the directors manually. This is easy since we have two elements.
directors = np.zeros((MaxDimension.value(), MaxDimension.value(), n_elem))
directors[..., 0] = np.array(
(
[1.0, 0.0, 0.0],
[0.0, np.cos(alpha), -np.sin(alpha)],
[0.0, np.sin(alpha), np.cos(alpha)],
)
)
directors[..., -1] = np.array(
(
[1.0, 0.0, 0.0],
[0.0, np.cos(alpha), np.sin(alpha)],
[0, -np.sin(alpha), np.cos(alpha)],
)
)
test_rod.director_collection = directors
# Compute the Voronoi rest length. Since element lengths are equal
# in this test case, the rest Voronoi length can be computed by
# dividing the base length by the number of elements.
rest_voronoi_length = base_length / n_elem
# Now compute geometry and dilatation, which we need for curvature calculations.
_compute_all_dilatations(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.dilatation,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.voronoi_dilatation,
)
_compute_dilatation_rate(
test_rod.position_collection,
test_rod.velocity_collection,
test_rod.lengths,
test_rod.rest_lengths,
test_rod.dilatation_rate,
)
_compute_bending_twist_strains(
test_rod.director_collection, test_rod.rest_voronoi_lengths, test_rod.kappa
)
# The generalized rotation per unit length is given by rest_D_i * Kappa_i.
# Thus, to get the angle between the two elements, we need to multiply
# kappa by rest_D_i. But this gives the exterior vertex angle of the
# triangle. Think of it as rotating element 1 clockwise until it aligns
# with element 2.
#
# \
# /\ \ 1
# 1 / \ 2 ==> \
# / \ \
# \ 2
# \
#
# So for this transformation we use the exterior vertex angle of the
# isosceles triangle. The exterior vertex angle is the sum of the base
# angles; since this is an isosceles triangle, it is 2 * base_angle.
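# For example, with alpha = 45 degrees the expected exterior vertex angle is
# 2 * 45 = 90 degrees.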
correct_angle = np.degrees(np.array([2 * alpha, 0.0, 0.0]).reshape(3, 1))
test_angle = np.degrees(test_rod.kappa * test_rod.rest_voronoi_lengths)
assert_allclose(test_angle, correct_angle, atol=Tolerance.atol())
# Now let's test the bending stress terms in the internal torques equation.
# Here we will test the bend twist couple 2D and bend twist couple 3D terms of the
# internal torques equation. Set the bending matrix to the identity matrix for simplification.
test_rod.bend_matrix[:] = np.repeat(
np.identity(3)[:, :, np.newaxis], n_elem - 1, axis=2
)
# We need to compute the shear stress for the internal torque equation.
# Shear stress is not used in this test case. To make sure the shear
# stress does not contribute to the total torque, we use an assert check.
_compute_internal_bending_twist_stresses_from_model(
test_rod.director_collection,
test_rod.rest_voronoi_lengths,
test_rod.internal_couple,
test_rod.bend_matrix,
test_rod.kappa,
test_rod.rest_kappa,
)
assert_allclose(
test_rod.internal_stress,
np.zeros(3 * n_elem).reshape(3, n_elem),
atol=Tolerance.atol(),
)
# Make sure voronoi dilatation is 1
assert_allclose(
test_rod.voronoi_dilatation, np.array([1.0]), atol=Tolerance.atol()
)
# Compute correct torques, first compute correct kappa.
correct_kappa = np.radians(correct_angle / rest_voronoi_length)
# We only need to compute the bend twist couple 2D term for comparison,
# because the bend twist couple 3D term is already zero, due to the cross product.
# TODO: Extend this test to more than two elements.
correct_torques = np.zeros((MaxDimension.value(), n_elem))
correct_torques[..., 0] = correct_kappa[..., 0]
correct_torques[..., -1] = -1.0 * correct_kappa[..., -1]
_compute_internal_torques(
test_rod.position_collection,
test_rod.velocity_collection,
test_rod.tangents,
test_rod.lengths,
test_rod.rest_lengths,
test_rod.director_collection,
test_rod.rest_voronoi_lengths,
test_rod.bend_matrix,
test_rod.rest_kappa,
test_rod.kappa,
test_rod.voronoi_dilatation,
test_rod.mass_second_moment_of_inertia,
test_rod.omega_collection,
test_rod.internal_stress,
test_rod.internal_couple,
test_rod.dilatation,
test_rod.dilatation_rate,
test_rod.dissipation_constant_for_torques,
test_rod.damping_torques,
test_rod.internal_torques,
)
assert_allclose(
test_rod.internal_torques, correct_torques, atol=Tolerance.atol()
)
def test_case_shear_torque(self):
"""
In this test case we initialize a straight rod with two elements
and set the bending matrix to zero. This gives us the opportunity to
decouple the shear torque from the twist and bending torques in the
internal torques equation. Then we modify the node positions of the
second element and introduce artificial bending. Finally, we compute the
shear torque using the internal torque function and compare it with the
analytical value.
This test case is for testing shear torque term,
in internal torques equation.
Tested function
_compute_internal_torques
"""
n_elem = 2
initial, test_rod = constructor(n_elem, nu=0.0)
position = np.zeros((MaxDimension.value(), n_elem + 1))
position[..., 0] = np.array([0.0, 0.0, 0.0])
position[..., 1] = np.array([0.0, 0.0, 0.5])
position[..., 2] = np.array([0.0, -0.3, 0.9])
test_rod.position_collection = position
# To simplify the computations, choose the shear matrix to be the identity matrix.
test_rod.shear_matrix[:] = np.repeat(
np.identity(3)[:, :, np.newaxis], n_elem - 1, axis=2
)
# Internal shear stress function is tested previously
_compute_internal_shear_stretch_stresses_from_model(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.dilatation,
test_rod.voronoi_dilatation,
test_rod.director_collection,
test_rod.sigma,
test_rod.rest_sigma,
test_rod.shear_matrix,
test_rod.internal_stress,
)
correct_shear_torques = np.zeros((MaxDimension.value(), n_elem))
# Correct shear torques can be computed easily.
# Procedure:
# 1) Q = [1., 0., 0.; 0., 1., 0.; 0., 0., 1.]
# 2) t = [0., -0.6, 0.8]
# 3) sigma = (eQt-d3) = [0.0, -0.6, -0.2]
# 4) Qt = [0., -0.6, 0.8]
# 5) torque = Qt x sigma
# Note that this is not generic, but it does not have to be; it is testing the functions.
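# (Numerically, Qt x sigma = [0.6, 0.0, 0.0] here; the expected value below is
# half of that, which we assume comes from the rest-length factor (0.5) applied
# to the shear couple in the internal torque computation.)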
correct_shear_torques[..., -1] = np.array([0.3, 0.0, 0.0])
# Set the bending matrix to the zero matrix, because we don't want
# any contribution from bending in the total internal torques.
test_rod.bend_matrix[:] = 0.0
_compute_internal_torques(
test_rod.position_collection,
test_rod.velocity_collection,
test_rod.tangents,
test_rod.lengths,
test_rod.rest_lengths,
test_rod.director_collection,
test_rod.rest_voronoi_lengths,
test_rod.bend_matrix,
test_rod.rest_kappa,
test_rod.kappa,
test_rod.voronoi_dilatation,
test_rod.mass_second_moment_of_inertia,
test_rod.omega_collection,
test_rod.internal_stress,
test_rod.internal_couple,
test_rod.dilatation,
test_rod.dilatation_rate,
test_rod.dissipation_constant_for_torques,
test_rod.damping_torques,
test_rod.internal_torques,
)
assert_allclose(
test_rod.internal_torques, correct_shear_torques, atol=Tolerance.atol()
)
def test_case_lagrange_transport_unsteady_dilatation(self):
"""
In this test case, we initialize a straight rod. Then we modify the
angular velocity of the elements and set the mass moment of inertia
to the identity matrix. With this setup, the torque due to the
Lagrangian transport term must be zero, because J w x w with J = I
gives w x w = 0.
Next we test the unsteady dilatation contribution to the internal
torques by setting the dilatation rate to 1 and recovering the
initialized angular velocity back: de/dt * J w = w, with de/dt = 1
and J = I.
This test function tests
_compute_internal_torques
only the Lagrangian transport and
unsteady dilatation terms, tested numerically.
Note that viscous dissipation is set to 0,
since we don't want any contribution from
the damping torque.
"""
n_elem = 2
initial, test_rod = constructor(n_elem, nu=0.0)
# TODO: find one more test in which you don't set J=I, maybe some analytical test
# Set the mass moment of inertia matrix to the identity matrix for simplification.
# When the Lagrangian transport term is tested, the total torque computed by the function has
# to be zero, because the term is (J.w/e) x w; if J = I then (w x w)/e = 0.
test_rod.mass_second_moment_of_inertia[:] = np.repeat(
np.identity(3)[:, :, np.newaxis], n_elem, axis=2
)
_compute_shear_stretch_strains(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.dilatation,
test_rod.voronoi_dilatation,
test_rod.director_collection,
test_rod.sigma,
)
_compute_bending_twist_strains(
test_rod.director_collection, test_rod.rest_voronoi_lengths, test_rod.kappa
)
_compute_internal_forces(
test_rod.position_collection,
test_rod.volume,
test_rod.lengths,
test_rod.tangents,
test_rod.radius,
test_rod.rest_lengths,
test_rod.rest_voronoi_lengths,
test_rod.dilatation,
test_rod.voronoi_dilatation,
test_rod.director_collection,
test_rod.sigma,
test_rod.rest_sigma,
test_rod.shear_matrix,
test_rod.internal_stress,
test_rod.velocity_collection,
test_rod.dissipation_constant_for_forces,
test_rod.damping_forces,
test_rod.internal_forces,
)
# Let's set the angular velocity omega to arbitrary numbers.
# Make sure the shape of the random vector is correct.
omega = np.zeros(3 * n_elem).reshape(3, n_elem)
for i in range(0, n_elem):
omega[..., i] = np.random.rand(3)
test_rod.omega_collection = omega
_compute_internal_torques(
test_rod.position_collection,
test_rod.velocity_collection,
test_rod.tangents,
test_rod.lengths,
test_rod.rest_lengths,
test_rod.director_collection,
test_rod.rest_voronoi_lengths,
test_rod.bend_matrix,
test_rod.rest_kappa,
test_rod.kappa,
test_rod.voronoi_dilatation,
test_rod.mass_second_moment_of_inertia,
test_rod.omega_collection,
test_rod.internal_stress,
test_rod.internal_couple,
test_rod.dilatation,
test_rod.dilatation_rate,
test_rod.dissipation_constant_for_torques,
test_rod.damping_torques,
test_rod.internal_torques,
)
# The computed internal torques have to be zero. Internal torques created by the Lagrangian
# transport term are zero because the mass moment of inertia is the identity matrix and w x w = 0.
# Torques due to unsteady dilatation have to be zero because the dilatation rate is zero.
assert_allclose(
test_rod.internal_torques,
np.zeros(3 * n_elem).reshape(3, n_elem),
atol=Tolerance.atol(),
)
# Now let's test torques due to unsteady dilatation. For that,
# -*- coding: utf-8 -*-
DESC = "cme-2019-10-29"
INFO = {
"DescribeTasks": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "ProjectId",
"desc": "项目 Id。"
},
{
"name": "TaskTypeSet",
"desc": "任务类型集合,取值有:\n<li>VIDEO_EDIT_PROJECT_EXPORT:视频编辑项目导出。</li>"
},
{
"name": "StatusSet",
"desc": "任务状态集合,取值有:\n<li>PROCESSING:处理中;</li>\n<li>SUCCESS:成功;</li>\n<li>FAIL:失败。</li>"
},
{
"name": "Offset",
"desc": "分页返回的起始偏移量,默认值:0。"
},
{
"name": "Limit",
"desc": "分页返回的记录条数,默认值:10。"
}
],
"desc": "获取任务列表,支持条件筛选,返回对应的任务基础信息列表。"
},
"DescribeTeams": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "TeamIds",
"desc": "团队 ID 列表,限30个。"
}
],
"desc": "获取指定团队的信息。"
},
"ExportVideoEditProject": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "ProjectId",
"desc": "项目 Id。"
},
{
"name": "Definition",
"desc": "导出模板 Id,目前不支持自定义创建,只支持下面的预置模板 Id。\n<li>10:分辨率为 480P,输出视频格式为 MP4;</li>\n<li>11:分辨率为 720P,输出视频格式为 MP4;</li>\n<li>12:分辨率为 1080P,输出视频格式为 MP4。</li>"
},
{
"name": "ExportDestination",
"desc": "导出目标。\n<li>CME:云剪,即导出为云剪素材;</li>\n<li>VOD:云点播,即导出为云点播媒资。</li>"
},
{
"name": "CMEExportInfo",
"desc": "导出的云剪素材信息。指定 ExportDestination = CME 时有效。"
},
{
"name": "VODExportInfo",
"desc": "导出的云点播媒资信息。指定 ExportDestination = VOD 时有效。"
}
],
"desc": "导出视频编辑项目,支持指定输出的模板。"
},
"DescribeSharedSpace": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "Authorizee",
"desc": "被授权目标实体。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "获取共享空间。当实体A对实体B授权某资源以后,实体B的共享空间就会增加实体A。"
},
"GrantResourceAuthorization": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "Owner",
"desc": "资源所属实体。"
},
{
"name": "Resources",
"desc": "被授权资源。"
},
{
"name": "Authorizees",
"desc": "被授权目标实体。"
},
{
"name": "Permissions",
"desc": "详细授权值。 取值有:\n<li>R:可读,可以浏览素材,但不能使用该素材(将其添加到 Project),或复制到自己的媒资库中</li>\n<li>X:可用,可以使用该素材(将其添加到 Project),但不能将其复制到自己的媒资库中,意味着被授权者无法将该资源进一步扩散给其他个人或团队。</li>\n<li>C:可复制,既可以使用该素材(将其添加到 Project),也可以将其复制到自己的媒资库中。</li>\n<li>W:可修改、删除媒资。</li>"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "资源所属实体对目标实体授予目标资源的相应权限。"
},
"SearchMaterial": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "SearchScopes",
"desc": "指定搜索空间,数组长度不得超过5。"
},
{
"name": "MaterialTypes",
"desc": "素材类型,取值:\n<li>AUDIO:音频;</li>\n<li>VIDEO:视频 ;</li>\n<li>IMAGE:图片。</li>"
},
{
"name": "Text",
"desc": "搜索文本,模糊匹配素材名称或描述信息,匹配项越多,匹配度越高,排序越优先。长度限制:64 个字符。"
},
{
"name": "Resolution",
"desc": "按画质检索,取值为:LD/SD/HD/FHD/2K/4K。"
},
{
"name": "DurationRange",
"desc": "按素材时长检索,单位s。"
},
{
"name": "CreateTimeRange",
"desc": "按照素材创建时间检索。"
},
{
"name": "Tags",
"desc": "标签集合,匹配集合中任意元素。单个标签长度限制:10 个字符。数组长度限制:10。"
},
{
"name": "Sort",
"desc": "排序方式。Sort.Field 可选值:CreateTime。指定 Text 搜索时,将根据匹配度排序,该字段无效。"
},
{
"name": "Offset",
"desc": "偏移量。默认值:0。"
},
{
"name": "Limit",
"desc": "返回记录条数,默认值:50。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "根据检索条件搜索素材,返回素材的基本信息。"
},
"CreateProject": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "Category",
"desc": "项目类别,取值有:\n<li>VIDEO_EDIT:视频编辑。</li>"
},
{
"name": "Name",
"desc": "项目名称,不可超过30个字符。"
},
{
"name": "AspectRatio",
"desc": "画布宽高比,取值有:\n<li>16:9;</li>\n<li>9:16。</li>"
},
{
"name": "Owner",
"desc": "归属者。"
}
],
"desc": "创建云剪的编辑项目,支持创建视频剪辑及直播剪辑两大类项目。\n"
},
"DescribeJoinTeams": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "MemberId",
"desc": "团队成员 ID。"
},
{
"name": "Offset",
"desc": "分页偏移量,默认值:0"
},
{
"name": "Limit",
"desc": "返回记录条数,默认值:30,最大值:30。"
}
],
"desc": "获取指定的团队成员所加入的团队列表。"
},
"DescribeResourceAuthorization": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "Owner",
"desc": "归属者。"
},
{
"name": "Resource",
"desc": "资源。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "查询指定资源的授权列表。"
},
"ImportMaterial": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "VodFileId",
"desc": "云点播媒资 FileId。"
},
{
"name": "Owner",
"desc": "素材归属者。"
},
{
"name": "Name",
"desc": "素材名称,不能超过30个字符。"
},
{
"name": "ClassPath",
"desc": "素材分类路径,形如:\"/a/b\",层级数不能超过10,每个层级长度不能超过15字符。若不填则默认为根路径。"
},
{
"name": "Tags",
"desc": "素材标签,单个标签长度不能超过10,数组长度不能超过10。"
},
{
"name": "PreProcessDefinition",
"desc": "素材预处理任务模板 ID。取值:\n<li>10:进行编辑预处理。</li>"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "将云点播媒资文件导入到云剪素材库。"
},
"DescribeTaskDetail": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "TaskId",
"desc": "任务 Id。"
}
],
"desc": "获取任务详情信息,包含下面几个部分:\n<li>任务基础信息:包括任务状态、错误信息、创建时间等;</li>\n<li>导出项目输出信息:包括输出的素材 Id 等。</li>"
},
"ModifyTeam": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "TeamId",
"desc": "团队 ID。"
},
{
"name": "Name",
"desc": "团队名称,不能超过 30 个字符。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "修改团队信息,目前支持修改的操作有:\n<li>修改团队名称。</li>"
},
"DeleteMaterial": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "MaterialId",
"desc": "素材 Id。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "根据素材 Id 删除素材。"
},
"CreateClass": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "Owner",
"desc": "归属者。"
},
{
"name": "ClassPath",
"desc": "分类路径。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "新增分类,用于管理素材。\n<li>分类层数不能超过10;</li>\n<li>子分类数不能超过10。</li>"
},
"DeleteTeam": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问平台。"
},
{
"name": "TeamId",
"desc": "要删除的团队 ID。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "删除一个团队。\n<li>要删除的团队必须没有归属的素材;</li>\n<li>要删除的团队必须没有归属的分类。</li>"
},
"AddTeamMember": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "TeamId",
"desc": "团队 ID。"
},
{
"name": "TeamMembers",
"desc": "要添加的成员列表,一次最多添加30个成员。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "向一个团队中团队成员,并且指定成员的角色。"
},
"ModifyTeamMember": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "TeamId",
"desc": "团队 ID。"
},
{
"name": "MemberId",
"desc": "团队成员 ID。"
},
{
"name": "Remark",
"desc": "成员备注,允许设置备注为空,不为空时长度不能超过15个字符。"
},
{
"name": "Role",
"desc": "成员角色,取值:\n<li>Admin:团队管理员;</li>\n<li>Member:普通成员。</li>"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "修改团队成员信息,包括成员备注、角色等。"
},
"DeleteTeamMembers": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "TeamId",
"desc": "团队 ID。"
},
{
"name": "MemberIds",
"desc": "要删除的成员列表。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "将团队成员从团队中删除,默认只有 Owner 及管理员才有此权限。"
},
"DeleteLoginStatus": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "UserIds",
"desc": "用户 Id 列表,N 从 0 开始取值,最大 19。"
}
],
"desc": "删除用户登录态,使用户登出云剪平台。"
},
"DescribeProjects": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "ProjectIds",
"desc": "项目 Id 列表,N 从 0 开始取值,最大 19。"
},
{
"name": "AspectRatioSet",
"desc": "画布宽高比集合。"
},
{
"name": "CategorySet",
"desc": "项目类别集合。"
},
{
"name": "Sort",
"desc": "列表排序,支持下列排序字段:\n<li>CreateTime:创建时间;</li>\n<li>UpdateTime:更新时间。</li>"
},
{
"name": "Owner",
"desc": "项目归属者。"
},
{
"name": "Offset",
"desc": "分页返回的起始偏移量,默认值:0。"
},
{
"name": "Limit",
"desc": "分页返回的记录条数,默认值:10。"
}
],
"desc": "支持根据多种条件过滤出项目列表。"
},
"DescribeLoginStatus": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "UserIds",
"desc": "用户 Id 列表,N 从 0 开始取值,最大 19。"
}
],
"desc": "查询指定用户的登录态。"
},
"DescribeMaterials": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "MaterialIds",
"desc": "素材 ID 列表,N 从 0 开始取值,最大 19。"
},
{
"name": "Sort",
"desc": "列表排序,支持下列排序字段:\n<li>CreateTime:创建时间;</li>\n<li>UpdateTime:更新时间。</li>"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "根据素材 Id 批量获取素材详情。"
},
"DescribeClass": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "Owner",
"desc": "归属者。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "获取指定归属者下所有的分类信息。"
},
"CreateTeam": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "Name",
"desc": "团队名称,限30个字符。"
},
{
"name": "OwnerId",
"desc": "团队所有者,指定用户 ID。"
},
{
"name": "OwnerRemark",
"desc": "团队所有者的备注,限30个字符。"
},
{
"name": "TeamId",
"desc": "自定义团队 ID。创建后不可修改,限20个英文字符及\"-\"。同时不能以 cmetid_开头。不填会生成默认团队 ID。"
}
],
"desc": "创建一个团队。"
},
"ModifyProject": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "ProjectId",
"desc": "项目 Id。"
},
{
"name": "Name",
"desc": "项目名称,不可超过30个字符。"
},
{
"name": "AspectRatio",
"desc": "画布宽高比,取值有:\n<li>16:9;</li>\n<li>9:16。</li>"
},
{
"name": "Owner",
"desc": "归属者。"
}
],
"desc": "修改云剪编辑项目的信息。"
},
"ModifyMaterial": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "MaterialId",
"desc": "素材 Id。"
},
{
"name": "Owner",
"desc": "素材归属。"
},
{
"name": "Name",
"desc": "素材名称,不能超过30个字符。"
},
{
"name": "Tags",
"desc": "素材标签,单个标签长度不能超过10个字符,数组长度不能超过10。"
},
{
"name": "ClassPath",
"desc": "素材分类路径,例如填写\"/a/b\",则代表该素材存储的路径为\"/a/b\"。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "修改素材信息,支持修改素材名称、分类路径、标签等信息。"
},
"ImportMediaToProject": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "ProjectId",
"desc": "项目 Id。"
},
{
"name": "VodFileId",
"desc": "云点播媒资 FileId。"
},
{
"name": "Name",
"desc": "素材名称,不能超过30个字符。"
},
{
"name": "PreProcessDefinition",
"desc": "素材预处理任务模板 ID,取值:\n<li>10:进行编辑预处理。</li>\n注意:如果填0则不进行处理。"
}
],
"desc": "将云点播中的媒资添加到素材库中,供后续视频编辑使用。"
},
"ListMedia": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "ClassPath",
"desc": "素材分类路径,例如填写\"/a/b\",则代表浏览该分类路径下的素材和子分类信息。"
},
{
"name": "Owner",
"desc": "素材和分类的归属者。"
},
{
"name": "Offset",
"desc": "分页偏移量,默认值:0。"
},
{
"name": "Limit",
"desc": "返回记录条数,默认值:10,最大值:50。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": " 浏览当前分类路径下的资源,包括素材和子分类。"
},
"RevokeResourceAuthorization": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "Owner",
"desc": "资源所属实体。"
},
{
"name": "Resources",
"desc": "被授权资源。"
},
{
"name": "Authorizees",
"desc": "被授权目标实体。"
},
{
"name": "Permissions",
"desc": "详细授权值。 取值有:\n<li>R:可读,可以浏览素材,但不能使用该素材(将其添加到 Project),或复制到自己的媒资库中</li>\n<li>X:可用,可以使用该素材(将其添加到 Project),但不能将其复制到自己的媒资库中,意味着被授权者无法将该资源进一步扩散给其他个人或团队。</li>\n<li>C:可复制,既可以使用该素材(将其添加到 Project),也可以将其复制到自己的媒资库中。</li>\n<li>W:可修改、删除媒资。</li>"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": " 资源所属实体对目标实体回收目标资源的相应权限,若原本没有相应权限则不产生变更。"
},
"DeleteClass": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "Owner",
"desc": "归属者。"
},
{
"name": "ClassPath",
"desc": "分类路径。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "删除分类信息,删除时检验下述限制:\n<li>分类路径必须存在;</li>\n<li>分类下没有绑定素材。</li>"
},
"DeleteProject": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "ProjectId",
"desc": "项目 Id。"
}
],
"desc": "删除云剪编辑项目。"
},
"FlattenListMedia": {
"params": [
{
"name": "Platform",
"desc": "平台名称,指定访问的平台。"
},
{
"name": "ClassPath",
"desc": "素材分类路径,例如填写\"/a/b\",则代表平铺该分类路径下及其子分类路径下的素材信息。"
},
{
"name": "Owner",
"desc": "素材路径的归属者。"
},
{
"name": "Offset",
"desc": "分页偏移量,默认值:0。"
},
{
"name": "Limit",
"desc": "返回记录条数,默认值:10,最大值:50。"
},
{
"name": "Operator",
"desc": "操作者。填写用户的 Id,用于标识调用者及校验操作权限。"
}
],
"desc": "平铺分类路径下及其子分类下的所有素材。"
},
#!/usr/opt/bs-python-2.7/bin/python
# Source: tests/test/mixed_vschema/vschema_pushdown.py
import os
import sys
import datetime
import unittest
from multiprocessing import Process
from textwrap import dedent
sys.path.append(os.path.realpath(__file__ + '/../../../lib'))
import udf
from vschema_common import VSchemaTest, TestUtils
#TODO Consolidate testAggregate, testScalar, etc. with explain tests
<EMAIL>("skipped test")
class PushdownTest(VSchemaTest):
setupDone = False
def setUp(self):
# TODO This is another ugly workaround for the problem that the framework doesn't offer us a query in classmethod setUpClass. Rewrite!
if self.__class__.setupDone:
self.query(''' CLOSE SCHEMA ''')
return
self.createJdbcAdapter()
self.createNative()
self.commit() # We have to commit, otherwise the adapter won't see these tables
self.createVirtualSchemaJdbc("VS1", "NATIVE", "ADAPTER.JDBC_ADAPTER", True)
self.commit()
self.query(''' CLOSE SCHEMA ''')
self.__class__.setupDone = True
def testProjection(self):
# Single Group
self.compareWithNativeExtended('''
SELECT a, c FROM {v}.T;
''', ignoreOrder=True, explainResponse="SELECT A, C")
self.compareWithNativeExtended('''
SELECT b, c FROM {v}.T;
''', ignoreOrder=True, explainResponse="SELECT B, C")
self.compareWithNativeExtended('''
SELECT a FROM {v}.T;
''', ignoreOrder=True, explainResponse="SELECT A")
# Column only in the filter. To be executed with and without filter pushdown.
# Without filter pushdown, the lookup on a must still be evaluated locally, so a needs
# to be included in the projection; otherwise there is no need for a in the projection.
self.compareWithNativeExtended('''
SELECT c FROM {v}.t WHERE a=1;
''', ignoreOrder=True, explainResponse="SELECT C FROM NATIVE.T WHERE A = 1")
# Column only in order-by
self.compareWithNativeExtended('''
SELECT b, c, a FROM {v}.t ORDER BY a ;
''', partialOrder=2, explainResponse="SELECT * FROM NATIVE.T ORDER BY A")
# Column only in analytical query
self.compareWithNativeExtended('''
SELECT k, v1, COUNT(*) OVER(PARTITION BY k ORDER BY v1) AS COUNT FROM {v}.g;
''', partialOrder=1, explainResponse="SELECT K, V1 FROM NATIVE.G")
# Column only in on-clause of join
self.compareWithNativeExtended('''
SELECT vt.a FROM {v}.t vt JOIN {n}.t nt ON vt.c = nt.c;
''', ignoreOrder=True, explainResponse="SELECT A, C FROM NATIVE.T")
# Empty Projection special case (no columns required)
self.compareWithNativeExtended('''
SELECT count(*) FROM {v}.t;
''', ignoreOrder=True, explainResponse="SELECT COUNT(*) FROM NATIVE.T")
self.compareWithNativeExtended('''
SELECT t1.a FROM {v}.t t1, {v}.t t2;
''', ignoreOrder=True, explainResponse=["SELECT A FROM NATIVE.T", "SELECT true FROM NATIVE.T"])
self.compareWithNativeExtended('''
SELECT true FROM {v}.t;
''', ignoreOrder=True, explainResponse="SELECT true FROM NATIVE.T")
def testAliases(self):
# Table Aliases
self.compareWithNativeExtended('''
SELECT a FROM {v}.t t1
''', ignoreOrder=True, explainResponse="SELECT A FROM NATIVE.T")
self.compareWithNativeExtended('''
SELECT t1.a FROM {v}.t t1, {v}.t t2
''', ignoreOrder=True, explainResponse=["SELECT A FROM NATIVE.T", "SELECT true FROM NATIVE.T"])
self.compareWithNativeExtended('''
SELECT t1.a FROM (SELECT * FROM {v}.t t1) as t1
''', ignoreOrder=True, explainResponse="SELECT A FROM NATIVE.T")
self.compareWithNativeExtended('''
SELECT t1.a FROM {v}.t t1, {v}.t t2
''', ignoreOrder=True, explainResponse=["SELECT A FROM NATIVE.T", "SELECT true FROM NATIVE.T"])
# Column Aliases
self.compareWithNativeExtended('''
SELECT (a+1) a1 FROM {v}.t
''', ignoreOrder=True, explainResponse="SELECT (A + 1) FROM NATIVE.T")
# in subselect: must work for selectlist and projection pushdown!
self.compareWithNativeExtended('''
SELECT a1 FROM (SELECT a a1 FROM {v}.t ORDER BY false)
''', ignoreOrder=True, explainResponse="SELECT A FROM NATIVE.T ORDER BY false")
# with aggregation
self.compareWithNativeExtended('''
SELECT sum(a) AS suma FROM {v}.t
''', ignoreOrder=True, explainResponse="SELECT SUM(A) FROM NATIVE.T")
def testJoins(self):
self.compareWithNativeExtended('''
select t1.a FROM {v}.t t1 join {v}.t t2 on t1.b=t2.b
''', ignoreOrder=True, explainResponse=["SELECT A, B FROM NATIVE.T", "SELECT B FROM NATIVE.T"])
# with Projection
self.compareWithNativeExtended('''
select t1.a FROM {v}.t t1, {v}.t t2 where t1.c = t2.c
''', ignoreOrder=True, explainResponse=["SELECT A, C FROM NATIVE.T", "SELECT C FROM NATIVE.T"])
# with local filter
self.compareWithNativeExtended('''
select t1.a FROM {v}.t t1 join {v}.t t2 on t1.b=t2.b and t1.a=2
''', ignoreOrder=True, explainResponse=["SELECT A, B FROM NATIVE.T", "SELECT B FROM NATIVE.T"])
# with global filter
self.compareWithNativeExtended('''
select t1.a FROM {v}.t t1 join {v}.t t2 on t1.c=t2.c and t1.b<t2.b
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T","SELECT B, C FROM NATIVE.T"])
# with non-sql92 syntax
self.compareWithNativeExtended('''
select t1.a FROM {v}.t t1, {v}.t t2 where t1.c=t2.c and t1.b<t2.b
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T", "SELECT B, C FROM NATIVE.T"])
# equi join with using
self.compareWithNativeExtended('''
select * FROM {v}.t t1 join {v}.t t2 using (a)
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T", "SELECT * FROM NATIVE.T"])
self.compareWithNativeExtended('''
select t1.a FROM {v}.t t1 join {v}.g t2 on t1.a=t2.k join {v}.t_nulls t3 on t1.a = t3.a where t1.c < 3 and t3.a < 2 and t2.v1 < 4
''', ignoreOrder=True, explainResponse=["SELECT A FROM NATIVE.T WHERE C < 3","SELECT K FROM NATIVE.G WHERE V1 < 4","SELECT A FROM NATIVE.T_NULLS WHERE A < 2"])
# Outer Join
self.compareWithNativeExtended('''
select * FROM {v}.t t1 left join (select * FROM {v}.t where a=1) t2 on t1.a=t2.a
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T", "SELECT * FROM NATIVE.T"])
# outer join with coalesce (should not contain null rows, i.e. no filter pushdown and filter nulls after the join)
self.compareWithNativeExtended('''
select * FROM {v}.t t1 left join {v}.t t2 on t1.a=t2.a where coalesce(t2.a, 1) = 1
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T", "SELECT * FROM NATIVE.T"])
# same with other syntax
self.compareWithNativeExtended('''
select * FROM {v}.t t1 left join {v}.t t2 on t1.a=t2.a and coalesce(t2.a, 1) = 1
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T", "SELECT * FROM NATIVE.T"])
# This SHOULD contain null rows, which are produced by outer join
self.compareWithNativeExtended('''
select * FROM {v}.t t1 left join (select * FROM {v}.t where coalesce(a, 1) = 1) t2 on t1.a=t2.a
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T", "SELECT * FROM NATIVE.T"])
# Cross Join
self.compareWithNativeExtended('''
select t1.a FROM {v}.t t1, {v}.t t2
''', ignoreOrder=True, explainResponse=["SELECT A FROM NATIVE.T", "SELECT true FROM NATIVE.T"])
self.compareWithNativeExtended('''
select * FROM {v}.t t1 cross join {v}.t t2 where t2.a!=1
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T", "SELECT * FROM NATIVE.T WHERE A != 1"])
self.compareWithNativeExtended('''
select * FROM {v}.t t1, {v}.t t2 where t2.a!=1
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T", "SELECT * FROM NATIVE.T WHERE A != 1"])
# cross join via equi join syntax
self.compareWithNativeExtended('''
select * FROM {v}.t t1 inner join {v}.t t2 on t2.a!=1
''', ignoreOrder=True, explainResponse=["SELECT * FROM NATIVE.T", "SELECT * FROM NATIVE.T"])
# Multi Join Conditions
self.compareWithNativeExtended('''
select a, t2.c+1 FROM {v}.t t1 join {v}.t t2 using (a, b)
''', ignoreOrder=True, explainResponse=["SELECT A, B FROM NATIVE.T", "SELECT * FROM NATIVE.T"])
self.compareWithNativeExtended('''
select t1.a FROM {v}.t t1 join {v}.t t2 on t1.a = t2.a and t1.b = t2.b
''', ignoreOrder=True, explainResponse=["SELECT A, B FROM NATIVE.T", "SELECT A, B FROM NATIVE.T"])
# same with other syntax
self.compareWithNativeExtended('''
select t1.a FROM {v}.t t1, {v}.t t2 where t1.a = t2.a and t1.b = t2.b
''', ignoreOrder=True, explainResponse=["SELECT A, B FROM NATIVE.T", "SELECT A, B FROM NATIVE.T"])
# Equi Join using strange syntax
self.compareWithNativeExtended('''
select t1.a FROM {v}.t t1 inner join {v}.t t2 on true where t1.b = t2.b
''', ignoreOrder=True, explainResponse=["SELECT A, B FROM NATIVE.T", "SELECT B FROM NATIVE.T"])
# Join with native table
self.compareWithNativeExtended('''
select * from {v}.t vt join {n}.t nt on vt.a = nt.a where nt.a = 1
''', ignoreOrder=True, explainResponse="SELECT * FROM NATIVE.T")
def testSelectListExpressions(self):
# TODO Check partial ordering if possible
self.compareWithNativeExtended('''
select a+1, c from {v}.t order by c desc
''', partialOrder=1, explainResponse="SELECT (A + 1), C FROM NATIVE.T ORDER BY C DESC")
self.compareWithNativeExtended('''
select a+1, 3-c from {v}.t order by 3-c
''', partialOrder=1, explainResponse="SELECT (A + 1), (3 - C) FROM NATIVE.T ORDER BY (3 - C)")
# with additional col in filter
self.compareWithNativeExtended('''
select a+1 from {v}.t where c=1.1
''', ignoreOrder=True, explainResponse="SELECT (A + 1) FROM NATIVE.T WHERE C = 1.1")
# with additional col in ON-clause
self.compareWithNativeExtended('''
select t1.a+1 from {v}.t t1 join {v}.t t2 on t1.c = t2.c
''', ignoreOrder=True, explainResponse=["SELECT A, C FROM NATIVE.T", "SELECT C FROM NATIVE.T"])
# With UDF
self.query(udf.fixindent('''
CREATE OR REPLACE PYTHON SCALAR SCRIPT native.emit_dummy (a int) EMITS (a int, b varchar(100)) AS
def run(ctx):
ctx.emit(ctx[0],'a')
/
'''))
self.compareWithNativeExtended('''
select native.emit_dummy(a) from {v}.t
''', ignoreOrder=True, explainResponse="SELECT A FROM NATIVE.T")
# With scalar-emit and select list expressions (map_script->getCachedExpressions())
self.compareWithNativeExtended('''
select native.emit_dummy(a), b from {v}.t
''', ignoreOrder=True, explainResponse="SELECT A, B FROM NATIVE.T")
# With VarEmit
self.query(udf.fixindent('''
CREATE OR REPLACE PYTHON SET SCRIPT native.var_emit_dummy (...) EMITS (...) AS
def run(ctx):
ctx.emit(ctx[0],'a')
def default_output_columns(ctx):
return ("a int, b varchar(100)")
/
'''))
self.compareWithNativeExtended('''
select native.var_emit_dummy(a) emits (a int, b varchar(100)) from {v}.t
''', ignoreOrder=True, explainResponse="SELECT A FROM NATIVE.T")
# TODO Crashes! Extend getSingleCallEXScriptVMContainer to get types differently | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'LaunchProfileStreamConfigurationSessionStorageArgs',
'LaunchProfileStreamConfigurationArgs',
'LaunchProfileStreamingSessionStorageRootArgs',
'LaunchProfileTagsArgs',
'StreamingImageTagsArgs',
'StudioComponentActiveDirectoryComputerAttributeArgs',
'StudioComponentActiveDirectoryConfigurationArgs',
'StudioComponentComputeFarmConfigurationArgs',
'StudioComponentConfigurationArgs',
'StudioComponentInitializationScriptArgs',
'StudioComponentLicenseServiceConfigurationArgs',
'StudioComponentScriptParameterKeyValueArgs',
'StudioComponentSharedFileSystemConfigurationArgs',
'StudioComponentTagsArgs',
'StudioEncryptionConfigurationArgs',
'StudioTagsArgs',
]
@pulumi.input_type
class LaunchProfileStreamConfigurationSessionStorageArgs:
def __init__(__self__, *,
mode: Optional[pulumi.Input[Sequence[pulumi.Input['LaunchProfileStreamingSessionStorageMode']]]] = None,
root: Optional[pulumi.Input['LaunchProfileStreamingSessionStorageRootArgs']] = None):
"""
<p>The configuration for a streaming session’s upload storage.</p>
:param pulumi.Input[Sequence[pulumi.Input['LaunchProfileStreamingSessionStorageMode']]] mode: <p>Allows artists to upload files to their workstations. The only valid option is
<code>UPLOAD</code>.</p>
"""
if mode is not None:
pulumi.set(__self__, "mode", mode)
if root is not None:
pulumi.set(__self__, "root", root)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LaunchProfileStreamingSessionStorageMode']]]]:
"""
<p>Allows artists to upload files to their workstations. The only valid option is
<code>UPLOAD</code>.</p>
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LaunchProfileStreamingSessionStorageMode']]]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def root(self) -> Optional[pulumi.Input['LaunchProfileStreamingSessionStorageRootArgs']]:
return pulumi.get(self, "root")
@root.setter
def root(self, value: Optional[pulumi.Input['LaunchProfileStreamingSessionStorageRootArgs']]):
pulumi.set(self, "root", value)
@pulumi.input_type
class LaunchProfileStreamConfigurationArgs:
def __init__(__self__, *,
clipboard_mode: pulumi.Input['LaunchProfileStreamingClipboardMode'],
ec2_instance_types: pulumi.Input[Sequence[pulumi.Input['LaunchProfileStreamingInstanceType']]],
streaming_image_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
max_session_length_in_minutes: Optional[pulumi.Input[float]] = None,
max_stopped_session_length_in_minutes: Optional[pulumi.Input[float]] = None,
session_storage: Optional[pulumi.Input['LaunchProfileStreamConfigurationSessionStorageArgs']] = None):
"""
<p>A configuration for a streaming session.</p>
:param pulumi.Input[Sequence[pulumi.Input['LaunchProfileStreamingInstanceType']]] ec2_instance_types: <p>The EC2 instance types that users can select from when launching a streaming session
with this launch profile.</p>
:param pulumi.Input[Sequence[pulumi.Input[str]]] streaming_image_ids: <p>The streaming images that users can select from when launching a streaming session
with this launch profile.</p>
:param pulumi.Input[float] max_session_length_in_minutes: <p>The length of time, in minutes, that a streaming session can be active before it is
stopped or terminated. After this point, Nimble Studio automatically terminates or
stops the session. The default length of time is 690 minutes, and the maximum length of
time is 30 days.</p>
:param pulumi.Input[float] max_stopped_session_length_in_minutes: <p>Integer that determines if you can start and stop your sessions and how long a session
can stay in the STOPPED state. The default value is 0. The maximum value is 5760.</p>
<p>If the value is missing or set to 0, your sessions can’t be stopped. If you then call
StopStreamingSession, the session fails. If the time that a session stays in the READY
state exceeds the maxSessionLengthInMinutes value, the session will automatically be
terminated by AWS (instead of stopped).</p>
<p>If the value is set to a positive number, the session can be stopped. You can call
StopStreamingSession to stop sessions in the READY state. If the time that a session
stays in the READY state exceeds the maxSessionLengthInMinutes value, the session will
automatically be stopped by AWS (instead of terminated).</p>
"""
pulumi.set(__self__, "clipboard_mode", clipboard_mode)
pulumi.set(__self__, "ec2_instance_types", ec2_instance_types)
pulumi.set(__self__, "streaming_image_ids", streaming_image_ids)
if max_session_length_in_minutes is not None:
pulumi.set(__self__, "max_session_length_in_minutes", max_session_length_in_minutes)
if max_stopped_session_length_in_minutes is not None:
pulumi.set(__self__, "max_stopped_session_length_in_minutes", max_stopped_session_length_in_minutes)
if session_storage is not None:
pulumi.set(__self__, "session_storage", session_storage)
@property
@pulumi.getter(name="clipboardMode")
def clipboard_mode(self) -> pulumi.Input['LaunchProfileStreamingClipboardMode']:
return pulumi.get(self, "clipboard_mode")
@clipboard_mode.setter
def clipboard_mode(self, value: pulumi.Input['LaunchProfileStreamingClipboardMode']):
pulumi.set(self, "clipboard_mode", value)
@property
@pulumi.getter(name="ec2InstanceTypes")
def ec2_instance_types(self) -> pulumi.Input[Sequence[pulumi.Input['LaunchProfileStreamingInstanceType']]]:
"""
<p>The EC2 instance types that users can select from when launching a streaming session
with this launch profile.</p>
"""
return pulumi.get(self, "ec2_instance_types")
@ec2_instance_types.setter
def ec2_instance_types(self, value: pulumi.Input[Sequence[pulumi.Input['LaunchProfileStreamingInstanceType']]]):
pulumi.set(self, "ec2_instance_types", value)
@property
@pulumi.getter(name="streamingImageIds")
def streaming_image_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
<p>The streaming images that users can select from when launching a streaming session
with this launch profile.</p>
"""
return pulumi.get(self, "streaming_image_ids")
@streaming_image_ids.setter
def streaming_image_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "streaming_image_ids", value)
@property
@pulumi.getter(name="maxSessionLengthInMinutes")
def max_session_length_in_minutes(self) -> Optional[pulumi.Input[float]]:
"""
<p>The length of time, in minutes, that a streaming session can be active before it is
stopped or terminated. After this point, Nimble Studio automatically terminates or
stops the session. The default length of time is 690 minutes, and the maximum length of
time is 30 days.</p>
"""
return pulumi.get(self, "max_session_length_in_minutes")
@max_session_length_in_minutes.setter
def max_session_length_in_minutes(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "max_session_length_in_minutes", value)
@property
@pulumi.getter(name="maxStoppedSessionLengthInMinutes")
def max_stopped_session_length_in_minutes(self) -> Optional[pulumi.Input[float]]:
"""
<p>Integer that determines if you can start and stop your sessions and how long a session
can stay in the STOPPED state. The default value is 0. The maximum value is 5760.</p>
<p>If the value is missing or set to 0, your sessions can’t be stopped. If you then call
StopStreamingSession, the session fails. If the time that a session stays in the READY
state exceeds the maxSessionLengthInMinutes value, the session will automatically be
terminated by AWS (instead of stopped).</p>
<p>If the value is set to a positive number, the session can be stopped. You can call
StopStreamingSession to stop sessions in the READY state. If the time that a session
stays in the READY state exceeds the maxSessionLengthInMinutes value, the session will
automatically be stopped by AWS (instead of terminated).</p>
"""
return pulumi.get(self, "max_stopped_session_length_in_minutes")
@max_stopped_session_length_in_minutes.setter
def max_stopped_session_length_in_minutes(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "max_stopped_session_length_in_minutes", value)
@property
@pulumi.getter(name="sessionStorage")
def session_storage(self) -> Optional[pulumi.Input['LaunchProfileStreamConfigurationSessionStorageArgs']]:
return pulumi.get(self, "session_storage")
@session_storage.setter
def session_storage(self, value: Optional[pulumi.Input['LaunchProfileStreamConfigurationSessionStorageArgs']]):
pulumi.set(self, "session_storage", value)
@pulumi.input_type
class LaunchProfileStreamingSessionStorageRootArgs:
def __init__(__self__, *,
linux: Optional[pulumi.Input[str]] = None,
windows: Optional[pulumi.Input[str]] = None):
"""
<p>The upload storage root location (folder) on streaming workstations where files are
uploaded.</p>
:param pulumi.Input[str] linux: <p>The folder path in Linux workstations where files are uploaded.</p>
:param pulumi.Input[str] windows: <p>The folder path in Windows workstations where files are uploaded.</p>
"""
if linux is not None:
pulumi.set(__self__, "linux", linux)
if windows is not None:
pulumi.set(__self__, "windows", windows)
@property
@pulumi.getter
def linux(self) -> Optional[pulumi.Input[str]]:
"""
<p>The folder path in Linux workstations where files are uploaded.</p>
"""
return pulumi.get(self, "linux")
@linux.setter
def linux(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linux", value)
@property
@pulumi.getter
def windows(self) -> Optional[pulumi.Input[str]]:
"""
<p>The folder path in Windows workstations where files are uploaded.</p>
"""
return pulumi.get(self, "windows")
@windows.setter
def windows(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "windows", value)
@pulumi.input_type
class LaunchProfileTagsArgs:
def __init__(__self__):
pass
@pulumi.input_type
class StreamingImageTagsArgs:
def __init__(__self__):
pass
@pulumi.input_type
class StudioComponentActiveDirectoryComputerAttributeArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
<p>An LDAP attribute of an Active Directory computer account, in the form of a name:value pair.</p>
:param pulumi.Input[str] name: <p>The name for the LDAP attribute.</p>
:param pulumi.Input[str] value: <p>The value for the LDAP attribute.</p>
"""
if name is not None:
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
<p>The name for the LDAP attribute.</p>
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
<p>The value for the LDAP attribute.</p>
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class StudioComponentActiveDirectoryConfigurationArgs:
def __init__(__self__, *,
computer_attributes: Optional[pulumi.Input[Sequence[pulumi.Input['StudioComponentActiveDirectoryComputerAttributeArgs']]]] = None,
directory_id: Optional[pulumi.Input[str]] = None,
organizational_unit_distinguished_name: Optional[pulumi.Input[str]] = None):
"""
<p>The configuration for a Microsoft Active Directory (Microsoft AD) studio resource.</p>
:param pulumi.Input[Sequence[pulumi.Input['StudioComponentActiveDirectoryComputerAttributeArgs']]] computer_attributes: <p>A collection of custom attributes for an Active Directory computer.</p>
:param pulumi.Input[str] directory_id: <p>The directory ID of the Directory Service for Microsoft Active Directory to access using this studio component.</p>
:param pulumi.Input[str] organizational_unit_distinguished_name: <p>The distinguished name (DN) and organizational unit (OU) of an Active Directory computer.</p>
"""
if computer_attributes is not None:
pulumi.set(__self__, "computer_attributes", computer_attributes)
if directory_id is not None:
pulumi.set(__self__, "directory_id", directory_id)
if organizational_unit_distinguished_name is not None:
pulumi.set(__self__, "organizational_unit_distinguished_name", organizational_unit_distinguished_name)
@property
@pulumi.getter(name="computerAttributes")
def computer_attributes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StudioComponentActiveDirectoryComputerAttributeArgs']]]]:
"""
<p>A collection of custom attributes for an Active Directory computer.</p>
"""
return pulumi.get(self, "computer_attributes")
@computer_attributes.setter
def computer_attributes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StudioComponentActiveDirectoryComputerAttributeArgs']]]]):
pulumi.set(self, "computer_attributes", value)
@property
@pulumi.getter(name="directoryId")
def directory_id(self) -> Optional[pulumi.Input[str]]:
"""
<p>The directory ID of the Directory Service for Microsoft Active Directory to access using this studio component.</p>
"""
return pulumi.get(self, "directory_id")
@directory_id.setter
def directory_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "directory_id", value)
@property
@pulumi.getter(name="organizationalUnitDistinguishedName")
def organizational_unit_distinguished_name(self) -> Optional[pulumi.Input[str]]:
"""
<p>The distinguished name (DN) and organizational unit (OU) of an Active Directory computer.</p>
"""
return pulumi.get(self, "organizational_unit_distinguished_name")
@organizational_unit_distinguished_name.setter
def organizational_unit_distinguished_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "organizational_unit_distinguished_name", value)
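# Hedged usage sketch (not part of the generated code): nesting computer
# attributes inside the Active Directory configuration. The directory ID and
# distinguished name are placeholders.
#
#   ad_config = StudioComponentActiveDirectoryConfigurationArgs(
#       directory_id="d-1234567890ab",
#       organizational_unit_distinguished_name="OU=Nimble,DC=example,DC=com",
#       computer_attributes=[
#           StudioComponentActiveDirectoryComputerAttributeArgs(
#               name="description",
#               value="nimble workstation",
#           ),
#       ],
#   )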
@pulumi.input_type
class StudioComponentComputeFarmConfigurationArgs:
def __init__(__self__, *,
active_directory_user: Optional[pulumi.Input[str]] = None,
endpoint: Optional[pulumi.Input[str]] = None):
"""
<p>The configuration for a render farm that is associated with a studio resource.</p>
:param pulumi.Input[str] active_directory_user: <p>The name of an Active Directory user that is used on ComputeFarm worker instances.</p>
        :param pulumi.Input[str] endpoint: <p>The endpoint of the ComputeFarm
isinstance(item, basestring):
raise Exception("Key is trying to index a h2o frame with a string? %s" % item)
elif isinstance(item, dict):
raise Exception("Key is trying to index a h2o frame with a dict? %s" % item)
elif isinstance( item, slice):
# debugprint("Key item start", str(item.start))
# debugprint("Key item stop", str(item.stop))
# debugprint("Key item step", str(item.step))
# assume step is always None..
assert item.step==None, "Key assuming step should be None %s" % item.step
return Colon(item.start, item.stop)
else:
raise Exception("Key.add_indexing item(%s) must be int/Seq/Colon/list/tuple/slice" % item)
if isinstance(items, (list, tuple)):
itemsList = list(items)
# if there's a list, it better be just one or two dimensions
# if length 0, ignore
# one is row, two is row/col
if len(itemsList)==0:
debugprint("Key ignoring length 0 items list/tuple) %s" % itemsList)
elif len(itemsList)==1:
# we return another python object, which inherits the h2o key name
return(KeyIndexed(
frame=self.frame,
row=indexer(itemsList[0])
))
elif len(itemsList)==2:
return(KeyIndexed(
frame=self.frame,
row=indexer(itemsList[0]),
col=indexer(itemsList[1])
))
else:
raise Exception("Key itemsList is >2 %s" % itemsList)
else:
return(KeyIndexed(
frame=self.frame,
row=indexer(items),
dim=1, # one dimensional if using the single style?
))
# FIX! should return an instance of the key with the updated row/col values
return self
# __call__ = __str__
#*******************************************************************************
# maybe do some reading here
# http://python-3-patterns-idioms-test.readthedocs.org/en/latest/Factory.html
# row/col can be numbers or strings or not specified
# Key can do indexing/slicing. KeyIndexed is fixed at row/col
class KeyIndexed(Key):
# row 0/col0 should always exist if we init keys to 0?
def __init__(self, frame=None, row=0, col=0, dim=2):
super(KeyIndexed, self).__init__()
# can have row/col?
legalKey(frame, "KeyIndexed")
self.frame = frame
# if it's not a string, turn the assumed number into a number string
self.row = Item(row)
self.col = Item(col)
self.dim = dim # dimensions
# how to decide whether to send 2d or 1d references to h2o
# is there no such thing as a row vector, only a column vector (or data frame)
# row and col can be Seq, Colon, Item (could return a Cbind?)
# or should it pass the python construct to h2o?
    # it could put the list into an h2o key, and then do a[b] in h2o?
# row extracts problematic?
def __str__(self):
frame = self.frame
row = self.row
col = self.col
# these could be slice objects, strings, ints
# row and col == None is okay too?
if row is not None:
assert isinstance(row, (Seq, Colon, Item)), "KeyIndexed unexpected row type. %s %s" % (type(row), row)
if col is not None:
assert isinstance(col, (Seq, Colon, Item)), "KeyIndexed unexpected col type. %s %s" % (type(col), col)
# 'row in' will use __eq__ method
if row is None:
row = '"null"'
if col is None:
col = '"null"'
# detect the case where row/col say "everything"
# have to use str() because they could be objects and don't want to use __eq__ (which is used for ast resolution?)
if str(row)=='"null"' and str(col)=='"null"':
return "%{}".format(frame)
        # does it already start with '%'?
        # we always add '%' here; we could detect whether it's already there
if not re.match('\%', frame):
frame = "%{}".format(self.frame)
# is a 1 dimensional frame all rows (1 col?)
if self.dim==1:
return '([ %s %s %s)' % (frame, row, '#0')
else:
return '([ %s %s %s)' % (frame, row, col)
__repr__ = __str__
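    # Hedged illustration (not in the original file): assuming Item renders a
    # plain number as '#<n>' (consistent with the '#0' literal used above),
    # str(KeyIndexed(frame='r1', row=2, col=3)) would produce an ast string
    # along the lines of '([ %r1 #2 #3)'.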
# slicing/indexing magic methods
# http://www.siafoo.net/article/57
#********************************************************************************
# like Assign with constant rhs, but doesn't inherit from Key or KeyIndexed
# no indexing is allowed on key..it's just the whole key that gets initted, not some of it
# KeyInit() should only be used by Key() with a .do() ...so it executes
# GENIUS or INSANITY: it's good to have the init to have zero rows, to see what blows up
# create a zero row result with a row slice that is never true.
class KeyInit(Xbase):
def __init__(self, frame):
super(KeyInit, self).__init__()
# guaranteed to be string
assert isinstance(frame, basestring)
self.frame = frame
def __str__(self):
# This should give zero row key result. Does that result in Scalar?
# return "(= !%s %s)" % (self.frame, '(is.na (c {#0}))' )
return astForInit(self.frame)
    # this shouldn't be used with any of the setitem/getitem type stuff..add stuff to make that illegal?
# or any operators?
#********************************************************************************
# Users uses this? it adds an init
class DF(Key):
def __init__(self, key=None, existing=False):
super(DF, self).__init__(key)
if not existing:
# actually make the key in h2o with 0 rows
KeyInit(self.frame).do()
        # if you don't init it, it assumes the name can be used for indexed write, or normal write
# normal writes always work, even if it really wasn't existing.
def __str__(self):
frame = self.frame
# no % prefix
return '%s' % frame
#********************************************************************************
def legalFunction(function):
# return required operands
if function in xFcnOp1Set: return 1
if function in xFcnOp2Set: return 2
if function in xFcnOp3Set: return 3
if function in xFcnOpBinSet: return 2
if function in xFcnUser: return 3
else: return 0
# function is a string. operands is a list of items
class Fcn(Xbase):
# Attach an Assign to all root Fcn's
# And put it on the pending Assign list, which is flushed at appropriate times.
# figure out if this is a root function. Only the root function can create an Assign, which accomplishes a .do()
def __init__(self, function='sum', *operands):
super(Fcn, self).__init__()
operandList = unpackOperands(operands, parent="Fcn operands")
# no checking for correct number of params
debugprint("Fcn %s has %s operands" % (function, len(operands)))
# see if we should translate the function name
if function in xFcnXlate:
function = xFcnXlate[function]
required = legalFunction(function)
if required==0:
print "Fcn legalFunction not found...maybe future defined user function?: %s" % function
# FIX! currently not checking any.
# only check 1 and 2. not sure of the 3 group. cbind is conditional..need to do that special
if False and len(operandList)!=required and required<3 and function!='cbind':
raise Exception("Fcn wrong # of operands: %s %s" % (required, len(operandList)))
self.operandList = operandList
self.function = function
# can I do a str() here before everything has been initted?
debugprint("Fcn:", str(self))
def __str__(self):
return "(%s %s)" % (self.function, " ".join(map(str, self.operandList)))
__repr__ = __str__
def __getitem__(self, items):
raise Exception("trying to __getitem__ index a Seq? doesn't make sense? %s %s" % (self, items))
def __setitem__(self, items, rhs):
raise Exception("trying to __setitem__ index a Seq? doesn't make sense? %s %s" % (self, items))
class Return(Xbase):
# return only has one expression?
def __init__(self, expr):
super(Return, self).__init__()
self.expr = Item(expr)
def __str__(self):
return "%s" % self.expr
__repr__ = __str__
def __getitem__(self, items):
raise Exception("trying to __getitem__ index a Return? doesn't make sense? %s %s" % (self, items))
def __setitem__(self, items, rhs):
raise Exception("trying to __setitem__ index a Return? doesn't make sense? %s %s" % (self, items))
from weakref import WeakSet
# always does a .do() on init
class Assign(Key):
# want to use weak references for tracking instances.
# Otherwise the class could likely end up keeping track of instances
# that were meant to have been deleted.
# A weakref.WeakSet will automatically remove any dead instances from its set.
# http://stackoverflow.com/questions/12101958/keep-track-of-instances-in-python
# 1) Each subclass of ... will keep track of its own instances separately.
    # 2) The instances set uses weak references to the class's instances,
# so if you del or reassign all the other references to an instance elsewhere in your code,
# the bookkeeping code will not prevent it from being garbage collected.
# can put this in the Xbase base class if I want?
# pass the instances set to list() before printing.
def __new__(cls, *args, **kwargs):
instance = Key.__new__(cls, *args, **kwargs)
if "instances" not in cls.__dict__:
cls.instances = WeakSet()
cls.instances.add(instance)
return instance
# can create a dict from the list with:
# foo_vars = {id(instance): instance.foo for instance in Assign.instances}
@classmethod
def get_instances(cls):
# the list should go empty after del ... of the instance
return list(Assign.instances) #Returns list of all current instances
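    # Hedged illustration of the weak-reference bookkeeping above (not part of
    # the original file); the constructor arguments are hypothetical:
    #
    #   a = Assign('dst', 'src', do=False)
    #   assert a in Assign.get_instances()
    #   del a    # the entry drops out of the set once 'a' is garbage collected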
    def __init__(self, lhs=None, rhs=None, do=True, assignDisable=False,
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KDTree
from mpl_toolkits.mplot3d import Axes3D
from sklearn.mixture import GaussianMixture
class PrincipalCurve:
"""Subspace Constrained Mean Shift algorithm based on the paper
"Locally Defined Principal Curves and Surfaces" from Ozertem et al..
"""
def __init__(self, tolerance: float = 0.0001, maxiter: int = 600, d: int = 1, h: int = 3) -> None:
"""Initiate the class.
Parameters
----------
tolerance: float
Used to check convergence of the projection. 0.0001 by default.
maxiter: int
Maximal number of iterations for the projection of a point onto the principal curve. 600 by default.
d: int
Principal curve dimension. 1 by default.
h: int
Gaussian kernel bandwidth used if density=='KDE' (see below). 3 by default.
"""
self.tolerance = tolerance
self.maxiter = maxiter
self.d = d
self.h = h
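    # Hedged usage sketch (not part of the original file): projecting a dataset
    # onto its own principal curve with fit() below, using a KDE density and
    # the Hessian-based local covariance. X is assumed to be an
    # (n_points, n_dims) array.
    #
    #   pc = PrincipalCurve(tolerance=1e-4, maxiter=600, d=1, h=3)
    #   projected = pc.fit(X, X, density='KDE', localcov='hess')
    #   # projected has the same shape as X, each row moved onto the curve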
def fit(self, data: np.ndarray, points: np.ndarray, density: str=None, localcov: str=None, nbneighbors: int=None, nbcomponents: int=None, plot: bool=False) -> np.ndarray:
"""Find the principal curve of a dataset given a mesh of points.
Parameters
----------
data: array_like
            Must have size (points, dimension).
points: array_like
In the most common use, data and points are the same array. More generally points can be any grid of points.
            Must have size (points, dimension).
density: str
Density estimation method used. Can be 'KDE' or 'GMM'.
If density=='KDE'
localcov: str
Name of the local covariance matrix used for the eigenvalue decomposition.
Can be 'hess', 'inversecov', 'localcov1' or 'localcov2'.
nbneighbors: int
Number of nearest neighbors used to compute 'localcov1' or 'localcov2'.
If density=='GMM'
nbcomponents: int
Number of mixture components. Optional.
plot: bool
If True, the GMM pdf is plotted (only works in 2 dimensions). False by default.
Returns
-------
points: array_like
Points projected onto the principal curve. Has size (points, dimension).
"""
# Transpose and copy the input data and points
data, points = data.T, points.T
data = np.copy(np.atleast_2d(data)) # view inputs as arrays with at least two dimensions
n, N = data.shape
points = np.copy(np.atleast_2d(points))
m, M = points.shape
if m == 1 and M == n: # row vector
points = np.reshape(points, (n, 1)) # transform it in column vector
m, M = points.shape
# Check the parameters
if density != 'KDE' and density != 'GMM':
sys.exit("Density estimation method should be specified. Can be 'KDE' or 'GMM'.")
if density == 'KDE':
if localcov != 'hess' and localcov != 'inversecov' and localcov != 'localcov1' and localcov != 'localcov2':
sys.exit("Local covariance matrix used for the eigenvalue decomposition should be specified. Can be 'hess', 'inversecov', 'localcov1' or 'localcov2'.")
if localcov == 'localcov1' or localcov == 'localcov2':
if nbneighbors is None:
sys.exit("The number of nearest neighbors used to compute 'localcov1' or 'localcov2' should be specified.")
elif density == 'GMM':
if nbcomponents is None:
# Find the best number of mixture components using a BIC criterion
nbcomponents = self._find_nbcomponents(data)
print("The best number of mixture components found is " + str(nbcomponents) + ".")
if density == 'KDE' and localcov == 'localcov1':
tree = KDTree(points.T, leaf_size=2)
alphas, means, mcovs = None, None, None
if density == 'GMM':
# Fit a gaussian mixture model to the data
gmm = GaussianMixture(n_components=nbcomponents)
gmm.fit(data.T)
alphas, means, mcovs = gmm.weights_, gmm.means_.T, gmm.covariances_.T
# Plot the probability density function
if plot:
self._plotpdf(data[0].min(axis=0), data[0].max(axis=0), data[1].min(axis=0), data[1].max(axis=0), 10, alphas, means, mcovs)
# For every point
for k in range(M):
            if k == int(M / 4): print("25% complete")
            if k == int(M / 2): print("50% complete")
            if k == int(3 * M / 4): print("75% complete")
# Find the nearest neighbors of the point in case matrix is 'localcov1' or 'localcov2'
neighbors = None
if density == 'KDE':
if localcov == 'localcov1':
_, neighborsID = tree.query(np.reshape(points[:, k], (1, -1)), k=nbneighbors)
neighbors = np.take(data, neighborsID, axis=1) # neighbors are found from the original data points
elif localcov == 'localcov2':
tree = KDTree(points.T, leaf_size=2)
_, neighborsID = tree.query(np.reshape(points[:, k], (1, -1)), k=nbneighbors)
neighbors = np.take(points, neighborsID, axis=1) # neighbors are found from the output points
# Replace input point by its projection on the principal curve
points[:, k] = self._projectpoint_KDE(data, points[:, k], localcov, neighbors)
else:
points[:, k] = self._projectpoint_GMM(data, points[:, k], alphas, means, mcovs)
return points.T
# =============================================================================
# # Methods for the KDE-based algorithm
# =============================================================================
def _projectpoint_KDE(self, data: np.ndarray, point: np.ndarray, localcov: str=None, neighbors: np.ndarray=None) -> np.ndarray:
"""Project a point onto the principal curve, using a KDE"""
n, N = data.shape
converged = False
citer = 0
while not(converged):
# Calculate the local covariance matrix
if localcov == 'hess': cov = self._hess(data, point)
elif localcov == 'inversecov': cov = self._inversecov_KDE(data, point)
elif localcov == 'localcov1' or localcov == 'localcov2': cov = self._localcov(neighbors)
w, v = np.linalg.eigh(cov) # get the eigenvalues in ascending order and the corresponding normalized eigenvectors
index = np.argsort(w) # arguments that sort from small to large
V = np.reshape(v[:, index[:(n - self.d)]], (n, n - self.d)) # take the (n-d) smallest eigenvectors
ospace = np.dot(V, V.T) # projection matrix
proj = np.reshape(self._ms_KDE(data, point), (n, 1)) - np.reshape(point, (n, 1)) # evaluate the mean shift update
proj = np.dot(ospace, proj) + np.reshape(point, (n, 1))
diff = np.linalg.norm(np.reshape(point, (n, 1)) - proj)
point = np.reshape(proj, (n, ))
citer = citer + 1
if diff < self.tolerance: # stopping condition based on distance
converged = True
if citer > self.maxiter: # stopping condition based on the number of iterations
converged = True
print("maximum iterations exceeded")
return point
def _hess(self, data: np.ndarray, x: np.ndarray) -> float:
"""Calculate the hessian (numpy-broadcasting speedup calculation)"""
data = np.atleast_2d(data)
n, N = data.shape
x = np.atleast_2d(x)
m, M = x.shape
if M != 1:
x = x.T
Sigmainv = np.identity(n) * (1 / (self.h**2))
cs = self._c(data, x)
us = self._u(data, x)
Hx = np.sum(cs * ((us[:, None, :] * us) - Sigmainv[:, :, None]), 2) / N
return Hx
def _inversecov_KDE(self, data: np.ndarray, x: np.ndarray) -> float:
"""Calculate the inverse covariance matrix
cov(x) = H(x)/p(x) - g(x)g(x).T/p(x)^2"""
data = np.atleast_2d(data)
n, N = data.shape
x = np.atleast_2d(x)
m, M = x.shape
if M != 1:
x = x.T
H = self._hess(data, x)
g = self._grad2(data, x)
p = self._p_KDE(data, x)
cov = H / p - g.dot(g.T) / p**2
return cov
def _localcov(self, neighbors: np.ndarray) -> float:
"""Calculate the local covariance matrix
as described in the paper "On Some Convergence Properties of the Subspace
Constrained Mean Shift" from <NAME> al.
if matrix='localcov1', neighbors are found from the original data points
if matrix='localcov2', neighbors are found from the output points"""
mean = np.mean(neighbors, axis=2)
diff = neighbors[:, 0, :] - mean
cov = diff.dot(diff.T) / (neighbors.shape[2]-1)
return cov
def _kern(self, x: np.ndarray) -> np.ndarray:
"""Gaussian Kernel Profile"""
return np.exp(-x / 2.0)
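    # Hedged note (not in the original file): with the Gaussian profile above,
    # _p_KDE below evaluates the standard kernel density estimate
    #   p(x) = 1 / (N * h^n * (2*pi)^(n/2)) * sum_i exp(-||x - x_i||^2 / (2*h^2))
    # where n is the dimension, N the number of data points and h the bandwidth.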
def _p_KDE(self, data: np.ndarray, points: np.ndarray) -> np.ndarray:
"""Evaluate KDE on a set of points based on data"""
data = np.atleast_2d(data)
n, N = data.shape
points = np.atleast_2d(points)
m, M = points.shape
if m == 1 and M == n: # row vector
points = np.reshape(points, (n, 1))
m, M = points.shape
const = (1.0 / N) * ((self.h)**(-n)) * (2.0 * np.pi)**(-n / 2.0)
        probs = np.zeros((M,), dtype=float)
for i in range(M):
diff = (data - points[:, i, None]) / self.h
x = np.sum(diff * diff, axis=0)
probs[i] = np.sum(self._kern(x), axis=0) * const
return probs
def _u(self, data: np.ndarray, x: np.ndarray) -> np.ndarray:
data = np.atleast_2d(data)
n, N = data.shape
x = np.atleast_2d(x)
m, M = x.shape
if M != 1:
x = np.reshape(x, (n, 1))
us = (data - x) / (self.h**2)
return us
def _c(self, data: np.ndarray, x: np.ndarray) -> np.ndarray:
data = np.atleast_2d(data)
n, N = data.shape
x = np.atleast_2d(x)
m, M = x.shape
if M != 1:
x = x.T
        us = self._u(data,
# engine/modules.py
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from typing import List, Tuple, Dict
import numpy as np
from torch.autograd import Variable
class DepGCN(nn.Module):
"""
Label-aware Dependency Convolutional Neural Network Layer
"""
def __init__(self, dep_num, dep_dim, in_features, out_features):
super(DepGCN, self).__init__()
self.dep_dim = dep_dim
self.in_features = in_features
self.out_features = out_features
self.dep_embedding = nn.Embedding(dep_num, dep_dim, padding_idx=0)
self.dep_attn = nn.Linear(dep_dim + in_features, out_features)
self.dep_fc = nn.Linear(dep_dim, out_features)
self.relu = nn.ReLU()
def forward(self, text, dep_mat, dep_labels):
dep_label_embed = self.dep_embedding(dep_labels)
batch_size, seq_len, feat_dim = text.shape
val_us = text.unsqueeze(dim=2)
val_us = val_us.repeat(1, 1, seq_len, 1)
val_sum = torch.cat([val_us, dep_label_embed], dim=-1)
r = self.dep_attn(val_sum)
p = torch.sum(r, dim=-1)
mask = (dep_mat == 0).float() * (-1e30)
p = p + mask
p = torch.softmax(p, dim=2)
p_us = p.unsqueeze(3).repeat(1, 1, 1, feat_dim)
output = val_us + self.dep_fc(dep_label_embed)
output = torch.mul(p_us, output)
output_sum = torch.sum(output, dim=2)
output_sum = self.relu(output_sum)
return output_sum
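# Hedged usage sketch (not part of the original file), shapes inferred from
# DepGCN.forward above. Note that in_features must equal out_features for the
# addition of val_us and dep_fc(dep_label_embed) to broadcast.
#
#   import torch
#   gcn = DepGCN(dep_num=40, dep_dim=30, in_features=128, out_features=128)
#   text = torch.randn(2, 16, 128)                  # (batch, seq_len, feat)
#   dep_mat = torch.randint(0, 2, (2, 16, 16))      # token-to-token adjacency
#   dep_labels = torch.randint(0, 40, (2, 16, 16))  # dependency label ids
#   out = gcn(text, dep_mat, dep_labels)            # -> (2, 16, 128)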
class ConstGCN(nn.Module):
"""
Label-aware Constituency Convolutional Neural Network Layer
"""
def __init__(
self,
num_inputs,
num_units,
num_labels,
dropout=0.0,
in_arcs=True,
out_arcs=True,
batch_first=False,
use_gates=True,
residual=False,
no_loop=False,
non_linearity="relu",
edge_dropout=0.0,
):
super(ConstGCN, self).__init__()
self.in_arcs = in_arcs
self.out_arcs = out_arcs
self.no_loop = no_loop
self.retain = 1.0 - edge_dropout
self.num_inputs = num_inputs
self.num_units = num_units
self.num_labels = num_labels
self.batch_first = batch_first
self.non_linearity = non_linearity
self.sigmoid = nn.Sigmoid()
self.use_gates = use_gates
self.residual = residual
self.dropout = nn.Dropout(p=dropout)
self.layernorm = nn.LayerNorm(num_units)
if in_arcs:
self.V_in = Parameter(torch.Tensor(self.num_inputs, self.num_units))
nn.init.xavier_normal_(self.V_in)
self.b_in = Parameter(torch.Tensor(num_labels, self.num_units))
nn.init.constant_(self.b_in, 0)
if self.use_gates:
self.V_in_gate = Parameter(torch.Tensor(self.num_inputs, 1))
nn.init.xavier_normal_(self.V_in_gate)
self.b_in_gate = Parameter(torch.Tensor(num_labels, 1))
nn.init.constant_(self.b_in_gate, 1)
if out_arcs:
# self.V_out = autograd.Variable(torch.FloatTensor(self.num_inputs, self.num_units))
self.V_out = Parameter(torch.Tensor(self.num_inputs, self.num_units))
nn.init.xavier_normal_(self.V_out)
# self.b_out = autograd.Variable(torch.FloatTensor(num_labels, self.num_units))
self.b_out = Parameter(torch.Tensor(num_labels, self.num_units))
nn.init.constant_(self.b_out, 0)
if self.use_gates:
self.V_out_gate = Parameter(torch.Tensor(self.num_inputs, 1))
nn.init.xavier_normal_(self.V_out_gate)
self.b_out_gate = Parameter(torch.Tensor(num_labels, 1))
nn.init.constant_(self.b_out_gate, 1)
if not self.no_loop:
self.W_self_loop = Parameter(torch.Tensor(self.num_inputs, self.num_units))
nn.init.xavier_normal_(self.W_self_loop)
if self.use_gates:
self.W_self_loop_gate = Parameter(torch.Tensor(self.num_inputs, 1))
nn.init.xavier_normal_(self.W_self_loop_gate)
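    # Hedged summary (not in the original file) of the forward pass below:
    # each token aggregates gated, label-specific messages from incoming arcs,
    # outgoing arcs and (unless no_loop) a self-loop, roughly
    #   h_v ~ f(LayerNorm(sum_u sigmoid(gate_{u,v}) * (V x_u + b_{label(u,v)})))
    # where the mask_* tensors zero out padding arcs and edge_dropout randomly
    # drops arcs at training time.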
def forward(
self,
src,
arc_tensor_in=None,
arc_tensor_out=None,
label_tensor_in=None,
label_tensor_out=None,
mask_in=None,
mask_out=None,
mask_loop=None,
sent_mask=None,
):
if not self.batch_first:
encoder_outputs = src.permute(1, 0, 2).contiguous()
else:
encoder_outputs = src.contiguous()
batch_size = encoder_outputs.size()[0]
seq_len = encoder_outputs.size()[1]
max_degree = 1
input_ = encoder_outputs.view(
(batch_size * seq_len, self.num_inputs)
) # [b* t, h]
input_ = self.dropout(input_)
if self.in_arcs:
input_in = torch.mm(input_, self.V_in) # [b* t, h] * [h,h] = [b*t, h]
first_in = input_in.index_select(
0, arc_tensor_in[0] * seq_len + arc_tensor_in[1]
) # [b* t* degr, h]
second_in = self.b_in.index_select(0, label_tensor_in[0]) # [b* t* degr, h]
in_ = first_in + second_in
degr = int(first_in.size()[0] / batch_size // seq_len)
in_ = in_.view((batch_size, seq_len, degr, self.num_units))
if self.use_gates:
# compute gate weights
input_in_gate = torch.mm(
input_, self.V_in_gate
) # [b* t, h] * [h,h] = [b*t, h]
first_in_gate = input_in_gate.index_select(
0, arc_tensor_in[0] * seq_len + arc_tensor_in[1]
) # [b* t* mxdeg, h]
second_in_gate = self.b_in_gate.index_select(0, label_tensor_in[0])
in_gate = (first_in_gate + second_in_gate).view(
(batch_size, seq_len, degr)
)
max_degree += degr
if self.out_arcs:
input_out = torch.mm(input_, self.V_out) # [b* t, h] * [h,h] = [b* t, h]
first_out = input_out.index_select(
0, arc_tensor_out[0] * seq_len + arc_tensor_out[1]
) # [b* t* mxdeg, h]
second_out = self.b_out.index_select(0, label_tensor_out[0])
degr = int(first_out.size()[0] / batch_size // seq_len)
max_degree += degr
out_ = (first_out + second_out).view(
(batch_size, seq_len, degr, self.num_units)
)
if self.use_gates:
# compute gate weights
input_out_gate = torch.mm(
input_, self.V_out_gate
) # [b* t, h] * [h,h] = [b* t, h]
first_out_gate = input_out_gate.index_select(
0, arc_tensor_out[0] * seq_len + arc_tensor_out[1]
) # [b* t* mxdeg, h]
second_out_gate = self.b_out_gate.index_select(0, label_tensor_out[0])
out_gate = (first_out_gate + second_out_gate).view(
(batch_size, seq_len, degr)
)
if self.no_loop:
if self.in_arcs and self.out_arcs:
potentials = torch.cat((in_, out_), dim=2) # [b, t, mxdeg, h]
if self.use_gates:
potentials_gate = torch.cat(
(in_gate, out_gate), dim=2
) # [b, t, mxdeg, h]
mask_soft = torch.cat((mask_in, mask_out), dim=1) # [b* t, mxdeg]
elif self.out_arcs:
potentials = out_ # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = out_gate # [b, t, mxdeg, h]
mask_soft = mask_out # [b* t, mxdeg]
elif self.in_arcs:
potentials = in_ # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = in_gate # [b, t, mxdeg, h]
mask_soft = mask_in # [b* t, mxdeg]
max_degree -= 1
else:
same_input = torch.mm(input_, self.W_self_loop).view(
encoder_outputs.size(0), encoder_outputs.size(1), -1
)
same_input = same_input.view(
encoder_outputs.size(0),
encoder_outputs.size(1),
1,
self.W_self_loop.size(1),
)
if self.use_gates:
same_input_gate = torch.mm(input_, self.W_self_loop_gate).view(
encoder_outputs.size(0), encoder_outputs.size(1), -1
)
if self.in_arcs and self.out_arcs:
potentials = torch.cat(
(in_, out_, same_input), dim=2
) # [b, t, mxdeg, h]
if self.use_gates:
potentials_gate = torch.cat(
(in_gate, out_gate, same_input_gate), dim=2
) # [b, t, mxdeg, h]
mask_soft = torch.cat(
(mask_in, mask_out, mask_loop), dim=1
) # [b* t, mxdeg]
elif self.out_arcs:
potentials = torch.cat(
(out_, same_input), dim=2
) # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = torch.cat(
(out_gate, same_input_gate), dim=2
) # [b, t, mxdeg, h]
mask_soft = torch.cat((mask_out, mask_loop), dim=1) # [b* t, mxdeg]
elif self.in_arcs:
potentials = torch.cat(
(in_, same_input), dim=2
) # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = torch.cat(
(in_gate, same_input_gate), dim=2
) # [b, t, mxdeg, h]
mask_soft = torch.cat((mask_in, mask_loop), dim=1) # [b* t, mxdeg]
else:
potentials = same_input # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = same_input_gate # [b, t, mxdeg, h]
mask_soft = mask_loop # [b* t, mxdeg]
potentials_resh = potentials.view(
(batch_size * seq_len, max_degree, self.num_units)
) # [h, b * t, mxdeg]
if self.use_gates:
potentials_r = potentials_gate.view(
(batch_size * seq_len, max_degree)
) # [b * t, mxdeg]
probs_det_ = (self.sigmoid(potentials_r) * mask_soft).unsqueeze(
2
) # [b * t, mxdeg]
potentials_masked = potentials_resh * probs_det_ # [b * t, mxdeg,h]
else:
# NO Gates
potentials_masked = potentials_resh * mask_soft.unsqueeze(2)
if self.retain == 1 or not self.training:
pass
else:
mat_1 = torch.Tensor(mask_soft.data.size()).uniform_(0, 1)
ret = torch.Tensor([self.retain])
mat_2 = (mat_1 < ret).float()
drop_mask = Variable(mat_2, requires_grad=False)
if potentials_resh.is_cuda:
drop_mask = drop_mask.cuda()
potentials_masked *= drop_mask.unsqueeze(2)
potentials_masked_ = potentials_masked.sum(dim=1) # [b * t, h]
potentials_masked_ = self.layernorm(potentials_masked_) * sent_mask.view(
batch_size * seq_len
).unsqueeze(1)
potentials_masked_ = self.non_linearity(potentials_masked_) # [b * t, h]
result_ = potentials_masked_.view(
(batch_size, seq_len, self.num_units)
) # [ b, t, h]
result_ = result_ * sent_mask.unsqueeze(2) # [b, t, h]
memory_bank = result_ # [t, b, h]
if self.residual:
memory_bank += src
return memory_bank
class BilinearScorer(nn.Module):
def __init__(self, hidden_dim, role_vocab_size, dropout=0.0, gpu_id=-1):
super(BilinearScorer, self).__init__()
if gpu_id > -1:
self.use_gpu = True
else:
self.use_gpu = False
self.hidden_dim = hidden_dim
self.role_vocab_size = role_vocab_size
self.dropout = nn.Dropout(p=dropout)
self.U = Parameter(
torch.Tensor(self.hidden_dim, self.role_vocab_size, self.hidden_dim)
)
nn.init.orthogonal_(self.U)
self.bias1 = Parameter(torch.Tensor(1, self.hidden_dim * self.role_vocab_size))
nn.init.constant_(self.bias1, 0)
self.bias2 = Parameter(torch.Tensor(1, self.role_vocab_size))
nn.init.constant_(self.bias2, 0)
def forward(self, pred_input, args_input):
b, t, h = pred_input.data.shape
pred_input = self.dropout(pred_input)
args_input = self.dropout(args_input)
first = (
torch.mm(pred_input.view(-1, h), self.U.view(h, -1)) + self.bias1
) # [b*t, h] * [h,r*h] = [b*t,r*h]
out = torch.bmm(
first.view(-1, self.role_vocab_size, h), args_input.view(-1, h).unsqueeze(2)
) # [b*t,r,h] [b*t, h, 1] = [b*t, r]
out = out.squeeze(2) + self.bias2
return out
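# Hedged usage sketch (not part of the original file): the scorer takes
# predicate and argument representations of the same hidden size and returns
# one score per role label for every (batch, token) pair.
#
#   import torch
#   scorer = BilinearScorer(hidden_dim=256, role_vocab_size=30)
#   pred = torch.randn(4, 20, 256)   # (batch, seq_len, hidden)
#   args = torch.randn(4, 20, 256)
#   scores = scorer(pred, args)      # -> (4 * 20, 30)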
class ScaledDotProductAttention(nn.Module):
def __init__(self, d_k):
super(ScaledDotProductAttention, self).__init__()
self.d_k = d_k
def forward(self, q, k, v, attn_mask):
attn_score = torch.matmul(q, k.transpose(-1, -2)) / np.sqrt(self.d_k)
attn_score.masked_fill_(attn_mask, -1e9)
attn_weights = nn.Softmax(dim=-1)(attn_score)
output = torch.matmul(attn_weights, v)
return output, attn_weights
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads):
super(MultiHeadAttention, self).__init__()
self.n_heads = n_heads
self.d_k = self.d_v = d_model // n_heads
self.WQ = nn.Linear(d_model, d_model)
self.WK = nn.Linear(d_model, d_model)
self.WV = nn.Linear(d_model, d_model)
self.scaled_dot_product_attn = ScaledDotProductAttention(self.d_k)
self.linear = nn.Linear(n_heads * self.d_v, d_model)
def forward(self, Q, K, V, attn_mask):
batch_size = Q.size(0)
q_heads = self.WQ(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
k_heads = self.WK(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
v_heads = self.WV(V).view(batch_size, -1, self.n_heads, self.d_v).transpose(1, 2)
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1)
attn, attn_weights = self.scaled_dot_product_attn(q_heads, k_heads, v_heads, attn_mask)
attn = attn.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.d_v)
output = self.linear(attn)
return output, attn_weights
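# Hedged usage sketch (not part of the original file): self-attention over a
# batch, where attn_mask is True at positions that should be blocked.
#
#   import torch
#   mha = MultiHeadAttention(d_model=512, n_heads=8)
#   x = torch.randn(2, 10, 512)
#   mask = torch.zeros(2, 10, 10, dtype=torch.bool)   # nothing masked
#   out, weights = mha(x, x, x, mask)                 # out: (2, 10, 512)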
class PositionWiseFeedForwardNetwork(nn.Module):
def __init__(self, d_model, d_ff):
super(PositionWiseFeedForwardNetwork, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.linear2 = nn.Linear(d_ff, d_model)
self.relu = nn.ReLU()
def forward(self, inputs):
output = self.relu(self.linear1(inputs))
output = self.linear2(output)
return output
class EncoderLayer(nn.Module):
def __init__(self, d_model, n_heads, p_drop, d_ff):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, n_heads)
self.dropout1 = nn.Dropout(p_drop)
self.layernorm1 = nn.LayerNorm(d_model, eps=1e-6)
self.ffn = PositionWiseFeedForwardNetwork(d_model, d_ff)
self.dropout2 = nn.Dropout(p_drop)
self.layernorm2 = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, inputs, attn_mask):
attn_outputs, attn_weights = self.mha(inputs, inputs, inputs, attn_mask)
attn_outputs = self.dropout1(attn_outputs)
        attn_outputs
'b16b6f8f',
'us liffré': '0bed5d02',
'us liffre': '0bed5d02',
'us lillebonne': 'e6f31b38',
'us lormont': '76174677',
'us lusitanos de saint-maur': '95d038b3',
'us lusitanos de saint maur': '95d038b3',
'us macouria': '6b66b09e',
'us marignane': 'c29fb6f0',
'us marseille endoume': '26d18deb',
'us matoury': '204cd7e7',
'us maubeuge': 'c67e834f',
'us montagnarde': 'e2f6ddd0',
'us mozac': 'e474ed57',
'us noeux-les-mines': '32ae5aa8',
'us noeux les mines': '32ae5aa8',
'us nogent': '21793e71',
'us oberlauterbach-eberbach': '897890de',
'us oberlauterbach eberbach': '897890de',
'us oberschaeffolsheim': 'd60ff0db',
'us orléans': '48dd5b1b',
'orleans': '48dd5b1b',
'us pont sainte-maxence': '25aa43e7',
'us pont sainte maxence': '25aa43e7',
'us quevilly-rouen': '34e7850d',
'quevilly rouen': '34e7850d',
"us raon-l'étape": 'db9f975e',
'us raon letape': 'db9f975e',
'us reipertswiller': '60c910f8',
'us roye-noyon': 'c9678f6c',
'us roye noyon': 'c9678f6c',
'us rungis': '5839618e',
'us saint galmier chamboeuf': '27840bc2',
'us saint-berthevin football': '66579888',
'us saint berthevin football': '66579888',
'us saint-malo': '040e70e5',
'us saint malo': '040e70e5',
'us saint-maximin': '0d49cda3',
'us saint maximin': '0d49cda3',
'us saint-omer': '36acd614',
'us saint omer': '36acd614',
'us saint-sernin-du-bois': '4c629304',
'us saint sernin du bois': '4c629304',
'us sainte-marienne': 'e927ded0',
'us sainte marienne': 'e927ded0',
'us salinière aigues-mortes': 'ca76324a',
'us saliniere aigues mortes': 'ca76324a',
'us sanfloraine saint-flour': '2ec629d8',
'us sanfloraine saint flour': '2ec629d8',
'us sarre-union': '3ac615e7',
'us sarre union': '3ac615e7',
'us sénart-moissy': '5c720efb',
'us senart moissy': '5c720efb',
'us st. sauveur': 'f01b6651',
'us st sauveur': 'f01b6651',
'us tourcoing': '0b7f9005',
'us vandoeuvre': 'e155ce69',
'us vic le comte': 'c9a20d40',
'us vimy': '8a3d84bf',
'usja carquefou': '725c1caa',
'usl dunkerque': '1740a29b',
'usm saran': '80bfb060',
'usm senlis': 'bdfa5bda',
'uson mondeville': 'dfbc46d5',
'ussa vertou': '91e68ea5',
'valenciennes': '259d3345',
'vannes oc': '52807106',
'vannes': '52807106',
'vendée fontenay foot': '5f56a9a6',
'vendee fontenay foot': '5f56a9a6',
'vendée luçon football': '7150bd35',
'vendee lucon football': '7150bd35',
'vendée poiré-sur-vie football': '2b4a0e5e',
'vendee poire sur vie football': '2b4a0e5e',
'vierzon foot 18': 'fc2d8fd5',
'villemomble sports': '3dc1ffe5',
"villeneuve-d'ascq métropole": 'c78ae282',
'villeneuve dascq metropole': 'c78ae282',
'voltigeurs de châteaubriant': '8acf845e',
'voltigeurs de chateaubriant': '8acf845e',
'ytrac foot': '12bdcd41',
'yvetot ac': 'e3e8ec55',
'as etoile de matoury': '16d603bf',
'asc agouado': '9e661299',
'ef iracoubo': '661e458a',
'chikhura sachkhere': 'e839c85b',
'dila gori': 'ae098b55',
'dinamo batumi': '840ff2b7',
'dinamo tbilisi': 'e2fa043d',
'gagra': '659990fa',
'martve': '646b9825',
'rustavi': '5fccdf60',
'saburtalo tbilisi': '672ebb3d',
'samtredia': 'b8b2d55e',
'sioni bolnisi': '88ee782c',
'spartaki tskhinvali': 'e6561daf',
'<NAME>': '231f4a61',
'wit georgia': '36257e6e',
'zestafoni': 'c1322cab',
'wnike': '57010727',
'1. cfr pforzheim': 'cf1eed21',
'1 cfr pforzheim': 'cf1eed21',
'1. germania egestorf/langreder': '75515a4a',
'1 germania egestorflangreder': '75515a4a',
'1. heidenheim': '18d9d2a7',
'heidenheim': '18d9d2a7',
'1. heidenheim 1846 u19': 'c0cb86fe',
'1 heidenheim 1846 u19': 'c0cb86fe',
'1. heidenheim u17': '6569155a',
'1 heidenheim u17': '6569155a',
'1. kaiserslautern': '73a27a73',
'kaiserslautern': '73a27a73',
'1. kaiserslautern u17': 'f5369b66',
'1 kaiserslautern u17': 'f5369b66',
'1. kaiserslautern u19': 'b58231eb',
'1 kaiserslautern u19': 'b58231eb',
'1. köln': 'bc357bf7',
'koln': '88ddc98e',
'1. köln frauen': '88ddc98e',
'1. köln u17': '5b499073',
'1 koln u17': '5b499073',
'1. köln u19': '7978e9a3',
'1 koln u19': '7978e9a3',
'1. lok stendal': '743a93f0',
'1 lok stendal': '743a93f0',
'1. magdeburg': 'e18a73da',
'magdeburg': 'e18a73da',
'1. magdeburg u17': '0d59f5f5',
'1 magdeburg u17': '0d59f5f5',
'1. magdeburg u19': 'ebf5a2a9',
'1 magdeburg u19': 'ebf5a2a9',
'1. neubrandenburg 04': 'dfc25351',
'1 neubrandenburg 04': 'dfc25351',
'1. nürnberg': 'b6d83168',
'nurnberg': '6f2c108c',
'1 nurnberg': 'b6d83168',
'1. nürnberg u17': '8724a375',
'1 nurnberg u17': '8724a375',
'1. nürnberg u19': '6ed99f94',
'1 nurnberg u19': '6ed99f94',
'1. rielasingen-arlen': '01f618d1',
'1 rielasingen arlen': '01f618d1',
'1. saarbrücken': 'eb4b278c',
'1 saarbrucken': '7af2ec06',
'saarbrucken': 'eb4b278c',
'1. saarbrücken u19': '13ecb521',
'1 saarbrucken u19': '13ecb521',
'1. schweinfurt 05': 'c4375806',
'1 schweinfurt 05': 'c4375806',
'1. union berlin': '7a41008f',
'1 union berlin': '31429172',
'union berlin': '7a41008f',
'1. union berlin u17': '50e620c4',
'1 union berlin u17': '50e620c4',
'1. union berlin u19': '5f618561',
'1 union berlin u19': '5f618561',
'1. f08 niederkirchen': 'd60ee088',
'1 f08 niederkirchen': 'd60ee088',
'1. ffrankfurt': '77d2e598',
'ffrankfurt': '77d2e598',
'1. fturbine potsdam': 'de550500',
'turbine potsdam': 'de550500',
'1. ffv erfurt': '66060a29',
'1 ffv erfurt': '66060a29',
'1. fsv mainz 05': 'a224b06a',
'mainz 05': 'a224b06a',
'1. fsv mainz 05 ii': 'fd7718b7',
'mainz 05 ii': 'fd7718b7',
'1. fsv mainz 05 u17': '35596f77',
'1 fsv mainz 05 u17': '35596f77',
'1. fsv mainz 05 u19': 'ce4c058d',
'1 fsv mainz 05 u19': 'ce4c058d',
'1.mönchengladbach': '5fa36836',
'1monchengladbach': '5fa36836',
'1.mönchengladbach u17': '89b05c5a',
'1monchengladbach u17': '89b05c5a',
'1.mönchengladbach u19': 'd127e5eb',
'1monchengladbach u19': 'd127e5eb',
'<NAME>': '3659060d',
'alemannia aachen u17': 'fc4124c9',
'alemannia aachen u19': '0a2bc34c',
'<NAME>': '247c4b67',
'bahlinger sc': 'a4134f68',
'bayer 04 leverkusen': 'c7a9f859',
'bayer leverkusen': 'c7a9f859',
'bayer 04 leverkusen u17': '45a67fe8',
'bayer 04 leverkusen u19': 'ecf53eea',
'berliner dynamo': '1bfc385f',
'bpreussen': 'f6b80d0f',
'bonner sc': 'd6d0a4e1',
'<NAME>': 'add600ae',
'borussia dortmund ii': '40347053',
'dortmund ii': '40347053',
'borussia mönchengladbach': 'cbefe26c',
'borussia monchengladbach': 'cbefe26c',
'borussia mönchengladbach u17': '50eaaf29',
'borussia monchengladbach u17': '50eaaf29',
'borussia mönchengladbach u19': 'df6f068b',
'borussia monchengladbach u19': 'df6f068b',
'bremer sv': 'd2727b1d',
'bsc hastedt': 'eb36bfaf',
'bsg chemie leipzig': 'ab817d18',
'bsv schwarz-weiß rehden': 'b6502c90',
'bsv schwarz weiss rehden': 'b6502c90',
'bv borussia 09 dortmund u17': '6c254a38',
'bv borussia 09 dortmund u19': 'd943111d',
'bv borussia bocholt': '7ee8b384',
'bv cloppenburg': 'a770626b',
'chemnitzer': '707b3614',
'chemnitzer u17': 'e76a63dd',
'chemnitzer u19': 'fc4bf6b5',
'djk arminia klosterhardt u19': '64ee4f5e',
'dsc arminia bielefeld': '62838af6',
'dsc arminia bielefeld u17': 'b76f6237',
'dsc arminia bielefeld u19': '6d14c1f7',
'dynamo dresden': 'ac36c181',
'eimsbütteler tv u17': '4b8fcb92',
'eimsbutteler tv u17': '4b8fcb92',
'eintracht braunschweig': 'eec82247',
'eintracht braunschweig u17': 'b2a8da6b',
'eintracht braunschweig u19': 'f1161405',
'eintracht frankfurt': 'f0ac8ee6',
'eintracht frankfurt u17': '34640cc6',
'eintracht frankfurt u19': '7cc68edf',
'08 homburg': '89a86c55',
'08 villingen': 'f554f7de',
'<NAME>': '7d5a4a78',
'augsburg': '0cdc4311',
'augsburg u17': 'bf9fcad9',
'augsburg u19': '41d91186',
'bayern münchen u17': '8b10a09d',
'bayern munchen u17': '8b10a09d',
'bayern münchen u19': 'a4a1c462',
'bayern munchen u19': 'a4a1c462',
'bayern munich': '51ec22be',
'bayern munich ii': '9133b975',
'bayern munich women': '51ec22be',
'carl zeiss jena': 'b49d1b16',
'carl zeiss jena u17': 'a55b20b2',
'carl zeiss jena u19': '3c042330',
'eintracht norderstedt 03': 'b404f71e',
'energie cottbus': '7675ab36',
'energie cottbus u17': 'aa20a5e5',
'energie cottbus u19': '263c3bad',
'erzgebirge aue': '6724656e',
'forstern': 'd9647eb7',
'hansa rostock': 'bc31a6e4',
'hansa rostock u17': '77bbd37d',
'ingolstadt 04': '5c9a9164',
'ingolstadt u17': '391bc1a3',
'ingolstadt u19': '32f8bc7d',
'nöttingen': 'f520dfc3',
'nottingen': 'f520dfc3',
'oberneuland': '8e55bf2c',
'rot-weiß erfurt': '93c4ce7b',
'rot weiss erfurt': '93c4ce7b',
'rot-weiß erfurt u19': '3b10a9c7',
'rot weiss erfurt u19': '3b10a9c7',
'schalke 04': 'c539e393',
'schalke 04 u17': '04bf3c00',
'schalke 04 u19': '01ef4cf1',
'st. pauli': '54864664',
'st pauli': '54864664',
'st. pauli 1910 u17': 'd9dd39c9',
'st pauli 1910 u17': 'd9dd39c9',
'st. pauli 1910 u19': '22c72f0b',
'st pauli 1910 u19': '22c72f0b',
'viktoria 1889 berlin': '9b243fd2',
'viktoria 1889 berlin u19': '15c4c0d2',
'viktoria köln': '827a03db',
'viktoria koln': '827a03db',
'viktoria köln 1904 u17': 'b864db53',
'viktoria koln 1904 u17': 'b864db53',
'viktoria köln 1904 u19': '299dbac7',
'viktoria koln 1904 u19': '299dbac7',
'ff usv jena': '765472c9',
'usv jena': '765472c9',
'fk pirmasens': '87ebdc12',
'fortuna düsseldorf': 'b1278397',
'fortuna dusseldorf': 'b1278397',
'fsv babelsberg 74': '4632a4d2',
'fsv frankfurt': '63330a03',
'fsv frankfurt u19': 'ded6a438',
'fsv gütersloh 2009': 'aafa94d7',
'fsv gutersloh 2009': 'aafa94d7',
'fsv hessen wetzlar': '37c5bcba',
'fsv optik rathenow': 'b61af4c7',
'fsv salmrohr': '0005114f',
'fsv wacker 90 nordhausen': '4c65082c',
'fsv zwickau': 'e0279c6f',
'zwickau': 'e0279c6f',
'ft braunschweig': 'e3ac2a08',
'fv illertissen': 'd2bbc6d8',
'fv ravensburg': 'cea3fb28',
'hallescher': 'd69d0df7',
'hallescher u17': '3693fee3',
'hamburger sv': '623bb13e',
'hamburger sv u17': 'd84fbee9',
'hamburger sv u19': 'e5460de3',
'hannover 96': '60b5e41f',
'hannover 96 u17': '651b3d20',
'hannover 96 u19': '0b55fe1b',
'hegauer fv': '95f1aac7',
'hertha bsc': '2818f8bc',
'hertha bsc u17': '81d83299',
'hertha bsc u19': 'b7f03da0',
'holstein kiel': 'dcef9a01',
'holstein kiel u17': '70f446ce',
'holstein kiel u19': '90537983',
'hombrucher sv 09/72 u17': 'df07b3e5',
'hombrucher sv 0972 u17': 'df07b3e5',
'hsg warnemünde': '9fef2a00',
'hsg warnemunde': '9fef2a00',
'hsv barmbek-uhlenhorst': '76073277',
'hsv barmbek uhlenhorst': '76073277',
'karlsruher sc': '33ba9d7b',
'karlsruher': '33ba9d7b',
'karlsruher sc u17': '2640d362',
'karlsruher sc u19': 'c880e925',
'kuerdingen 05': '08610664',
'kickers offenbach': 'cda2e192',
'ksv baunatal': '627f4ffc',
'ksv hessen kassel': '557fd964',
'leher ts': 'e28a40ad',
'lüneburger sk hansa': '9a9e0c9d',
'luneburger sk hansa': '9a9e0c9d',
'magdeburger ffc': 'd3bb6f17',
'msv duisburg': 'bbfd364f',
'msv duisburg u17': '05ebd479',
'msv duisburg u19': '6eda181d',
'niendorfer tsv u17': 'd1f12614',
'niendorfer tsv u19': '4bc14514',
'offenbacher kickers u17': '63b23f2d',
'offenbacher kickers u19': 'a63dd24b',
'rb leipzig': 'acbb6a5b',
'rb leipzig u17': 'a4988f5e',
'rb leipzig u19': 'e850256c',
'rot weiss ahlen': '9cfbc3a9',
'rot-weiß oberhausen': '9340e7a9',
'rot weiss oberhausen': '9340e7a9',
'rot-weiss essen': '28147f65',
'rot weiss essen': '28147f65',
'rot-weiss essen u17': '598306fb',
'rot weiss essen u17': '598306fb',
'rot-weiss essen u19': '12062e3e',
'rot weiss essen u19': '12062e3e',
'sc borgfeld u17': '02af48fc',
'sc fortuna köln': 'b39df29f',
'fortuna koln': '6e479b8a',
'sc fortuna koln': 'b39df29f',
'sc fortuna köln u19': '03e29091',
'sc fortuna koln u19': '03e29091',
'sc freiburg': 'b4de690d',
'freiburg': 'b4de690d',
'sc freiburg u17': '32dd63ea',
'sc freiburg | |
# kivy/core/text/__init__.py
'''
Text
====
An abstraction of text creation. Depending on the selected backend, the accuracy
of text rendering may vary.
.. versionchanged:: 1.5.0
:data:`LabelBase.line_height` added.
.. versionchanged:: 1.0.7
The :class:`LabelBase` does not generate any texture if the text has a
width <= 1.
'''
__all__ = ('LabelBase', 'Label')
import re
import os
from kivy import kivy_data_dir
from kivy.graphics.texture import Texture
from kivy.core import core_select_lib
from kivy.resources import resource_find
from kivy.compat import PY2
DEFAULT_FONT = 'DroidSans'
FONT_REGULAR = 0
FONT_ITALIC = 1
FONT_BOLD = 2
FONT_BOLDITALIC = 3
class LabelBase(object):
'''Core text label.
This is the abstract class used by different backends to render text.
.. warning::
The core text label can't be changed at runtime. You must recreate one.
:Parameters:
`font_size`: int, defaults to 12
Font size of the text
`font_name`: str, defaults to DEFAULT_FONT
Font name of the text
`bold`: bool, defaults to False
Activate "bold" text style
`italic`: bool, defaults to False
Activate "italic" text style
`text_size`: tuple, defaults to (None, None)
Add constraint to render the text (inside a bounding box).
If no size is given, the label size will be set to the text size.
`padding`: float, defaults to None
If it's a float, it will set padding_x and padding_y
`padding_x`: float, defaults to 0.0
Left/right padding
`padding_y`: float, defaults to 0.0
Top/bottom padding
`halign`: str, defaults to "left"
Horizontal text alignment inside the bounding box
`valign`: str, defaults to "bottom"
Vertical text alignment inside the bounding box
`shorten`: bool, defaults to False
Indicate whether the label should attempt to shorten its textual
contents as much as possible if a `size` is given.
Setting this to True without an appropriately set size will lead to
unexpected results.
`max_lines`: int, defaults to 0 (unlimited)
            If set, this indicates the maximum number of lines allowed to render
            the text. Works only if a limitation on text_size is set.
`mipmap` : bool, defaults to False
Create a mipmap for the texture
.. versionchanged:: 1.8.0
        `max_lines` parameter has been added.
.. versionchanged:: 1.0.8
        `size` has been deprecated and replaced with `text_size`.
.. versionchanged:: 1.0.7
The `valign` is now respected. This wasn't the case previously
so you might have an issue in your application if you have not
considered this.
'''
__slots__ = ('options', 'texture', '_label', '_text_size')
_cache_glyphs = {}
_fonts = {}
_fonts_cache = {}
_texture_1px = None
def __init__(self, text='', font_size=12, font_name=DEFAULT_FONT,
bold=False, italic=False, halign='left', valign='bottom',
shorten=False, text_size=None, mipmap=False, color=None,
line_height=1.0, **kwargs):
options = {'text': text, 'font_size': font_size,
'font_name': font_name, 'bold': bold, 'italic': italic,
'halign': halign, 'valign': valign, 'shorten': shorten,
'mipmap': mipmap, 'line_height': line_height}
options['color'] = color or (1, 1, 1, 1)
options['padding'] = kwargs.get('padding', 0)
options['padding_x'] = kwargs.get('padding_x', options['padding'])
options['padding_y'] = kwargs.get('padding_y', options['padding'])
if 'size' in kwargs:
options['text_size'] = kwargs['size']
else:
if text_size is None:
options['text_size'] = (None, None)
else:
options['text_size'] = text_size
text_width, text_height = options['text_size']
if text_width is not None:
self._text_size = (
text_width - options['padding_x'] * 2,
text_height)
else:
self._text_size = options['text_size']
self._text = options['text']
self._internal_height = 0
self.options = options
self.texture = None
self.resolve_font_name()
@staticmethod
def register(name, fn_regular, fn_italic=None, fn_bold=None,
fn_bolditalic=None):
'''Register an alias for a Font.
.. versionadded:: 1.1.0
If you're using a ttf directly, you might not be able to use the
bold/italic properties of
the ttf version. If the font is delivered in multiple files
(one regular, one italic and one bold), then you need to register these
files and use the alias instead.
All the fn_regular/fn_italic/fn_bold parameters are resolved with
:func:`kivy.resources.resource_find`. If fn_italic/fn_bold are None,
fn_regular will be used instead.
'''
fonts = []
for font_type in fn_regular, fn_italic, fn_bold, fn_bolditalic:
if font_type is not None:
font = resource_find(font_type)
if font is None:
                    raise IOError('File {0} not found'.format(font_type))
else:
fonts.append(font)
else:
fonts.append(fonts[-1]) # add regular font to list again
LabelBase._fonts[name] = tuple(fonts)
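    # Hedged usage sketch for register() above (not part of the original
    # file); the font paths are placeholders:
    #
    #   LabelBase.register('Roboto',
    #                      'fonts/Roboto-Regular.ttf',
    #                      fn_bold='fonts/Roboto-Bold.ttf')
    #   label = Label(text='hello', font_name='Roboto', font_size=16)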
def resolve_font_name(self):
options = self.options
fontname = options['font_name']
fonts = self._fonts
fontscache = self._fonts_cache
        # is the font registered?
if fontname in fonts:
            # return the preferred font for the current bold/italic combination
italic = int(options['italic'])
if options['bold']:
bold = FONT_BOLD
else:
bold = FONT_REGULAR
options['font_name_r'] = fonts[fontname][italic | bold]
elif fontname in fontscache:
options['font_name_r'] = fontscache[fontname]
else:
filename = resource_find(fontname)
if filename is None:
# XXX for compatibility, check directly in the data dir
filename = os.path.join(kivy_data_dir, fontname)
if not os.path.exists(filename):
raise IOError('Label: File %r not found' % fontname)
fontscache[fontname] = filename
options['font_name_r'] = filename
def get_extents(self, text):
'''Return a tuple (width, height) indicating the size of the specified
text'''
return (0, 0)
def _render_begin(self):
pass
def _render_text(self, text, x, y):
pass
def _render_end(self):
pass
def shorten(self, text, margin=2):
# Just a tiny shortcut
textwidth = self.get_extents
if self.text_size[0] is None:
width = 0
else:
width = int(self.text_size[0])
letters = '_..._' + text
while textwidth(letters)[0] > width:
letters = letters[:letters.rfind(' ')]
max_letters = len(letters) - 2
segment = (max_letters // 2)
if segment - margin > 5:
segment -= margin
return u'{0}...{1}'.format(text[:segment].strip(),
text[-segment:].strip())
else:
segment = max_letters - 3 # length of '...'
return u'{0}...'.format(text[:segment].strip())
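    # Hedged note on shorten() above (not part of the original file): when the
    # text is wider than the constrained text_size width, the method keeps the
    # head and tail of the string and replaces the middle with '...', so the
    # shortened result fits the available width.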
def render(self, real=False):
'''Return a tuple (width, height) to create the image
with the user constraints.
        2 different methods are used:
* if the user does not set the width, split the line
and calculate max width + height
* if the user sets a width, blit per glyph
'''
options = self.options
render_text = self._render_text
get_extents = self.get_extents
uw, uh = self.text_size
max_lines = int(options.get('max_lines', 0))
w, h = 0, 0
x, y = 0, 0
if real:
self._render_begin()
halign = options['halign']
valign = options['valign']
if valign == 'bottom':
y = self.height - self._internal_height
elif valign == 'middle':
y = int((self.height - self._internal_height) / 2)
else:
self._internal_height = 0
# no width specified, faster method
if uw is None:
index = 0
for line in self.text.split('\n'):
index += 1
if max_lines > 0 and index > max_lines:
break
lw, lh = get_extents(line)
lh = lh * options['line_height']
if real:
x = 0
if halign[0] == 'c':
# center
x = int((self.width - lw) / 2.)
elif halign[0] == 'r':
# right
x = int(self.width - lw)
if len(line):
render_text(line, x, y)
y += int(lh)
else:
w = max(w, int(lw))
self._internal_height += int(lh)
h = self._internal_height if uh is None else uh
# constraint
else:
# precalculate id/name
if not self.fontid in self._cache_glyphs:
self._cache_glyphs[self.fontid] = {}
cache = self._cache_glyphs[self.fontid]
if not real:
                # verify that each glyph has a size
glyphs = list(set(self.text)) + ['.']
for glyph in glyphs:
if not glyph in cache:
cache[glyph] = get_extents(glyph)
# Shorten the text that we actually display
text = self.text
last_word_width = get_extents(text[text.rstrip().rfind(' '):])[0]
if (options['shorten'] and get_extents(text)[0] >
uw - last_word_width):
text = self.shorten(text)
# first, split lines
glyphs = []
lines = []
lw = lh = 0
for word in re.split(r'( |\n)', text):
# calculate the word width
ww, wh = 0, 0
if word == '':
ww, wh = get_extents(' ')
for glyph in word:
gw, gh = cache[glyph]
ww += gw
wh = max(gh, wh)
wh = wh * options['line_height']
                # does the word fit within uw?
if ww > uw:
lines.append(((ww, wh), 0, word))
lw = lh = x = 0
if max_lines > 0 and len(lines) >= max_lines:
break
continue
# get the maximum height for this line
lh = max(wh, lh)
                # does the word fit on the current line?
if (word == '\n' or x + ww > uw) and lw != 0:
                    # no, push the glyphs accumulated so far as
                    # ((lw, lh), is_last_line, glyphs)
last_line = 1 if word == '\n' else 0
lines.append(((lw, lh), last_line, glyphs))
glyphs = []
# reset size
lw = lh = x = 0
# new line ? don't render
if word == '\n' or word == ' ':
continue
# advance the width
lw += ww
x += ww
lh = max(wh, lh)
glyphs += list(word)
# got some char left ?
if lw != 0:
                lines.append(((lw, lh),
save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minInclusive with
value -440277848538184635.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minInclusive-2.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minInclusive-2-1.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinInclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_inclusive_1_nistxml_sv_iv_atomic_negative_integer_min_inclusive_2_2(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minInclusive with
value -440277848538184635.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minInclusive-2.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minInclusive-2-2.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinInclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_inclusive_1_nistxml_sv_iv_atomic_negative_integer_min_inclusive_2_3(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minInclusive with
value -440277848538184635.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minInclusive-2.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minInclusive-2-3.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinInclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_inclusive_1_nistxml_sv_iv_atomic_negative_integer_min_inclusive_2_4(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minInclusive with
value -440277848538184635.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minInclusive-2.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minInclusive-2-4.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinInclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_inclusive_1_nistxml_sv_iv_atomic_negative_integer_min_inclusive_2_5(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minInclusive with
value -440277848538184635.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minInclusive-2.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minInclusive-2-5.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinInclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_inclusive_nistxml_sv_iv_atomic_negative_integer_min_inclusive_1_1(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minInclusive with
value -999999999999999999.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minInclusive-1.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minInclusive-1-1.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinInclusive1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_inclusive_nistxml_sv_iv_atomic_negative_integer_min_inclusive_1_2(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minInclusive with
value -999999999999999999.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minInclusive-1.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minInclusive-1-2.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinInclusive1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_inclusive_nistxml_sv_iv_atomic_negative_integer_min_inclusive_1_3(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minInclusive with
value -999999999999999999.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minInclusive-1.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minInclusive-1-3.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinInclusive1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_inclusive_nistxml_sv_iv_atomic_negative_integer_min_inclusive_1_4(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minInclusive with
value -999999999999999999.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minInclusive-1.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minInclusive-1-4.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinInclusive1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_inclusive_nistxml_sv_iv_atomic_negative_integer_min_inclusive_1_5(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minInclusive with
value -999999999999999999.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minInclusive-1.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minInclusive-1-5.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinInclusive1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_4_nistxml_sv_iv_atomic_negative_integer_min_exclusive_5_1(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -2.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-5.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-5-1.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_3_nistxml_sv_iv_atomic_negative_integer_min_exclusive_4_1(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -495295756372066909.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-4.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-4-1.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_3_nistxml_sv_iv_atomic_negative_integer_min_exclusive_4_2(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -495295756372066909.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-4.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-4-2.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_3_nistxml_sv_iv_atomic_negative_integer_min_exclusive_4_3(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -495295756372066909.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-4.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-4-3.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_3_nistxml_sv_iv_atomic_negative_integer_min_exclusive_4_4(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -495295756372066909.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-4.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-4-4.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_3_nistxml_sv_iv_atomic_negative_integer_min_exclusive_4_5(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -495295756372066909.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-4.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-4-5.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_2_nistxml_sv_iv_atomic_negative_integer_min_exclusive_3_1(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -900435039333670416.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-3.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-3-1.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_2_nistxml_sv_iv_atomic_negative_integer_min_exclusive_3_2(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -900435039333670416.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-3.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-3-2.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_2_nistxml_sv_iv_atomic_negative_integer_min_exclusive_3_3(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -900435039333670416.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-3.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-3-3.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_2_nistxml_sv_iv_atomic_negative_integer_min_exclusive_3_4(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -900435039333670416.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-3.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-3-4.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_2_nistxml_sv_iv_atomic_negative_integer_min_exclusive_3_5(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -900435039333670416.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-3.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-3-5.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_1_nistxml_sv_iv_atomic_negative_integer_min_exclusive_2_1(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -435976618086570511.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-2.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-2-1.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_1_nistxml_sv_iv_atomic_negative_integer_min_exclusive_2_2(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -435976618086570511.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-2.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-2-2.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_1_nistxml_sv_iv_atomic_negative_integer_min_exclusive_2_3(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -435976618086570511.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-2.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-2-3.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_1_nistxml_sv_iv_atomic_negative_integer_min_exclusive_2_4(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -435976618086570511.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-2.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-2-4.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_1_nistxml_sv_iv_atomic_negative_integer_min_exclusive_2_5(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -435976618086570511.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-2.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-2-5.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_nistxml_sv_iv_atomic_negative_integer_min_exclusive_1_1(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -999999999999999999.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-1.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-1-1.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_nistxml_sv_iv_atomic_negative_integer_min_exclusive_1_2(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -999999999999999999.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-1.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-1-2.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_nistxml_sv_iv_atomic_negative_integer_min_exclusive_1_3(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -999999999999999999.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-1.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-1-3.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_nistxml_sv_iv_atomic_negative_integer_min_exclusive_1_4(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -999999999999999999.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-1.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-1-4.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_negative_integer_min_exclusive_nistxml_sv_iv_atomic_negative_integer_min_exclusive_1_5(mode, save_output, output_format):
"""
Type atomic/negativeInteger is restricted by facet minExclusive with
value -999999999999999999.
"""
assert_bindings(
schema="nistData/atomic/negativeInteger/Schema+Instance/NISTSchema-SV-IV-atomic-negativeInteger-minExclusive-1.xsd",
instance="nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-1-5.xml",
class_name="NistschemaSvIvAtomicNegativeIntegerMinExclusive1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_white_space_nistxml_sv_iv_atomic_non_positive_integer_white_space_1_1(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet whiteSpace with
value collapse.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-whiteSpace-1.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-whiteSpace-1-1.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerWhiteSpace1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_white_space_nistxml_sv_iv_atomic_non_positive_integer_white_space_1_2(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet whiteSpace with
value collapse.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-whiteSpace-1.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-whiteSpace-1-2.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerWhiteSpace1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_white_space_nistxml_sv_iv_atomic_non_positive_integer_white_space_1_3(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet whiteSpace with
value collapse.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-whiteSpace-1.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-whiteSpace-1-3.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerWhiteSpace1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_white_space_nistxml_sv_iv_atomic_non_positive_integer_white_space_1_4(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet whiteSpace with
value collapse.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-whiteSpace-1.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-whiteSpace-1-4.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerWhiteSpace1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_white_space_nistxml_sv_iv_atomic_non_positive_integer_white_space_1_5(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet whiteSpace with
value collapse.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-whiteSpace-1.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-whiteSpace-1-5.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerWhiteSpace1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_4_nistxml_sv_iv_atomic_non_positive_integer_enumeration_5_1(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-5.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-5-1.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_4_nistxml_sv_iv_atomic_non_positive_integer_enumeration_5_2(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-5.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-5-2.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_4_nistxml_sv_iv_atomic_non_positive_integer_enumeration_5_3(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-5.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-5-3.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_4_nistxml_sv_iv_atomic_non_positive_integer_enumeration_5_4(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-5.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-5-4.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_4_nistxml_sv_iv_atomic_non_positive_integer_enumeration_5_5(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-5.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-5-5.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_3_nistxml_sv_iv_atomic_non_positive_integer_enumeration_4_1(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-4.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-4-1.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_3_nistxml_sv_iv_atomic_non_positive_integer_enumeration_4_2(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-4.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-4-2.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_3_nistxml_sv_iv_atomic_non_positive_integer_enumeration_4_3(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-4.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-4-3.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_3_nistxml_sv_iv_atomic_non_positive_integer_enumeration_4_4(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-4.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-4-4.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_3_nistxml_sv_iv_atomic_non_positive_integer_enumeration_4_5(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-4.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-4-5.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_2_nistxml_sv_iv_atomic_non_positive_integer_enumeration_3_1(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-3.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-3-1.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_2_nistxml_sv_iv_atomic_non_positive_integer_enumeration_3_2(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-3.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-3-2.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_2_nistxml_sv_iv_atomic_non_positive_integer_enumeration_3_3(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-3.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-3-3.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_2_nistxml_sv_iv_atomic_non_positive_integer_enumeration_3_4(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-3.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-3-4.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_2_nistxml_sv_iv_atomic_non_positive_integer_enumeration_3_5(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-3.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-3-5.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_1_nistxml_sv_iv_atomic_non_positive_integer_enumeration_2_1(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-2.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-2-1.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_1_nistxml_sv_iv_atomic_non_positive_integer_enumeration_2_2(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-2.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-2-2.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_non_positive_integer_enumeration_1_nistxml_sv_iv_atomic_non_positive_integer_enumeration_2_3(mode, save_output, output_format):
"""
Type atomic/nonPositiveInteger is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-nonPositiveInteger-enumeration-2.xsd",
instance="nistData/atomic/nonPositiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonPositiveInteger-enumeration-2-3.xml",
class_name="NistschemaSvIvAtomicNonPositiveIntegerEnumeration2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
<filename>openprocurement/audit/monitoring/tests/test_document.py
# -*- coding: utf-8 -*-
import unittest
from hashlib import sha512
from unittest import mock
from openprocurement.audit.monitoring.tests.base import BaseWebTest, DSWebTestMixin
from openprocurement.audit.monitoring.tests.test_elimination import MonitoringEliminationBaseTest
from openprocurement.audit.monitoring.tests.utils import get_errors_field_names
class MonitoringDecisionDocumentResourceTest(BaseWebTest, DSWebTestMixin):
def setUp(self):
super(MonitoringDecisionDocumentResourceTest, self).setUp()
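# Point the registry at a local doc service stub so the download redirects built
# below resolve to 'http://localhost/get/...' (asserted in test_document_download).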
self.app.app.registry.docservice_url = 'http://localhost'
self.create_monitoring()
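# Minimal document payload used throughout these tests: a dummy md5 hash and an all-zero id.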
self.test_docservice_document_data = {
'title': 'lorem.doc',
'url': self.generate_docservice_url(),
'hash': 'md5:' + '0' * 32,
'format': 'application/msword',
'documentType': 'notice',
'id': '0' * 32
}
self.test_monitoring_activation_data = {
"status": "active",
"decision": {
"date": "2015-05-10T23:11:39.720908+03:00",
"description": "text",
"documents": [self.test_docservice_document_data]
}
}
def test_document_get_single(self):
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
response = self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': self.test_monitoring_activation_data})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
document_id = response.json['data']['decision']['documents'][-1]['id']
response = self.app.get('/monitorings/{}/decision/documents/{}'.format(self.monitoring_id, document_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
document_data = response.json['data']
self.assertEqual(document_data['title'], 'lorem.doc')
self.assertIn('Signature=', document_data["url"])
self.assertIn('KeyID=', document_data["url"])
self.assertNotIn('Expires=', document_data["url"])
self.assertEqual(document_data["documentType"], "notice")
def test_document_get_list(self):
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
response = self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': self.test_monitoring_activation_data})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/monitorings/{}/decision/documents'.format(self.monitoring_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 1)
document_data = response.json['data'][-1]
self.assertEqual(document_data['title'], 'lorem.doc')
def test_document_download(self):
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
response = self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': self.test_monitoring_activation_data})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
document_data = response.json['data']['decision']['documents'][-1]
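# The download key is the last path segment of the signed document URL, with the
# query string (Signature, KeyID) stripped off.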
key = document_data["url"].split('/')[-1].split('?')[0]
document_id = document_data['id']
response = self.app.get('/monitorings/{}/decision/documents/{}?download=some_id'.format(
self.monitoring_id, document_id), status=404)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{'description': 'Not Found', 'location': 'url', 'name': 'download'}
])
response = self.app.get('/monitorings/{}/decision/documents/{}?download={}'.format(
self.monitoring_id, document_id, key))
self.assertEqual(response.status_code, 302)
self.assertIn('http://localhost/get/', response.location)
self.assertIn('Signature=', response.location)
self.assertIn('KeyID=', response.location)
self.assertNotIn('Expires=', response.location)
def test_document_upload(self):
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
response = self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': {'decision': self.test_monitoring_activation_data['decision']}})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
response = self.app.post_json('/monitorings/{}/decision/documents'.format(
self.monitoring_id),
{'data': self.test_docservice_document_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
def test_document_upload_forbidden(self):
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
response = self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': self.test_monitoring_activation_data})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
response = self.app.post_json('/monitorings/{}/decision/documents'.format(
self.monitoring_id),
{'data': self.test_docservice_document_data}, status=403)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(
('body', 'data'),
next(get_errors_field_names(response, 'Can\'t add document in current active monitoring status.')))
class MonitoringPostActiveDocumentResourceTest(BaseWebTest, DSWebTestMixin):
def setUp(self):
super(MonitoringPostActiveDocumentResourceTest, self).setUp()
self.app.app.registry.docservice_url = 'http://localhost'
self.create_monitoring()
self.test_docservice_document_data = {
'title': 'lorem.doc',
'url': self.generate_docservice_url(),
'hash': 'md5:' + '0' * 32,
'format': 'application/msword',
}
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': {
"status": "active",
"decision": {
"date": "2015-05-10T23:11:39.720908+03:00",
"description": "text",
"documents": [self.test_docservice_document_data]
}
}}
)
self.post_data = {
'title': 'Lorem ipsum',
'description': 'Lorem ipsum dolor sit amet',
'documents': [self.test_docservice_document_data]
}
def test_document_get_single(self):
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
response = self.app.post_json(
'/monitorings/{}/posts'.format(self.monitoring_id),
{'data': self.post_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
post_id = response.json['data']['id']
document_id = response.json['data']['documents'][-1]['id']
response = self.app.get('/monitorings/{}/posts/{}/documents/{}'.format(self.monitoring_id, post_id, document_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
document_data = response.json['data']
self.assertEqual(document_data['title'], 'lorem.doc')
self.assertIn('Signature=', document_data["url"])
self.assertIn('KeyID=', document_data["url"])
self.assertNotIn('Expires=', document_data["url"])
def test_document_get_list(self):
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
response = self.app.post_json(
'/monitorings/{}/posts'.format(self.monitoring_id),
{'data': self.post_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
post_id = response.json['data']['id']
response = self.app.get('/monitorings/{}/posts/{}/documents'.format(self.monitoring_id, post_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 1)
document_data = response.json['data'][-1]
self.assertEqual(document_data['title'], 'lorem.doc')
def test_document_download(self):
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
response = self.app.post_json(
'/monitorings/{}/posts'.format(self.monitoring_id),
{'data': self.post_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
post_id = response.json['data']['id']
document_data = response.json['data']['documents'][-1]
key = document_data["url"].split('/')[-1].split('?')[0]
document_id = document_data['id']
response = self.app.get('/monitorings/{}/posts/{}/documents/{}?download=some_id'.format(
self.monitoring_id, post_id, document_id), status=404)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{'description': 'Not Found', 'location': 'url', 'name': 'download'}
])
response = self.app.get('/monitorings/{}/posts/{}/documents/{}?download={}'.format(
self.monitoring_id, post_id, document_id, key))
self.assertEqual(response.status_code, 302)
self.assertIn('http://localhost/get/', response.location)
self.assertIn('Signature=', response.location)
self.assertIn('KeyID=', response.location)
self.assertNotIn('Expires=', response.location)
def test_document_upload(self):
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
response = self.app.post_json(
'/monitorings/{}/posts'.format(self.monitoring_id),
{'data': self.post_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
post_id = response.json['data']['id']
response = self.app.post_json('/monitorings/{}/posts/{}/documents'.format(
self.monitoring_id, post_id),
{'data': self.test_docservice_document_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
def test_document_upload_forbidden(self):
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
response = self.app.post_json(
'/monitorings/{}/posts'.format(self.monitoring_id),
{'data': self.post_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
post_id = response.json['data']['id']
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': {
"status": "addressed",
"conclusion": {
"violationOccurred": True,
"violationType": ["corruptionProcurementMethodType"],
}
}}
)
self.app.authorization = ('Basic', (self.broker_name, self.broker_pass))
response = self.app.post_json('/monitorings/{}/posts/{}/documents'.format(
self.monitoring_id, post_id),
{'data': self.test_docservice_document_data}, status=403)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content_type, 'application/json')
class MonitoringPostAddressedDocumentResourceTest(BaseWebTest, DSWebTestMixin):
def setUp(self):
super(MonitoringPostAddressedDocumentResourceTest, self).setUp()
self.app.app.registry.docservice_url = 'http://localhost'
self.create_monitoring()
self.test_docservice_document_data = {
'title': 'lorem.doc',
'url': self.generate_docservice_url(),
'hash': 'md5:' + '0' * 32,
'format': 'application/msword',
}
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': {
"status": "active",
"decision": {
"date": "2015-05-10T23:11:39.720908+03:00",
"description": "text",
"documents": [self.test_docservice_document_data]
}
}}
)
self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': {
"conclusion": {
"description": "Some text",
"violationOccurred": True,
"violationType": ["corruptionProcurementMethodType", "corruptionAwarded"],
"documents": [self.test_docservice_document_data]
},
"status": "addressed",
}}
)
self.post_data = {
'title': 'Lorem ipsum',
'description': 'Lorem ipsum dolor sit amet',
'documents': [self.test_docservice_document_data]
}
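# The tests below stub TendersClient so extract_credentials() returns the sha512 of
# 'tender_token'; the broker then exchanges that token at the credentials endpoint
# for a tender-owner access token used to create posts and upload documents.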
@mock.patch('openprocurement.audit.monitoring.validation.TendersClient')
def test_document_get_single(self, mock_api_client):
mock_api_client.return_value.extract_credentials.return_value = {
'data': {'tender_token': sha512(b'tender_token').hexdigest()}
}
self.app.authorization = ('Basic', (self.broker_name, self.broker_pass))
response = self.app.patch_json(
'/monitorings/{}/credentials?acc_token={}'.format(self.monitoring_id, 'tender_token')
)
tender_owner_token = response.json['access']['token']
response = self.app.post_json(
'/monitorings/{}/posts?acc_token={}'.format(self.monitoring_id, tender_owner_token),
{'data': self.post_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
post_id = response.json['data']['id']
document_id = response.json['data']['documents'][-1]['id']
response = self.app.get('/monitorings/{}/posts/{}/documents/{}'.format(self.monitoring_id, post_id, document_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
document_data = response.json['data']
self.assertEqual(document_data['title'], 'lorem.doc')
self.assertIn('Signature=', document_data["url"])
self.assertIn('KeyID=', document_data["url"])
self.assertNotIn('Expires=', document_data["url"])
@mock.patch('openprocurement.audit.monitoring.validation.TendersClient')
def test_document_get_list(self, mock_api_client):
mock_api_client.return_value.extract_credentials.return_value = {
'data': {'tender_token': sha512(b'tender_token').hexdigest()}
}
self.app.authorization = ('Basic', (self.broker_name, self.broker_pass))
response = self.app.patch_json(
'/monitorings/{}/credentials?acc_token={}'.format(self.monitoring_id, 'tender_token')
)
tender_owner_token = response.json['access']['token']
response = self.app.post_json(
'/monitorings/{}/posts?acc_token={}'.format(self.monitoring_id, tender_owner_token),
{'data': self.post_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
post_id = response.json['data']['id']
response = self.app.get('/monitorings/{}/posts/{}/documents'.format(self.monitoring_id, post_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 1)
document_data = response.json['data'][-1]
self.assertEqual(document_data['title'], 'lorem.doc')
@mock.patch('openprocurement.audit.monitoring.validation.TendersClient')
def test_document_download(self, mock_api_client):
mock_api_client.return_value.extract_credentials.return_value = {
'data': {'tender_token': sha512(b'tender_token').hexdigest()}
}
self.app.authorization = ('Basic', (self.broker_name, self.broker_pass))
response = self.app.patch_json(
'/monitorings/{}/credentials?acc_token={}'.format(self.monitoring_id, 'tender_token')
)
tender_owner_token = response.json['access']['token']
response = self.app.post_json(
'/monitorings/{}/posts?acc_token={}'.format(self.monitoring_id, tender_owner_token),
{'data': self.post_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
post_id = response.json['data']['id']
document_data = response.json['data']['documents'][-1]
key = document_data["url"].split('/')[-1].split('?')[0]
document_id = document_data['id']
response = self.app.get('/monitorings/{}/posts/{}/documents/{}?download=some_id'.format(
self.monitoring_id, post_id, document_id), status=404)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{'description': 'Not Found', 'location': 'url', 'name': 'download'}
])
response = self.app.get('/monitorings/{}/posts/{}/documents/{}?download={}'.format(
self.monitoring_id, post_id, document_id, key))
self.assertEqual(response.status_code, 302)
self.assertIn('http://localhost/get/', response.location)
self.assertIn('Signature=', response.location)
self.assertIn('KeyID=', response.location)
self.assertNotIn('Expires=', response.location)
@mock.patch('openprocurement.audit.monitoring.validation.TendersClient')
def test_document_upload_no_token(self, mock_api_client):
mock_api_client.return_value.extract_credentials.return_value = {
'data': {'tender_token': sha512(b'tender_token').hexdigest()}
}
self.app.authorization = ('Basic', (self.broker_name, self.broker_pass))
response = self.app.patch_json(
'/monitorings/{}/credentials?acc_token={}'.format(self.monitoring_id, 'tender_token')
)
tender_owner_token = response.json['access']['token']
response = self.app.post_json(
'/monitorings/{}/posts?acc_token={}'.format(self.monitoring_id, tender_owner_token),
{'data': self.post_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
post_id = response.json['data']['id']
response = self.app.post_json('/monitorings/{}/posts/{}/documents'.format(
self.monitoring_id, post_id),
{'data': self.test_docservice_document_data}, status=403)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content_type, 'application/json')
@mock.patch('openprocurement.audit.monitoring.validation.TendersClient')
def test_document_upload(self, mock_api_client):
mock_api_client.return_value.extract_credentials.return_value = {
'data': {'tender_token': sha512(b'tender_token').hexdigest()}
}
self.app.authorization = ('Basic', (self.broker_name, self.broker_pass))
response = self.app.patch_json(
'/monitorings/{}/credentials?acc_token={}'.format(self.monitoring_id, 'tender_token')
)
tender_owner_token = response.json['access']['token']
response = self.app.post_json(
'/monitorings/{}/posts?acc_token={}'.format(self.monitoring_id, tender_owner_token),
{'data': self.post_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
post_id = response.json['data']['id']
response = self.app.post_json('/monitorings/{}/posts/{}/documents?acc_token={}'.format(
self.monitoring_id, post_id, tender_owner_token),
{'data': self.test_docservice_document_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
@mock.patch('openprocurement.audit.monitoring.validation.TendersClient')
def test_document_upload_author_forbidden(self, mock_api_client):
mock_api_client.return_value.extract_credentials.return_value = {
'data': {'tender_token': sha512(b'tender_token').hexdigest()}
}
self.app.authorization = ('Basic', (self.broker_name, self.broker_pass))
response = self.app.patch_json(
'/monitorings/{}/credentials?acc_token={}'.format(self.monitoring_id, 'tender_token')
)
tender_owner_token = response.json['access']['token']
response = self.app.post_json(
'/monitorings/{}/posts?acc_token={}'.format(self.monitoring_id, tender_owner_token),
{'data': self.post_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
post_id = response.json['data']['id']
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
self.app.post_json('/monitorings/{}/posts/{}/documents'.format(
self.monitoring_id, post_id),
{'data': self.test_docservice_document_data}, status=403)
class MonitoringDocumentResourceTest(BaseWebTest, DSWebTestMixin):
def setUp(self):
super(MonitoringDocumentResourceTest, self).setUp()
self.app.app.registry.docservice_url = 'http://localhost'
self.create_monitoring()
self.test_docservice_document_data = {
'title': 'lorem.doc',
'url': self.generate_docservice_url(),
'hash': 'md5:' + '0' * 32,
'format': 'application/msword',
}
self.test_monitoring_activation_data = {
"documents": [self.test_docservice_document_data],
}
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': {
'status': "cancelled",
'cancellation': {
"description": "text"
}
}})
def test_document_get_single(self):
response = self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': self.test_monitoring_activation_data})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
document_id = response.json['data']['documents'][-1]['id']
response = self.app.get('/monitorings/{}/documents/{}'.format(self.monitoring_id, document_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
document_data = response.json['data']
self.assertEqual(document_data['title'], 'lorem.doc')
self.assertIn('Signature=', document_data["url"])
self.assertIn('KeyID=', document_data["url"])
self.assertNotIn('Expires=', document_data["url"])
def test_document_get_list(self):
response = self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': self.test_monitoring_activation_data})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/monitorings/{}/documents'.format(self.monitoring_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 1)
document_data = response.json['data'][-1]
self.assertEqual(document_data['title'], 'lorem.doc')
def test_document_download(self):
response = self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': self.test_monitoring_activation_data})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
document_data = response.json['data']['documents'][-1]
key = document_data["url"].split('/')[-1].split('?')[0]
document_id = document_data['id']
response = self.app.get('/monitorings/{}/documents/{}?download=some_id'.format(
self.monitoring_id, document_id), status=404)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{'description': 'Not Found', 'location': 'url', 'name': 'download'}
])
response = self.app.get('/monitorings/{}/documents/{}?download={}'.format(
self.monitoring_id, document_id, key))
self.assertEqual(response.status_code, 302)
self.assertIn('http://localhost/get/', response.location)
self.assertIn('Signature=', response.location)
self.assertIn('KeyID=', response.location)
self.assertNotIn('Expires=', response.location)
def test_document_upload(self):
response = self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': self.test_monitoring_activation_data})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
response = self.app.post_json('/monitorings/{}/documents'.format(
self.monitoring_id),
{'data': self.test_docservice_document_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
class MonitoringCancellationDocumentResourceTest(BaseWebTest, DSWebTestMixin):
def setUp(self):
super(MonitoringCancellationDocumentResourceTest, self).setUp()
self.app.app.registry.docservice_url = 'http://localhost'
self.create_monitoring()
self.test_docservice_document_data = {
'title': 'lorem.doc',
'url': self.generate_docservice_url(),
'hash': 'md5:' + '0' * 32,
'format': 'application/msword',
}
self.test_monitoring_activation_data = {
'status': "cancelled",
'cancellation': {
"description": "text",
"documents": []
}
}
self.app.authorization = ('Basic', (self.sas_name, self.sas_pass))
self.app.patch_json(
'/monitorings/{}'.format(self.monitoring_id),
{'data': self.test_monitoring_activation_data})
self.end_point = '/monitorings/%s/cancellation/documents' % self.monitoring_id
def test_get_single(self):
response = self.app.post_json(
'/monitorings/{}/cancellation/documents'.format(self.monitoring_id),
{'data': self.test_docservice_document_data})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.content_type, 'application/json')
document_id = response.json['data']['id']
response = self.app.get('/monitorings/{}/cancellation/documents/{}'.format(self.monitoring_id, document_id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, 'application/json')
document_data = response.json['data']
import filecmp
import os
import sys
import shutil
import subprocess
import time
import unittest
if sys.version_info > (3, 0):
import urllib.request, urllib.parse, urllib.error
else:
import urllib
from optparse import OptionParser
from PyQt4 import QtCore, QtGui
parser = OptionParser()
parser.add_option("-r", "--root", dest="web_root",
default="http://portal.nersc.gov/project/visit/",
help="Root of web URL where baselines are")
parser.add_option("-d", "--date", dest="web_date",
help="Date of last good run, in YYMonDD form")
parser.add_option("-m", "--mode", dest="mode",
help="Mode to run in: serial, parallel, sr")
parser.add_option("-w", "--web-url", dest="web_url",
help="Manual URL specification; normally generated "
"automatically based on (-r, -d, -m)")
parser.add_option("-g", "--git", dest="git", action="store_true",
help="Use git to ignore images with local modifications")
parser.add_option("-s", "--svn", dest="svn", action="store_true",
help="Use svn to ignore images with local modifications")
(options, args) = parser.parse_args()
if options.web_url is not None:
uri = options.web_url
else:
uri = options.web_root + options.web_date + "/"
mode = ""
if options.mode == "sr" or options.mode == "scalable,parallel" or \
options.mode == "scalable_parallel":
mode="davinci_scalable_parallel_icet"
else:
mode="".join([ s for s in ("davinci_", options.mode) ])
uri += mode + "/"
parser.destroy()
print("uri:", uri)
class MW(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
def real_dirname(path):
"""Python's os.path.dirname is not dirname."""
return path.rsplit('/', 1)[0]
def real_basename(path):
"""Python's os.path.basename is not basename."""
if path.rsplit('/', 1)[1] == '': return None
return path.rsplit('/', 1)[1]
def baseline_current(serial_baseline):
"""Given the path to the serial baseline image, determine if there is a mode
specific baseline. Return a 2-tuple of the baseline image and the path to
the 'current' image."""
dname = real_dirname(serial_baseline)
bname = real_basename(serial_baseline)
baseline = serial_baseline
if options.mode is not None:
# Check for a mode specific baseline.
mode_spec = os.path.join(dname + "/", options.mode + "/", bname)
if os.path.exists(mode_spec):
baseline = mode_spec
# `Current' image never has a mode-specific path; filename/dir is always
# based on the serial baseline's directory.
no_baseline = serial_baseline.split('/', 1) # path without "baseline/"
current = os.path.join("current/", no_baseline[1])
return (baseline, current)
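# Illustrative example (paths hypothetical): with options.mode == "parallel" and
# a mode-specific file on disk, baseline_current("baseline/category/test/a.png")
# returns ("baseline/category/test/parallel/a.png", "current/category/test/a.png");
# without one, the first element stays the serial baseline path.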
def mode_specific(baseline):
"""Given a baseline image path, return a path to the mode specific baseline,
even if said baseline does not exist (yet)."""
if options.mode is None or options.mode == "serial":
return baseline
dname = real_dirname(baseline)
bname = real_basename(baseline)
if options.mode == "parallel":
if baseline.find("/parallel") != -1:
# It's already got parallel in the path; this IS a mode specific
# baseline.
return baseline
return os.path.join(dname, options.mode, bname)
if options.mode.find("scalable") != -1:
if baseline.find("scalable_parallel") != -1:
# Already is mode-specific.
return baseline
return os.path.join(dname, "scalable_parallel", bname)
# Ruh roh. options.mode must be garbage.
raise NotImplementedError("Unknown mode '%s'" % options.mode)
def local_modifications_git(file):
vcs_diff = subprocess.call(["git", "diff", "--quiet", file])
if vcs_diff == 1:
return True
return False
def local_modifications_svn(file):
svnstat = subprocess.Popen("svn stat %s" % file, shell=True,
stdout=subprocess.PIPE)
diff = svnstat.communicate()[0]
if diff != '':
return True
return False
def local_modifications(filepath):
"""Returns true if the file has local modifications. Always false if the
user did not supply the appropriate VCS option."""
if options.git: return local_modifications_git(filepath)
if options.svn: return local_modifications_svn(filepath)
return False
def equivalent(baseline, image):
"""True if the files are the same."""
if not os.path.exists(image): return False
# Note this is `shallow' by default, but that's fine for our usage.
return filecmp.cmp(baseline, image)
def trivial_pass(baseline, image):
"""True if we can determine that this image is OK without querying the
network."""
return equivalent(baseline, image) or local_modifications(baseline)
class RebaselinePTests(unittest.TestCase):
def test_dirname(self):
input_and_results = [
("baseline/category/test/a.png", "baseline/category/test"),
("b/c/t/q.png", "b/c/t"),
("b/c/t/longfn.png", "b/c/t"),
("b/c/t/", "b/c/t")
]
for tst in input_and_results:
self.assertEqual(real_dirname(tst[0]), tst[1])
def test_basename(self):
input_and_results = [
("baseline/category/test/a.png", "a.png"),
("b/c/t/q.png", "q.png"),
("b/c/t/longfn.png", "longfn.png"),
("b/c/t/", None)
]
for tst in input_and_results:
self.assertEqual(real_basename(tst[0]), tst[1])
class Image(QtGui.QWidget):
def __init__(self, path, parent=None):
QtGui.QWidget.__init__(self, parent)
self._filename = path
self._parent = parent
self._display = QtGui.QLabel(self._parent)
self._load()
def _load(self):
pixmap = QtGui.QPixmap(300,300)
pixmap.load(self._filename)
self._display.resize(pixmap.size())
self._display.setPixmap(pixmap)
def widget(self): return self._display
def width(self): return self._display.width()
def height(self): return self._display.height()
def update(self, path):
self._filename = path
self._load()
class Layout(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self._mainwin = parent
self._mainwin.statusBar().insertPermanentWidget(0,QtGui.QLabel())
self.status("Initializing...")
quit = QtGui.QPushButton('Quit', self)
quit.setMaximumWidth(80)
if parent is None: parent = self
parent.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp,
QtCore.SLOT('quit()'))
parent.connect(self, QtCore.SIGNAL('closeApp()'), self._die)
self._init_signals()
self._bugs = [] # list which keeps track of which images we think are bugs.
# guess an initial size; we don't know a real size until we've downloaded
# images.
self.resize_this_and_mainwin(600, 600)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setFocus()
self._baseline = None
self._current = None
self._diff = None
self._images = [None, None, None]
self._next_set_of_images()
self._images[0] = Image(self._baseline, self)
self._images[1] = Image(self._current, self)
self._images[2] = Image(self._diff, self)
grid = QtGui.QGridLayout()
label_baseline = QtGui.QLabel(grid.widget())
label_current = QtGui.QLabel(grid.widget())
label_diff = QtGui.QLabel(grid.widget())
label_baseline.setText("Baseline image:")
label_current.setText("Davinci's current:")
label_diff.setText("difference between them:")
label_baseline.setMaximumSize(QtCore.QSize(160,35))
label_current.setMaximumSize(QtCore.QSize(160,35))
label_diff.setMaximumSize(QtCore.QSize(200,35))
label_directions = QtGui.QLabel(grid.widget())
label_directions.setText("Keyboard shortcuts:\n\n"
"y: yes, rebaseline\n"
"n: no, current image is wrong\n"
"u: unknown, I can't/don't want to decide now\n"
"q: quit")
label_directions.setMaximumSize(QtCore.QSize(300,300))
grid.addWidget(label_baseline, 0,0)
grid.addWidget(label_current, 0,1)
grid.addWidget(self._images[0].widget(), 1,0)
grid.addWidget(self._images[1].widget(), 1,1)
grid.addWidget(label_diff, 2,0)
grid.addWidget(quit, 2,1)
grid.addWidget(self._images[2].widget(), 3,0)
grid.addWidget(label_directions, 3,1)
rows = (
(0, (label_baseline, label_current)),
(1, (self._images[0], self._images[1])),
(2, (label_diff, quit)),
(3, (self._images[2], label_directions))
)
cols = (
(0, (label_baseline, self._images[0], label_diff, self._images[2])),
(1, (label_current, self._images[1], quit, label_directions))
)
for r in rows:
grid.setRowMinimumHeight(r[0], max([x.height() for x in r[1]]))
for c in cols:
grid.setColumnMinimumWidth(c[0], max([x.width() for x in c[1]]))
self.setLayout(grid)
self.resize_this_and_mainwin(self.calc_width(), self.calc_height())
self.show()
self.setFocus()
def resize_this_and_mainwin(self, w, h):
self.resize(w,h)
# make sure it can't shrink too much
self._mainwin.setMinimumWidth(w)
self._mainwin.setMinimumHeight(h+30) # +30: for the status bar
# try not to resize the mainwin if we don't need to; it's annoying.
cur_w = self._mainwin.width()
cur_h = self._mainwin.height()
self._mainwin.resize(max(w,cur_w), max(h,cur_h))
self._mainwin.update()
def _die(self):
print("You thought these test results were bugs:")
for f in self._bugs:
print("\t", f)
self._mainwin.close()
def calc_width(self):
w = 0
for col in range(0,self.layout().columnCount()):
w += self.layout().columnMinimumWidth(col)
return w
def calc_height(self):
h = 0
for row in range(0,self.layout().rowCount()):
h += self.layout().rowMinimumHeight(row)
return h
def _update_images(self):
self._images[0].update(self._baseline)
self._images[1].update(self._current)
self._images[2].update(self._diff)
self.resize_this_and_mainwin(self.calc_width(), self.calc_height())
self.update()
def _rebaseline(self):
self.status("".join(["rebaselining ", self._current, "..."]))
baseline = mode_specific(self._baseline)
print("moving", self._current, "on top of", baseline)
# We might be creating the first mode specific baseline for that test. If
# so, it'll be missing the baseline specific dir.
if not os.path.exists(real_dirname(baseline)):
print(real_dirname(baseline), "does not exist, creating...")
os.mkdir(real_dirname(baseline))
shutil.move(self._current, baseline) # do the rebaseline!
self._next_set_of_images()
self._update_images()
def _ignore(self):
self.status("".join(["ignoring ", self._baseline, "..."]))
self._bugs.append(self._baseline)
self._next_set_of_images()
self._update_images()
def _unknown(self):
self.status("".join(["unknown ", self._baseline, "..."]))
self._next_set_of_images()
self._update_images()
def status(self, msg):
self._mainwin.statusBar().showMessage(msg)
self._mainwin.statusBar().update()
QtCore.QCoreApplication.processEvents() # we're single threaded
def _next_set_of_images(self):
"""Figures out the next set of images to display. Downloads 'current' and
'diff' results from davinci. Sets filenames corresponding to baseline,
current and diff images."""
if self._baseline is None: # first call, build list.
self._imagelist = []
print("Building initial file list... please wait.")
self.status("Building initial file list... please wait.")
for root, dirs, files in os.walk("baseline"):
for f in files:
fn, ext = os.path.splitext(f)
if ext == ".png":
# In some cases, we can trivially reject a file. Don't bother
# adding it to our list in that case.
serial_baseline_fn = os.path.join(root, f)
# Does this path contain "parallel" or "scalable_parallel"? Then
# we've got a mode specific baseline. We'll handle those based on
# the serial filenames, so ignore them for now.
if serial_baseline_fn.find("parallel") != -1: continue
baseline_fn, current_fn = baseline_current(serial_baseline_fn)
assert os.path.exists(baseline_fn)
if not trivial_pass(baseline_fn, current_fn):
self._imagelist.append(baseline_fn)
try:
while True:
# list.pop() raises IndexError once the queue is exhausted; handled below.
self._baseline = self._imagelist.pop()
# now derive other filenames based on that one.
filename = None
# os.path.split fails if there's no /
try:
filename = os.path.split(self._baseline)
filename = filename[1]
except AttributeError as e:
self.status("No slash!")
break
current_url = uri + "/c_" + filename
if (sys.version_info > (3, 0)):
f,info = urllib.request.urlretrieve(current_url, "local_current.png")
else:
f,info = urllib.urlretrieve(current_url, "local_current.png")
self.status("".join(["Checking ", current_url, "..."]))
# Message.get() works for both the Python 2 and Python 3 header objects.
if info.get("Content-Type").startswith("text/html"):
# then it's a 404 or other error; skip this image.
continue
else:
# We found the next image.
self._current = "local_current.png"
diff_url = uri + "/d_" + filename
if (sys.version_info > (3, 0)):
f,info = urllib.request.urlretrieve(diff_url, "local_diff.png")
else:
f,info = urllib.urlretrieve(diff_url, "local_diff.png")
if info.get("Content-Type").startswith("text/html"):
raise Exception("Could not download diff image.")
self._diff = "local_diff.png"
self.status("Waiting for input on " + filename)
break
except IndexError as e:
print(e)
print("No more images!")
self.emit(QtCore.SIGNAL('closeApp()'))
def _init_signals(self):
self.connect(self, QtCore.SIGNAL('rebaseline()'), self._rebaseline)
self.connect(self, QtCore.SIGNAL('ignore()'), self._ignore)
self.connect(self, QtCore.SIGNAL('unknown()'), self._unknown)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Q:
self.emit(QtCore.SIGNAL('closeApp()'))
if event.key() == QtCore.Qt.Key_Y:
self.emit(QtCore.SIGNAL('rebaseline()'))
if event.key() == QtCore.Qt.Key_N:
self.emit(QtCore.SIGNAL('ignore()'))
if event.key() == QtCore.Qt.Key_U:
self.emit(QtCore.SIGNAL('unknown()'))
QtCore.QCoreApplication.processEvents()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(RebaselinePTests)
results =
sorted. Sorting `labels`.")
return sorted(self.labels)
def _is_sorted(self, iterable):
return all(iterable[i] <= iterable[i + 1] for i in range(len(iterable) - 1))
def fit_transform(self, y):
"""Fit label encoder and return encoded labels.
Parameters
----------
y : array-like of shape [n_samples]
Label values.
Returns
-------
y_encoded : array-like of shape [n_samples]
Encoded label values.
"""
return self.fit(y).transform(y)
def transform(self, y):
"""Transform labels to normalized encoding.
If ``self.fill_unseen_labels`` is ``True``, use ``self.fill_encoded_label_value`` for unseen values.
Seen labels are encoded with value between 0 and n_classes-1. Unseen labels are encoded with
``self.fill_encoded_label_value`` with a default value of n_classes.
Parameters
----------
y : array-like of shape [n_samples]
Label values.
Returns
-------
y_encoded : array-like of shape [n_samples]
Encoded label values.
"""
check_is_fitted(self, "classes_")
y = column_or_1d(y, warn=True)
# transform of empty array is empty array
if _num_samples(y) == 0:
return np.array([])
if self.fill_unseen_labels:
_, mask = _encode_check_unknown(y, self.classes_, return_mask=True)
y_encoded = np.searchsorted(self.classes_, y)
fill_encoded_label_value = self.fill_encoded_label_value or len(self.classes_)
y_encoded[~mask] = fill_encoded_label_value
else:
_, y_encoded = _encode(y, uniques=self.classes_, encode=True)
return y_encoded
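# Illustrative sketch (hypothetical data; assumes the encoder was constructed
# with fill_unseen_labels=True and the default fill_encoded_label_value=None):
#   enc.fit(["a", "b", "c"])
#   enc.transform(["a", "d"])   # -> array([0, 3]); unseen "d" maps to n_classes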
def inverse_transform(self, y):
"""Transform labels back to original encoding.
If ``self.fill_unseen_labels`` is ``True``, use ``self.fill_label_value`` for unseen values.
Parameters
----------
y : numpy array of shape [n_samples]
Encoded label values.
Returns
-------
y_decoded : numpy array of shape [n_samples]
Label values.
"""
check_is_fitted(self, "classes_")
y = column_or_1d(y, warn=True)
if y.dtype.kind not in ("i", "u"):
try:
y = y.astype(float).astype(int)
except ValueError:
raise ValueError("`y` contains values not convertible to integer.")
# inverse transform of empty array is empty array
if _num_samples(y) == 0:
return np.array([])
labels = np.arange(len(self.classes_))
diff = np.setdiff1d(y, labels)
if diff.size > 0 and not self.fill_unseen_labels:
raise ValueError("y contains previously unseen labels: %s" % str(diff))
y_decoded = [self.classes_[idx] if idx in labels else self.fill_label_value for idx in y]
return y_decoded
def get_classes(self):
"""Returns the values of the unencoded classes.
If ``self.include_unseen_class`` is ``True`` include ``self.fill_label_value`` as a class.
Returns
-------
classes : array of shape (n_classes,)
"""
if self.include_unseen_class and self.fill_unseen_labels:
return np.append(self.classes_, [self.fill_label_value])
return self.classes_
class NALabelEncoder(BaseEstimator, TransformerMixin):
"""Encoder for transforming labels to NA values.
Uses `RobustImputer` on 1D inputs of labels
- Uses `is_finite_numeric` mask for encoding by default
- Only uses the `RobustImputer` strategy `constant` and fills using `np.nan`
- Default behavior encodes non-float and non-finite values as nan values in
the target column of a given regression dataset
Parameters
----------
mask_function : callable -> np.array, dtype('bool') (default=None)
A vectorized python function, accepts np.array, returns np.array
with dtype('bool')
For each value, if mask_function(val) == False, that value will
be imputed. mask_function is used to create a boolean mask that determines
which values in the input to impute.
Use np.vectorize to vectorize singular python functions.
"""
def __init__(self, mask_function=None):
self.mask_function = mask_function
def fit(self, y):
"""Fit the encoder on y.
Parameters
----------
y : {array-like}, shape (n_samples,)
Input column, where `n_samples` is the number of samples.
Returns
-------
self : NALabelEncoder
"""
self.model_ = RobustImputer(strategy="constant", fill_values=np.nan, mask_function=self.mask_function)
y = y.reshape(-1, 1)
self.model_.fit(X=y)
return self
def transform(self, y):
"""Encode all non-float and non-finite values in y as NA values.
Parameters
----------
y : {array-like}, shape (n_samples)
The input column to encode.
Returns
-------
yt : {ndarray}, shape (n_samples,)
The encoded input column.
"""
check_is_fitted(self, "model_")
y = y.reshape(-1, 1)
return self.model_.transform(y).flatten()
def inverse_transform(self, y):
"""Returns input column"""
return y
def _more_tags(self):
return {"X_types": ["1dlabels"]}
class RobustOrdinalEncoder(OrdinalEncoder):
"""Encode categorical features as an integer array.
The input should be a 2D, array-like input of categorical features. Each column of categorical features will be
converted to ordinal integers. For a given column of n unique values, seen values will be mapped to integers 0 to
n-1 and unseen values will be mapped to the integer n (or to np.nan when unknown_as_nan is True). An unseen value
is a value that was passed in during the transform step, but not present in the fit step input.
This encoder supports inverse_transform, transforming ordinal integers back into categorical features. Unknown
integers are transformed to None.
Similar to ``sklearn.preprocessing.OrdinalEncoder`` with the additional feature of handling unseen values.
Parameters
----------
categories : 'auto' or a list of lists/arrays of values.
Categories (unique values) per feature:
- 'auto' : Determine categories automatically from the training data.
- list : ``categories[i]`` holds the categories expected in the ith
column. The passed categories should not mix strings and numeric
values, and should be sorted in case of numeric values.
The used categories can be found in the ``categories_`` attribute.
dtype : number type, default np.float32
Desired dtype of output.
unknown_as_nan : boolean, default False
When unknown_as_nan is false, unknown values are transformed to n, where n-1 is the last category
When unknown_as_nan is true, unknown values are transformed to np.nan
threshold : 'auto' or float, default = 1
The threshold for encoding a value as its own label in the result. Default value 1. If `threshold='auto'`, the
threshold is calculated as the maximum of `10` and `n_samples / 1000`, where `n_samples` is the number of rows
of the input X (matching the computation in ``fit`` below). How this parameter is interpreted depends on
whether it is greater than or equal to 1, or less than 1.
- If `threshold` is greater than or equal to one, it represents the number of times a value must appear in
order to be encoded as its own ordinal label in the result.
- If `threshold` is less than one, it represents the fraction of rows which must contain the value for it to be
encoded as its own ordinal label. The count is rounded up, so if `threshold` is 0.255 and there are 100 rows, a
value must appear at least 26 times to be included.
max_categories : int or np.inf, default = np.inf
Maximum number of categories to encode per feature. Default value is np.inf and does not place an upper bound on
the number of categories. If the number of observed categories is greater than ``max_categories``, the encoder
will take the top ``max_categories`` observed categories, sorted by count. All remaining values will be
encoded as the last category. Note this means that the number of categories will be ``max_categories + 1``.
In the case of a tie between categories, the category whose label is higher will be chosen.
Attributes
----------
categories_ : list of arrays
The categories of each feature determined during fitting
(in order of the features in X and corresponding with the output
of ``transform``).
feature_idxs_no_categories_ : list of ints
A list of indexes of features who have no categories with a frequency
greater than or equal to the value of ``threshold``.
Examples
--------
Given a dataset with two features, we let the encoder find the unique
values per feature and transform the data to an ordinal encoding.
>>> from sagemaker_sklearn_extension.preprocessing import RobustOrdinalEncoder
>>> enc = RobustOrdinalEncoder()
>>> X = [['Male', 1], ['Female', 3], ['Female', 2]]
>>> enc.fit(X)
RobustOrdinalEncoder(categories='auto', dtype=<class 'numpy.float32'>)
>>> enc.categories_
[array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
>>> enc.transform([['Female', 3], ['Male', 1], ['Other', 15]])
array([[0., 2.],
[1., 0.],
[2., 3.]], dtype=float32)
>>> enc.inverse_transform([[1, 0], [0, 1]])
array([['Male', 1],
['Female', 2]], dtype=object)
>>> enc.inverse_transform([[1, 0], [0, 1], [2, 3]])
array([['Male', 1],
['Female', 2],
[None, None]], dtype=object)
"""
def __init__(self, categories="auto", dtype=np.float32, unknown_as_nan=False, threshold=1, max_categories=np.inf):
super(RobustOrdinalEncoder, self).__init__(categories=categories, dtype=dtype)
self.categories = categories
self.dtype = dtype
self.unknown_as_nan = unknown_as_nan
self.threshold = threshold
self.max_categories = max_categories
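# Illustrative sketch of the threshold behaviour described in the docstring
# (hypothetical data; the pruning itself happens in the remainder of fit below):
#   enc = RobustOrdinalEncoder(threshold=2)
#   enc.fit([["a"], ["a"], ["b"]])
# Only "a" appears at least twice, so "b" gets no label of its own and is later
# transformed like an unseen value (len(categories) or np.nan, per unknown_as_nan).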
def fit(self, X, y=None):
"""Fit the RobustOrdinalEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to determine the categories of each feature, assuming the input parameter categories equals 'auto'
Returns
-------
self
"""
self._fit(X, handle_unknown="ignore")
assert self.max_categories >= 1
self.feature_idxs_no_categories_ = []
if isinstance(self.max_categories, int) or self.threshold != 1:
X_columns, n_samples, n_features = self._check_X(X)
if self.threshold == "auto":
threshold = max(10, n_samples / 1000)
elif self.threshold >= 1:
threshold = self.threshold
else:
threshold = ceil(self.threshold * n_samples)
for i in range(n_features):
dtype = X_columns[i].dtype
items, counts = np.unique(X_columns[i].astype(str), return_counts=True)
categories_to_encode = items[counts
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Browser based Web App
Purpose: Battery Monitoring Demo
Version: 4/2018 Roboball (MattK.)
"""
import dash
from dash.dependencies import Input, Output, State, Event
import dash_core_components as dcc
import dash_html_components as html
import base64
from plotly import tools
import plotly.plotly as py
import plotly.graph_objs as go
from collections import deque
import numpy as np
import random
########## globals ##########
# queque for Y Values
Y = deque(maxlen=20)
Y.appendleft(1)
for pos in range(19):
Y.appendleft(0)
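# With maxlen=20, each appendleft() pushes the newest reading in on the left and
# silently drops the oldest from the right, so Y always holds a rolling window
# of the last 20 samples used by the graph callbacks below.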
colors = {
'background': '#424242',
'text': '#7FDBFF',
'head': '#7996c4',
'c1': '#e5eefd',
}
# layouts
text_style_1 = dict(color='#444', textAlign = 'left', fontFamily='sans-serif', fontWeight=300)
text_style_2 = dict(color='#444', textAlign = 'center', fontFamily='sans-serif', fontWeight=600)
div_style_01 = {'backgroundColor': colors['background'], 'color': colors['text'],
'marginTop': 0, 'marginLeft': 0, 'marginRight': 0}
app = dash.Dash(__name__)
app.layout = html.Div([
########## navbar header ##########
html.Div([
html.Div([
html.Div([html.H3(" Battery Monitoring System", style={
'padding-left' : '-1',
'margin-left' : '-1',
'margin-top': '15',
'color': '#FFFFFF',
'textAlign' : 'center',
'height' : '45'
}, )]),
], className="col-lg-12"),
], className="row", style={
'backgroundColor': colors['background'],},),
html.Div([
html.Div([
########## battery control ##########
html.Div([
html.Div([
html.Div([
html.H3(" Control Panel",className="card-title text-center", style={},),
html.Hr(),
html.H4(" Battery Pack 1",className="card-title text-center"),
html.Div([
html.Div([
html.Button('Load Pack 1', id='button-1', className="btn btn-secondary btn-lg btn-block", style={
'margin-top' : '12',
'margin-bottom' : '0',
'padding-top' : '20',
'padding-bottom' : '20',
}), ], className="col-lg-12 col-md-12 col-xs-4",)
], className="row"),
html.Div([
html.Div([
html.Button('Stop Loading', id='button-2', className="btn btn-secondary btn-lg btn-block", style={
'margin-top' : '14',
'margin-bottom' : '0',
'padding-top' : '20',
'padding-bottom' : '20',
}), ], className="col-lg-12 col-md-12 col-xs-4",)
], className="row"),
html.Hr(),
html.H4(" Battery Pack 2",className="card-title text-center"),
html.Div([
html.Div([
html.Button('Load Pack 2', id='button-3', className="btn btn-secondary btn-lg btn-block", style={
'margin-top' : '12',
'margin-bottom' : '0',
'padding-top' : '20',
'padding-bottom' : '20',
}), ], className="col-lg-12 col-md-12 col-xs-4",)
], className="row"),
html.Div([
html.Div([
html.Button('Stop Loading', id='button-4', className="btn btn-secondary btn-lg btn-block", style={
'margin-top' : '14',
'margin-bottom' : '0',
'padding-top' : '20',
'padding-bottom' : '20',
}), ], className="col-lg-12 col-md-12 col-xs-4",)
], className="row"),
html.Hr(),
], className="card-body ", style={
'backgroundColor': colors['c1'],},),
],className="card", style={
}),
], className="col-lg-2 col-xs-12 ", style={
}),
########## battery pack 1 ##########
html.Div([
html.Div([
html.H4(" Battery Pack 1")
], className="card-header text-center text-white mb2",style={
'backgroundColor': colors['head'],'padding-top' : '15','padding-bottom' : '8',},),
html.Div([
html.Div([
html.Div([
html.Div([
dcc.Graph(id='g01',),
dcc.Interval(id='graph-update', interval= 1000 * 1),
],className="row", style={}),
],className="d-flex align-items-center flex-column justify-content-center ", style={}),
html.Div([
html.Div([
dcc.Input(id='input-01', type='text',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '0','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-02', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-03', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-04', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
],className="row ", style={}),
],className="d-flex align-items-center flex-column justify-content-center ", style={}),
html.Div([
html.Div([
dcc.Graph(id='g02',),
],className="row", style={}),
],className="d-flex align-items-center flex-column justify-content-center ", style={}),
html.Div([
html.Div([
dcc.Input(id='input-05', type='text',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '0','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-06', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-07', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-08', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
],className="row", style={}),
],className="d-flex align-items-center flex-column justify-content-center ", style={}),
html.Div([
html.Div([
dcc.Graph(id='g03',),
],className="row", style={}),
],className="d-flex align-items-center flex-column justify-content-center ", style={}),
html.Div([
html.Div([
dcc.Input(id='input-09', type='text',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '0','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-10', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-11', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-12', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
],className="row", style={}),
],className="d-flex align-items-center flex-column justify-content-center ", style={}),
], className="card-body ", style={}),
],className="card ", style={
}),
], className="col-lg-5 col-xs-12 ", style={
}),
########## battery pack 2 ##########
html.Div([
html.Div([
html.H4(" Battery Pack 2")
], className="card-header text-center text-white mb2",style={
'backgroundColor': colors['head'],'padding-top' : '15','padding-bottom' : '8', },),
html.Div([
html.Div([
html.Div([
html.Div([
dcc.Graph(id='g11',),
],className="row", style={}),
],className="d-flex align-items-center flex-column justify-content-center ", style={}),
html.Div([
html.Div([
dcc.Input(id='input-13', type='text',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '0','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-14', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-15', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-16', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
],className="row", style={}),
],className="d-flex align-items-center flex-column justify-content-center ", style={}),
html.Div([
html.Div([
dcc.Graph(id='g12',),
],className="row", style={}),
],className="d-flex align-items-center flex-column justify-content-center", style={}),
html.Div([
html.Div([
dcc.Input(id='input-17', type='text',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '0','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-18', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-19', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-20', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
],className="row", style={}),
],className="d-flex align-items-center flex-column justify-content-center ", style={}),
html.Div([
html.Div([
dcc.Graph(id='g13',),
],className="row", style={}),
],className="d-flex align-items-center flex-column justify-content-center ", style={}),
html.Div([
html.Div([
dcc.Input(id='input-21', type='text',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '0','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-22', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-23', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
dcc.Input(id='input-24', type='text', value='50',className="form-control form-control-lg ", style={
'text-align':'center','font-size': '20', 'width': '150', 'display': 'inline-block',
'margin-left' : '34','margin-right' : '0','padding-left' : '0','padding-right' : '0',}),
],className="row", style={}),
],className="d-flex align-items-center flex-column justify-content-center ", style={}),
], className="card-body"),
], className="card"),
], className="col-lg-5 col-xs-12"),
], className="row", style={
}),
], className="container-fluid", style={
'margin-top': '10',
'padding' : '10',
}),
], className="container-fluid", style={
'margin': '0',
'padding' : '0',
})
########## callbacks ##########
@app.callback(
Output(component_id='input-01', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-02', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-03', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-04', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-05', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-06', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-07', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-08', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-09', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-10', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-11', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-12', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-13', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-14', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-15', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-16', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-17', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-18', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-19', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-20', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-21', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-22', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-23', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(
Output(component_id='input-24', component_property='value'),
events=[Event('graph-update', 'interval')])
def update_output_div():
return "{:.3f}".format(Y[-1])
@app.callback(Output('g01', 'figure'),
events=[Event('graph-update', 'interval')])
def update_graph_bar():
#X.append(X[-1]+1)
val = 1+1*random.uniform(-0.1,0.1)
X = np.arange(0,-20,-1)
Y.appendleft(val)
if val < 0.95:
c_1 = 'rgba(255,0,0,0.8)' # warning mode
c_2 = 'rgba(255,0,0,0.8)' # warning mode
else:
c_1 = 'rgba(49,130,189,0.8)' # normal mode
c_2 = 'rgba(0,128,0,0.8)' # normal mode
trace1 = go.Bar(x=list(X), y=list(Y),marker=dict(color= c_1), width=1.0,showlegend=False,) #3888ba
trace2 = go.Bar(x=list(X), y=list(Y),marker=dict(color= c_2), width=1.0,showlegend=False,) #38ba72
trace3 = go.Bar(x=list(X), y=list(Y),marker=dict(color= c_1), width=1.0,showlegend=False,)
trace4 = go.Bar(x=list(X), y=list(Y),marker=dict(color= c_2), width=1.0,showlegend=False,)
fig = tools.make_subplots(rows=1, cols=4,
-------------------------------------------------------------------------
def build_query(self,
id=None,
uid=None,
filter=None,
vars=None,
filter_component=None):
"""
Query builder
@param id: record ID or list of record IDs to include
@param uid: record UID or list of record UIDs to include
@param filter: filtering query (DAL only)
@param vars: dict of URL query variables
@param filter_component: the alias of the component the URL
filters apply for (filters for this
component must be handled separately)
"""
# Reset the rows counter
self._length = None
self.rfilter = S3ResourceFilter(self,
id=id,
uid=uid,
filter=filter,
vars=vars,
filter_component=filter_component)
return self.rfilter
# -------------------------------------------------------------------------
def add_filter(self, f=None, c=None):
"""
Extend the current resource filter
@param f: a Query or a S3ResourceQuery instance
@param c: alias of the component this filter concerns,
automatically adds the respective component join
(not needed for S3ResourceQuery instances)
"""
if f is None:
return
self.clear()
if self.rfilter is None:
self.rfilter = S3ResourceFilter(self)
self.rfilter.add_filter(f, component=c)
# -------------------------------------------------------------------------
def add_component_filter(self, alias, f=None):
"""
Extend the resource filter of a particular component, does
not affect the master resource filter (as opposed to add_filter)
@param alias: the alias of the component
@param f: a Query or a S3ResourceQuery instance
"""
if f is None:
return
if self.rfilter is None:
self.rfilter = S3ResourceFilter(self)
self.rfilter.add_filter(f, component=alias, master=False)
# -------------------------------------------------------------------------
def get_query(self):
""" Get the effective query """
if self.rfilter is None:
self.build_query()
return self.rfilter.get_query()
# -------------------------------------------------------------------------
def get_filter(self):
""" Get the effective virtual fields filter """
if self.rfilter is None:
self.build_query()
return self.rfilter.get_filter()
# -------------------------------------------------------------------------
def clear_query(self):
""" Removes the current query (does not remove the set!) """
self.rfilter = None
components = self.components
if components:
for c in components:
components[c].clear_query()
# -------------------------------------------------------------------------
# Data access (new API)
# -------------------------------------------------------------------------
def count(self, left=None, distinct=False):
"""
Get the total number of available records in this resource
@param left: left outer joins, if required
@param distinct: only count distinct rows
"""
if self.rfilter is None:
self.build_query()
if self._length is None:
self._length = self.rfilter.count(left=left,
distinct=distinct)
return self._length
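# Illustrative usage sketch (the resource factory and field names below are
# hypothetical, not defined in this module):
#   resource = current.s3db.resource("org_organisation")
#   resource.build_query(vars={"organisation.name__like": "*water*"})
#   resource.add_filter(resource.table.deleted == False)   # plain DAL Query
#   total = resource.count()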
# -------------------------------------------------------------------------
def select(self,
fields,
start=0,
limit=None,
left=None,
orderby=None,
groupby=None,
distinct=False,
virtual=True,
count=False,
getids=False,
as_rows=False,
represent=False,
show_links=True,
raw_data=False):
"""
Extract data from this resource
@param fields: the fields to extract (selector strings)
@param start: index of the first record
@param limit: maximum number of records
@param left: additional left joins required for filters
@param orderby: orderby-expression for DAL
@param groupby: fields to group by (overrides fields!)
@param distinct: select distinct rows
@param virtual: include mandatory virtual fields
@param count: include the total number of matching records
@param getids: include the IDs of all matching records
@param as_rows: return the rows (don't extract)
@param represent: render field value representations
@param raw_data: include raw data in the result
"""
# Init
db = current.db
s3db = current.s3db
table = self.table
tablename = table._tablename
pkey = str(table._id)
query = self.get_query()
vfltr = self.get_filter()
rfilter = self.rfilter
resolve = self.resolve_selectors
# dict to collect accessible queries for differential
# field authorization (each joined table is authorized separately)
aqueries = {}
# Query to use for filtering
filter_query = query
#if DEBUG:
# _start = datetime.datetime.now()
# _debug("select of %s starting" % tablename)
# Resolve tables, fields and joins
joins = {}
left_joins = S3LeftJoins(tablename)
# Left joins from filter
ftables = left_joins.add(rfilter.get_left_joins())
# Left joins from caller
qtables = left_joins.add(left)
ftables.extend(qtables)
# Virtual fields and extra fields required by filter
virtual_fields = rfilter.get_fields()
vfields, vjoins, l, d = resolve(virtual_fields, show=False)
joins.update(vjoins)
vtables = left_joins.extend(l)
distinct |= d
# Display fields (fields to include in the result)
if fields is None:
fields = [f.name for f in self.readable_fields()]
dfields, djoins, l, d = resolve(fields, extra_fields=False)
joins.update(djoins)
dtables = left_joins.extend(l)
distinct |= d
# Temporarily deactivate (mandatory) virtual fields
if not virtual:
vf = table.virtualfields
osetattr(table, "virtualfields", [])
# Initialize field data and effort estimates
field_data = {pkey: ({}, {}, False, False, False)}
effort = {pkey: 0}
for dfield in dfields:
colname = dfield.colname
effort[colname] = 0
field_data[colname] = ({}, {},
dfield.tname != self.tablename,
dfield.ftype[:5] == "list:",
dfield.virtual)
# Resolve ORDERBY
orderby_aggregate = orderby_fields = None
if orderby:
if isinstance(orderby, str):
items = orderby.split(",")
elif not isinstance(orderby, (list, tuple)):
items = [orderby]
else:
items = orderby
orderby = []
orderby_fields = []
# For GROUPBY id (which we need here for left joins), we need
# all ORDERBY-fields to appear in an aggregation function, or
# otherwise the ORDERBY can be ambiguous.
orderby_aggregate = []
for item in items:
expression = None
if type(item) is Expression:
f = item.first
op = item.op
if op == db._adapter.AGGREGATE:
# Already an aggregation
expression = item
elif isinstance(f, Field) and op == db._adapter.INVERT:
direction = "desc"
else:
# Other expression - not supported
continue
elif isinstance(item, Field):
direction = "asc"
f = item
elif isinstance(item, str):
fn, direction = (item.strip().split() + ["asc"])[:2]
tn, fn = ([table._tablename] + fn.split(".", 1))[-2:]
try:
f = db[tn][fn]
except (AttributeError, KeyError):
continue
else:
continue
fname = str(f)
tname = fname.split(".", 1)[0]
if tname != tablename:
if tname in left_joins:
ftables.append(tname)
elif tname in joins:
filter_query &= joins[tname]
else:
# No join found for this field => skip
continue
orderby_fields.append(f)
if expression is None:
expression = f if direction == "asc" else ~f
orderby.append(expression)
direction = direction.strip().lower()[:3]
if fname != pkey:
expression = f.min() if direction == "asc" else ~(f.max())
else:
orderby.append(expression)
orderby_aggregate.append(expression)
# Initialize master query
master_query = filter_query
# Ignore limitby if vfltr
if vfltr is None:
limitby = self.limitby(start=start, limit=limit)
else:
limitby = None
# Filter Query:
ids = None
page = None
totalrows = None
# Get the left joins
filter_joins = left_joins.as_list(tablenames=ftables,
aqueries=aqueries)
if getids or count or left_joins:
if not groupby and not vfltr and \
(count or limitby or vtables != ftables):
if getids or left_joins:
field = table._id
fdistinct = False
fgroupby = field
else:
field = table._id.count()
fdistinct = True
fgroupby = None
# We don't need virtual fields here, so deactivate
# even if virtual is True
if virtual:
vf = table.virtualfields
osetattr(table, "virtualfields", [])
# Retrieve the ordered record IDs (or number of rows)
rows = db(filter_query).select(field,
left=filter_joins,
distinct=fdistinct,
orderby=orderby_aggregate,
groupby=fgroupby,
cacheable=True)
# Restore the virtual fields
if virtual:
osetattr(table, "virtualfields", vf)
if getids or left_joins:
ids = [row[pkey] for row in rows]
totalrows = len(ids)
if limitby:
page = ids[limitby[0]:limitby[1]]
else:
page = ids
# Use simplified master query
master_query = table._id.belongs(page)
orderby = None
limitby = None
else:
totalrows = rows.first()[field]
# Master Query:
# Add joins for virtual fields
for join in vjoins.values():
master_query &= join
# Determine fields in master query
mfields = {}
qfields = {}
if groupby:
# Only extract GROUPBY fields (as we don't support aggregates)
if isinstance(groupby, str):
items = groupby.split(",")
elif not isinstance(groupby, (list, tuple)):
items = [groupby]
else:
items = groupby
groupby = []
gappend = groupby.append
for item in items:
tname = None
if isinstance(item, Field):
f = item
elif isinstance(item, str):
fn = item.strip()
tname, fn = ([table._tablename] + fn.split(".", 1))[-2:]
try:
f = db[tname][fn]
except (AttributeError, KeyError):
continue
else:
continue
gappend(f)
fname = str(f)
qfields[fname] = f
tnames = None
for dfield in dfields:
if dfield.colname == fname:
tnames = dfield.left.keys()
break
if not tnames:
if not tname:
tname = fname.split(".", 1)[0]
if tname != tablename:
qtables.append(tname)
else:
qtables.extend([tn for tn in tnames if tn != tablename])
mfields.update(qfields)
else:
if ids is None and filter_joins:
qtables = ftables
qtables.extend(vtables)
for flist in [dfields, vfields]:
for rfield in flist:
tname = rfield.tname
if tname == tablename or as_rows or tname in qtables:
colname = rfield.colname
if rfield.show:
mfields[colname] = True
if rfield.field:
qfields[colname] = rfield.field
if as_rows and \
tname != tablename and \
tname not in qtables:
left = rfield.left
if left:
for tn in left:
qtables.extend([j.first._tablename
for j
from __future__ import unicode_literals
import requests
import time
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
from decimal import Decimal
from .exceptions import (
PageError, DisambiguationError, RedirectError, HTTPTimeoutError,
WikipediaException, ODD_ERROR_MESSAGE)
from .util import cache, stdout_encode, debug
import re
API_URL = 'http://en.wikipedia.org/w/api.php'
RATE_LIMIT = False
RATE_LIMIT_MIN_WAIT = None
RATE_LIMIT_LAST_CALL = None
USER_AGENT = 'wikipedia (https://github.com/goldsmith/Wikipedia/)'
PROXY = {
'http': 'socks5://127.0.0.1:10808',
'https': 'socks5://127.0.0.1:10808'
}
def set_lang(prefix):
'''
Change the language of the API being requested.
Set `prefix` to one of the two letter prefixes found on the `list of all Wikipedias <http://meta.wikimedia.org/wiki/List_of_Wikipedias>`_.
After setting the language, the cache for ``search``, ``suggest``, and ``summary`` will be cleared.
.. note:: Make sure you search for page titles in the language that you have set.
'''
global API_URL
API_URL = 'http://' + prefix.lower() + '.wikipedia.org/w/api.php'
for cached_func in (search, suggest, summary):
cached_func.clear_cache()
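# Example: switching to the French Wikipedia rewrites the module-level API_URL
# and clears the caches of search, suggest and summary:
#   set_lang("fr")   # API_URL -> 'http://fr.wikipedia.org/w/api.php'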
def set_user_agent(user_agent_string):
'''
Set the User-Agent string to be used for all requests.
Arguments:
* user_agent_string - (string) a string specifying the User-Agent header
'''
global USER_AGENT
USER_AGENT = user_agent_string
def set_rate_limiting(rate_limit, min_wait=timedelta(milliseconds=50)):
'''
Enable or disable rate limiting on requests to the Mediawiki servers.
If rate limiting is not enabled, under some circumstances (depending on
load on Wikipedia, the number of requests you and other `wikipedia` users
are making, and other factors), Wikipedia may return an HTTP timeout error.
Enabling rate limiting generally prevents that issue, but please note that
HTTPTimeoutError still might be raised.
Arguments:
* rate_limit - (Boolean) whether to enable rate limiting or not
Keyword arguments:
* min_wait - if rate limiting is enabled, `min_wait` is a timedelta describing the minimum time to wait before requests.
Defaults to timedelta(milliseconds=50)
'''
global RATE_LIMIT
global RATE_LIMIT_MIN_WAIT
global RATE_LIMIT_LAST_CALL
RATE_LIMIT = rate_limit
if not rate_limit:
RATE_LIMIT_MIN_WAIT = None
else:
RATE_LIMIT_MIN_WAIT = min_wait
RATE_LIMIT_LAST_CALL = None
@cache
def search(query, results=10, suggestion=False):
'''
Do a Wikipedia search for `query`.
Keyword arguments:
* results - the maximum number of results returned
* suggestion - if True, return results and suggestion (if any) in a tuple
'''
search_params = {
'list': 'search',
'srprop': '',
'srlimit': results,
'limit': results,
'srsearch': query
}
if suggestion:
search_params['srinfo'] = 'suggestion'
raw_results = _wiki_request(search_params)
if 'error' in raw_results:
if raw_results['error']['info'] in ('HTTP request timed out.', 'Pool queue is full'):
raise HTTPTimeoutError(query)
else:
raise WikipediaException(raw_results['error']['info'])
search_results = (d['title'] for d in raw_results['query']['search'])
if suggestion:
if raw_results['query'].get('searchinfo'):
return list(search_results), raw_results['query']['searchinfo']['suggestion']
else:
return list(search_results), None
return list(search_results)
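# Example usage (query is illustrative):
#   search("segmentation fault", results=3)                   # -> up to 3 titles
#   search("segmentation fault", results=3, suggestion=True)  # -> (titles, suggestion or None)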
@cache
def geosearch(latitude, longitude, title=None, results=10, radius=1000):
'''
Do a wikipedia geo search for `latitude` and `longitude`
using HTTP API described in http://www.mediawiki.org/wiki/Extension:GeoData
Arguments:
* latitude (float or decimal.Decimal)
* longitude (float or decimal.Decimal)
Keyword arguments:
* title - The title of an article to search for
* results - the maximum number of results returned
* radius - Search radius in meters. The value must be between 10 and 10000
'''
search_params = {
'list': 'geosearch',
'gsradius': radius,
'gscoord': '{0}|{1}'.format(latitude, longitude),
'gslimit': results
}
if title:
search_params['titles'] = title
raw_results = _wiki_request(search_params)
if 'error' in raw_results:
if raw_results['error']['info'] in ('HTTP request timed out.', 'Pool queue is full'):
raise HTTPTimeoutError('{0}|{1}'.format(latitude, longitude))
else:
raise WikipediaException(raw_results['error']['info'])
search_pages = raw_results['query'].get('pages', None)
if search_pages:
search_results = (v['title'] for k, v in search_pages.items() if k != '-1')
else:
search_results = (d['title'] for d in raw_results['query']['geosearch'])
return list(search_results)
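# Example usage (coordinates are illustrative): article titles within 1 km of a point:
#   geosearch(48.8583, 2.2944, radius=1000)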
@cache
def suggest(query):
'''
Get a Wikipedia search suggestion for `query`.
Returns a string or None if no suggestion was found.
'''
search_params = {
'list': 'search',
'srinfo': 'suggestion',
'srprop': '',
}
search_params['srsearch'] = query
raw_result = _wiki_request(search_params)
if raw_result['query'].get('searchinfo'):
return raw_result['query']['searchinfo']['suggestion']
return None
def random(pages=1):
'''
Get a list of random Wikipedia article titles.
.. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.
Keyword arguments:
* pages - the number of random pages returned (max of 10)
'''
# http://en.wikipedia.org/w/api.php?action=query&list=random&rnlimit=5000&format=jsonfm
query_params = {
'list': 'random',
'rnnamespace': 0,
'rnlimit': pages,
}
request = _wiki_request(query_params)
titles = [page['title'] for page in request['query']['random']]
if len(titles) == 1:
return titles[0]
return titles
@cache
def summary(title, sentences=0, chars=0, auto_suggest=True, redirect=True):
'''
Plain text summary of the page.
.. note:: This is a convenience wrapper - auto_suggest and redirect are enabled by default
Keyword arguments:
* sentences - if set, return the first `sentences` sentences (can be no greater than 10).
* chars - if set, return only the first `chars` characters (actual text returned may be slightly longer).
* auto_suggest - let Wikipedia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
'''
# use auto_suggest and redirect to get the correct article
# also, use page's error checking to raise DisambiguationError if necessary
page_info = page(title, auto_suggest=auto_suggest, redirect=redirect)
title = page_info.title
pageid = page_info.pageid
query_params = {
'prop': 'extracts',
'explaintext': '',
'titles': title
}
if sentences:
query_params['exsentences'] = sentences
elif chars:
query_params['exchars'] = chars
else:
query_params['exintro'] = ''
request = _wiki_request(query_params)
summary = request['query']['pages'][pageid]['extract']
return summary
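# Example usage (title is illustrative):
#   summary("Python (programming language)", sentences=2)
# With neither `sentences` nor `chars` set, only the intro section is returned
# (the 'exintro' branch above).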
def page(title=None, pageid=None, auto_suggest=True, redirect=True, preload=False):
'''
Get a WikipediaPage object for the page with title `title` or the pageid
`pageid` (mutually exclusive).
Keyword arguments:
* title - the title of the page to load
* pageid - the numeric pageid of the page to load
* auto_suggest - let Wikipedia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
* preload - load content, summary, images, references, and links during initialization
'''
if title is not None:
if auto_suggest:
results, suggestion = search(title, results=1, suggestion=True)
try:
title = suggestion or results[0]
except IndexError:
# if there is no suggestion or search results, the page doesn't exist
raise PageError(title)
return WikipediaPage(title, redirect=redirect, preload=preload)
elif pageid is not None:
return WikipediaPage(pageid=pageid, preload=preload)
else:
raise ValueError("Either a title or a pageid must be specified")
class WikipediaPage(object):
'''
Contains data from a Wikipedia page.
Uses property methods to filter data from the raw HTML.
'''
def __init__(self, title=None, pageid=None, redirect=True, preload=False, original_title=''):
if title is not None:
self.title = title
self.original_title = original_title or title
elif pageid is not None:
self.pageid = pageid
else:
raise ValueError("Either a title or a pageid must be specified")
self.__load(redirect=redirect, preload=preload)
if preload:
for prop in ('content', 'summary', 'images', 'references', 'links', 'sections'):
getattr(self, prop)
def __repr__(self):
return stdout_encode(u'<WikipediaPage \'{}\'>'.format(self.title))
def __eq__(self, other):
try:
return (
self.pageid == other.pageid
and self.title == other.title
and self.url == other.url
)
except:
return False
def __load(self, redirect=True, preload=False):
'''
Load basic information from Wikipedia.
Confirm that page exists and is not a disambiguation/redirect.
Does not need to be called manually, should be called automatically during __init__.
'''
query_params = {
'prop': 'info|pageprops',
'inprop': 'url',
'ppprop': 'disambiguation',
'redirects': '',
}
if not getattr(self, 'pageid', None):
query_params['titles'] = self.title
else:
query_params['pageids'] = self.pageid
request = _wiki_request(query_params)
query = request['query']
pageid = list(query['pages'].keys())[0]
page = query['pages'][pageid]
# missing is present if the page is missing
if 'missing' in page:
if hasattr(self, 'title'):
raise PageError(self.title)
else:
raise PageError(pageid=self.pageid)
# same thing for redirect, except it shows up in query instead of page for
# whatever silly reason
elif 'redirects' in query:
if redirect:
redirects = query['redirects'][0]
if 'normalized' in query:
normalized = query['normalized'][0]
assert normalized['from'] == self.title, ODD_ERROR_MESSAGE
from_title = normalized['to']
else:
from_title = self.title
assert redirects['from'] == from_title, ODD_ERROR_MESSAGE
# change the title and reload the whole object
self.__init__(redirects['to'], redirect=redirect, preload=preload)
else:
raise RedirectError(getattr(self, 'title', page['title']))
# since we only asked for disambiguation in ppprop,
# if a pageprop is returned,
# then the page must be a disambiguation page
elif 'pageprops' in page:
query_params = {
'prop': 'revisions',
'rvprop': 'content',
'rvparse': '',
'rvlimit': 1
}
if hasattr(self, 'pageid'):
query_params['pageids'] = self.pageid
else:
query_params['titles'] = self.title
request = _wiki_request(query_params)
html = request['query']['pages'][pageid]['revisions'][0]['*']
lis = BeautifulSoup(html, 'html.parser').find_all('li')
filtered_lis = [li for li in lis if not 'tocsection' in ''.join(li.get('class', []))]
may_refer_to = [li.a.get_text() for li in filtered_lis if li.a]
raise DisambiguationError(getattr(self, 'title', page['title']), may_refer_to)
else:
self.pageid = pageid
self.title = page['title']
self.url = page['fullurl']
def __continued_query(self, query_params):
'''
Based on https://www.mediawiki.org/wiki/API:Query#Continuing_queries
'''
query_params.update(self.__title_query_param)
import time
import random
from copy import copy
try:
from neopixel import *
except ImportError:
print "non-neopixel"
BOARD_HEIGHT = 16
BOARD_WIDTH = 8
def unshared_copy(inList):
if isinstance(inList, list):
return list( map(unshared_copy, inList) )
return inList
# tetrisbuster code
pieces = {
'i':[ [ [True, False, False, False],[True, False, False, False],[True, False, False, False],[True, False, False, False] ], [ [True, True, True, True],[False, False, False, False],[False, False, False, False],[False, False, False, False] ], [ [True, False, False, False],[True, False, False, False],[True, False, False, False],[True, False, False, False] ], [ [True, True, True, True],[False, False, False, False],[False, False, False, False],[False, False, False, False] ] ],
'j':[ [ [False, True, False, False],[False, True, False, False],[True, True, False, False],[False, False, False, False] ], [ [True, False, False, False],[True, True, True, False],[False, False, False, False],[False, False, False, False] ], [ [True, True, False, False],[True, False, False, False],[True, False, False, False],[False, False, False, False] ], [ [True, True, True, False],[False, False, True, False],[False, False, False, False],[False, False, False, False] ] ],
'l':[ [ [True, False, False, False],[True, False, False, False],[True, True, False, False],[False, False, False, False] ], [ [True, True, True, False],[True, False, False, False],[False, False, False, False],[False, False, False, False] ], [ [True, True, False, False],[False, True, False, False],[False, True, False, False],[False, False, False, False] ], [ [False, False, True, False],[True, True, True, False],[False, False, False, False],[False, False, False, False] ] ],
'o':[ [ [True, True, False, False],[True, True, False, False],[False, False, False, False],[False, False, False, False] ], [ [True, True, False, False],[True, True, False, False],[False, False, False, False],[False, False, False, False] ], [ [True, True, False, False],[True, True, False, False],[False, False, False, False],[False, False, False, False] ], [ [True, True, False, False],[True, True, False, False],[False, False, False, False],[False, False, False, False] ] ],
's':[ [ [False, True, True, False],[True, True, False, False],[False, False, False, False],[False, False, False, False] ], [ [True, False, False, False],[True, True, False, False],[False, True, False, False],[False, False, False, False] ], [ [False, True, True, False],[True, True, False, False],[False, False, False, False],[False, False, False, False] ], [ [True, False, False, False],[True, True, False, False],[False, True, False, False],[False, False, False, False] ] ],
't':[ [ [True, True, True, False],[False, True, False, False],[False, False, False, False],[False, False, False, False] ], [ [False, True, False, False],[True, True, False, False],[False, True, False, False],[False, False, False, False] ], [ [False, True, False, False],[True, True, True, False],[False, False, False, False],[False, False, False, False] ], [ [True, False, False, False],[True, True, False, False],[True, False, False, False],[False, False, False, False] ], ],
'z':[ [ [True, True, False, False],[False, True, True, False],[False, False, False, False],[False, False, False, False] ], [ [False, True, False, False],[True, True, False, False],[True, False, False, False],[False, False, False, False] ], [ [True, True, False, False],[False, True, True, False],[False, False, False, False],[False, False, False, False] ], [ [False, True, False, False],[True, True, False, False],[True, False, False, False],[False, False, False, False] ], ],
}
board_height = 16
board_width = 8
def getPositionAndDegrees(board, piece):
board = board.split()
outstr = ""
# try every combination and compute a score for that combo.
combos = {}
for x in range(board_width):
for rot in range(4):
new_board, legal = doMove(board, piece, x, rot)
if legal:
# determine how many points this board is worth
pts = 0
for by in range(board_height):
# negative score for each block, blocks toward the bottom
# don't hurt as much
pts += sum(-10*(board_height-by) for char in new_board[by] if char != ".")
# find holes and give them a penalty
for bx in range(board_width):
if new_board[by][bx] == '.':
# see if there is a block anywhere above it
for check_y in range(by):
if new_board[check_y][bx] != ".":
pts -= 50 * (board_height-check_y)
break
combos[pts] = [x, rot]
    # best combo has the highest (least negative) score
    keys = sorted(combos.keys(), reverse=True)
# log
if len(keys) > 0:
new_board, legal = doMove(board, piece, combos[keys[0]][0], combos[keys[0]][1])
outstr += "\nexpected board:\n%s" % '\n'.join(new_board)
# open('lastrun.txt', 'a').write("\n---------------------\nboard:\n%s\npiece: %s\n combos: %s\noutstr: %s\n" % ('\n'.join(board), piece, combos, outstr))
if len(keys) == 0:
# random, we're about to lose
position = 0
degrees = 0
else:
position = combos[keys[0]][0]
degrees = combos[keys[0]][1] * 90
return position, degrees
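# Illustrative usage sketch (values are made up): `board` is a whitespace-separated
# string of rows with '.' marking empty cells, e.g. an empty 8x16 board:
#   empty_board = "\n".join(["." * board_width] * board_height)
#   position, degrees = getPositionAndDegrees(empty_board, 'l')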
def canPutPiece(board, piece, left, top, rot):
new_board = board[:]
for y in range(4):
for x in range(4):
bx = left+x
by = top+y
if pieces[piece][rot][y][x]:
if by >= board_height or bx >= board_width or \
bx < 0 or by < 0 or board[by][bx] != '.':
return (board, False,) # illegal move
else:
row = list(new_board[by])
row[bx] = piece
new_board[by] = ''.join(row)
return (new_board, True,) # legal move
def doMove(board, piece, position, rot):
# put piece at top of board
y = 0
# move piece down until we can't anymore
new_board = board[:]
while True:
ret_board = new_board[:]
new_board, legal = canPutPiece(board, piece, position, y, rot)
if not legal:
break
y += 1
# delete complete rows
    ret_board = [row for row in ret_board if any(item == '.' for item in row)]
for i in range(board_height-len(ret_board)):
ret_board.insert(0, '.' * board_width)
if y == 0:
# cannot place piece; return illegal move
return (board, False,)
else:
# put the changes into board
return (ret_board, True,) # 'True' - move is legal
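# Illustrative sketch (traced by hand, shown only as a comment): dropping an 'o'
# piece at the left edge of an empty board leaves a 2x2 block in the bottom-left:
#   new_board, legal = doMove(["." * board_width] * board_height, 'o', 0, 0)
#   assert legal and new_board[-1].startswith("oo")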
class Tetris:
def __init__(self, emitter):
self.board = [['.' for _ in range(BOARD_WIDTH)] for x in range(BOARD_HEIGHT)]
self.gameResults = []
self.emitter = emitter
def dropRandom(self):
piece = random.choice(['i','j','l','o','s','t','z'])
position, degrees = getPositionAndDegrees(self.boardToPOSTString(), piece)
return self.dropPiece(position, piece, degrees)
def dropPiece(self, pos, piecech, degrees):
if piecech == 'i':
piece = [[1],[1],[1],[1]]
elif piecech == 'j':
piece = [[0,1],[0,1],[1,1]]
elif piecech == 'l':
piece = [[1,0],[1,0],[1,1]]
elif piecech == 'o':
piece = [[1,1],[1,1]]
elif piecech == 's':
piece = [[0,1,1],[1,1,0]]
elif piecech == 't':
piece = [[1,1,1],[0,1,0]]
elif piecech == 'z':
piece = [[1,1,0],[0,1,1]]
rotated = self.rotatePiece(piece, degrees)
height = len(rotated)
width = len(rotated[0])
maxpos = BOARD_WIDTH - width #what's the furthest along this piece could go
pos = min([pos, maxpos]) # bring in a piece that's too far right
# array of heights of every column
boardheight = [0 for i in range(width)]
for x in range(width):
for y in range(BOARD_HEIGHT):
if self.board[y][pos + x] != '.':
boardheight[x] = y+1
# array of heights of the piece
pieceheight = [0 for i in range(width)]
for x in range(width):
for y in range(height):
if rotated[y][x] == 1:
pieceheight[x] = y+1
# work out the max heights, this is where the piece will fall to
        heights = [boardheight[i]+pieceheight[i] for i in range(width)]
        if max(heights) > BOARD_HEIGHT:
            return False # full board
        destx, desty = pos, max(heights)-1
for n in range(BOARD_HEIGHT-1, desty, -1):
frame = unshared_copy(self.board)
for y in range(len(rotated)):
for x in range(len(rotated[y])):
if rotated[y][x]==1:
frame[n-y][destx+x] = piecech
squares = self.boardToLights(frame)
#self.emitter.emit('my response', squares, namespace='/test')
self.emitter(squares)
time.sleep(0.5)
# copy the rotated piece to its dest on the board
for y in range(len(rotated)):
for x in range(len(rotated[y])):
if rotated[y][x]==1:
self.board[desty-y][destx+x] = piecech
squares = self.boardToLights(self.board)
#self.emitter.emit('my response', squares, namespace='/test')
self.emitter(squares)
time.sleep(0.5)
        # test for rows to remove - could be a bit more efficient: break out when you hit a blank row
row = BOARD_HEIGHT - 1
rowsremoved = 0
while (row >= 0 ):
allfull = True
for x in self.board[row]:
if (x == '.'):
allfull = False
break
if allfull:
self.board[row] = ['f1' for _ in range(BOARD_WIDTH)]
squares = self.boardToLights(self.board)
#self.emitter.emit('my response', squares, namespace='/test')
self.emitter(squares)
time.sleep(0.25)
self.board[row] = ['f2' for _ in range(BOARD_WIDTH)]
squares = self.boardToLights(self.board)
#self.emitter.emit('my response', squares, namespace='/test')
self.emitter(squares)
time.sleep(0.25)
del self.board[row]
self.board.append(['.' for _ in range(BOARD_WIDTH)])
rowsremoved += 1
else:
row -= 1
squares = self.boardToLights(self.board)
#self.emitter.emit('my response', squares, namespace='/test')
self.emitter(squares)
return True
def rotatePiece(self, piece, degrees):
if degrees == 0:
piecen = copy(piece)
        for i in range(degrees // 90):
piecen = []
# initialize piecen list to be the rotation of piece
for y in range(len(piece[0])):
piecen.append([0 for x in range(len(piece))])
for y in range(len(piece)):
for x in range(len(piece[y])):
piecen[x][len(piece)-1-y] = piece[y][x]
piece = copy(piecen)
return piecen
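    # Illustrative sketch of the rotation convention implemented above (traced by
    # hand, shown only as a comment):
    #   rotatePiece([[1, 1], [0, 1]], 90)  ->  [[0, 1], [1, 1]]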
def boardToLights(self, board):
array_pos = -1;
squares = [{'r':0, 'g':0, 'b':0}] * BOARD_WIDTH * BOARD_HEIGHT
direction = -1
y = BOARD_HEIGHT - 1;
for x in range(BOARD_WIDTH):
for _ in range(BOARD_HEIGHT):
spot = board[y][x]
                # lots of hacky hardcoding to get the light positions
if y > 7:
array_pos = (x * 8) + 15-y
else:
array_pos = (x * 8) + 15-(y+8) + 64
if (spot != '.'):
colors = { 'i' : {'r':0x00, 'g':0xe4, 'b':0xe4}, # '#00E4E4', // line piece
                               'o' :
in range(self.num_loc_pods)
]
)
# Store for hooks
self._hooks = {}
def add_functions(
self,
num_new_functions: int,
freeze_existing_parameters: bool = False,
init_strategy: str = "random",
):
def hook(grad):
grad.data[:-num_new_functions] = 0.0
return grad
def extend_tensor(
x: torch.Tensor, num_channels: int
) -> Tuple[torch.Tensor, Optional[RemovableHandle]]:
if init_strategy == "random":
extension = torch.randn(
num_new_functions, num_channels, device=x.device, dtype=x.dtype
)
elif init_strategy == "mirror":
assert num_new_functions <= x.shape[0]
extension_idxs = torch.randperm(x.shape[0], device=x.device)[
:num_new_functions
]
extension = x.data[extension_idxs]
else:
raise NotImplementedError
# Reparameterize
extended = nn.Parameter(
torch.cat([x.data, extension.data], dim=0), requires_grad=True
)
if freeze_existing_parameters:
# Don't want gradients flowing to the frozen parts of the tensor
hook_handle = extended.register_hook(hook)
else:
hook_handle = None
return extended, hook_handle
if self.consolidate_function_embeddings:
(
self.function_embeddings, # noqa
self._hooks["function_embeddings"],
) = extend_tensor(
self.function_embeddings, self.function_embedding_features
)
else:
(
self.function_signatures, # noqa
self._hooks["function_signatures"],
) = extend_tensor(self.function_signatures, self.type_features)
(
self.function_output_signatures, # noqa
self._hooks["function_output_signatures"],
) = extend_tensor(self.function_output_signatures, self.type_features)
self.function_codes, self._hooks["function_codes"] = extend_tensor( # noqa
self.function_codes, self.code_features
)
self.num_functions += num_new_functions
return self
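    # Illustrative usage sketch (hypothetical; the constructor arguments are not
    # shown in this excerpt):
    #   pod = FunctionPod(...)
    #   pod.add_functions(4, freeze_existing_parameters=True, init_strategy="mirror")
    #   ...   # train; gradients reach only the 4 new functions while hooks are active
    #   pod.release_hooks()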
def remove_functions(self, function_indices: List[int]):
new_function_indices = [
new_function_index
for new_function_index in range(self.num_functions)
if new_function_index not in function_indices
]
new_num_functions = len(new_function_indices)
new_function_indices = torch.tensor(
new_function_indices, device=self.function_signatures.device
)
assert not self.consolidate_function_embeddings, (
"Removing functions for consolidated function "
"embeddings is not implemented as of now."
)
def rewrap(p):
return torch.nn.Parameter(p.data[new_function_indices])
# Remove signatures
self.function_signatures = rewrap(self.function_signatures) # noqa
# Remove codes
self.function_codes = rewrap(self.function_codes) # noqa
# Remove output signatures
# noinspection PyAttributeOutsideInit
self.function_output_signatures = rewrap(self.function_output_signatures)
# Reduce the number of functions
self.num_functions = new_num_functions
return self
def release_hooks(self):
for key, hook in self._hooks.items():
if hook is not None:
hook.remove()
self._hooks.clear()
return self
def _initialize_function_embeddings(self):
if self.consolidate_function_embeddings:
# Consolidated, so we only need a single parameter vector per function
self.function_embeddings = nn.Parameter(
torch.randn(self.num_functions, self.function_embedding_features)
)
shared_deconsolidation_mlp_kwargs = dict(
capacity=self.deconsolidation_mlp_capacity,
num_layers=self.deconsolidation_mlp_depth,
trailing_activation=False,
activation=nn.GELU,
)
self.function_embeddings_to_signatures = make_mlp(
in_features=self.function_embedding_features,
out_features=self.type_features,
**shared_deconsolidation_mlp_kwargs,
)
self.function_embeddings_to_output_signatures = make_mlp(
in_features=self.function_embedding_features,
out_features=self.type_features,
**shared_deconsolidation_mlp_kwargs,
)
self.function_embeddings_to_codes = make_mlp(
in_features=self.function_embedding_features,
out_features=self.code_features,
**shared_deconsolidation_mlp_kwargs,
)
# Set the variables that are not needed to None
self.function_signatures = None
self.function_output_signatures = None
self.function_codes = None
else:
# Not consolidated, so we have a separate parameter for signature,
# code and output signature.
self.function_signatures = nn.Parameter(
torch.randn(self.num_functions, self.type_features)
)
self.function_output_signatures = nn.Parameter(
torch.randn(self.num_functions, self.type_features)
)
self.function_codes = nn.Parameter(
torch.randn(self.num_functions, self.code_features)
)
# Set the variables that are not needed to None
self.function_embeddings = None
self.function_embeddings_to_signatures = None
self.function_embeddings_to_output_signatures = None
self.function_embeddings_to_codes = None
def _initialize_temporary_variables(self):
if self.num_temporary_variables == 0:
# Nothing to do here
self.temporary_variables = None
self.function_embeddings_to_temporary_variables = None
return
if self.consolidate_function_embeddings:
# We're dealing with consolidated function embeddings, which means we'll
# need to roll a MLP
self.function_embeddings_to_temporary_variables = make_mlp(
in_features=self.function_embedding_features,
out_features=self.variable_features * self.num_temporary_variables,
capacity=self.deconsolidation_mlp_capacity,
num_layers=self.deconsolidation_mlp_depth,
trailing_activation=False,
activation=nn.GELU,
)
self.temporary_variables = None
else:
# We'll need to instantiate freely floating parameters
self.temporary_variables = nn.Parameter(
torch.randn(
self.num_functions,
self.num_temporary_variables,
self.variable_features,
)
)
self.function_embeddings_to_temporary_variables = None
def fetch_function_variables(self,) -> Iterable[torch.Tensor]:
if self.consolidate_function_embeddings:
if self.detach_function_embeddings:
function_embeddings = self.function_embeddings.detach()
else:
function_embeddings = self.function_embeddings
function_signatures = self.function_embeddings_to_signatures(
function_embeddings
)
function_output_signatures = self.function_embeddings_to_output_signatures(
function_embeddings
)
function_codes = self.function_embeddings_to_codes(function_embeddings)
else:
function_signatures = self.function_signatures # noqa
function_output_signatures = self.function_output_signatures # noqa
function_codes = self.function_codes
if self.detach_function_signatures:
function_signatures = function_signatures.detach()
if self.detach_function_output_signatures:
function_output_signatures = function_output_signatures.detach()
if self.detach_function_codes:
function_codes = function_codes.detach()
return function_signatures, function_output_signatures, function_codes
def fetch_temporary_variables(self, batch_size: int) -> torch.Tensor:
assert self.num_temporary_variables > 0
if self.consolidate_function_embeddings:
# temporary_variables.shape = u(vc)
temporary_variables = self.function_embeddings_to_temporary_variables(
self.function_embeddings
)
temporary_variables = eo.repeat(
temporary_variables,
"u (v c) -> b u v c",
b=batch_size,
v=self.num_temporary_variables,
)
else:
temporary_variables = eo.repeat(
self.temporary_variables, "u v c -> b u v c", b=batch_size
)
# temporary_variables.shape = b u v c
return temporary_variables
def append_temporary_variables(
self, variables: torch.Tensor, function_variable_affinities: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
# variables.shape = BUVC
# function_variable_affinities.shape = BUV
if self.num_temporary_variables == 0:
# Nothing to do here, this function is a no-op.
return variables, function_variable_affinities
B, U, V = function_variable_affinities.shape
assert variables.dim() == 4, (
f"Expecting a BUVC tensor, got one of " f"shape {variables.shape}."
)
# First, fetch the temporary variables
# temporary_variables.shape = BUVC
temporary_variables = self.fetch_temporary_variables(
batch_size=variables.shape[0]
)
assert temporary_variables.shape[-2] == self.num_temporary_variables
# Next, append it to the variables
variables_with_temporary_variables = torch.cat(
[temporary_variables, variables], dim=-2
)
# Finally, we need to create a new FVA
fva_with_temporary_variables = torch.cat(
[
torch.ones(
(B, U, self.num_temporary_variables),
dtype=function_variable_affinities.dtype,
device=function_variable_affinities.device,
),
function_variable_affinities,
],
dim=-1,
)
return variables_with_temporary_variables, fva_with_temporary_variables
def forward(
self,
variables: torch.Tensor,
positional_encoding: Optional[PositionalEncoding] = None,
function_dropout: Optional[FunctionDropout] = None,
):
# variables.shape = BVC
# The first step is to infer the types of the variables.
# shape = BVC
variable_types: QuantizedTensorContainer = self.type_inference(
variables, quantize=self.quantize_variable_types
)
# Read out the function signatures, codes and output signatures
(
function_signatures,
function_output_signatures,
function_codes,
) = self.fetch_function_variables()
# Optionally, quantize the signature of the function if required
if self.quantize_function_signature:
# quantized.shape = UC
function_signatures: QuantizedTensorContainer = self.quantizer(
function_signatures
)
function_output_signatures: QuantizedTensorContainer = self.quantizer(
function_output_signatures
)
else:
# quantized.shape = UC
function_signatures = QuantizedTensorContainer(
quantized=function_signatures, # noqa
commitment_loss=None,
codebook=None,
input=function_signatures, # noqa
)
function_output_signatures = QuantizedTensorContainer(
quantized=function_output_signatures, # noqa
commitment_loss=None,
codebook=None,
input=function_output_signatures, # noqa
)
# Next, we compute the affinity between functions and variables with the kernel
# shape = BUV
function_variable_affinities: torch.Tensor = self.kernel(
function_signatures.quantized, variable_types.quantized
)
# Normalize along functions if required
if self.normalize_function_variable_affinities:
function_variable_affinities = function_variable_affinities / (
function_variable_affinities.sum(1, keepdim=True) + self.epsilon
)
# Evaluate LOCs. The LOCPod expects variables to have the shape BUVC, so we'll
# have to reshape from BVC
input_variables = variables
variables = eo.repeat(variables, "b v c -> b u v c", u=self.num_functions)
loc_pod_outputs = []
for loc in self.loc_pods:
loc_pod_output = loc(
variables=variables,
codes=function_codes,
function_variable_affinities=function_variable_affinities,
positional_encoding=positional_encoding,
)
variables = loc_pod_output.output
loc_pod_outputs.append(loc_pod_output)
# The last step is to reduce BUVC to BVC. To do so, consider that the BUVC
# measures the contribution of function U on variable V. If a variable is
# therefore not compatible with a function, we expect it to be eliminated
# when it's weighted with the kernel.
output = torch.einsum(
"buv,buvc->bvc",
function_variable_affinities,
(function_dropout if function_dropout is not None else (lambda x: x))(
variables
),
)
if not self.no_residual:
if self.residual_mode == "gating":
                # For this gating, we let each variable through in proportion to how
                # much it was required and modified by the functions. If a variable was
                # not required by any function, the gate is zero and we let the input
                # variable pass unaltered.
# affinity_gate.shape = BV1
affinity_gate = eo.reduce(
function_variable_affinities, "b u v -> b v ()", reduction="mean"
)
output = ((1 - affinity_gate) * input_variables) + (
affinity_gate * output
)
elif self.residual_mode == "vanilla":
output = input_variables + output
else:
raise NotImplementedError
# Evaluate the type of output coming out from the function
output_types: QuantizedTensorContainer = self.type_inference(
variables, quantize=self.quantize_variable_types
)
        # Assemble and return the output container
return FunctionPodOutput(
input=input_variables,
output=output,
loc_pod_outputs=loc_pod_outputs,
variable_types=variable_types,
function_signatures=function_signatures,
function_output_signatures=function_output_signatures,
function_variable_affinities=function_variable_affinities,
output_types=output_types,
)
class Script(nn.Module):
"""
    A Script is a wrapper around FunctionPod that applies the latter iteratively.
This is like the main() method of a script that uses the functions as defined
in the script.
"""
def __init__(
self,
*,
variable_features: int,
num_iterations: int,
no_residual: bool = True,
# Type inference
type_features: int,
num_types: int,
type_inference_kwargs: Optional[dict] = None,
# Function pod
function_pod_kwargs: dict,
# Function dropout
function_dropout_kwargs: Optional[dict] = None,
):
super(Script, self).__init__()
# Meta
self.variable_features = variable_features
self.num_iterations = num_iterations
self.no_residual = no_residual
# Make the type inference engine, which is shared between all variables
self.type_inference = TypeInference(
variable_features=variable_features,
type_features=type_features,
num_types=num_types,
**(type_inference_kwargs or {}),
)
# Make the function pod
self.function_pod = FunctionPod(
type_inference=self.type_inference, **function_pod_kwargs
)
# Make the function dropout. This is a no-op by default
self.function_dropout = FunctionDropout(**(function_dropout_kwargs or {}))
def add_functions(self, *args, **kwargs):
self.function_pod.add_functions(*args, **kwargs)
return self
def forward(
self,
variables: torch.Tensor,
positional_encoding: Optional[PositionalEncoding] = None,
):
# variables.shape = BVC
input_variables = variables
function_pod_outputs = []
self.function_dropout.reset()
for iter_num in range(self.num_iterations):
function_pod_output: FunctionPodOutput = self.function_pod(
variables,
positional_encoding=positional_encoding,
function_dropout=self.function_dropout,
)
variables = function_pod_output.output
function_pod_outputs.append(function_pod_output)
such as required and readonly status,
required type, and so on.
This implementation uses a field list for this.
Subclasses may override or extend.
.. versionadded:: 4.6.0
"""
lines = []
lines.append(_DocStringHelpers.make_class_field('Implementation', type(self)))
lines.append(_DocStringHelpers.make_field("Read Only", self.readonly))
lines.append(_DocStringHelpers.make_field("Required", self.required))
if self.defaultFactory:
lines.append(_DocStringHelpers.make_field("Default Factory", repr(self.defaultFactory)))
else:
lines.append(_DocStringHelpers.make_field("Default Value", repr(self.default)))
if self._type:
lines.append(_DocStringHelpers.make_class_field("Allowed Type", self._type))
# key_type and value_type are commonly used, but don't
# have a common superclass to add them, so we do it here.
# Using a rubric produces decent formatting
for name, rubric in (('key_type', 'Key Type'),
('value_type', 'Value Type')):
field = getattr(self, name, None)
if hasattr(field, 'getDoc'):
lines.append("")
lines.append(".. rubric:: " + rubric)
lines.append("")
lines.append(field.getDoc())
return lines
def getDoc(self):
doc = super(Field, self).getDoc()
lines = _DocStringHelpers.docstring_to_lines(doc)
lines += self.getExtraDocLines()
lines.append('')
return '\n'.join(lines)
class Container(Field):
def _validate(self, value):
super(Container, self)._validate(value)
if not hasattr(value, '__contains__'):
try:
iter(value)
except TypeError:
raise NotAContainer(value).with_field_and_value(self, value)
# XXX This class violates the Liskov Substitution Principle: it
# is derived from Container, but cannot be used everywhere an instance
# of Container could be, because its '_validate' is more restrictive.
class Iterable(Container):
def _validate(self, value):
super(Iterable, self)._validate(value)
# See if we can get an iterator for it
try:
iter(value)
except TypeError:
raise NotAnIterator(value).with_field_and_value(self, value)
class Orderable(object):
"""Values of ordered fields can be sorted.
They can be restricted to a range of values.
Orderable is a mixin used in combination with Field.
"""
min = ValidatedProperty('min', allow_none=True)
max = ValidatedProperty('max', allow_none=True)
def __init__(self, min=None, max=None, default=None, **kw):
# Set min and max to None so that we can validate if
# one of the super methods invoke validation.
self.min = None
self.max = None
super(Orderable, self).__init__(**kw)
# Now really set min and max
self.min = min
self.max = max
# We've taken over setting default so it can be limited by min
# and max.
self.default = default
def _validate(self, value):
super(Orderable, self)._validate(value)
if self.min is not None and value < self.min:
raise TooSmall(value, self.min).with_field_and_value(self, value)
if self.max is not None and value > self.max:
raise TooBig(value, self.max).with_field_and_value(self, value)
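# Illustrative sketch (not from this file): zope.schema's `Int` mixes Orderable into
# Field, so the bounds above are enforced at validation time:
#   field = Int(min=0, max=10)
#   field.validate(5)    # passes
#   field.validate(-1)   # raises TooSmall
#   field.validate(99)   # raises TooBig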
class MinMaxLen(object):
"""Expresses constraints on the length of a field.
MinMaxLen is a mixin used in combination with Field.
"""
min_length = 0
max_length = None
def __init__(self, min_length=0, max_length=None, **kw):
self.min_length = min_length
self.max_length = max_length
super(MinMaxLen, self).__init__(**kw)
def _validate(self, value):
super(MinMaxLen, self)._validate(value)
if self.min_length is not None and len(value) < self.min_length:
raise TooShort(value, self.min_length).with_field_and_value(self, value)
if self.max_length is not None and len(value) > self.max_length:
raise TooLong(value, self.max_length).with_field_and_value(self, value)
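# Illustrative sketch: Text/TextLine below inherit MinMaxLen, so length limits are
# checked the same way (example values are made up):
#   name = TextLine(min_length=1, max_length=5)
#   name.validate(u"ok")       # passes
#   name.validate(u"")         # raises TooShort
#   name.validate(u"toolong")  # raises TooLong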
@implementer(IFromUnicode)
class Text(MinMaxLen, Field):
"""A field containing text used for human discourse."""
_type = text_type
def __init__(self, *args, **kw):
super(Text, self).__init__(*args, **kw)
def fromUnicode(self, str):
"""
>>> from zope.schema.interfaces import WrongType
>>> from zope.schema.interfaces import ConstraintNotSatisfied
>>> from zope.schema import Text
>>> from zope.schema._compat import text_type
>>> t = Text(constraint=lambda v: 'x' in v)
>>> t.fromUnicode(b"foo x spam") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
zope.schema._bootstrapinterfaces.WrongType: ('foo x spam', <type 'unicode'>, '')
>>> result = t.fromUnicode(u"foo x spam")
>>> isinstance(result, bytes)
False
>>> str(result)
'foo x spam'
>>> t.fromUnicode(u"foo spam") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
zope.schema._bootstrapinterfaces.ConstraintNotSatisfied: (u'foo spam', '')
"""
self.validate(str)
return str
class TextLine(Text):
"""A text field with no newlines."""
def constraint(self, value):
return '\n' not in value and '\r' not in value
class Password(TextLine):
"""A text field containing a text used as a password."""
UNCHANGED_PASSWORD = object()
def set(self, context, value):
"""Update the password.
We use a special marker value that a widget can use
to tell us that the password didn't change. This is
needed to support edit forms that don't display the
existing password and want to work together with
encryption.
"""
if value is self.UNCHANGED_PASSWORD:
return
super(Password, self).set(context, value)
def validate(self, value):
try:
existing = bool(self.get(self.context))
except AttributeError:
existing = False
if value is self.UNCHANGED_PASSWORD and existing:
# Allow the UNCHANGED_PASSWORD value, if a password is set already
return
return super(Password, self).validate(value)
@implementer(IFromUnicode, IFromBytes)
class Bool(Field):
"""
A field representing a Bool.
.. versionchanged:: 4.8.0
Implement :class:`zope.schema.interfaces.IFromBytes`
"""
_type = bool
def _validate(self, value):
        # Convert integers to bools so they don't get mis-flagged
# by the type check later.
if isinstance(value, int):
value = bool(value)
Field._validate(self, value)
def set(self, object, value):
if isinstance(value, int):
value = bool(value)
Field.set(self, object, value)
def fromUnicode(self, value):
"""
>>> from zope.schema._bootstrapfields import Bool
>>> from zope.schema.interfaces import IFromUnicode
>>> b = Bool()
>>> IFromUnicode.providedBy(b)
True
>>> b.fromUnicode('True')
True
>>> b.fromUnicode('')
False
>>> b.fromUnicode('true')
True
>>> b.fromUnicode('false') or b.fromUnicode('False')
False
>>> b.fromUnicode(u'\u2603')
False
"""
# On Python 2, we're relying on the implicit decoding
# that happens during string comparisons of unicode to native
# (byte) strings; decoding errors are silently dropped
v = value == 'True' or value == 'true'
self.validate(v)
return v
def fromBytes(self, value):
"""
>>> from zope.schema._bootstrapfields import Bool
>>> from zope.schema.interfaces import IFromBytes
>>> b = Bool()
>>> IFromBytes.providedBy(b)
True
>>> b.fromBytes(b'True')
True
>>> b.fromBytes(b'')
False
>>> b.fromBytes(b'true')
True
>>> b.fromBytes(b'false') or b.fromBytes(b'False')
False
>>> b.fromBytes(u'\u2603'.encode('utf-8'))
False
"""
return self.fromUnicode(value.decode("utf-8"))
class InvalidNumberLiteral(ValueError, ValidationError):
"""Invalid number literal."""
@implementer(IFromUnicode, IFromBytes)
class Number(Orderable, Field):
"""
A field representing a :class:`numbers.Number` and implementing
:class:`zope.schema.interfaces.INumber`.
The :meth:`fromUnicode` method will attempt to use the smallest or
strictest possible type to represent incoming strings::
>>> from zope.schema._bootstrapfields import Number
>>> f = Number()
>>> f.fromUnicode(u"1")
1
>>> f.fromUnicode(u"125.6")
125.6
>>> f.fromUnicode(u"1+0j")
(1+0j)
>>> f.fromUnicode(u"1/2")
Fraction(1, 2)
>>> f.fromUnicode(str(2**31234) + '.' + str(2**256)) # doctest: +ELLIPSIS
Decimal('234...936')
>>> f.fromUnicode(u"not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'
Similarly, :meth:`fromBytes` will do the same for incoming byte strings::
>>> from zope.schema._bootstrapfields import Number
>>> f = Number()
>>> f.fromBytes(b"1")
1
>>> f.fromBytes(b"125.6")
125.6
>>> f.fromBytes(b"1+0j")
(1+0j)
>>> f.fromBytes(b"1/2")
Fraction(1, 2)
>>> f.fromBytes((str(2**31234) + '.' + str(2**256)).encode('ascii')) # doctest: +ELLIPSIS
Decimal('234...936')
>>> f.fromBytes(b"not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'
.. versionadded:: 4.6.0
.. versionchanged:: 4.8.0
Implement :class:`zope.schema.interfaces.IFromBytes`
"""
_type = numbers.Number
# An ordered sequence of conversion routines. These should accept
# a native string and produce an object that is an instance of `_type`, or raise
# a ValueError. The order should be most specific/strictest towards least
# restrictive (in other words, lowest in the numeric tower towards highest).
# We break this rule with fractions, though: a floating point number is
# more generally useful and expected than a fraction, so we attempt to parse
# as a float before a fraction.
_unicode_converters = (int, float, fractions.Fraction, complex, decimal.Decimal)
# The type of error we will raise if all conversions fail.
_validation_error = InvalidNumberLiteral
def fromUnicode(self, value):
last_exc = None
for converter in self._unicode_converters:
try:
val = converter(value)
if converter is float and isinf(val) and decimal.Decimal in self._unicode_converters:
# Pass this on to decimal, if we're allowed
val = decimal.Decimal(value)
except (ValueError, decimal.InvalidOperation) as e:
last_exc = e
else:
self.validate(val)
return val
try:
raise self._validation_error(*last_exc.args).with_field_and_value(self, value)
finally:
last_exc = None
# On Python 2, native strings are byte strings, which is
# what the converters expect, so we don't need to do any decoding.
if PY2: # pragma: no cover
fromBytes = fromUnicode
else:
def fromBytes(self, value):
return self.fromUnicode(value.decode('utf-8'))
class Complex(Number):
"""
A field representing a :class:`numbers.Complex` and implementing
:class:`zope.schema.interfaces.IComplex`.
The :meth:`fromUnicode` method is like that for :class:`Number`,
but doesn't allow Decimals::
>>> from zope.schema._bootstrapfields import Complex
>>> f = Complex()
>>> f.fromUnicode(u"1")
1
>>> f.fromUnicode(u"125.6")
125.6
>>> f.fromUnicode(u"1+0j")
(1+0j)
>>> f.fromUnicode(u"1/2")
Fraction(1, 2)
>>> f.fromUnicode(str(2**31234) + '.' + str(2**256)) # doctest: +ELLIPSIS
inf
>>> f.fromUnicode(u"not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'
Similarly for :meth:`fromBytes`:
>>> from zope.schema._bootstrapfields import Complex
>>> f = Complex()
>>> f.fromBytes(b"1")
1
>>> f.fromBytes(b"125.6")
125.6
>>> f.fromBytes(b"1+0j")
(1+0j)
>>> f.fromBytes(b"1/2")
Fraction(1, 2)
>>> f.fromBytes((str(2**31234) + '.' + str(2**256)).encode('ascii')) # doctest: +ELLIPSIS
inf
>>> f.fromBytes(b"not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
# East Asian ideograph
0x22434E: (0x6AFD, 0), # East Asian ideograph
0x2D3058: (0x4E9C, 0), # East Asian ideograph
0x29434F: (0x94DE, 0), # East Asian ideograph
0x6F562C: (0xC671, 0), # Korean hangul
0x224350: (0x6AFA, 0), # East Asian ideograph
0x347D24: (0x83C7, 0), # East Asian ideograph
0x6F552E: (0xC561, 0), # Korean hangul
0x2D5F3B: (0x96A0, 0), # East Asian ideograph
0x6F4963: (0xAD20, 0), # Korean hangul
0x6F5132: (0xBCA1, 0), # Korean hangul
0x224352: (0x6B01, 0), # East Asian ideograph
0x4B4A74: (
0x731C,
0,
), # East Asian ideograph (variant of 214A74 which maps to 731C)
0x234354: (0x927E, 0), # East Asian ideograph
0x6F5C47: (0xD45C, 0), # Korean hangul
0x274355: (0x4E66, 0), # East Asian ideograph
0x6F4964: (0xAD28, 0), # Korean hangul
0x334357: (0x6702, 0), # East Asian ideograph
0x223348: (0x6435, 0), # East Asian ideograph
0x224358: (0x6B03, 0), # East Asian ideograph
0x224359: (0x6AF8, 0), # East Asian ideograph
0x21605F: (0x98B6, 0), # East Asian ideograph
0x4D6047: (0x816D, 0), # East Asian ideograph
0x27435A: (0x4F1A, 0), # East Asian ideograph
0x23435B: (0x9291, 0), # East Asian ideograph
0x27603C: (0x987B, 0), # East Asian ideograph
0x6F4965: (0xAD29, 0), # Korean hangul
0x6F4F7E: (0xBA00, 0), # Korean hangul
0x23435D: (0x929B, 0), # East Asian ideograph
0x233748: (0x8D6C, 0), # East Asian ideograph
0x2D305B: (0x4EBE, 0), # East Asian ideograph
0x217573: (0x57D5, 0), # East Asian ideograph
0x6F562F: (0xC67C, 0), # Korean hangul
0x284B43: (0x8365, 0), # East Asian ideograph
0x22435F: (0x6B0D, 0), # East Asian ideograph
0x6F4B3B: (0xB048, 0), # Korean hangul
0x224360: (0x6B09, 0), # East Asian ideograph
0x355053: (0x98C8, 0), # East Asian ideograph
0x6F4966: (0xAD2D, 0), # Korean hangul
0x224361: (0x6B0E, 0), # East Asian ideograph
0x225563: (0x726E, 0), # East Asian ideograph
0x234362: (0x927F, 0), # East Asian ideograph
0x69243C: (0x305C, 0), # Hiragana letter ZE
0x6F5630: (0xC680, 0), # Korean hangul
0x234364: (0x92A3, 0), # East Asian ideograph
0x6F4967: (0xAD34, 0), # Korean hangul
0x275852: (0x8BCF, 0), # East Asian ideograph
0x214366: (0x6727, 0), # East Asian ideograph
0x224367: (0x6B11, 0), # East Asian ideograph
0x2D4E33: (0x78AA, 0), # East Asian ideograph
0x3F5631: (0x517F, 0), # East Asian ideograph
0x334369: (0x5932, 0), # East Asian ideograph
0x234857: (0x9464, 0), # East Asian ideograph
0x21436A: (0x672B, 0), # East Asian ideograph
0x293032: (0x7962, 0), # East Asian ideograph
0x6F4968: (0xAD38, 0), # Korean hangul
0x275853: (0x8BC5, 0), # East Asian ideograph
0x33496A: (0x934A, 0), # East Asian ideograph
0x22436D: (0x6B19, 0), # East Asian ideograph
0x4B5179: (0x7F0B, 0), # East Asian ideograph
0x6F5632: (0xC68B, 0), # Korean hangul
0x23436F: (0x92D0, 0), # East Asian ideograph
0x6F4969: (0xAD3C, 0), # Korean hangul
0x2D4370: (0x6736, 0), # East Asian ideograph
0x215167: (0x7E46, 0), # East Asian ideograph
0x234371: (0x92F1, 0), # East Asian ideograph
0x234372: (0x92DF, 0), # East Asian ideograph
0x275528: (0x835A, 0), # East Asian ideograph
0x215E31: (0x93C8, 0), # East Asian ideograph
0x6F496A: (0xAD44, 0), # Korean hangul
0x214375: (0x6750, 0), # East Asian ideograph
0x215168: (0x7E37, 0), # East Asian ideograph
0x6F572B: (0xC7BC, 0), # Korean hangul
0x234376: (0x92B6, 0), # East Asian ideograph
0x4B4638: (0x6BB1, 0), # East Asian ideograph
0x234377: (0x92C0, 0), # East Asian ideograph
0x6F5634: (0xC694, 0), # Korean hangul
0x6F4D2B: (0xB35B, 0), # Korean hangul
0x6F4B3C: (0xB04A, 0), # Korean hangul
0x234379: (0x92BE, 0), # East Asian ideograph
0x6F572E: (0xC7C0, 0), # Korean hangul
0x2D3061: (0x4EB0, 0), # East Asian ideograph
0x6F5A78: (0xD0D4, 0), # Korean hangul
0x6F5635: (0xC695, 0), # Korean hangul
0x29437D: (0x94D8, 0), # East Asian ideograph
0x696E28: (0x9056, 0), # East Asian ideograph
0x4B5746: (0x8853, 0), # East Asian ideograph
0x23437E: (0x92D5, 0), # East Asian ideograph
0x2D3D2B: (0x5EBF, 0), # East Asian ideograph
0x6F4A62: (0xAED1, 0), # Korean hangul
0x276E2A: (0x53A3, 0), # East Asian ideograph
0x2D527B: (0x8074, 0), # East Asian ideograph
0x6F5A79: (0xD0D5, 0), # Korean hangul
0x282D74: (0x6004, 0), # East Asian ideograph
0x6F5636: (0xC698, 0), # Korean hangul
0x23485C: (0x9465, 0), # East Asian ideograph
0x226233: (0x774A, 0), # East Asian ideograph
0x6F496D: (0xAD50, 0), # Korean hangul
0x6F5C49: (0xD478, 0), # Korean hangul
0x6F5840: (0xC9ED, 0), # Korean hangul
0x222E2F: (0x615C, 0), # East Asian ideograph
0x223351: (0x640A, 0), # East Asian ideograph
0x21317D: (0x501A, 0), # East Asian ideograph
0x6F5A7A: (0xD0DC, 0), # Korean hangul
0x2E7451: (0x7F58, 0), # East Asian ideograph
0x6F5637: (0xC6A5, 0), # Korean hangul
0x215E35: (0x93DD, 0), # East Asian ideograph
0x23485D: (0x9455, 0), # East Asian ideograph
0x6F4E31: (0xB5A1, 0), # Korean hangul
0x225E2C: (0x7590, 0), # East Asian ideograph
0x2D3D2D: (0x5396, 0), # East Asian ideograph
0x275859: (0x8BE5, 0), # East Asian ideograph
0x21516C: (0x7E2B, 0), # East Asian ideograph
0x225128: (0x70E0, 0), # East Asian ideograph
0x6F5A7B: (0xD0DD, 0), # Korean hangul
0x275529: (0x830E, 0), # East Asian ideograph
0x22445F: (0x6B6E, 0), # East Asian ideograph
0x33485E: (0x67D2, 0), # East Asian ideograph
0x226235: (0x7743, 0), # East Asian ideograph
0x2D4171: (0x62CA, 0), # East Asian ideograph
0x6F496F: (0xAD6D, 0), # Korean hangul
0x6F572C: (0xC7BD, 0), # Korean hangul
0x27493A: (0x6FD1, 0), # East Asian ideograph
0x23596B: (0x9C7A, 0), # East Asian ideograph
0x235B59: (0x9DA9, 0), # East Asian ideograph
0x27474E: (0x6CFE, 0), # East Asian ideograph
0x6F5639: (0xC6A9, 0), # Korean hangul
0x215E37: (0x93D8, 0), # East Asian ideograph
0x222E3D: (0x61A2, 0), # East Asian ideograph
0x2D3D2F: (0x539B, 0), # East Asian ideograph
0x6F5946: (0xCCD0, 0), # Korean hangul
0x345D6B: (0x756D, 0), # East Asian ideograph
0x285D6B: (0x7572, 0), # East Asian ideograph
0x222E40: (0x61A8, 0), # East Asian ideograph
0x6F563A: (0xC6B0, 0), # Korean hangul
0x4B3C2B: (0x67C3, 0), # East Asian ideograph (Version J extension)
0x6F4F37: (0xB840, 0), # Korean hangul
0x6F4971: (0xAD73, 0), # Korean hangul
0x6F4A63: (0xAED8, 0), # Korean hangul
0x22512B: (0x70D4, 0), # East Asian ideograph
0x23552A: (0x9AEF, 0), # East Asian ideograph
0x6F5A7E: (0xD0EC, 0), # Korean hangul
0x222E45: (0x6196, 0), # East Asian ideograph
0x6F563B: (0xC6B1, 0), # Korean hangul
0x6F4972: (0xAD74, 0), # Korean hangul
0x6F563C: (0xC6B4, 0), # Korean hangul
0x234862: (0x946A, 0), # East Asian ideograph
0x282E4C: (0x6126, 0), # East Asian ideograph
0x2D5F4B: (0x96D1, 0), # East Asian ideograph
0x6F4973: (0xAD75, 0), # Korean hangul
0x215171: (0x7E54, 0), # East Asian ideograph
0x6F563D: (0xC6B7, 0), # Korean hangul
0x215E3B: (0x93FD, 0), # East Asian ideograph
0x2D4176: (0x6483, 0), # East Asian ideograph
0x2D5F4C: (0x9DC4, 0), # East Asian ideograph
0x6F4974: (0xAD76, 0), # Korean hangul
0x6F5D32: (0xD613, 0), # Korean hangul
0x282E52: (0x6003, 0), # East Asian ideograph
0x27493B: (0x6CA5, 0), # East Asian ideograph
0x233941: (0x8DFD, 0), # East Asian ideograph
0x6F563E: (0xC6B8, 0), # Korean hangul
0x4B5773: (0x7ED4, 0), # East Asian ideograph
0x213C23: (0x5D22, 0), # East Asian ideograph
0x286E56: (0x7BA8, 0), # East Asian ideograph
0x2D3D34: (0x5EFE, 0), # East Asian ideograph
0x215173: (0x7E5E, 0), # East Asian ideograph
0x223359: (0x6407, 0), # East Asian ideograph
0x216E58: (0x535F, 0), # East Asian ideograph
0x707523: (0x9170, 0), # East Asian ideograph
0x6F4B77: (0xB119, 0), # Korean hangul
0x222E5A: (0x61CB, 0), # East Asian ideograph
0x213C24: (0x5D29, 0), # East Asian ideograph
0x33355C: (0x5449, 0), # East Asian ideograph
0x273648: (0x95EE, 0), # East Asian ideograph
0x282E5C: (0x603F, 0), # East Asian ideograph
0x27514C: (0x7EFF, 0), # East Asian ideograph
0x6F5579: (0xC635, 0), # Korean hangul
0x6F5640: (0xC6BA, 0), # Korean hangul
0x6F5951: (0xCD5C, 0), # Korean hangul
0x4B397B: (0x5A2F, 0), # East Asian ideograph
0x29402C: (0x90D0, 0), # East Asian ideograph
0x22623D: (0x7760, 0), # East Asian ideograph
0x6F4977: (0xAD7F, 0), # Korean hangul
0x6F5136: (0xBCB0, 0), # Korean hangul
0x216E61: (0x5414, 0), # East Asian ideograph
0x22335B: (0x643B, 0), # East Asian ideograph
0x6F4A2E: (0xADFC, 0), # Korean hangul
0x6F5641: (0xC6C0, 0), # Korean hangul
0x213C26: (0x5D19, 0), # East Asian ideograph
0x275863: (0x8BE1, 0), # East Asian ideograph
| |
# code/twokenize.py
# -*- coding: utf-8 -*-
"""
Twokenize -- a tokenizer designed for Twitter text in English and some other European languages.
This tokenizer code has gone through a long history:
(1) <NAME> wrote original version in Python, http://github.com/brendano/tweetmotif
TweetMotif: Exploratory Search and Topic Summarization for Twitter.
<NAME>, <NAME>, and <NAME>.
ICWSM-2010 (demo track), http://brenocon.com/oconnor_krieger_ahn.icwsm2010.tweetmotif.pdf
(2a) <NAME> and <NAME> modified it for POS tagging for the CMU ARK Twitter POS Tagger
(2b) <NAME> and <NAME> ported it to Scala
(3) Brendan bugfixed the Scala port and merged with POS-specific changes
for the CMU ARK Twitter POS Tagger
(4) <NAME> ported it back to Java and added many improvements (2012-06)
Current home is http://github.com/brendano/ark-tweet-nlp and http://www.ark.cs.cmu.edu/TweetNLP
(5) Ported to Python by <NAME> <<EMAIL>>
https://github.com/myleott/ark-twokenize-py
LR:
- Added tokenize2() to break up words with \' - convenient for non-English
  (e.g. Italian), including the unicode apostrophe thing they typically use,
  u2019.
- Added tokenize3()
- Splits emojis
<NAME>, <EMAIL>, July 2015
"""
from __future__ import print_function
import re
def regex_or(*items):
return '(?:' + '|'.join(items) + ')'
# should add the missing ones to this list but i'm lazy
all_emoji = open('emoji-data.txt').readlines()
all_emoji = [x.strip() for x in all_emoji if not x.strip().startswith('#')]
all_emoji = [x.split(';')[0].strip() for x in all_emoji]
all_emoji_1 = [unichr(int(x, 16)) for x in all_emoji if len(x.split()) == 1]
all_emoji_2 = [x.split() for x in all_emoji if len(x.split()) == 2]
all_emoji_2 = [unichr(int(x[0].strip(), 16)) + unichr(int(x[1].strip(), 16))
for x in all_emoji_2]
all_emoji = all_emoji_1 + all_emoji_2
all_emoji = [re.escape(x) for x in all_emoji]
all_emoji_str = u'(' + ur'|'.join(all_emoji) + u')'
re_emoji = re.compile(all_emoji_str, re.UNICODE)
Contractions = re.compile(u"(?i)(\w+)(n['’′]t|['’′]ve|['’′]ll|['’′]d|['’′]re|['’′]s|['’′]m)$", re.UNICODE)
Whitespace = re.compile(u"[\s\u0020\u00a0\u1680\u180e\u202f\u205f\u3000\u2000-\u200a]+", re.UNICODE)
punctSeq = r"['\"“”‘’]+|[.?!,…]+|[:;]+" #'anthem'. => ' anthem ' .
entity = r"&(?:amp|lt|gt|quot);"
# don't it's a trap l'app - words separated by apostrophe
ApWords = re.compile(ur"(\w+)('|\u2019)(\w+)", re.UNICODE)
# Abbreviations
boundaryNotDot = regex_or("$", r"\s", r"[“\"?!,:;]", entity)
aa1 = r"(?:[A-Za-z]\.){2,}(?=" + boundaryNotDot + ")"
aa2 = r"[^A-Za-z](?:[A-Za-z]\.){1,}[A-Za-z](?=" + boundaryNotDot + ")"
standardAbbreviations = r"\b(?:[Mm]r|[Mm]rs|[Mm]s|[Dd]r|[Ss]r|[Jj]r|[Rr]ep|[Ss]en|[Ss]t)\."
arbitraryAbbrev = regex_or(aa1, aa2, standardAbbreviations)
separators = "(?:--+|―|—|~|–|=)"
decorations = u"(?:[♫♪]+|[★☆]+|[♥❤♡]+|[\u2639-\u263b]+|[\ue001-\uebbb]+)".encode('utf-8')
thingsThatSplitWords = r"[^\s\.,?\"]"
embeddedApostrophe = thingsThatSplitWords+r"+['’′]" + thingsThatSplitWords + "*"
# Emoticons
# myleott: in Python the (?iu) flags affect the whole expression
#normalEyes = "(?iu)[:=]" # 8 and x are eyes but cause problems
normalEyes = "[:=]" # 8 and x are eyes but cause problems
wink = "[;]"
noseArea = "(?:|-|[^a-zA-Z0-9 ])" # doesn't get :'-(
happyMouths = r"[D\)\]\}]+"
sadMouths = r"[\(\[\{]+"
tongue = "[pPd3]+"
otherMouths = r"(?:[oO]+|[/\\]+|[vV]+|[Ss]+|[|]+)" # remove forward slash if http://'s aren't cleaned
# mouth repetition examples:
# @aliciakeys Put it in a love song :-))
# @hellocalyclops =))=))=)) Oh well
# myleott: try to be as case insensitive as possible, but still not perfect, e.g., o.O fails
#bfLeft = u"(♥|0|o|°|v|\\$|t|x|;|\u0ca0|@|ʘ|•|・|◕|\\^|¬|\\*)".encode('utf-8')
bfLeft = u"(♥|0|[oO]|°|[vV]|\\$|[tT]|[xX]|;|\u0ca0|@|ʘ|•|・|◕|\\^|¬|\\*)".encode('utf-8')
bfCenter = r"(?:[\.]|[_-]+)"
bfRight = r"\2"
s3 = r"(?:--['\"])"
s4 = r"(?:<|<|>|>)[\._-]+(?:<|<|>|>)"
s5 = "(?:[.][_]+[.])"
# myleott: in Python the (?i) flag affects the whole expression
#basicface = "(?:(?i)" +bfLeft+bfCenter+bfRight+ ")|" +s3+ "|" +s4+ "|" + s5
basicface = "(?:" +bfLeft+bfCenter+bfRight+ ")|" +s3+ "|" +s4+ "|" + s5
eeLeft = r"[\\\ƪԄ\((<>;ヽ\-=~\*]+"
eeRight= u"[\\-=\\);'\u0022<>ʃ)//ノノ丿╯σっµ~\\*]+".encode('utf-8')
eeSymbol = r"[^A-Za-z0-9\s\(\)\*:=-]"
eastEmote = eeLeft + "(?:"+basicface+"|" +eeSymbol+")+" + eeRight
oOEmote = r"(?:[oO]" + bfCenter + r"[oO])"
emoticon = regex_or(
# Standard version :) :( :] :D :P
"(?:>|>)?" + regex_or(normalEyes, wink) + regex_or(noseArea,"[Oo]") + regex_or(tongue+r"(?=\W|$|RT|rt|Rt)", otherMouths+r"(?=\W|$|RT|rt|Rt)", sadMouths, happyMouths),
# reversed version (: D: use positive lookbehind to remove "(word):"
# because eyes on the right side is more ambiguous with the standard usage of : ;
regex_or("(?<=(?: ))", "(?<=(?:^))") + regex_or(sadMouths,happyMouths,otherMouths) + noseArea + regex_or(normalEyes, wink) + "(?:<|<)?",
#inspired by http://en.wikipedia.org/wiki/User:Scapler/emoticons#East_Asian_style
eastEmote.replace("2", "1", 1), basicface,
# iOS 'emoji' characters (some smileys, some symbols) [\ue001-\uebbb]
# TODO should try a big precompiled lexicon from Wikipedia, <NAME> told me (BTO) he does this
# myleott: o.O and O.o are two of the biggest sources of differences
# between this and the Java version. One little hack won't hurt...
oOEmote
)
Hearts = "(?:<+/?3+)+" #the other hearts are in decorations
Arrows = regex_or(r"(?:<*[-―—=]*>+|<+[-―—=]*>*)", u"[\u2190-\u21ff]+".encode('utf-8'))
# BTO 2011-06: restored Hashtag, AtMention protection (dropped in original scala port) because it fixes
# "hello (#hashtag)" ==> "hello (#hashtag )" WRONG
# "hello (#hashtag)" ==> "hello ( #hashtag )" RIGHT
# "hello (@person)" ==> "hello (@person )" WRONG
# "hello (@person)" ==> "hello ( @person )" RIGHT
# ... Some sort of weird interaction with edgepunct I guess, because edgepunct
# has poor content-symbol detection.
# This also gets #1 #40 which probably aren't hashtags .. but good as tokens.
# If you want good hashtag identification, use a different regex.
Hashtag = "#[a-zA-Z0-9_]+" #optional: lookbehind for \b
# I was worried this would conflict with at-mentions
# but seems ok in sample of 5800: 7 changes all email fixes
# http://www.regular-expressions.info/email.html
Bound = r"(?:\W|^|$)"
Email = regex_or("(?<=(?:\W))", "(?<=(?:^))") + r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}(?=" +Bound+")"
# We will be tokenizing using these regexps as delimiters
# Additionally, these things are "protected", meaning they shouldn't be further split themselves.
Protected = re.compile(
unicode(regex_or(
Hearts,
Email,
emoticon,
Arrows,
punctSeq,
arbitraryAbbrev,
separators,
decorations,
embeddedApostrophe,
Hashtag,
).decode('utf-8')), re.UNICODE)
# Edge punctuation
# Want: 'foo' => ' foo '
# While also: don't => don't
# the first is considered "edge punctuation".
# the second is word-internal punctuation -- don't want to mess with it.
# BTO (2011-06): the edgepunct system seems to be the #1 source of problems these days.
# I remember it causing lots of trouble in the past as well. Would be good to revisit or eliminate.
# Note the 'smart quotes' (http://en.wikipedia.org/wiki/Smart_quotes)
#edgePunctChars = r"'\"“”‘’«»{}\(\)\[\]\*&" #add \\p{So}? (symbols)
edgePunctChars = u"'\"“”‘’«»{}\\(\\)\\[\\]\\*&" #add \\p{So}? (symbols)
edgePunct = "[" + edgePunctChars + "]"
notEdgePunct = "[a-zA-Z0-9]" # content characters
offEdge = r"(^|$|:|;|\s|\.|,)" # colon here gets "(hello):" ==> "( hello ):"
EdgePunctLeft = re.compile(offEdge + "("+edgePunct+"+)("+notEdgePunct+")", re.UNICODE)
EdgePunctRight = re.compile("("+notEdgePunct+")("+edgePunct+"+)" + offEdge, re.UNICODE)
def splitEdgePunct(input):
input = EdgePunctLeft.sub(r"\1\2 \3", input)
input = EdgePunctRight.sub(r"\1 \2\3", input)
return input
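# Illustrative sketch of the edge-punctuation behaviour described above (traced by
# hand, shown only as a comment):
#   splitEdgePunct(u"'foo' don't")  ->  u"' foo ' don't"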
# The main work of tokenizing a tweet.
def simpleTokenize(text):
# Do the no-brainers first
splitPunctText = splitEdgePunct(text)
textLength = len(splitPunctText)
# BTO: the logic here got quite convoluted via the Scala porting detour
# It would be good to switch back to a nice simple procedural style like in the Python version
# ... Scala is such a pain. Never again.
# Find the matches for subsequences that should be protected,
# e.g. URLs, 1.0, U.N.K.L.E., 12:53
bads = []
badSpans = []
for match in Protected.finditer(splitPunctText):
# The spans of the "bads" should not be split.
if (match.start() != match.end()): #unnecessary?
bads.append( [splitPunctText[match.start():match.end()]] )
badSpans.append( (match.start(), match.end()) )
# Create a list of indices to create the "goods", which can be
# split. We are taking "bad" spans like
# List((2,5), (8,10))
# to create
# List(0, 2, 5, 8, 10, 12)
# where, e.g., "12" here would be the textLength
# has an even length and no indices are the same
indices = [0]
for (first, second) in badSpans:
indices.append(first)
indices.append(second)
indices.append(textLength)
# Group the indices and map them to their respective portion of the string
splitGoods = []
for i in range(0, len(indices), 2):
goodstr = splitPunctText[indices[i]:indices[i+1]]
splitstr = goodstr.strip().split() # whitespace tokenization
splitGoods.append(splitstr)
# Reinterpolate the 'good' and 'bad' Lists, ensuring that
    # additional tokens from the last good item get included
zippedStr = []
for i in range(len(bads)):
zippedStr = addAllnonempty(zippedStr, splitGoods[i])
zippedStr = addAllnonempty(zippedStr, bads[i])
zippedStr = addAllnonempty(zippedStr, splitGoods[len(bads)])
    # LR: Break apart hash use including hashtags
# splitStr = []
# for tok in zippedStr:
# splitStr.extend(splitHash(tok))
# zippedStr = splitStr
# fix emoji tokenization
splitStr = []
for tok in zippedStr:
splitStr.extend(re_emoji.split(tok))
zippedStr = splitStr
zippedStr = u' '.join(splitStr).split()
return zippedStr
# Final pass tokenization based on special patterns
def splitToken(token):
m = Contractions.search(token)
if m:
return [m.group(1), m.group(2)]
return [token]
# '#example' -> ['#', 'example']
# '$example' -> ['$', 'example']
def splitHash(token):
if token.startswith('#'):
return [token[0], token[1:]]
return [token]
def addAllnonempty(master, smaller):
for s in smaller:
strim = s.strip()
if (len(strim) > 0):
master.append(strim)
return master
# "foo bar " => "foo bar"
def squeezeWhitespace(input):
return Whitespace.sub(" ", input).strip()
def tokenize(text):
return simpleTokenize(squeezeWhitespace(text))
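# Illustrative usage sketch (output is approximate; the protected spans above decide
# the exact splits):
#   tokenize(u"hello (#hashtag) :-)")  ->  [u'hello', u'(', u'#hashtag', u')', u':-)']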
def tokenize2(text):
"""Breaks apostrophes:
l'ammore -> ["l'", "ammore"]
"""
tokens = simpleTokenize(squeezeWhitespace(text))
ntoks = []
for tok in tokens:
if '\'' in tok:
matching = ApWords.match(tok)
if matching is not None:
tok_list = list(matching.groups())
p1 = tok_list[0] + tok_list[1] # "l'"
ntoks.extend([p1])
ntoks.extend(tok_list[2:]) # "ammore"
else:
ntoks.extend([tok])
return ntoks
def tokenize3(text):
""" keeps contractions """
tokens = simpleTokenize(squeezeWhitespace(text))
    # BTO: our POS tagger wants "ur"
"""Utility functions and base classes for implementing extension hooks."""
from __future__ import annotations
import types
from typing import Any, Callable, Dict, List, Optional
from torch.nn import Module, Parameter, Sequential
class ModuleHook:
"""Hook class to perform actions on parameters right after BackPACK's extension.
Hook has access to the parameter and its module. Use this hook if information from
a module needs to be stored inside a parameter.
To inherit from this class:
- Implement the ``module_hook`` function.
"""
def module_hook(self, param: Parameter, module: Module) -> Any:
"""Extract info from a parameter during backpropagation with BackPACK.
Args:
param: Parameter of a neural net.
module: Layer that `param` is part of.
Return:
Output which will be stored in ``param``'s attribute ``self.savefield``.
Raises:
NotImplementedError: Must be implemented by child classes.
"""
raise NotImplementedError
def __init__(self, savefield: str = None):
"""Store the attribute under which results are attached to parameters.
Args:
savefield: Attribute name under which results can be saved. ``None``
means that the hook has side effects, but no results will be
saved in parameters. Default value: ``None``.
"""
self.savefield = savefield
self.processed = set()
def __call__(self, module: Module):
"""Execute hook on all module parameters. Skip already processes parameters.
Args:
module: Hook is applied to all parameters in module.
"""
for param in module.parameters():
if self.should_run_hook(param, module):
self.run_hook(param, module)
def should_run_hook(self, param, module):
"""Check if hooks should be executed on a parameter.
Hooks are only executed once on every trainable parameter.
``Sequential``s are being skipped.
Args:
param (Tensor): Parameter of a neural net.
module (Module): Layer that `param` is part of.
Returns:
bool: Whether the hook should be executed on the parameter.
"""
if isinstance(module, Sequential):
return False
else:
return id(param) not in self.processed and param.requires_grad
def run_hook(self, param, module):
"""Execute the hook on parameter, add it to processed items and store result.
Args:
param (Parameter): Parameter to execute the hook on.
module (Module): Module that contains ``param``.
"""
value = self.module_hook(param, module)
self._save(value, param)
self.processed.add(id(param))
def _save(self, value, param):
"""Store value in parameter's ``savefield`` argument if necessary.
Args:
value (any): Arbitrary object that will be stored.
param (Parameter): Parameter the value is attached to.
Raises:
ValueError: If the hook produced an output, but the savefield is empty.
"""
should_save = self.savefield is not None
if value is not None and not should_save:
raise ValueError(
f"Hook has no savefield, but produced output of type {type(value)}."
)
if should_save:
setattr(param, self.savefield, value)
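# Hedged sketch: a minimal ModuleHook subclass. ``OwningModuleHook`` is a
# hypothetical name used for illustration only; it stores, for every parameter,
# the class name of the module it belongs to, using nothing beyond the
# interface documented above.
class OwningModuleHook(ModuleHook):
    """Store the owning module's class name under ``param.owner``."""

    def __init__(self):
        super().__init__(savefield="owner")

    def module_hook(self, param, module):
        # The module is available here, so per-module information can be
        # attached to each of its parameters.
        return module.__class__.__name__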
class ParameterHook(ModuleHook):
"""Hook class to perform actions on parameters right after BackPACK's extension.
Hook has access to the parameter.
To inherit from this class:
- Implement the ``param_hook`` function.
"""
def param_hook(self, param):
"""Extract info from a parameter during backpropagation with BackPACK.
Args:
param (Tensor): Parameter of a neural net.
Return:
Arbitrary output which will be stored in ``param``'s attribute
``self.savefield``.
Raises:
NotImplementedError: Must be implemented by child classes.
"""
raise NotImplementedError
def module_hook(self, param, module):
"""Extract info from a parameter during backpropagation with BackPACK.
Args:
param (Tensor): Parameter of a neural net.
module (Module): Layer that `param` is part of.
Returns:
any: Arbitrary output which will be stored in ``param``'s attribute
``self.savefield``.
"""
return self.param_hook(param)
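# Hedged sketch: a minimal ParameterHook subclass. ``NumelHook`` is a
# hypothetical name used for illustration and is not part of BackPACK's API;
# it relies only on the interface documented above (``param_hook`` returns a
# value that is stored under ``savefield``).
class NumelHook(ParameterHook):
    """Store the number of elements of every parameter under ``param.numel_``."""

    def __init__(self):
        super().__init__(savefield="numel_")

    def param_hook(self, param):
        # Trivial per-parameter computation, purely for illustration.
        return param.numel()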
class ParameterGroupsHook(ParameterHook):
"""Handle computations during backpropagation parameterized by parameter groups.
Computation results for parameters in the same group are accumulated, then further
processed once the entire group has undergone backpropagation.
To inherit from this class:
- Implement the ``param_computation`` function.
- Implement the ``group_hook`` function.
- Implement the ``accumulate`` function.
"""
def group_hook(self, accumulation: Any, group: Dict[str, Any]) -> Any:
"""Process accumulated results from parameter computations.
Args:
accumulation: Accumulated parameter computations from cache.
group: Parameter group of a ``torch.optim.Optimizer``.
Returns: # noqa: DAR202
Result that will be saved under the group id.
Raises:
NotImplementedError: Must be implemented by child classes.
"""
raise NotImplementedError
def param_computation(self, param: Parameter) -> Any:
"""Compute partial result of group computation for a parameter.
Args:
param: Parameter of a neural net.
Returns: # noqa: DAR202
Result of parameter computation that will be accumulated group-wise.
Raises:
NotImplementedError: Must be implemented by child classes.
"""
raise NotImplementedError
def accumulate(self, existing: Any, update: Any) -> Any:
"""Update the currently accumulated result with the update from a parameter.
Args:
existing: Cached accumulation for a group.
update: Result from parameter computation.
Returns: # noqa: DAR202
Updated result that will be written to cache.
Raises:
NotImplementedError: Must be implemented by child classes.
"""
raise NotImplementedError
def __init__(self, param_groups):
"""Store parameter groups. Set up mappings between groups and parameters.
Args:
param_groups (list): Parameter group list from a ``torch.optim.Optimizer``.
"""
super().__init__(None)
self._processed_groups = set()
self._check_param_groups(param_groups)
self._param_groups = param_groups
self._param_groups_ids = [id(group) for group in param_groups]
self._param_to_group = {
id(p): id(group) for group in param_groups for p in group["params"]
}
self._group_to_params = {
id(group): [id(p) for p in group["params"]] for group in param_groups
}
# accumulate parameter results for each group under ``id(group)``
self._accumulations = {}
# store group result under ``id(group)``
self._output = {}
def get_output(self, group, pop=True):
"""Return the computation result for a specific parameter group.
Args:
group (dict): Parameter group of a ``torch.optim.Optimizer``.
pop (bool, optional): Remove the result for that group from the
internal buffer. Default: ``True``.
Returns:
Any: Computation result for the parameter group.
Raises:
ValueError: If a parameter of the group was not processed. This indicates
the computation is incomplete, and thus the result may be wrong.
"""
if not all(id(p) in self.processed for p in group["params"]):
raise ValueError("Group contains unprocessed parameters.")
else:
group_id = id(group)
if pop:
return self._output.pop(group_id)
else:
return self._output[group_id]
def param_hook(self, param):
"""Perform parameter computation. Accumulate result in ``self._accumulations``.
Args:
param (Tensor): Parameter of a neural net.
"""
param_id = id(param)
group_id = self._param_to_group[param_id]
result = self.param_computation(param)
self._accumulate_param_computation(result, group_id)
if self.should_run_group_hook(param):
self.run_group_hook(group_id)
def run_group_hook(self, group_id):
"""Execute group hook after results from parameters have been accumulated.
Saves the result in ``self._output`` under the group id.
Args:
group_id (int): Parameter group id.
"""
accumulation = self._accumulations.pop(group_id)
group = self.get_group(group_id)
group_result = self.group_hook(accumulation, group)
self._output[group_id] = group_result
self._processed_groups.add(group_id)
def get_group(self, group_id):
"""Return the parameter group from its ID.
Args:
group_id (int): ID of parameter group.
Returns:
group (dict): Entry of a ``torch.optim.Optimizer``'s parameter group.
"""
idx = self._param_groups_ids.index(group_id)
return self._param_groups[idx]
def should_run_hook(self, param: Parameter, module: Module):
"""Check if hooks should be executed on a parameter.
In addition to the parent class conditions, only execute the hook on a
parameter that is contained in one of the parameter groups.
Args:
param: Parameter of a neural net.
module: Layer that `param` is part of.
Returns:
bool: Whether the hook should be executed on the parameter.
"""
param_in_groups = id(param) in self._param_to_group.keys()
return param_in_groups and super().should_run_hook(param, module)
def should_run_group_hook(self, param):
"""Check if hooks should be executed on the parameter's group.
The earliest possible time for a group hook to run is when all other
parameters in ``param``'s group have already been processed.
Args:
param (Tensor): Parameter of a neural net.
Returns:
bool: Whether the group hook should be executed.
"""
param_id = id(param)
group_id = self._param_to_group[param_id]
group_param_ids = self._group_to_params[group_id]
other_param_ids = [p_id for p_id in group_param_ids if p_id != param_id]
last_missing = param_id not in self.processed and all(
p_id in self.processed for p_id in other_param_ids
)
return last_missing
def _accumulate_param_computation(self, result, group_id):
"""Accumulate output of parameter computation in the group cache.
Args:
result (Tensor): Result from parameter computation.
group_id (int): Parameter group id.
"""
if group_id not in self._accumulations.keys():
updated = result
else:
existing = self._accumulations[group_id]
updated = self.accumulate(existing, result)
self._accumulations[group_id] = updated
def _check_param_groups(self, param_groups):
"""Check parameter groups.
Args:
param_groups (list): Parameter group list from a ``torch.optim.Optimizer``.
Raises:
ValueError: If parameters occur in multiple groups.
"""
param_ids = [id(p) for group in param_groups for p in group["params"]]
if len(param_ids) != len(set(param_ids)):
raise ValueError("Same parameters occur in different groups")
@classmethod
def from_functions(
cls,
param_groups: List[Dict[str, Any]],
param_computation_fn: Callable[[ParameterGroupsHook, Parameter], Any],
group_hook_fn: Callable[[ParameterGroupsHook, Any, Dict[str, Any]], Any],
accumulate_fn: Callable[[ParameterGroupsHook, Any, Any], Any],
) -> ParameterGroupsHook:
"""Generate parameter | |
"""
GDB pretty printer support for BDE components
This module provides a set of pretty printers to load into gdb for debugging
code using BDE (http://github.com/bloomberg/bde) components.
This is a work in progress, more printers will be added as needed.
Authors: <NAME> <<EMAIL>>
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
Configuration options
---------------------
These settings configure some of the behavior of the pretty printer to
improve access to different elements in the underlying data.
(gdb) set print bslma-allocator on
Setting Meaning
--------------- -----------------------------------------------------
bslma-allocator Controls whether the bslma::Allocator* is printed
bsl-eclipse Controls output format, set to 'on' inside Eclipse
and leave as 'off' for plain gdb.
string-address Controls whether string buffer address is printed
Usage
-----
To use the pretty printers load the script into gdb, either manually
through:
(gdb) python execfile('/path/to/this/script.py')
or automatically at start up. See the gdb documentation on how to setup
automatic loading of pretty printers.
You can list, enable or disable pretty printers by using the gdb commands:
(gdb) info pretty-printer
(gdb) disable pretty-printer global BDE;vector
(gdb) enable pretty-printer global BDE
Additionally, you can ignore the pretty printer for a single 'print'
command by running it in "raw" mode:
(gdb) print /r container
"""
# General design considerations
# -----------------------------
# Pretty printers should focus on the useful information for the developer
# that is debugging the application. What is useful for one developer might
# be too verbose for another and not enough for a third one. Unless
# otherwise noted the provided pretty printers will reformat the available
# information but avoid hiding data from the developer. The implication is
# that the output might be slightly verbose (the 'bslma::Allocator' pointer
# in containers is printed always, the size and capacity for 'bsl::string' is
# printed...). Other existing pretty printers (for example for the standard
# library provided with gcc) will omit details and focus on the data.
#
# The format used for output has been considered for a high information
# density given that it will print more things than needed by most users.
# The description of the format for each one of the pretty printers is
# documented below and does not reflect the layout of the C++ types that are
# being printed.
import re
import string
import sys
import gdb
import gdb.printing
###############################################################################
# Private Types and Helpers
###############################################################################
global docs
docs = {}
global pp
pp = None
## Helpers controlling the printout based on printer options
def _createAllocatorList(cbase):
"""Create a list with the allocator information if 'print bslma-allocator'
is set or empty otherwise.
"""
printAllocator = gdb.parameter("print bslma-allocator")
return [] if not printAllocator else [("alloc", cbase)]
def _optionalAllocator(allocator, prefix=",", suffix=""):
printalloc = gdb.parameter("print bslma-allocator")
return "%salloc:%s%s" % (prefix, allocator, suffix) if printalloc else ""
def keyValueIterator(arg):
eclipseMode = gdb.parameter("print bsl-eclipse")
if eclipseMode:
return RawKeyValueIterator(arg)
else:
return KeyValueIterator(arg)
def valueIterator(arg):
eclipseMode = gdb.parameter("print bsl-eclipse")
if eclipseMode:
return RawValueIterator(arg)
else:
return ValueIterator(arg)
def stringAddress(arg):
char_ptr_type = gdb.lookup_type("unsigned char").pointer()
c_str = arg.cast(char_ptr_type)
return "0x%x " % c_str if gdb.parameter("print string-address") else ""
def stringRep(arg, length):
print_len = gdb.parameter("print elements")
if not print_len or print_len + 4 > length:
print_len = length
print_str = ""
char_ptr_type = gdb.lookup_type("unsigned char").pointer()
c_str = arg.cast(char_ptr_type)
for i in range(print_len):
ci = (c_str + i).dereference()
cc = chr(ci)
if cc in string.printable:
print_str += cc
else:
print_str += "\{0:03o}".format(int(ci))
if print_len < length:
print_str += "..."
return print_str
## Debug catch all pretty printer
class CatchAll:
"""Not a pretty printer
This type complies with the pretty printer interface, but will open an
interactive python shell with the information available to the printer for
debugging and testing purposes.
"""
def __init__(self, val):
"""Store the gdb value in this object and open an interactive shell"""
self.val = val
import code
code.interact(local=locals())
def to_string(self):
import code
code.interact(local=locals())
return "<------>"
class BslStringImp:
"""Pretty printer for 'bsl::String_Imp<char>'
The implementation of 'bsl::string' ('bsl::basic_string<>' template) uses a
base template 'bsl::String_Imp<>' to handle the actual contents. This
implementation uses a small string optimization with an internal buffer
that depends on the architecture. The pretty printer for this type will
print a compact representation of the available information, encoding in
the capitalization of the message whether the current object is using the
small buffer ('size') or the dynamically allocated large buffer ('Size').
The 'print string-address' parameter controls whether the address of the
string buffer is printed.
# With print string-address on
data = 0x0x8051074 [size:5,capacity:19] "short"
# With print string-address off
data = [Size:24,capacity:34] "This is a long string!!!"
The size of the internal buffer is detected at runtime.
Note that the pretty printer only handles narrow character strings,
'bsl::string', and not wide character strings 'bsl::wstring' or any other
specialization of the 'bsl::basic_string<>' template.
The current implementation in BDE will set the length value to
'bsl::string::npos' on destruction. The pretty printer detects this as a
special value and reports that the string has been destroyed. If the
capacity of the string indicates that it was using the small string
optimization it then attempts to print the contents of the buffer, if the
small string optimization is not in place, the pretty printer will attempt
to print the contents of 'd_start_p' (pointer to the string that *has
already been deallocated*). Note that this is a *best effort* with no
guarantees, the object has been destroyed, the value may have already been
reused.
If 'print elements' is set, the value will be used to limit the number of
characters printed, and the string will terminate with a "..." indicating
more characters are present. Non-printable characters are written out as a
backslash and three octal digits.
Note: This is not intended for direct use.
Note: The implementation is not able to print strings with a length
greater or equal to 2^31.
"""
def __init__(self, val):
"""Precalculate the data needed to later print the string"""
self.val = val
length = val["d_length"]
if str(length) == "4294967295":
self.destroyed = True
self.length = int(-1)
else:
self.destroyed = False
self.length = int(val["d_length"])
self.capacity = int(val["d_capacity"])
short = val["d_short"]
self.isShort = self.capacity < short.type.sizeof
self.buffer = (
short["d_data"]["d_buffer"] if self.isShort else val["d_start_p"]
)
def to_string(self):
"""Format the string"""
str = None
if not self.destroyed:
str = '%s[%s:%d,capacity:%d] "%s"' % (
stringAddress(self.buffer),
"size" if self.isShort else "Size",
self.length,
self.capacity,
stringRep(self.buffer, self.length),
)
else:
if self.isShort:
str = "[DESTROYED, small buffer value]: %s" % self.buffer
else:
str = "[DESTROYED] %s" % self.buffer
return str
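# Hedged sketch: how a printer such as BslStringImp would typically be hooked
# into gdb's printing framework. The original script builds its own collection
# (see the global ``pp`` above); the subprinter name and regexp below are
# illustrative assumptions rather than the exact values used by this module.
def _build_example_pretty_printer():
    printer = gdb.printing.RegexpCollectionPrettyPrinter("BDE-example")
    printer.add_printer("String_Imp", "^bsl::String_Imp<", BslStringImp)
    return printer
# To activate it for the current object file:
# gdb.printing.register_pretty_printer(gdb.current_objfile(),
#                                      _build_example_pretty_printer())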
class BslVectorImp:
"""Printer for 'bsl::vectorBase<T>' specializations.
This pretty printer handles printing instances of the
'bsl::vectorBase<>' template used to hold the contents of
'bsl::vector<>'. The printer will dump the size and capacity of the
object followed by the sequence of values in the sequence.
[size:10,capacity:16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
Note: This is not intended for direct use
"""
def __init__(self, val):
self.val = val
self.begin = val["d_dataBegin_p"]
self.end = val["d_dataEnd_p"]
self.size = int(self.end - self.begin)
self.capacity = int(val["d_capacity"])
def to_string(self):
return "[size:%d,capacity:%d]" % (self.size, self.capacity)
def display_hint(self):
return "map"
def children(self):
class VectorContentsIterator:
"""Iterator over the contents of the vector"""
# Keep the iteration state on the iterator itself (``s``) rather than on
# the enclosing printer instance.
def __init__(s, begin, end):
s.begin = begin
s.end = end
s.current = begin
def __iter__(s):
return s
def __next__(s):
if s.current == s.end:
raise StopIteration
name = int(s.current - s.begin)
value = s.current.dereference()
s.current += 1
return (name, value)
next = __next__
return keyValueIterator(VectorContentsIterator(self.begin, self.end))
class BslRbTreeIterator:
"""Helper class to produce iterations over a RB-tree
This is **not** a pretty printer, but a helper class to aid in the
implementation of pretty printers for sorted associative containers using
RB-Trees as underlying data structure.
"""
def __init__(self, type, sentinel):
self.sentinel = sentinel
self.current = sentinel["d_right_p"]
self.nodeType = gdb.lookup_type(
"BloombergLP::bslstl::TreeNode<%s>" % type
)
def __iter__(self):
return self
def __next__(self):
if self.current == self.sentinel.address:
raise StopIteration
treeNode = self.current.dereference().cast(self.nodeType)
self.current = self.nextNode(self.current)
return treeNode["d_value"]
next = __next__
def followPointer(self, pointer, name):
"""Follow the pointer specified by 'name' in the specified 'object'.
This function implements the equivalent in C++ of:
return pointer->name & ~1
"""
np = pointer.dereference()[name]
npi = np.cast(gdb.lookup_type("long long"))
| |
# Copyright 2018 <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# Chart Service will attempt to make public functions (not prefixed with _) from this module available. Such functions
# should be fully documented: docstrings should describe parameters and the return value, and provide a 1-line
# description. Type annotations should be provided for parameters.
from .analysis import LagMode, lag
from .statistics import *
from ..errors import *
from typing import Union
import numpy as np
import pandas as pd
from gs_quant.api.gs.data import GsDataApi
from gs_quant.data import DataContext
from gs_quant.datetime.date import DayCountConvention
from gs_quant.markets.securities import Asset
from gs_quant.target.common import Currency
from gs_quant.timeseries.datetime import align
"""
Econometrics timeseries library is for standard economic and time series analytics operations, including returns,
diffs, lags, volatilities and other numerical operations which are generally finance-oriented
"""
class AnnualizationFactor(IntEnum):
DAILY = 252
WEEKLY = 52
SEMI_MONTHLY = 26
MONTHLY = 12
QUARTERLY = 4
ANNUALLY = 1
class SharpeAssets(Enum):
USD = 'MAP35DA6K5B1YXGX'
AUD = 'MAFRZWJ790MQY0EW'
CHF = 'MAS0NN4ZX7NYXB36'
EUR = 'MA95W0N1214395N8'
GBP = 'MA41ZEFTWR8Q7HBM'
JPY = 'MA8GXV3SJ0TXH1JV'
SEK = 'MAGNZZY0GJ4TATNG'
def excess_returns_pure(price_series: pd.Series, spot_curve: pd.Series) -> pd.Series:
curve, bench_curve = align(price_series, spot_curve, Interpolate.INTERSECT)
e_returns = [curve.iloc[0]]
for i in range(1, len(curve)):
multiplier = 1 + curve.iloc[i] / curve.iloc[i - 1] - bench_curve.iloc[i] / bench_curve.iloc[i - 1]
e_returns.append(e_returns[-1] * multiplier)
return pd.Series(e_returns, index=curve.index)
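# Hedged worked example for excess_returns_pure: the two series are aligned on
# their common dates and the excess-return curve is rebuilt iteratively from the
# first price level. The toy numbers below are illustrative only.
#
# >>> idx = pd.date_range('2021-01-01', periods=3)
# >>> prices = pd.Series([100.0, 102.0, 101.0], index=idx)
# >>> bench = pd.Series([100.0, 100.5, 101.0], index=idx)
# >>> excess_returns_pure(prices, bench)
# Second value: 100 * (1 + 102/100 - 100.5/100) = 101.5; later values compound
# the same way.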
def excess_returns(price_series: pd.Series, benchmark_or_rate: Union[Asset, Currency, float], *,
day_count_convention=DayCountConvention.ACTUAL_360) -> pd.Series:
if isinstance(benchmark_or_rate, float):
er = [price_series.iloc[0]]
for j in range(1, len(price_series)):
fraction = day_count_fraction(price_series.index[j - 1], price_series.index[j], day_count_convention)
er.append(er[-1] + price_series.iloc[j] - price_series.iloc[j - 1] * (1 + benchmark_or_rate * fraction))
return pd.Series(er, index=price_series.index)
if isinstance(benchmark_or_rate, Currency):
try:
marquee_id = SharpeAssets[benchmark_or_rate.value].value
except KeyError:
raise MqValueError(f"unsupported currency {benchmark_or_rate}")
else:
marquee_id = benchmark_or_rate.get_marquee_id()
with DataContext(price_series.index[0], price_series.index[-1]):
q = GsDataApi.build_market_data_query([marquee_id], QueryType.SPOT)
df = GsDataApi.get_market_data(q)
if df.empty:
raise MqValueError(f'could not retrieve risk-free rate {marquee_id}')
df = df[~df.index.duplicated(keep='first')] # handle bad data (duplicate rows)
return excess_returns_pure(price_series, df['spot'])
def _annualized_return(levels: pd.Series, rolling: Union[int, pd.DateOffset],
interpolation_method: Interpolate = Interpolate.NAN) -> pd.Series:
if isinstance(rolling, pd.DateOffset):
starting = [tstamp - rolling for tstamp in levels.index]
levels = interpolate(levels, method=interpolation_method)
points = list(
map(lambda d, v, i: pow(v / levels.get(i, np.nan),
365.25 / (d - i).days) - 1,
levels.index[1:],
levels.values[1:], starting[1:]))
else:
if interpolation_method is not Interpolate.NAN:
raise MqValueError(f'If w is not a relative date, method must be nan. You specified method: '
f'{interpolation_method.value}.')
starting = [0] * rolling
starting.extend([a for a in range(1, len(levels) - rolling + 1)])
points = list(
map(lambda d, v, i: pow(v / levels[i], 365.25 / (d - levels.index[i]).days) - 1, levels.index[1:],
levels.values[1:], starting[1:]))
points.insert(0, 0)
return pd.Series(points, index=levels.index)
def get_ratio_pure(er: pd.Series, w: Union[Window, int, str],
interpolation_method: Interpolate = Interpolate.NAN) -> pd.Series:
w = normalize_window(er, w or None) # continue to support 0 as an input for window
ann_return = _annualized_return(er, w.w, interpolation_method=interpolation_method)
long_enough = (er.index[-1] - w.w) >= er.index[0] if isinstance(w.w, pd.DateOffset) else w.w < len(er)
ann_vol = volatility(er, w).iloc[1:] if long_enough else volatility(er)
result = ann_return / ann_vol * 100
return apply_ramp(result, w)
def _get_ratio(input_series: pd.Series, benchmark_or_rate: Union[Asset, float, str], w: Union[Window, int, str], *,
day_count_convention: DayCountConvention, curve_type: CurveType = CurveType.PRICES,
interpolation_method: Interpolate = Interpolate.NAN) -> pd.Series:
if curve_type == CurveType.PRICES:
er = excess_returns(input_series, benchmark_or_rate, day_count_convention=day_count_convention)
else:
assert curve_type == CurveType.EXCESS_RETURNS
er = input_series
return get_ratio_pure(er, w, interpolation_method)
class RiskFreeRateCurrency(Enum):
USD = "USD"
AUD = "AUD"
CHF = "CHF"
EUR = "EUR"
GBP = "GBP"
JPY = "JPY"
SEK = "SEK"
_USD = "usd"
_AUD = "aud"
_CHF = "chf"
_EUR = "eur"
_GBP = "gbp"
_JPY = "jpy"
_SEK = "sek"
@plot_session_function
def excess_returns_(price_series: pd.Series, currency: RiskFreeRateCurrency = RiskFreeRateCurrency.USD) -> pd.Series:
"""
Calculate excess returns
:param price_series: price series
:param currency: currency for risk-free rate, defaults to USD
:return: excess returns
**Usage**
Given a price series P and risk-free rate R, excess returns E are defined as:
:math:`E_t = E_{t-1} + P_t - P_{t-1} * (1 + R * (D_t - D_{t-1}) / 360)`
The `Actual/360 <https://en.wikipedia.org/wiki/Day_count_convention#Actual/360>`_ day count convention is used.
**Examples**
Get excess returns from a price series.
>>> er = excess_returns(generate_series(100), USD)
"""
return excess_returns(price_series, Currency(currency.value), day_count_convention=DayCountConvention.ACTUAL_360)
@plot_session_function
def sharpe_ratio(series: pd.Series, currency: RiskFreeRateCurrency = RiskFreeRateCurrency.USD,
w: Union[Window, int, str] = None, curve_type: CurveType = CurveType.PRICES,
method: Interpolate = Interpolate.NAN) -> pd.Series:
"""
Calculate Sharpe ratio
:param series: series of prices or excess returns for an asset
:param currency: currency for risk-free rate, defaults to USD
:param w: Window or int: size of window and ramp up to use. e.g. Window(22, 10) where 22 is the window size
and 10 the ramp up value. If w is a string, it should be a relative date like '1m', '1d', etc.
Window size defaults to length of series.
:param curve_type: whether input series is of prices or excess returns, defaults to prices
:param method: interpolation method (default: nan). Used to calculate returns on dates without data (i.e. weekends)
when window is a relative date. Defaults to no interpolation.
:return: Sharpe ratio
**Usage**
Given a price series P, risk-free rate R, and window of size w returns the rolling
`Sharpe ratio <https://en.wikipedia.org/wiki/Sharpe_ratio>`_ S:
:math:`S_t = \\frac{(E_t / E_{t-w+1})^{365.25 / (D_t - D_{t-w})}-1}{volatility(E, w)_t}`
Excess returns E are defined as:
:math:`E_t = E_{t-1} + P_t - P_{t-1} * (1 + R * (D_t - D_{t-1}) / 360)`
where D is the date for a data point. The
`Actual/360 <https://en.wikipedia.org/wiki/Day_count_convention#Actual/360>`_ day count convention is used.
**Examples**
Get rolling sharpe ratio of a price series (with window of 22).
>>> sr = sharpe_ratio(generate_series(365, END_TODAY), USD, 22, PRICES)
**See also**
:func:`volatility`
"""
return _get_ratio(series, Currency(currency.value), w, day_count_convention=DayCountConvention.ACTUAL_360,
curve_type=curve_type, interpolation_method=method)
@plot_function
def returns(series: pd.Series, obs: Union[Window, int, str] = 1, type: Returns = Returns.SIMPLE) -> pd.Series:
"""
Calculate returns from price series
:param series: time series of prices
:param obs: number of observations or relative date e.g. 3d, 1w, 1m
:param type: returns type: simple, logarithmic or absolute
:return: date-based time series of return
**Usage**
Compute returns series from price levels, based on the value of *type*:
=========== =============================
Type Description
=========== =============================
simple Simple arithmetic returns
logarithmic Logarithmic returns
absolute Absolute returns
=========== =============================
*Simple*
Simple geometric change in asset prices, which can be aggregated across assets
:math:`Y_t = \\frac{X_t}{X_{t-obs}} - 1`
where :math:`X_t` is the asset price at time :math:`t`
*Logarithmic*
Natural logarithm of asset price changes, which can be aggregated through time
:math:`Y_t = log(X_t) - log(X_{t-obs})`
where :math:`X_t` is the asset price at time :math:`t`
*Absolute*
Absolute change in asset prices
:math:`Y_t = X_t - X_{t-obs}`
where :math:`X_t` is the asset price at time :math:`t`
**Examples**
Generate price series and take compute returns
>>> prices = generate_series(100)
>>> returns = returns(prices)
**See also**
:func:`prices`
"""
if series.size < 1:
return series
shifted_series = lag(series, obs, LagMode.TRUNCATE)
if type == Returns.SIMPLE:
ret_series = series / shifted_series - 1
elif type == Returns.LOGARITHMIC:
ret_series = series.apply(math.log) - shifted_series.apply(math.log)
elif type == Returns.ABSOLUTE:
ret_series = series - shifted_series
else:
raise MqValueError('Unknown returns type (use simple / logarithmic / absolute)')
return ret_series
@plot_function
def prices(series: pd.Series, initial: int = 1, type: Returns = Returns.SIMPLE) -> pd.Series:
"""
Calculate price levels from returns series
:param series: time series of returns
:param initial: initial price level
:param type: returns type: simple, logarithmic or absolute
:return: date-based time series of return
**Usage**
Compute price levels from returns series, based on the value of *type*:
=========== =============================
Type Description
=========== =============================
simple Simple arithmetic returns
logarithmic Logarithmic returns
absolute Absolute returns
=========== =============================
*Simple*
Compute asset price series from simple returns:
:math:`Y_t = (1 + X_{t-1}) Y_{t-1}`
where :math:`X_t` is the asset price at time :math:`t` and :math:`Y_0 | |
disconnected_outputs
def connection_pattern(self, node):
patterns = [[True, True, True], # x
[True, True, True], # scale
[True, True, True]] # bias
# Optional running_mean and running_var are only
# connected to their new values.
for i in range(3, len(node.inputs)):
patterns[0].append(True)
for pattern in patterns[1:]:
pattern.append(False)
patterns.append([False] * (i) + [True])
return patterns
class GpuDnnBatchNormGrad(GpuDnnBatchNormBase):
"""
Op for the cuDNN BatchNormalizationBackward function.
See GpuDnnBatchNormBase for parameters.
On application, takes input, dy, scale, mean, invstd and produces
dinput, dscale and dbias. Note that it does not need the bias.
Note: scale, mean and invstd must follow the same tensor layout!
"""
tensor_descs = ['bn_input', 'bn_doutput', 'bn_dinput', 'bn_params']
def infer_shape(self, node, shape):
# first output equals shape of x
# second and third output equal shape of scale
return [shape[0], shape[2], shape[2]]
def make_node(self, x, dy, scale, x_mean, x_invstd):
x = as_cuda_ndarray_variable(x)
dy = as_cuda_ndarray_variable(dy)
scale = as_cuda_ndarray_variable(scale)
x_mean = as_cuda_ndarray_variable(x_mean)
x_invstd = as_cuda_ndarray_variable(x_invstd)
assert x.ndim == dy.ndim == scale.ndim == x_mean.ndim == x_invstd.ndim
assert x.ndim in (4, 5)
return Apply(self, [x, dy, scale, x_mean, x_invstd], [x.type(), scale.type(), scale.type()])
def c_code(self, node, name, inputs, outputs, sub):
# super call to prepare common configuration
result = super(GpuDnnBatchNormGrad, self).c_code(node, name, inputs, outputs, sub)
# give sensible names to inputs and outputs
inp, doutp, scale, x_mean, x_invstd = inputs
dinp, dscale, dbias = outputs
# call cuDNN function
result += """
// set input tensor descriptors from input tensors
if (c_set_tensorNd(%(inp)s, bn_input_%(name)s) != 0)
{
%(fail)s
}
if (c_set_tensorNd(%(doutp)s, bn_doutput_%(name)s) != 0)
{
%(fail)s
}
if (c_set_tensorNd(%(scale)s, bn_params_%(name)s) != 0)
{
%(fail)s
}
// build and prepare the output variables
if ((CudaNdarray_prep_output(&%(dinp)s, %(inp)s->nd, CudaNdarray_HOST_DIMS(%(inp)s)) != 0) ||
(CudaNdarray_prep_output(&%(dscale)s, %(inp)s->nd, CudaNdarray_HOST_DIMS(%(scale)s)) != 0) ||
(CudaNdarray_prep_output(&%(dbias)s, %(inp)s->nd, CudaNdarray_HOST_DIMS(%(scale)s)) != 0))
{
%(fail)s
}
// set output tensor descriptor from output tensor
if (c_set_tensorNd(%(dinp)s, bn_dinput_%(name)s) != 0)
{
%(fail)s
}
{
const float alphaData = 1.;
const float betaData = 0.;
const float alphaParam = 1.;
const float betaParam = 0.;
err%(name)s = cudnnBatchNormalizationBackward(
_handle,
mode%(name)s,
(void*) &alphaData,
(void*) &betaData,
(void*) &alphaParam,
(void*) &betaParam,
bn_input_%(name)s,
CudaNdarray_DEV_DATA(%(inp)s),
bn_doutput_%(name)s,
CudaNdarray_DEV_DATA(%(doutp)s),
bn_dinput_%(name)s,
CudaNdarray_DEV_DATA(%(dinp)s),
bn_params_%(name)s,
CudaNdarray_DEV_DATA(%(scale)s),
CudaNdarray_DEV_DATA(%(dscale)s),
CudaNdarray_DEV_DATA(%(dbias)s),
epsilon%(name)s,
CudaNdarray_DEV_DATA(%(x_mean)s),
CudaNdarray_DEV_DATA(%(x_invstd)s)
);
}
""" % dict(name=name, inp=inp, doutp=doutp, scale=scale, x_mean=x_mean,
x_invstd=x_invstd, dinp=dinp, dscale=dscale, dbias=dbias, fail=sub['fail'])
return result
def dnn_batch_normalization_train(inputs, gamma, beta, mode='per-activation',
epsilon=1e-4, running_average_factor=0.1,
running_mean=None, running_var=None):
"""
Performs batch normalization of the given inputs, using the mean and
variance of the inputs.
Parameters
----------
mode : {'per-activation', 'spatial'}
Whether to normalize per activation or share normalization factors
across spatial dimensions (i.e., all dimensions past the second).
gamma : tensor
Learnable scale factors. Must match the dimensionality of `inputs`,
but have sizes of `1` for all axes normalized over (i.e., in the first
dimension for ``mode='per-activation'``, and additionally in all
dimensions past the second for ``mode='spatial'``).
beta : tensor
Learnable biases. Must match the tensor layout of `gamma`.
epsilon : float
Epsilon value used in the batch normalization formula. Minimum allowed
value is 1e-5 (imposed by cuDNN).
running_average_factor : float
Factor for updating the values of `running_mean` and `running_var`.
If the factor is close to one, the running averages will update quickly,
if the factor is close to zero it will update slowly.
running_mean : tensor or None
Previous value of the running mean. If this is given, the new value
``running_mean * (1 - r_a_factor) + batch mean * r_a_factor``
will be returned as one of the outputs of this function.
`running_mean` and `running_var` should either both be given or
both be None.
running_var : tensor or None
Previous value of the running variance. If this is given, the new value
``running_var * (1 - r_a_factor) + (m / (m - 1)) * batch var * r_a_factor``
will be returned as one of the outputs of this function,
where `m` is the product of lengths of the averaged-over dimensions.
`running_mean` and `running_var` should either both be given or
both be None.
Returns
-------
out : tensor
Batch-normalized inputs.
mean : tensor
Means of `inputs` across the normalization axes.
invstd : tensor
Inverse standard deviations of `inputs` across the normalization axes.
new_running_mean : tensor
New value of the running mean (only if both `running_mean` and
`running_var` were given).
new_running_var : tensor
New value of the running variance (only if both `running_var` and
`running_mean` were given).
Notes
-----
Requires cuDNN 5 and Theano 0.9dev2 or more recent.
For 4d tensors, returned values are equivalent to:
.. code-block:: python
axes = 0 if mode == 'per-activation' else (0, 2, 3)
mean = inputs.mean(axes, keepdims=True)
var = inputs.var(axes, keepdims=True)
invstd = T.inv(T.sqrt(var + epsilon))
out = (inputs - mean) * gamma * invstd + beta
m = T.cast(T.prod(inputs.shape) / T.prod(mean.shape), 'float32')
running_mean = running_mean * (1 - running_average_factor) + \\
mean * running_average_factor
running_var = running_var * (1 - running_average_factor) + \\
(m / (m - 1)) * var * running_average_factor
For 5d tensors, the axes are (0, 2, 3, 4).
"""
ndim = inputs.ndim
if gamma.ndim != ndim or beta.ndim != ndim:
raise ValueError("gamma and beta must be of the same dimensionality "
"as inputs; got %d and %d instead of %d" %
(gamma.ndim, beta.ndim, ndim))
if (running_mean is None) != (running_var is None):
raise ValueError("running_mean and running_var must either both be "
"given or both be None")
if running_mean is not None and running_mean.ndim != ndim:
raise ValueError("running_mean must be of the same dimensionality "
"as inputs; got %d instead of %d" %
(running_mean.ndim, ndim))
if running_var is not None and running_var.ndim != ndim:
raise ValueError("running_var must be of the same dimensionality "
"as inputs; got %d instead of %d" %
(running_var.ndim, ndim))
if epsilon < 1e-5:
raise ValueError("epsilon must be at least 1e-5, got %f" % epsilon)
running_averages = (running_mean is not None and running_var is not None)
if ndim < 4:
inputs = theano.tensor.shape_padright(inputs, 4 - ndim)
gamma = theano.tensor.shape_padright(gamma, 4 - ndim)
beta = theano.tensor.shape_padright(beta, 4 - ndim)
if running_averages:
running_mean = theano.tensor.shape_padright(running_mean, 4 - ndim)
running_var = theano.tensor.shape_padright(running_var, 4 - ndim)
elif ndim > 5:
inputs_shape = inputs.shape
params_shape = gamma.shape
inputs = theano.tensor.flatten(inputs, 5)
gamma = theano.tensor.flatten(gamma, 5)
beta = theano.tensor.flatten(beta, 5)
if running_averages:
running_mean = theano.tensor.flatten(running_mean, 5)
running_var = theano.tensor.flatten(running_var, 5)
batchnorm_op = GpuDnnBatchNorm(mode=mode, epsilon=epsilon,
running_average_factor=running_average_factor,
running_averages=running_averages)
if running_averages:
out, mean, invstd, new_running_mean, new_running_var = batchnorm_op(
gpu_contiguous(inputs), gpu_contiguous(gamma),
gpu_contiguous(beta),
running_mean=gpu_contiguous(running_mean),
running_var=gpu_contiguous(running_var))
if new_running_mean.broadcastable != running_mean.broadcastable:
new_running_mean = tensor.patternbroadcast(new_running_mean, running_mean.broadcastable)
if new_running_var.broadcastable != running_var.broadcastable:
new_running_var = tensor.patternbroadcast(new_running_var, running_var.broadcastable)
result = (out, mean, invstd, new_running_mean, new_running_var)
else:
result = batchnorm_op(gpu_contiguous(inputs), gpu_contiguous(gamma),
gpu_contiguous(beta))
if ndim < 4:
result = tuple(theano.tensor.flatten(r, ndim) for r in result)
elif ndim > 5:
result = (theano.tensor.reshape(result[0], inputs_shape),) + tuple(
theano.tensor.reshape(r, params_shape) for r in result[1:])
return result
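def _dnn_batch_normalization_train_example():
    """Hedged usage sketch for dnn_batch_normalization_train.
    Builds a symbolic 4d batch-normalization graph in 'spatial' mode. Variable
    names and shapes are illustrative; it assumes the cuDNN and Theano version
    requirements stated in the docstring above are met and that a GPU context
    is available when the graph is compiled.
    """
    x = theano.tensor.tensor4('x')          # (batch, channels, height, width)
    gamma = theano.tensor.tensor4('gamma')  # broadcastable to (1, C, 1, 1)
    beta = theano.tensor.tensor4('beta')
    out, mean, invstd = dnn_batch_normalization_train(
        x, gamma, beta, mode='spatial', epsilon=1e-4)
    return out, mean, invstd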
def dnn_batch_normalization_test(inputs, gamma, beta, mean, var,
mode='per-activation', epsilon=1e-4):
"""
Performs batch normalization of the given inputs, using the given mean and
variance.
Parameters
----------
mode : {'per-activation', 'spatial'}
Whether to normalize per activation or share normalization factors
across spatial dimensions (i.e., all dimensions past the second).
gamma : tensor
Scale factors. Must match the dimensionality of `inputs`, but have
sizes of `1` for all axes normalized over (i.e., in the first dimension
for ``mode='per-activation'``, and additionally in all dimensions past
the second for ``mode='spatial'``).
beta : tensor
Biases. Must match the tensor layout of `gamma`.
mean : tensor
Means. Usually these are running averages computed during training.
Must match the tensor layout of `gamma`.
var : tensor
Variances. Usually these are running averages computed during training.
Must match the tensor layout of `gamma`.
epsilon : float
Epsilon value used in the batch normalization formula. Minimum allowed
value is 1e-5 (imposed by cuDNN).
Returns
-------
out : tensor
Batch-normalized inputs.
Notes
-----
Requires cuDNN 5 and Theano 0.9dev2 or more recent.
For 4d tensors, the returned value is equivalent to:
.. code-block:: python
axes = (0,) if mode == 'per-activation' else (0, 2, 3)
gamma, beta, mean, var = (T.addbroadcast(t, *axes)
for t in (gamma, beta, mean, var))
out = (inputs - mean) * gamma / T.sqrt(var + epsilon) + | |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package provides interface to help building static computational graph
for PaddlePaddle.
"""
import warnings
import numpy as np
import paddle.fluid as fluid
from pgl.utils import op
from pgl.utils import paddle_helper
from pgl.utils.logger import log
__all__ = ["BaseGraphWrapper", "GraphWrapper", "StaticGraphWrapper"]
def send(src, dst, nfeat, efeat, message_func):
"""Send message from src to dst.
"""
src_feat = op.read_rows(nfeat, src)
dst_feat = op.read_rows(nfeat, dst)
msg = message_func(src_feat, dst_feat, efeat)
return msg
def recv(dst, uniq_dst, bucketing_index, msg, reduce_function, num_nodes,
num_edges):
"""Recv message from given msg to dst nodes.
"""
if reduce_function == "sum":
if isinstance(msg, dict):
raise TypeError("The message for build-in function"
" should be Tensor not dict.")
try:
out_dim = msg.shape[-1]
init_output = fluid.layers.fill_constant(
shape=[num_nodes, out_dim], value=0, dtype=msg.dtype)
init_output.stop_gradient = False
empty_msg_flag = fluid.layers.cast(num_edges > 0, dtype=msg.dtype)
msg = msg * empty_msg_flag
output = paddle_helper.scatter_add(init_output, dst, msg)
return output
except TypeError as e:
warnings.warn(
"scatter_add is not supported with paddle version <= 1.5")
def sum_func(message):
return fluid.layers.sequence_pool(message, "sum")
reduce_function = sum_func
bucketed_msg = op.nested_lod_reset(msg, bucketing_index)
output = reduce_function(bucketed_msg)
output_dim = output.shape[-1]
empty_msg_flag = fluid.layers.cast(num_edges > 0, dtype=output.dtype)
output = output * empty_msg_flag
init_output = fluid.layers.fill_constant(
shape=[num_nodes, output_dim], value=0, dtype=output.dtype)
init_output.stop_gradient = True
final_output = fluid.layers.scatter(init_output, uniq_dst, output)
return final_output
class BaseGraphWrapper(object):
"""This module implement base class for graph wrapper.
Currently our PGL is developed based on static computational mode of
paddle (we'll support dynamic computational model later). We need to build
model upon a virtual data holder. BaseGraphWrapper provide a virtual
graph structure that users can build deep learning models
based on this virtual graph. And then feed real graph data to run
the models. Moreover, we provide convenient message-passing interface
(send & recv) for building graph neural networks.
NOTICE: Don't use this BaseGraphWrapper directly. Use :code:`GraphWrapper`
and :code:`StaticGraphWrapper` to create graph wrapper instead.
"""
def __init__(self):
self.node_feat_tensor_dict = {}
self.edge_feat_tensor_dict = {}
self._edges_src = None
self._edges_dst = None
self._num_nodes = None
self._indegree = None
self._edge_uniq_dst = None
self._edge_uniq_dst_count = None
self._node_ids = None
self._graph_lod = None
self._num_graph = None
self._data_name_prefix = ""
def __repr__(self):
return self._data_name_prefix
def send(self, message_func, nfeat_list=None, efeat_list=None):
"""Send message from all src nodes to dst nodes.
The UDF message function should have the following format.
.. code-block:: python
def message_func(src_feat, dst_feat, edge_feat):
'''
Args:
src_feat: the node feat dict attached to the src nodes.
dst_feat: the node feat dict attached to the dst nodes.
edge_feat: the edge feat dict attached to the
corresponding (src, dst) edges.
Return:
It should return a tensor or a dictionary of tensors, and each tensor
should have a shape of (num_edges, dims).
'''
pass
Args:
message_func: UDF function.
nfeat_list: a list of names or tuple (name, tensor)
efeat_list: a list of names or tuple (name, tensor)
Return:
A dictionary of tensor representing the message. Each of the values
in the dictionary has a shape (num_edges, dim) which should be collected
by :code:`recv` function.
"""
if efeat_list is None:
efeat_list = {}
if nfeat_list is None:
nfeat_list = {}
src, dst = self.edges
nfeat = {}
for feat in nfeat_list:
if isinstance(feat, str):
nfeat[feat] = self.node_feat[feat]
else:
name, tensor = feat
nfeat[name] = tensor
efeat = {}
for feat in efeat_list:
if isinstance(feat, str):
efeat[feat] = self.edge_feat[feat]
else:
name, tensor = feat
efeat[name] = tensor
msg = send(src, dst, nfeat, efeat, message_func)
return msg
def recv(self, msg, reduce_function):
"""Recv message and aggregate the message by reduce_fucntion
The UDF reduce_function function should has the following format.
.. code-block:: python
def reduce_func(msg):
'''
Args:
msg: A LodTensor or a dictionary of LodTensor whose batch_size
is equal to the number of unique dst nodes.
Return:
It should return a tensor with shape (batch_size, out_dims). The
batch size should be the same as msg.
'''
pass
Args:
msg: A tensor or a dictionary of tensors created by the send function.
reduce_function: UDF reduce function, or the string "sum" for the built-in.
The built-in "sum" uses scatter_add to optimize speed.
Return:
A tensor with shape (num_nodes, out_dims). The output for nodes with no message
will be zeros.
"""
output = recv(
dst=self._edges_dst,
uniq_dst=self._edge_uniq_dst,
bucketing_index=self._edge_uniq_dst_count,
msg=msg,
reduce_function=reduce_function,
num_edges=self._num_edges,
num_nodes=self._num_nodes)
return output
@property
def edges(self):
"""Return a tuple of edge Tensor (src, dst).
Return:
A tuple of Tensor (src, dst). Src and dst are both
tensor with shape (num_edges, ) and dtype int64.
"""
return self._edges_src, self._edges_dst
@property
def num_nodes(self):
"""Return a variable of number of nodes
Return:
A variable with shape (1,) as the number of nodes in int64.
"""
return self._num_nodes
@property
def graph_lod(self):
"""Return graph index for graphs
Return:
A variable with shape [None ] as the Lod information of multiple-graph.
"""
return self._graph_lod
@property
def num_graph(self):
"""Return a variable of number of graphs
Return:
A variable with shape (1,) as the number of Graphs in int64.
"""
return self._num_graph
@property
def edge_feat(self):
"""Return a dictionary of tensor representing edge features.
Return:
A dictionary whose keys are the feature names and the values
are feature tensor.
"""
return self.edge_feat_tensor_dict
@property
def node_feat(self):
"""Return a dictionary of tensor representing node features.
Return:
A dictionary whose keys are the feature names and the values
are feature tensor.
"""
return self.node_feat_tensor_dict
def indegree(self):
"""Return the indegree tensor for all nodes.
Return:
A tensor of shape (num_nodes, ) in int64.
"""
return self._indegree
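def _copy_and_sum_example(gw, feat_name="h"):
    """Hedged sketch of the send/recv interface documented above.
    Copies the source node feature along every edge and sums the incoming
    messages at each destination node. ``gw`` is any graph wrapper instance and
    ``feat_name`` is an illustrative assumption that must exist in
    ``gw.node_feat``; this helper is not part of the original module.
    """
    def copy_send(src_feat, dst_feat, edge_feat):
        # Forward the source node feature unchanged as the message.
        return src_feat[feat_name]
    msg = gw.send(copy_send, nfeat_list=[feat_name])
    return gw.recv(msg, "sum")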
class StaticGraphWrapper(BaseGraphWrapper):
"""Implement a graph wrapper that the data of the graph won't
be changed and it can be fit into the GPU or CPU memory. This
can reduce the time of swapping large data from GPU and CPU.
Args:
name: The graph data prefix
graph: The static graph that should be put into memory
place: fluid.CPUPlace or fluid.CUDAPlace(n) indicating the
device to hold the graph data.
Examples:
If we have an immutable graph that fits into GPU or CPU memory,
we can just use a :code:`StaticGraphWrapper` to pre-place the graph
data onto the device.
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
from pgl.graph import Graph
from pgl.graph_wrapper import StaticGraphWrapper
place = fluid.CPUPlace()
exe = fluid.Executor(place)
num_nodes = 5
edges = [ (0, 1), (1, 2), (3, 4)]
feature = np.random.randn(5, 100)
edge_feature = np.random.randn(3, 100)
graph = Graph(num_nodes=num_nodes,
edges=edges,
node_feat={
"feature": feature
},
edge_feat={
"edge_feature": edge_feature
})
graph_wrapper = StaticGraphWrapper(name="graph",
graph=graph,
place=place)
# build your deep graph model
# Initialize parameters for deep graph model
exe.run(fluid.default_startup_program())
# Initialize graph data
graph_wrapper.initialize(place)
"""
def __init__(self, name, graph, place):
super(StaticGraphWrapper, self).__init__()
self._data_name_prefix = name
self._initializers = []
self.__create_graph_attr(graph)
def __create_graph_attr(self, graph):
"""Create graph attributes for paddlepaddle.
"""
src, dst, eid = graph.sorted_edges(sort_by="dst")
indegree = graph.indegree()
nodes = graph.nodes
uniq_dst = nodes[indegree > 0]
uniq_dst_count = indegree[indegree > 0]
uniq_dst_count = np.cumsum(uniq_dst_count, dtype='int32')
uniq_dst_count = np.insert(uniq_dst_count, 0, 0)
graph_lod = graph.graph_lod
num_graph = graph.num_graph
num_edges = len(src)
if num_edges == 0:
# Fake Graph
src = np.array([0], dtype="int64")
dst = np.array([0], dtype="int64")
eid = np.array([0], dtype="int64")
uniq_dst_count = np.array([0, 1], dtype="int32")
uniq_dst = np.array([0], dtype="int64")
edge_feat = {}
for key, value in graph.edge_feat.items():
edge_feat[key] = value[eid]
node_feat = graph.node_feat
self.__create_graph_node_feat(node_feat, self._initializers)
self.__create_graph_edge_feat(edge_feat, self._initializers)
self._num_edges, init = paddle_helper.constant(
dtype="int64",
value=np.array(
[num_edges], dtype="int64"),
name=self._data_name_prefix + '/num_edges')
self._initializers.append(init)
self._num_graph, init = paddle_helper.constant(
dtype="int64",
value=np.array(
[num_graph], dtype="int64"),
name=self._data_name_prefix + '/num_graph')
self._initializers.append(init)
self._edges_src, init = paddle_helper.constant(
dtype="int64",
value=src,
name=self._data_name_prefix + '/edges_src')
self._initializers.append(init)
self._edges_dst, init = paddle_helper.constant(
dtype="int64",
value=dst,
name=self._data_name_prefix + '/edges_dst')
self._initializers.append(init)
self._num_nodes, init = paddle_helper.constant(
dtype="int64",
hide_batch_size=False,
value=np.array([graph.num_nodes]),
name=self._data_name_prefix + '/num_nodes')
self._initializers.append(init)
self._edge_uniq_dst, init = paddle_helper.constant(
| |
from nose.tools import set_trace
from collections import defaultdict
import datetime
import base64
import os
import json
import logging
import re
from config import (
Configuration,
temp_config,
)
from util import LanguageCodes
from util.xmlparser import XMLParser
from util.http import (
HTTP,
RemoteIntegrationException,
)
from coverage import CoverageFailure
from model import (
Contributor,
DataSource,
DeliveryMechanism,
LicensePool,
Edition,
Identifier,
Representation,
Subject,
)
from metadata_layer import (
SubjectData,
ContributorData,
FormatData,
IdentifierData,
CirculationData,
Metadata,
)
from config import Configuration
from coverage import BibliographicCoverageProvider
class ThetaAPI(object):
PRODUCTION_BASE_URL = "http://acstheta.dlogics.com/bookstore/api"
QA_BASE_URL = "http://thetaapiqa.baker-taylor.com/Services/VendorAPI/"
DATE_FORMAT = "%m-%d-%Y %H:%M:%S"
access_token_endpoint = 'accesstoken'
availability_endpoint = 'availability/v2'
log = logging.getLogger("Theta API")
def __init__(self, _db, username=None, library_id=None, password=<PASSWORD>,
base_url=None):
self._db = _db
(env_library_id, env_username,
env_password, env_base_url) = self.environment_values()
self.library_id = library_id or env_library_id
self.username = username or env_username
self.password = <PASSWORD> <PASSWORD>
self.base_url = base_url or env_base_url
if self.base_url == 'qa':
self.base_url = self.QA_BASE_URL
elif self.base_url == 'production':
self.base_url = self.PRODUCTION_BASE_URL
self.token = "<PASSWORD>"
@classmethod
def environment_values(cls):
config = Configuration.integration('Theta')
values = []
for name in [
'library_id',
'username',
'password',
'server',
]:
value = config.get(name)
if value:
value = value.encode("utf8")
values.append(value)
return values
@classmethod
def from_environment(cls, _db):
# Make sure all environment values are present. If any are missing,
# return None
values = cls.environment_values()
if len([x for x in values if not x]):
cls.log.info(
"No Theta client configured."
)
return None
return cls(_db)
@property
def source(self):
return DataSource.lookup(self._db, DataSource.THETA)
@property
def authorization_headers(self):
authorization = u":".join([self.username, self.password, self.library_id])
authorization = authorization.encode("utf_16_le")
authorization = base64.b64encode(authorization)
return dict(Authorization="Basic " + authorization)
def refresh_bearer_token(self):
url = self.base_url + self.access_token_endpoint
headers = self.authorization_headers
response = self._make_request(
url, 'post', headers, allowed_response_codes=[200]
)
return self.parse_token(response.content)
def request(self, url, method='get', extra_headers={}, data=None,
params=None, exception_on_401=False):
"""Make an HTTP request, acquiring/refreshing a bearer token
if necessary.
"""
if not self.token:
self.token = self.refresh_bearer_token()
headers = dict(extra_headers)
headers['Authorization'] = "Bearer " + self.token
headers['Library'] = self.library_id
if exception_on_401:
disallowed_response_codes = ["401"]
else:
disallowed_response_codes = None
response = self._make_request(
url=url, method=method, headers=headers,
data=data, params=params,
disallowed_response_codes=disallowed_response_codes
)
if response.status_code == 401:
# This must be our first 401, since our second 401 will
# make _make_request raise a RemoteIntegrationException.
#
# The token has expired. Get a new token and try again.
self.token = None
return self.request(
url=url, method=method, extra_headers=extra_headers,
data=data, params=params, exception_on_401=True
)
else:
return response
def availability(self, patron_id=None, since=None, title_ids=[]):
url = self.base_url + self.availability_endpoint
args = dict()
if since:
since = since.strftime(self.DATE_FORMAT)
args['updatedDate'] = since
if patron_id:
args['patronId'] = patron_id
if title_ids:
args['titleIds'] = ','.join(title_ids)
response = self.request(url, params=args)
return response
@classmethod
def create_identifier_strings(cls, identifiers):
identifier_strings = []
for i in identifiers:
if isinstance(i, Identifier):
value = i.identifier
else:
value = i
identifier_strings.append(value)
return identifier_strings
@classmethod
def parse_token(cls, token):
data = json.loads(token)
return data['access_token']
def _make_request(self, url, method, headers, data=None, params=None,
**kwargs):
"""Actually make an HTTP request."""
return HTTP.request_with_timeout(
method, url, headers=headers, data=data,
params=params, **kwargs
)
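# Hedged usage sketch: fetching recent availability with ThetaAPI. Credentials
# are placeholders and ``_db`` is the application's database session; this only
# exercises the request/token-refresh flow implemented above.
#
# api = ThetaAPI(_db, username="user", library_id="lib", password="secret",
#                base_url="qa")
# response = api.availability(
#     since=datetime.datetime.utcnow() - datetime.timedelta(days=1))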
class MockThetaAPI(ThetaAPI):
def __init__(self, _db, with_token=True, *args, **kwargs):
with temp_config() as config:
config[Configuration.INTEGRATIONS]['Theta'] = {
'library_id' : 'a',
'username' : 'b',
'password' : 'c',
'server' : 'http://theta.test/',
}
super(MockThetaAPI, self).__init__(_db, *args, **kwargs)
if with_token:
self.token = "<PASSWORD>"
self.responses = []
self.requests = []
def queue_response(self, status_code, headers={}, content=None):
from testing import MockRequestsResponse
self.responses.insert(
0, MockRequestsResponse(status_code, headers, content)
)
def _make_request(self, url, *args, **kwargs):
self.requests.append([url, args, kwargs])
response = self.responses.pop()
return HTTP._process_response(
url, response, kwargs.get('allowed_response_codes'),
kwargs.get('disallowed_response_codes')
)
class ThetaBibliographicCoverageProvider(BibliographicCoverageProvider):
"""Fill in bibliographic metadata for Theta records.
Currently this is only used by BibliographicRefreshScript. It's
not normally necessary because the Theta API combines
bibliographic and availability data.
"""
def __init__(self, _db, input_identifier_types=None,
metadata_replacement_policy=None, theta_api=None,
**kwargs):
# We ignore the value of input_identifier_types, but it's
# passed in by RunCoverageProviderScript, so we accept it as
# part of the signature.
self.parser = BibliographicParser()
theta_api = theta_api or ThetaAPI(_db)
super(ThetaBibliographicCoverageProvider, self).__init__(
_db, theta_api, DataSource.THETA,
batch_size=25,
metadata_replacement_policy=metadata_replacement_policy,
**kwargs
)
def process_batch(self, identifiers):
identifier_strings = self.api.create_identifier_strings(identifiers)
response = self.api.availability(title_ids=identifier_strings)
seen_identifiers = set()
batch_results = []
for metadata, availability in self.parser.process_all(response.content):
identifier, is_new = metadata.primary_identifier.load(self._db)
if not identifier in identifiers:
# Theta told us about a book we didn't ask
# for. This shouldn't happen, but if it does we should
# do nothing further.
continue
seen_identifiers.add(identifier.identifier)
result = self.set_metadata(identifier, metadata)
if not isinstance(result, CoverageFailure):
result = self.handle_success(identifier)
batch_results.append(result)
# Create a CoverageFailure object for each original identifier
# not mentioned in the results.
for identifier_string in identifier_strings:
if identifier_string not in seen_identifiers:
identifier, ignore = Identifier.for_foreign_id(
self._db, Identifier.THETA_ID, identifier_string
)
result = CoverageFailure(
identifier, "Book not in collection", data_source=self.output_source, transient=False
)
batch_results.append(result)
return batch_results
def handle_success(self, identifier):
return self.set_presentation_ready(identifier)
def process_item(self, identifier):
results = self.process_batch([identifier])
return results[0]
class ThetaParser(XMLParser):
NS = {"axis": "http://axis360api.baker-taylor.com/vendorAPI"}
SHORT_DATE_FORMAT = "%m/%d/%Y"
FULL_DATE_FORMAT_IMPLICIT_UTC = "%m/%d/%Y %I:%M:%S %p"
FULL_DATE_FORMAT = "%m/%d/%Y %I:%M:%S %p +00:00"
def _xpath1_boolean(self, e, target, ns, default=False):
text = self.text_of_optional_subtag(e, target, ns)
if text is None:
return default
if text == 'true':
return True
else:
return False
def _xpath1_date(self, e, target, ns):
value = self.text_of_optional_subtag(e, target, ns)
if value is None:
return value
try:
attempt = datetime.datetime.strptime(
value, self.FULL_DATE_FORMAT_IMPLICIT_UTC)
value += ' +00:00'
except ValueError:
pass
return datetime.datetime.strptime(value, self.FULL_DATE_FORMAT)
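    # Example of the normalization above (illustrative value): a timestamp such as
    # "01/02/2020 03:04:05 PM" first parses with FULL_DATE_FORMAT_IMPLICIT_UTC, gets
    # " +00:00" appended, and is then re-parsed with FULL_DATE_FORMAT; a value that
    # already carries "+00:00" skips straight to the final parse.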
class BibliographicParser(ThetaParser):
DELIVERY_DATA_FOR_THETA_FORMAT = {
"Blio" : None,
"Acoustik" : None,
"ePub" : (Representation.EPUB_MEDIA_TYPE, DeliveryMechanism.ADOBE_DRM),
"PDF" : (Representation.PDF_MEDIA_TYPE, DeliveryMechanism.ADOBE_DRM),
}
log = logging.getLogger("Theta Bibliographic Parser")
@classmethod
    def parse_list(cls, l):
"""Turn strings like this into lists:
FICTION / Thrillers; FICTION / Suspense; FICTION / General
Ursu, Anne ; Fortune, Eric (ILT)
"""
return [x.strip() for x in l.split(";")]
def __init__(self, include_availability=True, include_bibliographic=True):
self.include_availability = include_availability
self.include_bibliographic = include_bibliographic
def process_all(self, string):
for i in super(BibliographicParser, self).process_all(
string, "//axis:title", self.NS):
yield i
def extract_availability(self, circulation_data, element, ns):
identifier = self.text_of_subtag(element, 'axis:titleId', ns)
primary_identifier = IdentifierData(Identifier.THETA_ID, identifier)
if not circulation_data:
circulation_data = CirculationData(
data_source=DataSource.THETA,
primary_identifier=primary_identifier,
)
availability = self._xpath1(element, 'axis:availability', ns)
total_copies = self.int_of_subtag(availability, 'axis:totalCopies', ns)
available_copies = self.int_of_subtag(
availability, 'axis:availableCopies', ns)
size_of_hold_queue = self.int_of_subtag(
availability, 'axis:holdsQueueSize', ns)
availability_updated = self.text_of_optional_subtag(
availability, 'axis:updateDate', ns)
if availability_updated:
try:
attempt = datetime.datetime.strptime(
availability_updated, self.FULL_DATE_FORMAT_IMPLICIT_UTC)
availability_updated += ' +00:00'
except ValueError:
pass
availability_updated = datetime.datetime.strptime(
availability_updated, self.FULL_DATE_FORMAT)
        circulation_data.licenses_owned = total_copies
        circulation_data.licenses_available = available_copies
        circulation_data.licenses_reserved = 0
        circulation_data.patrons_in_hold_queue = size_of_hold_queue
return circulation_data
# Theta authors with a special role have an abbreviation after their names,
# e.g. "<NAME> (FRW)"
    role_abbreviation = re.compile(r"\(([A-Z][A-Z][A-Z])\)$")
generic_author = object()
role_abbreviation_to_role = dict(
INT=Contributor.INTRODUCTION_ROLE,
EDT=Contributor.EDITOR_ROLE,
PHT=Contributor.PHOTOGRAPHER_ROLE,
ILT=Contributor.ILLUSTRATOR_ROLE,
TRN=Contributor.TRANSLATOR_ROLE,
FRW=Contributor.FOREWORD_ROLE,
ADP=generic_author, # Author of adaptation
COR=generic_author, # Corporate author
)
@classmethod
def parse_contributor(cls, author, primary_author_found=False):
if primary_author_found:
default_author_role = Contributor.AUTHOR_ROLE
else:
default_author_role = Contributor.PRIMARY_AUTHOR_ROLE
role = default_author_role
match = cls.role_abbreviation.search(author)
if match:
role_type = match.groups()[0]
role = cls.role_abbreviation_to_role.get(
role_type, Contributor.UNKNOWN_ROLE)
if role is cls.generic_author:
role = default_author_role
author = author[:-5].strip()
return ContributorData(
sort_name=author, roles=role)
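    # Illustrative parse (hypothetical name): parse_contributor("Doe, Jane (ILT)")
    # strips the trailing "(ILT)" abbreviation and returns a ContributorData with
    # sort_name "Doe, Jane" and the illustrator role; an unrecognized abbreviation
    # maps to Contributor.UNKNOWN_ROLE.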
def extract_bibliographic(self, element, ns):
"""Turn bibliographic metadata into a Metadata and a CirculationData objects,
and return them as a tuple."""
# TODO: These are consistently empty (some are clearly for
# audiobooks) so I don't know what they do and/or what format
# they're in.
#
# annotation
# edition
# narrator
# runtime
identifier = self.text_of_subtag(element, 'axis:titleId', ns)
isbn = self.text_of_optional_subtag(element, 'axis:isbn', ns)
title = self.text_of_subtag(element, 'axis:productTitle', ns)
contributor = self.text_of_optional_subtag(
element, 'axis:contributor', ns)
contributors = []
found_primary_author = False
if contributor:
for c in self.parse_list(contributor):
contributor = self.parse_contributor(
c, found_primary_author)
if Contributor.PRIMARY_AUTHOR_ROLE in contributor.roles:
found_primary_author = True
contributors.append(contributor)
subject = self.text_of_optional_subtag(element, 'axis:subject', ns)
subjects = []
if subject:
for subject_identifier in self.parse_list(subject):
subjects.append(
SubjectData(
type=Subject.BISAC, identifier=subject_identifier,
weight=1
)
)
publication_date = self.text_of_optional_subtag(
element, 'axis:publicationDate', ns)
if publication_date:
publication_date = datetime.datetime.strptime(
publication_date, self.SHORT_DATE_FORMAT)
series = self.text_of_optional_subtag(element, 'axis:series', ns)
publisher = self.text_of_optional_subtag(element, 'axis:publisher', ns)
imprint = self.text_of_optional_subtag(element, 'axis:imprint', ns)
audience = self.text_of_optional_subtag(element, 'axis:audience', ns)
if audience:
subjects.append(
SubjectData(
type=Subject.THETA_AUDIENCE,
identifier=audience,
weight=1,
)
)
language = self.text_of_subtag(element, 'axis:language', ns)
# We don't use this for anything.
# file_size = self.int_of_optional_subtag(element, 'theta:fileSize', ns)
primary_identifier = IdentifierData(Identifier.THETA_ID, identifier)
identifiers = []
if isbn:
identifiers.append(IdentifierData(Identifier.ISBN, isbn))
formats = []
acceptable = False
seen_formats = []
for format_tag in self._xpath(
element, 'axis:availability/axis:availableFormats/axis:formatName',
ns
):
informal_name = format_tag.text
seen_formats.append(informal_name)
if informal_name not in self.DELIVERY_DATA_FOR_THETA_FORMAT:
self.log("Unrecognized Theta format name for %s: | |
se_per = pd.Series(res).sort_index()
se_per.to_excel(workingDir + 'se_percentile_variation_%s.xlsx' % q)
se_per.plot()
plt.savefig(workingDir + 'se_percentile_variation_%s.png' % q, dpi=300)
plt.clf()
#os.remove(workingDir + 'combined_%s_aligned.h5' % M)
return
def analyzeCase(self, df_expr, toggleCalculateMajorMetric = True, exprCutoff = 0.05, toggleExportFigureData = True, toggleCalculateMeasures = True, suffix = '', saveDir = '', toggleGroupBatches = True, dpi = 300, toggleAdjustText = True, markersLabelsRepelForce = 1.5, figureSize=(8, 22), toggleAdjustFigureHeight=True, noPlot = False, halfWindowSize = 10, printStages = True, externalPanelsData = None, toggleIncludeHeatmap = True, addDeprecatedPanels = False, includeClusterNumber = True, togglePublicationFigure = False):
'''Analyze, calculate, and generate plots for individual experiment
Parameters:
df_expr: pandas.Dataframe
Gene expression data
            toggleCalculateMajorMetric: boolean, Default True
Whether to calculate cdist of major metric. This is a legacy parameter
exprCutoff: float, Default 0.05
Cutoff for percent expression in a batch of input data
toggleExportFigureData: boolean, Default True
Whether to export figure data
toggleCalculateMeasures: boolean, Default True
Whether to calculate measures
suffix: str, Default ''
Name of experiment
saveDir: str, Default ''
                Everything is exported to this directory, should be unique for each dataset
toggleGroupBatches: boolean, Default True
Whether to group batches or save per-batch distance measure
dpi: int or 'figure', Default 300
                Resolution in dots per inch; if 'figure', use the figure's dpi value
toggleAdjustText: boolean, Default True
Whether to use (external) module to minimize text overlap in figure
            figureSize: tuple, Default (8, 22)
Width, height in inches
toggleAdjustFigureHeight: boolean, Default True
Whether to adjust figure height
noPlot: boolean, Default False
                Whether to skip generating the plot
halfWindowSize: int, Default 10
Moving average half-window size
printStages: boolean, Default True
Whether to print stage status to output
externalPanelsData: dict, Default None
Dictionary containing additional panels data
toggleIncludeHeatmap: boolean, Default True
Whether to include heatmap in figure
addDeprecatedPanels: boolean, Default False
Whether to include deprecated panels
Returns:
None
Usage:
self.analyzeCase(df_expr)
'''
stimulators, inhibitors = self.knownRegulators, []
#if togglePublicationFigure:
# toggleExportFigureData = True
def calculateMajorMetricAndGeneStats(df_expr, saveDir, groupBatches, selGenes, exprCutoff):
'''Calculate cdist of metric (e.g. correlation)
Calculate fraction of cells expressing each gene, and median of non-zero gene expression (per batch)
Parameters:
df_expr: pandas.DataFrame
Gene expression of one species, one cluster (subset of clusters)
saveDir: str
                    Everything is exported to this directory, should be unique for each dataset
groupBatches: boolean
Whether to take median across batches
selGenes: list
List of receptors, or transcription factors
exprCutoff: float
Cutoff for percent expression of input data
Returns:
None
Usage:
calculateMajorMetricAndGeneStats(df_expr, saveDir, groupBatches, selGenes, exprCutoff)
'''
print('Received expression data of shape:', df_expr.shape, flush=True)
np.savetxt(os.path.join(saveDir, 'size.txt'), np.array(df_expr.shape), fmt='%i')
# For each batch calculate gene expression distance metric
print('Calculating distance metric', flush=True)
df_measure = get_df_distance(df_expr, metric=self.majorMetric, genes=selGenes, analyzeBy='batch', minSize=10, groupBatches=groupBatches, cutoff=exprCutoff, nCPUs=self.nCPUs)
print('Recording major metric (shape: %s, %s) to h5' % df_measure.shape, flush=True)
df_measure.to_hdf(os.path.join(saveDir, self.metricsFile), key=self.majorMetric, mode='a', complevel=4, complib='zlib')
# For each batch calculate fraction of cells expressing each gene
df_fraction = df_expr.replace(0, np.nan).replace(0., np.nan).groupby(axis=1, level='batch').agg('count') /\
df_expr.fillna(0.).groupby(axis=1, level='batch').agg('count')
print('Recording fractions (shape: %s, %s) to h5' % df_fraction.shape, flush=True)
df_fraction.to_hdf(os.path.join(saveDir, 'perGeneStats.h5'), key='df_fraction', mode='a', complevel=4, complib='zlib')
# For each batch calculate median of non-zero values of each gene expression
df_median_expr = df_expr.replace(0, np.nan).replace(0., np.nan).groupby(axis=1, level='batch').agg(np.nanmedian)
            print('Recording median expression (shape: %s, %s) to h5' % df_median_expr.shape, flush=True)
df_median_expr.to_hdf(os.path.join(saveDir, 'perGeneStats.h5'), key='df_expression', mode='a', complevel=4, complib='zlib')
            # For each batch count the number of cells (taken from the first row of per-gene counts)
se_count = df_expr.fillna(0.).groupby(axis=1, level='batch').agg('count').iloc[0]
print('Recording per batch counts to h5', flush=True)
se_count.to_hdf(os.path.join(saveDir, 'perGeneStats.h5'), key='se_count', mode='a', complevel=4, complib='zlib')
return
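        # Resulting on-disk layout (illustrative summary of the writes above):
        # saveDir/self.metricsFile holds the distance matrix under key self.majorMetric,
        # while saveDir/perGeneStats.h5 holds 'df_fraction', 'df_expression' and 'se_count'.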
def makeCombinationPlot(df, n_clusters = 10, adjustText = toggleAdjustText):
'''Builds and plots dendrogram, heatmap, and bargraphs.
Parameters:
df: pandas.DataFrame
All calculated measurement
                metric: str
                    Taken from self.dendrogramMetric; name of metric used to build dendrogram and identify clusters in it
                    Metric has to be of type "euclidean" to use linkage method "ward"
                    With any other metric (e.g. correlation distance) use linkage method "average" etc.
                    Metric 'euclidean_missing' uses commonly-non-missing points only
                linkageMethod: str
                    Taken from self.dendrogramLinkageMethod; linkage algorithm to use
n_clusters: int, Default 10
Specific number of clusters to find
                adjustText: boolean, Default toggleAdjustText
Whether to use module to fix text overlap in figure
Returns:
dict:
Figure data for export
Usage:
makeCombinationPlot(df)
'''
nonlocal figureSize, self, togglePublicationFigure, markersLabelsRepelForce, includeClusterNumber
metric = self.dendrogramMetric
linkageMethod = self.dendrogramLinkageMethod
if metric == 'euclidean_missing':
metric = metric_euclidean_missing
if self.panels is None:
self.panels = self.standardPanels
if addDeprecatedPanels:
self.panels += deprecatedPanels
self.panels += self.combinationPanels
def addDendro(fig, dataGenes, M, coords, metric = metric, linkageMethod = 'ward', linewidth = 0.25, adjustText = adjustText, fontsize = 6):
genesSubset = list(stimulators) + list(inhibitors)
ax = fig.add_axes(coords, frame_on=False)
Z = hierarchy.linkage(np.nan_to_num(M, nan=max(M)), method=linkageMethod, optimal_ordering=True)
origLineWidth = matplotlib.rcParams['lines.linewidth']
matplotlib.rcParams['lines.linewidth'] = linewidth
cmap = cm.gist_ncar(np.linspace(0, 0.5, n_clusters + 1))
hierarchy.set_link_color_palette([matplotlib.colors.rgb2hex(rgb[:3]) for rgb in cmap])
D = hierarchy.dendrogram(Z, ax=ax, color_threshold = (Z[-n_clusters,2] + Z[-n_clusters+1,2]) / 2, above_threshold_color='k', orientation='top')
hierarchy.set_link_color_palette(None)
matplotlib.rcParams['lines.linewidth'] = origLineWidth
reindexed = pd.Index(dataGenes[D['leaves']]).reindex(pd.Index(genesSubset).intersection(dataGenes))
genes = reindexed[0][reindexed[1] > -1].values
locations = reindexed[1][reindexed[1] > -1]
if True:
tickLabelsColors = np.array(['navy']*len(dataGenes), dtype=np.dtype('U20'))
xtickslabels = np.array(['']*len(dataGenes), dtype=np.dtype('U20'))
for gene, location in zip(genes, locations):
xtickslabels[location] = gene
tickLabelsColors[location] = 'green' if (gene in stimulators) else 'red'
ax.set_xticklabels(xtickslabels, fontsize=4)
ax.tick_params(axis='y', labelsize=4, width=0.25, length=1)
ax.set_yticklabels([])
ax.set_yticks([])
for xtick, color in zip(ax.get_xticklabels(), tickLabelsColors):
xtick.set_color(color)
texts = []
origPos = []
for xpos, xtext, color in zip(ax.get_xticks(), xtickslabels, tickLabelsColors):
if xtext != '':
texts.append(ax.text(xpos, -2., xtext, fontsize=fontsize, rotation=90, va='top', ha='center', color=color))
origPos.append(xpos)
ticks_x = []
ticks_y = []
vdistance = -0.01 * ax.get_ylim()[1]
for tick in ax.get_xticks():
ticks_x.extend([tick, tick, None])
ticks_y.extend([0, vdistance, None])
ax.plot(ticks_x, ticks_y, color='k', lw=0.4, clip_on=False)
ax.set_xticklabels([])
if adjustText:
adjustTexts1D(texts, fig, ax)
#adjust_text(texts, va='top', ha='center', autoalign='x', lim=400, only_move={'text':'x'}, force_text=(markersLabelsRepelForce, 0.5))
v = 0.05 * ax.get_ylim()[1]
for text, opos in zip(texts, origPos):
text._y = -v
ax.plot([text._x, opos], [text._y, 0.], color=text._color, lw=0.5, clip_on=False)
if True:
clusters = scipy.cluster.hierarchy.fcluster(Z, t=n_clusters, criterion='maxclust')[D['leaves']] - 1
clusterBoundaries = (np.where(clusters - np.roll(clusters, 1) != 0)[0]/ len(D['leaves'])) * ax.get_xlim()[1]
clusterBoundaries = np.append(clusterBoundaries, ax.get_xlim()[1])
clusterCenters = clusterBoundaries[:-1] + ((clusterBoundaries - np.roll(clusterBoundaries, 1))/2.)[1:]
vposition = (Z[-n_clusters,2] + Z[-n_clusters+1,2]) / 5
if includeClusterNumber:
for cluster, position in zip(np.unique(clusters), clusterCenters):
ltext = ax.text(position, vposition, '#%s' % cluster, fontsize=fontsize, color='white', va='center', ha='center')
ltext.set_path_effects([path_effects.Stroke(linewidth=1., foreground='k'), path_effects.Normal()])
return {'order': D['leaves'],
'M': squareform(M)[:, D['leaves']][D['leaves'], :],
'genes': genes,
'allGenes': dataGenes[D['leaves']],
'locations': locations,
'tickLabelsColors': tickLabelsColors,
'xtickslabels': xtickslabels,
'clusters': clusters,
'clusterBoundaries': clusterBoundaries / 10.,
'clusterCenters': clusterCenters / 10.}
def addHeatmap(fig, dataArgs, coords, adjustText = adjustText, fontsize = 6):
plottingMajorMetricOfSelected = False
if plottingMajorMetricOfSelected:
M = dataArgs['majorMetricOfSelected']
else:
M = dataArgs['M']
order = dataArgs['order']
genes = dataArgs['genes']
locations = dataArgs['locations']
tickLabelsColors = dataArgs['tickLabelsColors']
tickslabels = dataArgs['xtickslabels']
clusters = dataArgs['clusters']
clusterBoundaries = dataArgs['clusterBoundaries']
clusterCenters = dataArgs['clusterCenters']
ax = fig.add_axes(coords, frame_on=True)
masked_M = np.ma.array(M, mask=np.isnan(M))
if plottingMajorMetricOfSelected:
cmap = copy.copy(plt.cm.bwr)
cmap.set_bad('grey')
vmin, vmax = -1, 1
else:
cmap = copy.copy(plt.cm.bwr) # Greens_r
cmap.set_bad('red')
vmin, vmax = None, None
im = ax.imshow(masked_M, cmap=cmap, aspect='auto', vmin=vmin, vmax=vmax, interpolation='None', extent=(-0.5, M.shape[0] - 0.5, M.shape[1] - 0.5, -0.5))
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Selected x tick labels
if True:
ax.set_xticks(range(len(tickslabels)))
ax.set_xticklabels(tickslabels, fontsize=4)
for xtick, color in zip(ax.get_xticklabels(), tickLabelsColors):
xtick.set_color(color)
texts = []
origPos = []
for xpos, xtext, color in zip(ax.get_xticks(), tickslabels, tickLabelsColors):
if xtext != '':
texts.append(ax.text(xpos, 1.01*ax.get_ylim()[0], xtext, fontsize=fontsize, rotation=90, va='top', ha='center', color=color))
origPos.append(xpos)
ax.set_xticklabels([])
ax.set_xticks([])
if adjustText:
adjustTexts1D(texts, fig, ax)
#adjust_text(texts, va='top', ha='center', autoalign='x', lim=400, only_move={'text':'x'}, force_text=(1.05*markersLabelsRepelForce, 0.5))
v = ax.get_ylim()[0]
for text, opos in zip(texts, origPos):
text._y = 1.01 * v
ax.plot([text._x, opos], [text._y, v], color=text._color, lw=0.5, clip_on=False)
# Selected y tick labels
if True:
ax.set_yticks(range(len(tickslabels)))
ax.set_yticklabels(tickslabels, fontsize=4)
for ytick, color in zip(ax.get_yticklabels(), tickLabelsColors):
ytick.set_color(color)
texts = []
origPos = []
for ypos, xtext, color in zip(ax.get_yticks(), tickslabels, tickLabelsColors):
if xtext != '':
texts.append(ax.text(-0.01*ax.get_xlim()[1], ypos, xtext, fontsize=fontsize, va='center', ha='right', color=color))
origPos.append(ypos)
ax.set_yticklabels([])
ax.set_yticks([])
if adjustText:
adjustTexts1D([ele for ele in reversed(texts)], fig, ax)
#adjust_text(texts, va='center', ha='right', autoalign='y', lim=400, only_move={'text':'y'}, force_text=(1.05*markersLabelsRepelForce, 0.5))
v = -0.01 * ax.get_xlim()[1]
                for text, opos in zip(texts, origPos):
str(y + 1) + ";" + str(x) + "H" + string)
@staticmethod
def beep():
""" Emit a short attention sound.
"""
curses.write("\07")
@staticmethod
def curs_set(setting):
""" Set the cursor state. visibility can be set to 0, 1, for invisible, normal.
"""
if setting == 0:
curses.write("\33[?25l")
elif setting == 1:
curses.write("\33[?25h")
elif setting == 2:
raise AttributeError("curs_set (2) [very visible] not implemented")
else:
raise AttributeError("curs_set argument must be 0 or 1: " + setting)
@staticmethod
def delay_output(milliseconds):
""" Insert an ms millisecond pause in output.
"""
time.sleep(milliseconds / 1000)
@staticmethod
def flushinp():
""" Flush all input buffers.
"""
# TODO: There has to be a better way to clear the usb serial buffer
while supervisor.runtime.serial_bytes_available:
sys.stdin.read(1)
@staticmethod
def _get_fg(attr):
""" Decodes the attr var to get the foreground color
"""
_cl = attr & curses.foreground
        return None if _cl == 256 else _cl
@staticmethod
def _get_bg(attr):
""" Decodes the attr var to get the background color
"""
_cl = (attr & curses.background) >> 9
        return None if _cl == 256 else _cl
@staticmethod
# TODO: May not need this method anymore
def clean_ansi(string):
""" Cleans a color ansi code out of a string
"""
        # Get the location of the first escape sequence. If none exists, this will return -1 and
        # the loop will be skipped
_l = string.find("\33[")
# While there is another escape sequence to remove, loop
        while _l != -1:
# Cut the sequence out of the string. Starting at the escape sequence, look for
# the letter "m", which denotes the end of the sequence. Set the string to be that
# portion of the string which is before and after the escape.
string = string[:_l] + string[string.find("m", _l) + 1 :]
            # Find the next escape sequence, if there is one. If none exists, this will return a -1
# and the loop will end
_l = string.find("\33[")
# Return the modified string
return string
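    # Worked example (hypothetical input): clean_ansi("\33[38;5;196mred\33[m text")
    # strips both escape sequences and returns "red text".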
# For compatibility with the curses codebase, ignore this pylint error
# pylint: disable=invalid-name
class window:
""" Window objects, as returned by initscr() and newwin()
"""
def __init__(self, window_size=(0, 0), y_pos=0, x_pos=0):
self.window_size = window_size
self.x_pos = x_pos
self.y_pos = y_pos
def addnstr(self, y, x, n, string, attr=None):
""" Paint at most n characters of the character string str at (y, x) with attributes attr,
overwriting anything previously on the display.
"""
return self.addstr(y, x, string[0:n], attr)
def addstr(self, y, x, string, attr=None):
""" Paint the character string str at (y, x) with attributes attr, overwriting anything
previously on the display.
"""
# TODO: Add support for A_ attributes (like blink, underline, etc)
        # Set prefix and postfix to None here in case they are not set to something else later
prefix = None
postfix = None
# See if there is an attribute
if attr:
# pylint: disable=protected-access
fg = curses._get_fg(attr)
bg = curses._get_bg(attr)
# 16 color mode
if curses.color_mode is curses.MODE_16:
# TODO: Add support for intensity (currently set at 1)
# We are using 16 colors
if fg or bg:
postfix = "\33[m"
# Try to translate the color from 256 to 16. If there is a key error, then there
# is not a translation between the color modes. In that case, we write nothing
# to prefix
try:
if fg:
fg = curses._COLOR_256_16_FG[fg]
except KeyError:
fg = curses._COLOR_256_16_FG[curses.COLOR_BLACK]
try:
if bg:
bg = curses._COLOR_256_16_BG[bg]
except KeyError:
bg = curses._COLOR_256_16_BG[curses.COLOR_BLACK]
if fg and bg:
prefix = "\33[1;" + str(fg) + ";1;" + str(bg) + "m"
else:
                        # If fg and bg aren't both set, then only one of them is. Use whichever
                        # one is set
prefix = "\33[1;" + str(fg or bg) + "m"
# 256 color mode
else:
if fg or bg:
postfix = "\33[m"
if fg and bg:
prefix = "\33[38;5;" + str(fg) + ";48;5;" + str(bg) + "m"
elif fg:
prefix = "\33[38;5;" + str(fg) + "m"
else: # bg
prefix = "\33[48;5;" + str(bg) + "m"
# Split the input string by newline characters and grab each split string as an individual.
# If there are no newline characters in the string, the for loop will run only once
lst = string.split("\n")
lst_len = len(lst) - 1 # This line minus one to reduce work in the loop
for idx, line in enumerate(lst):
# While this split string is longer than the window, starting at the given x position,
# cut it into smaller strings and simulate a newline
while x + len(line) > self.window_size[1]:
# Cut the string so that it will fit in this line
self._addstr(y, x, prefix, line[0 : self.window_size[1] - x], None)
# Set the prefix to an empty string because we have already written it to the
# screen
prefix = ""
                # Use the remainder of the cut string for the next pass; slice with the
                # current x before resetting the cursor position
                line = line[self.window_size[1] - x :]
                # Move to the next line, and set the cursor to the start of the window
                y = y + 1
                x = 0
# After the string has been cut until it can fit in a single line, print the string. If
# the string was never bigger than a single line, then only this _addstr will run. If
# we are not at the end of our for loop, then we have arrived at a newline character.
# In this case, print the line plus padded spaces to the end of the line. This is to
# replicate curses
if idx < lst_len:
self._addstr(
y, x, prefix, line + " " * (self.window_size[1] - len(line)), None,
)
# Set the prefix to an empty string because we have already written it to the
# screen
prefix = ""
# Now we have reached the end of the first split string. If there are more strings
# split based on newline characters, then we want to prep for them next loop. Go
# ahead and move to the next line, and set the x position to 0
y = y + 1
x = 0
else:
# Since this will always be run at least once, at the very end of the string, we
# add the postfix here
self._addstr(y, x, prefix, line, postfix)
# Don't need to worry about clearing prefix or postfix here because this is the
# end of the loop
def _addstr(self, y, x, prefix, string, postfix):
""" Internal function for writing to the screen. Checks to ensure that the given string
        will not exceed the boundaries of the window.
Also checks to ensure that the last character in the window is not written to. This
seems to be a quirk with curses that needs to be emulated.
"""
str_len = len(string)
# Ensure that we have not written up to the last character in the window. This is
# a quirk with curses that needs to be replicated
if (
# Ensure that y is not greater than the window size
y > self.window_size[0]
# Ensure that y is not negative
or y < 0
# Ensure that x plus whatever we are writing to the screen is less than or equal to
# the width of the window
or x + str_len > self.window_size[1]
# Ensure that x is not negative
or x < 0
):
raise curses.error(
"A Out of Bounds: ("
+ str(y)
+ ","
+ str(x)
+ "->"
+ str(str_len)
+ ") "
+ str(self.window_size)
)
# Make sure that the last character in the window will not be written to
if y >= self.window_size[0] and x + str_len >= self.window_size[1]:
raise curses.error("Can't write to the last cell in a window")
# Write the string to the screen
self._write_pos(y, x, prefix, string, postfix)
    def _write_pos(self, y, x, prefix, string, postfix):
# ============================================================================
#
# Copyright (c) 2007-2010 Integral Technology Solutions Pty Ltd,
# All Rights Reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
# LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS
# END USER LICENSE AGREEMENT (ELUA).
#
# ============================================================================
import validation_helper as helper
from java.io import File
def run(config):
if validateAdminServerProperty(config):
return False
return True
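# Illustrative fragment of the domain properties file this validator expects
# (property names are taken from the checks below; the values are examples only):
#
#   wls.admin.listener.port=7001
#   wls.admin.listener.enableSSL=true
#   wls.admin.listener.sslPort=7002
#   wls.admin.channel.protocol=t3
#   wls.admin.log.rotationType=byTime
#   wls.admin.log.rotationTime=03:00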
def validateAdminServerProperty(domainProperties):
error = 0
helper.printHeader('[VALIDATING] admin server properties')
adminPort = domainProperties.getProperty('wls.admin.listener.port')
if not adminPort is None and len(adminPort)>0:
try:
int(adminPort)
except ValueError:
log.error('Please verify wls.admin.listener.port [' + str(adminPort) + '] property.')
else:
if int(adminPort)<0 or int(adminPort)>65535:
log.error('Please verify wls.admin.listener.port property, port number is not in valid range [0-65535].')
else:
log.debug('Admin server port [' + str(adminPort) + '] is valid.')
enableSSL = domainProperties.getProperty('wls.admin.listener.enableSSL')
if not enableSSL is None and len(enableSSL)>0:
if not enableSSL.upper()=='TRUE' and not enableSSL.upper()=='FALSE':
error = 1
log.error('The wls.admin.listener.enableSSL property supports only [true,false].')
else:
log.debug('Admin server ssl enable property [' + str(enableSSL) + '] is valid.')
if enableSSL.upper()=='TRUE':
sslPort = domainProperties.getProperty('wls.admin.listener.sslPort')
if not sslPort is None and len(sslPort)>0:
try:
int(sslPort)
except ValueError:
log.error('Please verify wls.admin.listener.sslPort [' + str(sslPort) + '] property.')
else:
if int(sslPort)<0 or int(sslPort)>65535:
log.error('Please verify wls.admin.listener.sslPort property, port number is not in valid range [0-65535].')
else:
log.debug('Admin server ssl port [' + str(sslPort) + '] is valid.')
adminchprotocol = domainProperties.getProperty('wls.admin.channel.protocol')
if not adminchprotocol is None and len(adminchprotocol)>0:
if not adminchprotocol=='t3' and not adminchprotocol=='t3s' and not adminchprotocol=='http' and not adminchprotocol=='https' and not adminchprotocol=='iiop' and not adminchprotocol=='iiops' and not adminchprotocol=='ldap' and not adminchprotocol=='ldaps' and not adminchprotocol=='admin':
error = 1
log.error('The wls.admin.channel.protocol property supports only [t3,t3s,http,https,iiop,iiops,ldap,ldaps,admin].')
else:
log.debug('Admin channel protocol property [' + str(adminchprotocol) + '] is valid.')
adminChannelPort = domainProperties.getProperty('wls.admin.channel.listener.port')
if not adminChannelPort is None and len(adminChannelPort)>0:
try:
int(adminChannelPort)
except ValueError:
log.error('Please verify wls.admin.channel.listener.port [' + str(adminChannelPort) + '] property.')
else:
if int(adminChannelPort)<0 or int(adminChannelPort)>65535:
log.error('Please verify wls.admin.channel.listener.port property, port number is not in valid range [0-65535].')
else:
log.debug('Admin channel port [' + str(adminChannelPort) + '] is valid.')
adminChannelPublicPort = domainProperties.getProperty('wls.admin.channel.listener.publicPort')
if not adminChannelPublicPort is None and len(adminChannelPublicPort)>0:
try:
int(adminChannelPublicPort)
except ValueError:
log.error('Please verify wls.admin.channel.listener.publicPort [' + str(adminChannelPublicPort) + '] property.')
else:
if int(adminChannelPublicPort)<0 or int(adminChannelPublicPort)>65535:
log.error('Please verify wls.admin.channel.listener.publicPort property, port number is not in valid range [0-65535].')
else:
log.debug('Admin channel public port [' + str(adminChannelPublicPort) + '] is valid.')
httpEnable = domainProperties.getProperty('wls.admin.channel.httpEnable')
if not httpEnable is None and len(httpEnable)>0:
if not httpEnable.upper()=='TRUE' and not httpEnable.upper()=='FALSE':
error = 1
log.error('The wls.admin.channel.httpEnable property supports only [true,false].')
else:
log.debug('Admin http channel enable property [' + str(httpEnable) + '] is valid.')
enableTunneling = domainProperties.getProperty('wls.admin.enableTunneling')
if not enableTunneling is None and len(enableTunneling)>0:
if not enableTunneling.upper()=='TRUE' and not enableTunneling.upper()=='FALSE':
error = 1
log.error('The wls.admin.enableTunneling property supports only [true,false].')
else:
log.debug('Admin tunnelling enable property [' + str(enableTunneling) + '] is valid.')
admincustomlog = domainProperties.getProperty('wls.admin.log.custom')
if not admincustomlog is None and len(admincustomlog)>0:
if not admincustomlog.upper()=='TRUE' and not admincustomlog.upper()=='FALSE':
error = 1
log.error('The wls.admin.log.custom property supports only [true,false].')
else:
log.debug('Admin custom log enable property [' + str(admincustomlog) + '] is valid.')
if admincustomlog.upper()=='TRUE':
filename = domainProperties.getProperty('wls.admin.log.filename')
if not filename is None and len(filename)>0:
file = File(filename)
if file.isAbsolute():
if not file.exists():
log.debug('[NOTE] Please make sure the user running this script has permission to create directory and file [' + str(filename) + '].')
limitNumberOfFile = domainProperties.getProperty('wls.admin.log.limitNumOfFile')
if not limitNumberOfFile is None and len(limitNumberOfFile)>0:
if not limitNumberOfFile.upper()=='TRUE' and not limitNumberOfFile.upper()=='FALSE':
error = 1
log.error('The wls.admin.log.limitNumOfFile property supports only [true,false].')
else:
log.debug('Admin log limit number of file property [' + str(limitNumberOfFile) + '] is valid.')
fileToRetain = domainProperties.getProperty('wls.admin.log.fileToRetain')
if not fileToRetain is None and len(fileToRetain)>0:
try:
int(fileToRetain)
except ValueError:
log.error('Please verify wls.admin.log.fileToRetain [' + str(fileToRetain) + '] property.')
else:
if int(fileToRetain)<1 or int(fileToRetain)>99999:
log.error('Please verify wls.admin.log.fileToRetain property, number is not in valid range [1-99999].')
else:
log.debug('Admin log file to retain [' + str(fileToRetain) + '] is valid.')
logRotateOnStartup = domainProperties.getProperty('wls.admin.log.rotateLogOnStartup')
if not logRotateOnStartup is None and len(logRotateOnStartup)>0:
if not logRotateOnStartup.upper()=='TRUE' and not logRotateOnStartup.upper()=='FALSE':
error = 1
log.error('The wls.admin.log.rotateLogOnStartup property supports only [true,false].')
else:
log.debug('Admin log rotate on startup property [' + str(logRotateOnStartup) + '] is valid.')
rotationType = domainProperties.getProperty('wls.admin.log.rotationType')
if not rotationType is None and len(rotationType)>0:
if not rotationType == 'bySize' and not rotationType == 'byTime':
error = 1
log.error('The wls.admin.log.rotationType property supports only [bySize,byTime].')
else:
log.debug('Admin log rotation type property [' + str(rotationType) + '] is valid.')
if rotationType == 'bySize':
fileMinSize = domainProperties.getProperty('wls.admin.log.fileMinSize')
if not fileMinSize is None and len(fileMinSize)>0:
try:
int(fileMinSize)
except ValueError:
log.error('Please verify wls.admin.log.fileMinSize [' + str(fileMinSize) + '] property.')
else:
if int(fileMinSize)<0 or int(fileMinSize)>65535:
log.error('Please verify wls.admin.log.fileMinSize [' + str(fileMinSize) + '] property, number is not in valid range [0-65535].')
else:
log.debug('Admin log file min size [' + str(fileMinSize) + '] is valid.')
if rotationType == 'byTime':
rotationTime = domainProperties.getProperty('wls.admin.log.rotationTime')
if not rotationTime is None and len(rotationTime)>0:
if rotationTime.find(':')==-1:
error = 1
log.error('Please verify wls.admin.log.rotationTime [' + str(rotationTime) + '] property, the property supports time format [HH:MM].')
else:
if len(rotationTime)<4 or len(rotationTime)>5:
error = 1
log.error('The wls.admin.log.rotationTime [' + str(rotationTime) + '] property, the property supports time format [HH:MM].')
else:
log.debug('Admin log rotation time [' + str(rotationTime) + '] is valid.')
fileTimespan = domainProperties.getProperty('wls.admin.log.fileTimeSpan')
if not fileTimespan is None and len(fileTimespan)>0:
try:
int(fileTimespan)
except ValueError:
log.error('Please verify wls.admin.log.fileTimeSpan [' + str(fileTimespan) + '] property.')
else:
if int(fileTimespan)<1:
log.error('Please verify wls.admin.log.fileTimeSpan [' + str(fileTimespan) + '] property, number is not in valid range [>=1].')
else:
log.debug('Admin log file timespan [' + str(fileTimespan) + '] is valid.')
rotationDir = domainProperties.getProperty('wls.admin.log.rotationDir')
if not rotationDir is None and len(rotationDir)>0:
file = File(rotationDir)
if file.isAbsolute():
if not file.exists():
log.debug('[NOTE] Please make sure the user running this script has permission to create directory and file [' + str(rotationDir) + '].')
fileSeverity = domainProperties.getProperty('wls.admin.log.logFileSeverity')
if not fileSeverity is None and len(fileSeverity)>0:
if not fileSeverity == 'Debug' and not fileSeverity == 'Info' and not fileSeverity == 'Warning':
error = 1
log.error('The wls.admin.log.logFileSeverity property supports only [Debug,Info,Warning].')
else:
log.debug('Admin log file severity property [' + str(fileSeverity) + '] is valid.')
broadcastSeverity = domainProperties.getProperty('wls.admin.log.broadcastSeverity')
if not broadcastSeverity is None and len(broadcastSeverity)>0:
if not broadcastSeverity == 'Trace' and not broadcastSeverity == 'Debug' and not broadcastSeverity == 'Info' and not broadcastSeverity == 'Notice' and not broadcastSeverity == 'Warning' and not broadcastSeverity == 'Error' and not broadcastSeverity == 'Critical' and not broadcastSeverity == 'Alert' and not broadcastSeverity == 'Emergency' and not broadcastSeverity == 'Off':
error = 1
log.error('The wls.admin.log.broadcastSeverity property supports only [Trace,Debug,Info,Notice,Warning,Error,Critical,Alert,Emergency,Off].')
else:
log.debug('Admin broadcast severity property [' + str(broadcastSeverity) + '] is valid.')
memoryBufferSeverity = domainProperties.getProperty('wls.admin.log.memoryBufferSeverity')
if not memoryBufferSeverity is None and len(memoryBufferSeverity)>0:
        if not memoryBufferSeverity == 'Trace' and not memoryBufferSeverity == 'Debug' and not memoryBufferSeverity == 'Info' and not memoryBufferSeverity == 'Notice' and not memoryBufferSeverity == 'Warning' and not memoryBufferSeverity == 'Error' and not memoryBufferSeverity == 'Critical' and not memoryBufferSeverity == 'Alert' and not memoryBufferSeverity == 'Emergency' and not memoryBufferSeverity == 'Off':
error = 1
log.error('The wls.admin.log.memoryBufferSeverity property supports only [Trace,Debug,Info,Notice,Warning,Error,Critical,Alert,Emergency,Off].')
else:
log.debug('Admin memory buffer severity property [' + str(memoryBufferSeverity) + '] is valid.')
adminhttpcustomlog = domainProperties.getProperty('wls.admin.httplog.enable')
if not adminhttpcustomlog is None and len(adminhttpcustomlog)>0:
if not adminhttpcustomlog.upper()=='TRUE' and not adminhttpcustomlog.upper()=='FALSE':
error = 1
log.error('The wls.admin.httplog.enable property supports only [true,false].')
else:
log.debug('Admin http custom log enable property [' + str(adminhttpcustomlog) + '] is valid.')
if adminhttpcustomlog.upper()=='TRUE':
filename = domainProperties.getProperty('wls.admin.httplog.filename')
# joke-lee/s3-tests
import nose
import random
import string
from nose.plugins.attrib import attr
import uuid
from nose.tools import eq_ as eq
from . import (
get_client
)
region_name = ''
# recursive function for generating an arithmetical expression
def random_expr(depth):
# depth is the complexity of expression
if depth==1 :
return str(int(random.random() * 100) + 1)+".0"
return '(' + random_expr(depth-1) + random.choice(['+','-','*','/']) + random_expr(depth-1) + ')'
def generate_s3select_where_clause(bucket_name,obj_name):
a=random_expr(4)
b=random_expr(4)
s=random.choice([ '<','>','==','<=','>=','!=' ])
try:
eval( a )
eval( b )
except ZeroDivisionError:
return
    # generate an s3select statement using the generated random expression
    # count(0)>0 means the where-clause expression evaluated to true
    # the python engine {eval( conditional expression )} should return the same boolean result.
s3select_stmt = "select count(0) from stdin where " + a + s + b + ";"
res = remove_xml_tags_from_result( run_s3select(bucket_name,obj_name,s3select_stmt) ).replace(",","")
nose.tools.assert_equal(int(res)>0 , eval( a + s + b ))
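# Illustrative outcome of the function above (hypothetical generated operands): with
# a="(12.0+3.0)", b="7.0" and s=">", the statement built is
#   select count(0) from stdin where (12.0+3.0)>7.0;
# and the returned count is compared against Python's eval("(12.0+3.0)>7.0").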
def generate_s3select_expression_projection(bucket_name,obj_name):
    # generate an s3select statement using the generated random expression
    # the statement returns an arithmetical result for the generated expression.
    # the same expression is evaluated by the python engine; the results should be close enough (epsilon)
e = random_expr( 4 )
try:
eval( e )
except ZeroDivisionError:
return
if eval( e ) == 0:
return
res = remove_xml_tags_from_result( run_s3select(bucket_name,obj_name,"select " + e + " from stdin;",) ).replace(",","")
# accuracy level
epsilon = float(0.000001)
# both results should be close (epsilon)
assert (1 - (float(res.split("\n")[1]) / eval( e )) ) < epsilon
def get_random_string():
    return uuid.uuid4().hex[:6].upper()
@attr('s3select')
def test_generate_where_clause():
# create small csv file for testing the random expressions
single_line_csv = create_random_csv_object(1,1)
bucket_name = "test"
obj_name = get_random_string() #"single_line_csv.csv"
upload_csv_object(bucket_name,obj_name,single_line_csv)
for _ in range(100):
generate_s3select_where_clause(bucket_name,obj_name)
@attr('s3select')
def test_generate_projection():
# create small csv file for testing the random expressions
single_line_csv = create_random_csv_object(1,1)
bucket_name = "test"
obj_name = get_random_string() #"single_line_csv.csv"
upload_csv_object(bucket_name,obj_name,single_line_csv)
for _ in range(100):
generate_s3select_expression_projection(bucket_name,obj_name)
def create_csv_object_for_datetime(rows,columns):
result = ""
for _ in range(rows):
row = ""
for _ in range(columns):
row = row + "{}{:02d}{:02d}-{:02d}{:02d}{:02d},".format(random.randint(0,100)+1900,random.randint(1,12),random.randint(1,28),random.randint(0,23),random.randint(0,59),random.randint(0,59),)
result += row + "\n"
return result
def create_random_csv_object(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
result = ""
if len(csv_schema)>0 :
result = csv_schema + record_delim
for _ in range(rows):
row = ""
for _ in range(columns):
row = row + "{}{}".format(random.randint(0,1000),col_delim)
result += row + record_delim
return result
def create_random_csv_object_string(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
result = ""
if len(csv_schema)>0 :
result = csv_schema + record_delim
for _ in range(rows):
row = ""
for _ in range(columns):
if random.randint(0,9) == 5:
row = row + "{}{}".format(''.join(random.choice(string.ascii_letters) for m in range(10)) + "aeiou",col_delim)
else:
row = row + "{}{}".format(''.join("cbcd" + random.choice(string.ascii_letters) for m in range(10)) + "vwxyzzvwxyz" ,col_delim)
result += row + record_delim
return result
def upload_csv_object(bucket_name,new_key,obj):
client = get_client()
client.create_bucket(Bucket=bucket_name)
client.put_object(Bucket=bucket_name, Key=new_key, Body=obj)
# validate uploaded object
c2 = get_client()
response = c2.get_object(Bucket=bucket_name, Key=new_key)
    eq(response['Body'].read().decode('utf-8'), obj, 's3select error: downloaded object not equal to uploaded object')
def run_s3select(bucket,key,query,column_delim=",",row_delim="\n",quot_char='"',esc_char='\\',csv_header_info="NONE"):
s3 = get_client()
r = s3.select_object_content(
Bucket=bucket,
Key=key,
ExpressionType='SQL',
InputSerialization = {"CSV": {"RecordDelimiter" : row_delim, "FieldDelimiter" : column_delim,"QuoteEscapeCharacter": esc_char, "QuoteCharacter": quot_char, "FileHeaderInfo": csv_header_info}, "CompressionType": "NONE"},
OutputSerialization = {"CSV": {}},
Expression=query,)
result = ""
for event in r['Payload']:
if 'Records' in event:
records = event['Records']['Payload'].decode('utf-8')
result += records
return result
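# Typical invocation (bucket/key names are those used by the tests below):
#   run_s3select("test", csv_obj_name, "select count(0) from stdin;")
# returns the CSV-serialized payload records concatenated into one string.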
def remove_xml_tags_from_result(obj):
result = ""
for rec in obj.split("\n"):
if(rec.find("Payload")>0 or rec.find("Records")>0):
continue
result += rec + "\n" # remove by split
return result
def create_list_of_int(column_pos,obj,field_split=",",row_split="\n"):
list_of_int = []
for rec in obj.split(row_split):
col_num = 1
if ( len(rec) == 0):
continue
for col in rec.split(field_split):
if (col_num == column_pos):
list_of_int.append(int(col))
col_num+=1
return list_of_int
@attr('s3select')
def test_count_operation():
csv_obj_name = get_random_string()
bucket_name = "test"
num_of_rows = 1234
obj_to_load = create_random_csv_object(num_of_rows,10)
upload_csv_object(bucket_name,csv_obj_name,obj_to_load)
res = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin;") ).replace(",","")
nose.tools.assert_equal( num_of_rows, int( res ))
@attr('s3select')
def test_column_sum_min_max():
csv_obj = create_random_csv_object(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
csv_obj_name_2 = get_random_string()
bucket_name_2 = "testbuck2"
upload_csv_object(bucket_name_2,csv_obj_name_2,csv_obj)
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select min(int(_1)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 1 , csv_obj )
res_target = min( list_int )
nose.tools.assert_equal( int(res_s3select), int(res_target))
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select min(int(_4)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 4 , csv_obj )
res_target = min( list_int )
nose.tools.assert_equal( int(res_s3select), int(res_target))
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select avg(int(_6)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 6 , csv_obj )
res_target = float(sum(list_int ))/10000
nose.tools.assert_equal( float(res_s3select), float(res_target))
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select max(int(_4)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 4 , csv_obj )
res_target = max( list_int )
nose.tools.assert_equal( int(res_s3select), int(res_target))
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select max(int(_7)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 7 , csv_obj )
res_target = max( list_int )
nose.tools.assert_equal( int(res_s3select), int(res_target))
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select sum(int(_4)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 4 , csv_obj )
res_target = sum( list_int )
nose.tools.assert_equal( int(res_s3select), int(res_target))
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select sum(int(_7)) from stdin;") ).replace(",","")
list_int = create_list_of_int( 7 , csv_obj )
res_target = sum( list_int )
nose.tools.assert_equal( int(res_s3select) , int(res_target) )
# the following queries, validates on *random* input an *accurate* relation between condition result,sum operation and count operation.
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name_2,csv_obj_name_2,"select count(0),sum(int(_1)),sum(int(_2)) from stdin where (int(_1)-int(_2)) == 2;" ) )
count,sum1,sum2,d = res_s3select.split(",")
nose.tools.assert_equal( int(count)*2 , int(sum1)-int(sum2 ) )
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0),sum(int(_1)),sum(int(_2)) from stdin where (int(_1)-int(_2)) == 4;" ) )
count,sum1,sum2,d = res_s3select.split(",")
nose.tools.assert_equal( int(count)*4 , int(sum1)-int(sum2) )
@attr('s3select')
def test_nullif_expressions():
csv_obj = create_random_csv_object(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select_nullif = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where nullif(_1,_2) is null ;") ).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 == _2 ;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_nullif, res_s3select)
res_s3select_nullif = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where not nullif(_1,_2) is null ;") ).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 != _2 ;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_nullif, res_s3select)
res_s3select_nullif = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where nullif(_1,_2) == _1 ;") ).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 != _2 ;") ).replace("\n","")
nose.tools.assert_equal( res_s3select_nullif, res_s3select)
@attr('s3select')
def test_lowerupper_expressions():
csv_obj = create_random_csv_object(1,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select lower("AB12cd$$") from stdin ;') ).replace("\n","")
nose.tools.assert_equal( res_s3select, "ab12cd$$,")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select upper("ab12CD$$") from stdin ;') ).replace("\n","")
nose.tools.assert_equal( res_s3select, "AB12CD$$,")
@attr('s3select')
def test_in_expressions():
    # purpose of test: the engine should correctly process several projections containing aggregation-functions
csv_obj = create_random_csv_object(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) in(1);')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) == 1;')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) in(1,0);')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) == 1 or int(_1) == 0;')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2) in(1,0,2);')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2) == 1 or int(_2) == 0 or int(_2) == 2;')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5);')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2)*2 == int(_3)*2 or int(_2)*2 == int(_4)*3 or int(_2)*2 == int(_5)*5;')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where characterlength(_1) == 2 and substr(_1,2,1) in ("3");')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where _1 like "_3";')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
@attr('s3select')
def test_like_expressions():
csv_obj = create_random_csv_object_string(10000,10)
csv_obj_name = get_random_string()
bucket_name = "test"
upload_csv_object(bucket_name,csv_obj_name,csv_obj)
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _1 like "%aeio%";')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substr(_1,11,4) == "aeio" ;')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _1 like "cbcd%";')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substr(_1,1,4) == "cbcd";')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _3 like "%y[y-z]";')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substr(_3,charlength(_3),1) between "y" and "z" and substr(_3,charlength(_3)-1,1) == "y";')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _2 like "%yz";')).replace("\n","")
res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substr(_2,charlength(_2),1) == "z" and substr(_2,charlength(_2)-1,1) == "y";')).replace("\n","")
nose.tools.assert_equal( res_s3select_in, res_s3select )
res_s3select_in = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _3 like "c%z";')).replace("\n","")
    res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substr(_3,charlength(_3),1) == "z" and substr(_3,1,1) == "c";')).replace("\n","")
import os
import shutil
import time
import pandas as pd
from os import path as P
import sys
from cooka.dao.dao import DatasetDao, ModelDao
from cooka.dao import db
from cooka.dao.entity import DatasetEntity, MessageEntity
from cooka.common import util, consts
from cooka.common.exceptions import EntityNotExistsException, IllegalParamException
from cooka.common.log import log_web as logger
from cooka.common.model import AnalyzeJobConf, AnalyzeStep, JobStep, SampleConf, LocaleInfo, RespPreviewDataset, \
DatasetStats, FeatureValueCount, FeatureType, FeatureCorrelation
class DatasetService:
dataset_dao = DatasetDao()
model_dao = ModelDao()
def add_analyze_process_step(self, dataset_name, analyze_job_name, step: JobStep):
step_type = step.type
with db.open_session() as s:
# 1.1. check dataset exists
d = s.query(DatasetEntity).filter(DatasetEntity.name == dataset_name).first()
if d is None:
raise EntityNotExistsException(DatasetEntity, dataset_name)
# 1.2. check event type, one type one record
messages = s.query(MessageEntity).filter(MessageEntity.author == analyze_job_name).all()
for m in messages:
if step_type == util.loads(m.content).get('type'):
raise Exception(f"Event type = {step_type} already exists .")
# 2. handle event
with db.open_session() as s:
# 2.1. create a new message
content = util.dumps(step.to_dict())
message = MessageEntity(id=util.short_uuid(), author=analyze_job_name, content=content, create_datetime=util.get_now_datetime())
s.add(message)
# 2.2. handle analyze event
if step_type == AnalyzeStep.Types.Analyzed:
# update temporary dataset
# todo handle failed analyze
if step.status == JobStep.Status.Succeed:
hints = step.extension.pop("hints")
d_stats = DatasetStats.load_dict(step.extension)
features_str = [f.to_dict() for f in d_stats.features]
update_fields = \
{
"has_header": d_stats.has_header,
"extension": step.extension,
"n_cols": d_stats.n_cols,
"n_rows": d_stats.n_rows,
"features": features_str,
"hints": hints,
"feature_summary": d_stats.feature_summary.to_dict(),
"status": DatasetEntity.Status.Analyzed
}
else:
update_fields = {
"status": DatasetEntity.Status.Failed
}
self.dataset_dao.update_by_name(s, dataset_name, update_fields)
elif step_type == AnalyzeStep.Types.PatchCorrelation:
# 1. check dataset status, only analyzed can calc relativity
dataset = self.dataset_dao.require_by_name(s, dataset_name)
if dataset.status != AnalyzeStep.Types.Analyzed:
raise ValueError(f"Dataset {dataset_name} status is not {AnalyzeStep.Types.Analyzed} .")
request_label_col = step.extension.get("label_col")
if request_label_col != dataset.label_col:
raise ValueError(f"Dataset {dataset_name} label col is {dataset.label_col} but received result is for {request_label_col}")
# 2. read extension
corr_dict = step.extension.get('corr')
# 3. load & update features
features = dataset.to_dataset_stats().features
for f in features:
correlation = corr_dict.get(f.name)
f.correlation = FeatureCorrelation(value=correlation, status=FeatureCorrelation.calc_status(correlation, request_label_col==f.name))
# 4. sort features by abs correlation
features = sorted(features, key=lambda f: abs(f.correlation.value), reverse=True)
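# A minimal illustration of the sort above: ordering by absolute correlation,
# descending, puts the strongest positive or negative correlations first, e.g.
#   sorted([0.2, -0.9, 0.5], key=abs, reverse=True)  # -> [-0.9, 0.5, 0.2]
# (the label column itself, with correlation 1.0, therefore ends up first)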
feature_dict_list = []
for f in features:
feature_dict_list.append(f.to_dict())
# 5. push back database
self.dataset_dao.update_by_name(s, dataset_name, {"features": feature_dict_list})
def brevity_dataset_pagination(self, req_dict):
# 1. read param
page_num = util.require_in_dict(req_dict, 'page_num', int, default=1)
page_size = util.require_in_dict(req_dict, 'page_size', int, default=10)
query = util.get_from_dict(req_dict, 'query', str)
order_by = util.require_in_dict(req_dict, 'order_by', str, default="create_datetime")
order = util.require_in_dict(req_dict, 'order', str, default="desc")
allow_order_by_fields = ["create_datetime", "n_experiments", "size", "n_rows", "n_cols"]
if order_by not in allow_order_by_fields:
raise ValueError(f"Order by field should be one of {','.join(allow_order_by_fields)}, but input is: {order_by}")
allow_order_strategies = ["desc", "asc"]
if order not in allow_order_strategies:
raise ValueError(f"Order strategy should be one of {','.join(allow_order_strategies)}, but input is: {order}")
if page_num < 1:
raise ValueError("Param page_num should be >= 1.")
if page_size < 1:
raise ValueError("Param page_size should be >= 1.")
def _handle(model_dao, session, dataset: DatasetEntity):
d = util.sqlalchemy_obj_to_dict(dataset)
d['file_path'] = util.relative_path(dataset.file_path)
d['create_datetime'] = util.to_timestamp(dataset.create_datetime)
d['n_experiments'] = model_dao.query_n_experiment(session, dataset.name)
del d['features']
del d['feature_summary']
del d['extension']
return d
# 2. query
with db.open_session() as s:
datasets, total = self.dataset_dao.pagination(s, page_num, page_size, query, order_by, order)
datasets = [_handle(self.model_dao, s, d) for d in datasets]
return datasets, total
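# Hypothetical usage sketch of the pagination API above (values assumed):
#   service = DatasetService()
#   rows, total = service.brevity_dataset_pagination(
#       {"page_num": 1, "page_size": 10,
#        "order_by": "create_datetime", "order": "desc"})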
def create_dataset(self, dataset_name, temporary_dataset_name):
with db.open_session() as s:
# 1. check temporary dataset
temporary_dataset = s.query(DatasetEntity).filter(DatasetEntity.name == temporary_dataset_name).first()
if temporary_dataset is None:
raise EntityNotExistsException(DatasetEntity, temporary_dataset_name)
if temporary_dataset.status != DatasetEntity.Status.Analyzed:
raise IllegalParamException('dataset_name', temporary_dataset_name, f'Dataset is not ready, status is {temporary_dataset.status}')
# 2. check dataset name
new_dataset_dir = P.join(consts.PATH_DATASET, dataset_name)
if P.exists(new_dataset_dir):
raise IllegalParamException('dataset_name', dataset_name, f'File path {new_dataset_dir} of dataset already exists')
# 3. Read temporary dataset
# temporary_dataset_dict = util.loads(temporary_dataset.extension)
# 4. make dataset dir; this cannot be rolled back, but the steps below should be robust enough
os.makedirs(new_dataset_dir, exist_ok=False)
# 5. move file
file_path = temporary_dataset.get_abs_file_path()
new_dataset_file_path = P.join(new_dataset_dir, f'data{util.get_file_suffix(file_path)}')
shutil.copy(file_path, new_dataset_file_path)
# 6. create meta.json
# temporary_dataset_dict['name'] = dataset_name
# temporary_dataset_dict['create_datetime'] = util.get_now_long()
# with open(P.join(new_dataset_dir, 'meta.json'), 'w') as f:
# f.write(util.dumps(temporary_dataset_dict))
properties = {"is_temporary": False,
"name": dataset_name,
"file_path": new_dataset_file_path}
# 7. change status
affect_rows = s.query(DatasetEntity).filter(DatasetEntity.name == temporary_dataset_name, DatasetEntity.is_temporary == True).update(properties)
if affect_rows != 1:
raise Exception("Update dataset failed.")
def retrieve(self, dataset_name, n_top_value):
with db.open_session() as s:
dataset = self.dataset_dao.require_by_name(s, dataset_name)
dict_value = util.sqlalchemy_obj_to_dict(dataset)
dict_value['file_path'] = util.relative_path(dataset.file_path)
if dataset.status == DatasetEntity.Status.Analyzed:
for i, f in enumerate(dict_value['features']):
if f['type'] in [FeatureType.Categorical, FeatureType.Continuous]:
if f['unique']['value'] > n_top_value:
# calc top {n_count_value}
extension = f['extension']
extension['value_count'].sort(key=lambda _: _['value'], reverse=True)  # sort counts descending so the largest values come first
top_value_count = extension['value_count'][: n_top_value]
remain_value_count = extension['value_count'][n_top_value:]
remain_count = 0
for remain_dict in remain_value_count:
remain_count = remain_count + remain_dict['value']
top_value_count.append(
FeatureValueCount(type="Remained_SUM", value=remain_count).to_dict()
)
dict_value['features'][i]['extension']['value_count'] = top_value_count
# extension['value_count'] = top_value_count
# dict_value['detail'] = dict_value['extension']
extension = dict_value.pop('extension')
dict_value['extension'] = {"sample_conf": extension['sample_conf']}
return dict_value
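# Worked example of the top-N aggregation in retrieve() (counts made up):
# with value_count = [{"type": "a", "value": 5}, {"type": "b", "value": 3},
# {"type": "c", "value": 1}] and n_top_value = 2, the result keeps the two
# largest rows and appends {"type": "Remained_SUM", "value": 1} for the rest.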
def delete(self, dataset_name):
with db.open_session() as s:
# 1. delete record
dataset = self.dataset_dao.require_by_name(s, dataset_name)
is_temporary = dataset.is_temporary
self.dataset_dao.delete(s, dataset_name)
# 2. delete file only not temporary
if is_temporary is False:
if "/" not in dataset_name and len(dataset_name) > 1:
if len(consts.PATH_DATASET) > 1:
dataset_dir = P.join(consts.PATH_DATASET, dataset_name)
if P.exists(dataset_dir) and P.isdir(dataset_dir):
logger.info(f"Remove file at: {dataset_dir}")
shutil.rmtree(dataset_dir)
else:
raise ValueError(f"Dataset dir {dataset_dir} is not a directory or does not exist; there may be a bug here.")
else:
raise ValueError("Dataset dir path too short, cannot delete.")
else:
raise ValueError("Dataset name contains '/' or is too short.")
def create_temporary_dataset(self, req_dict):
sample_strategy = util.require_in_dict(req_dict, 'sample_strategy', str, 'random_rows')
if SampleConf.Strategy.Percentage == sample_strategy:
percentage = util.get_from_dict(req_dict, 'percentage', int, 30)
n_rows = None
elif SampleConf.Strategy.RandomRows == sample_strategy:
n_rows = util.get_from_dict(req_dict, 'n_rows', int, 1000)
percentage = None
elif SampleConf.Strategy.WholeData == sample_strategy:
n_rows = None
percentage = None
else:
raise ValueError(f"Not support sample strategy: {sample_strategy}")
upload_took = util.require_in_dict(req_dict, 'upload_took', float)
file_path = util.require_in_dict(req_dict, 'file_path', str)
source_type = util.require_in_dict(req_dict, 'source_type', str)
sample_conf = SampleConf(sample_strategy=sample_strategy, percentage=percentage, n_rows=n_rows)
# 1. validate param
if source_type not in [DatasetEntity.SourceType.Upload, DatasetEntity.SourceType.Import]:
raise IllegalParamException('source_type', source_type, f'Should in {",".join([DatasetEntity.SourceType.Upload, DatasetEntity.SourceType.Import])}')
if source_type == DatasetEntity.SourceType.Upload:
upload_file_prefix = P.join(consts.FIELD_TMP, consts.FIELD_UPLOAD)
if not file_path.startswith(upload_file_prefix):
raise ValueError(f"For upload file should path should start with {upload_file_prefix} but it's {file_path}")
else:
# fix relative path
file_path = P.join(consts.DATA_DIR, file_path)
if not P.exists(file_path):
raise ValueError(f"File={file_path} not exists")
if not P.isfile(file_path):
raise ValueError(f"File={file_path} is not a file")
util.validate_sample_conf(sample_conf)
# 2. create
if source_type == DatasetEntity.SourceType.Upload:
return self._create_temporary_dataset(source_type, file_path, upload_took, sample_conf)
elif source_type == DatasetEntity.SourceType.Import:
t1 = time.time()
internal_path = util.temporary_upload_file_path(P.basename(file_path))
os.makedirs(P.dirname(internal_path), exist_ok=True)
shutil.copy(file_path, internal_path)
took = time.time() - t1
logger.info(f"Copy file to {internal_path}")
return self._create_temporary_dataset(source_type, internal_path, took, sample_conf)
else:
raise IllegalParamException('source_type', source_type, f'Should be one of {",".join([DatasetEntity.SourceType.Upload, DatasetEntity.SourceType.Import])}')
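# Hypothetical request sketch (field values assumed; the source-type strings
# depend on DatasetEntity.SourceType):
#   req = {"sample_strategy": "random_rows", "n_rows": 1000,
#          "upload_took": 1.5, "source_type": DatasetEntity.SourceType.Upload,
#          "file_path": "tmp/upload/iris.csv"}
#   service.create_temporary_dataset(req)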
def is_dataset_exists(self, dataset_name):
with db.open_session() as s:
d = self.dataset_dao.find_by_name(s, dataset_name)
exists_in_db = d is not None
exists_file = P.exists(P.join(consts.PATH_DATABASE, dataset_name))
return exists_in_db or exists_file
def choose_temporary_dataset_name(self, file_name):
# 1. cut suffix
file_name = util.cut_suffix(file_name)
# 2. try up to 1000 numbered candidate names
for i in range(1000):
if i == 0:
candidate_name = file_name
else:
candidate_name = f"{file_name}_{i}"
if not self.is_dataset_exists(candidate_name):
return candidate_name
# 3. if all num name used, try datetime name
return f'{file_name}_{util.human_datetime()}'
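# e.g. "iris.csv" -> candidates "iris", "iris_1", ..., "iris_999"; if every
# numbered name is already taken, a datetime-suffixed name is used instead.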
@staticmethod
def replace_None(s):
if s is None:
return ""
else:
return s
def _create_temporary_dataset(self, source_type, file_path, took, sample_conf: SampleConf):
now = util.get_now_datetime()
file_name = P.basename(file_path)
temporary_dataset_name = self.choose_temporary_dataset_name(file_name) # use a long name
analyze_job_name = util.analyze_data_job_name(util.cut_suffix(file_name), now)
file_size = P.getsize(file_path)
# 2. create record
td = DatasetEntity(name=temporary_dataset_name,
file_size=file_size,
is_temporary=True,
status=DatasetEntity.Status.Created,
source_type=source_type,
file_path=file_path,
file_name=file_name,
create_datetime=now, last_update_datetime=now)
with db.open_session() as s:
s.add(td)
# 3. send file transfer step
if source_type == DatasetEntity.SourceType.Upload:
step = JobStep(type=AnalyzeStep.Types.Upload,
status=AnalyzeStep.Status.Succeed,
extension={"file_size": file_size, "file_path": file_path},
took=took, datetime=util.get_now_long())
self.add_analyze_process_step(temporary_dataset_name, analyze_job_name, step)
elif source_type == DatasetEntity.SourceType.Import:
step = JobStep(type=AnalyzeStep.Types.Copy,
status=AnalyzeStep.Status.Succeed,
extension={"file_size": file_size,
"file_path": file_path},
took=took,
datetime=util.get_now_long())
self.add_analyze_process_step(temporary_dataset_name, analyze_job_name, step)
# 4. create analyze config
conf = AnalyzeJobConf(job_name=analyze_job_name,
dataset_name=temporary_dataset_name,
sample_conf=sample_conf,
path=file_path,
temporary_dataset=True,
label_col=None)
# 5. start new process
analyze_config_string = util.dumps(conf.to_dict())
logger.info(f"Analyze job conf: {analyze_config_string}")
python_executable = sys.executable
temporary_dataset_dir = util.temporary_dataset_dir(temporary_dataset_name)
os.makedirs(temporary_dataset_dir, exist_ok=True)
std_log = P.join(temporary_dataset_dir, f"{analyze_job_name}.log")
command = f"nohup {python_executable} {util.script_path('analyze_job.py')} --file_path={file_path} --job_name={analyze_job_name} --dataset_name={temporary_dataset_name} --sample_strategy={sample_conf.sample_strategy} --n_rows={self.replace_None(sample_conf.n_rows)} --percentage={self.replace_None(sample_conf.percentage)} --server_portal={consts.SERVER_PORTAL} 1>{std_log} 2>&1 &"
logger.info(f"Run analyze job command: \n{command}")
logger.info(f"Log file:\ntail -f {std_log}")
# JobManager.instance().run_job(job)
os.system(command) # ha ha ha
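# A safer launch sketch using the standard library instead of os.system
# (an alternative, not what this code does; the argument list is assumed to
# mirror the command string built above):
#   import subprocess
#   with open(std_log, "w") as log:
#       subprocess.Popen([python_executable, util.script_path('analyze_job.py'),
#                         f"--job_name={analyze_job_name}",
#                         f"--dataset_name={temporary_dataset_name}"],
#                        stdout=log, stderr=subprocess.STDOUT)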
return temporary_dataset_name, analyze_job_name
def preview(self, dataset_name: str, page_num: int, page_size: int) -> RespPreviewDataset:
"""
Args:
dataset_name:
page_num: start from 1
page_size:
Returns:
"""
# 1. validate params
if page_num < 1:
raise ValueError("Param page_num should be >= 1")
if page_size < 1:
raise ValueError("Param page_size should be >= 1")
elif(result == 0x0006): # 8XY6 : sets V[0xF] to the LSB of V[Y], divides V[Y] by 2 and puts it into V[X]
self.V[0xF] = getLSB(self.V[b1]) # Getting the LSB thanks to our great function above
self.V[b2] = (self.V[b1] >> 1) # Dividing by 2 with binary values is the same as shifting 1 bit to the right. That is why we get the LSB beforehand, because we want to know what it was before we deleted it
elif(result == 0x0007): # 8XY7 : V[X] = V[Y] - V[X]. Same as 8XY5, but VX and VY are swapped in the operation
self.V[0xF] = 1
if(self.V[b1] < self.V[b2]):
self.V[0xF] = 0
self.V[b2] = self.V[b1] - self.V[b2]
elif(result == 0x000E): # 8XYE : sets V[0xF] to the MSB of V[Y], multiplies V[Y] by 2 and puts it into V[X]
self.V[0xF] = getMSB(self.V[b1], 8) # Getting the MSB thanks to our great function above. 8 represents the max length of our binary value.
self.V[b2] = (self.V[b1] << 1) # Multiplying by 2 with binary values is like shifting 1 bit to the left. Same as for the division, we want to know what was the deleted bit
elif(result == 0x9000): # 9xxx
if((opcode & 0x000F) == 0x0000): # 9XY0 : skip next inscrution if V[X] != V[Y]
if(self.V[b2] != self.V[b1]): # It works the same as those "skip instruction if" opcodes we did before
self.PC += 2
elif(result == 0xA000): # ANNN : sets I = NNN
self.I = (b2<<8) + (b1<<4) + b0 # Always remember to shift our different N
elif(result == 0xB000): # BNNN : jumps to location NNN + V[0]
self.PC = (b2<<8) + (b1<<4) + b0 + self.V[0x0] # We set the PC to NNN + V[0]
self.PC -= 2 # Like our other jumps, we need to decrement the PC in order to reach the instruction we want
elif(result == 0xC000): # CXKK : sets V[X] = (random byte) AND KK
self.V[b2] = randint(0, 255) & ((b1 << 4) + b0) # Our random byte is just a value between 0 and 255, and we do a bitwise AND with KK. Beware of the bit shifting
elif(result == 0xD000): # DXYN : displays a sprite of length N at coordinates (V[X], V[Y]) from the memory starting at I. Quite tricky
self.DXYN(b2, b1, b0) # We just give X, Y and N to another function detailed later
elif(result == 0xE000): # EXxx
result = opcode & 0x000F
if(result == 0x000E): # EX9E : skips next instruction if the key of value V[X] is pressed
if self.key[self.V[b2]] == 1: # Same as always, we do the if and then we increase the PC
self.PC += 2
elif(result == 0x0001): # EXA1 : skips next instruction if the key of value V[X] isn't pressed
if self.key[self.V[b2]] == 0: # Same as before
self.PC += 2
elif(result == 0xF000): # FXxx
# Here opcodes aren't in numerical order because I'm a jackass. Sorry
result = opcode & 0x000F
if(result == 0x0003): # FX33 : stores BCD representation of V[X] in memory from the address I
# BCD representation is a way to easily get decimal digits from a binary number.
# It consists of separating each digit of a decimal number and storing each one independently. Useful to store scores for instance.
# Ex.: 231 will get you 2, 3 and 1.
# Here we get those 3 digits (because V[X] is an 8-bit number which goes up to 255) and store them in the memory at address I for the first one, I+1 for the second and I+2 for the last one.
# So : 2 -> memory[I]
# 3 -> memory[I+1]
# 1 -> memory[I+2]
self.memory[self.I] = self.V[b2] // 100 # // is a euclidean (floor) division, without the remainder. So by doing this with 231, we get 2, which is what we want
self.memory[self.I+1] = (self.V[b2] - (self.memory[self.I] * 100)) // 10 # To get the second digit, we subtract from the original number the hundreds we just got (2*100), so we have 231-200=31. We then do 31//10 and we get 3
self.memory[self.I+2] = self.V[b2] - self.memory[self.I]*100 - self.memory[self.I+1]*10 # For the units, we just subtract the hundreds and tens. 231 - (2*100) - (3*10) = 1
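# An equivalent sketch using divmod (same digits, shown for clarity):
#   hundreds, rest = divmod(self.V[b2], 100)   # 231 -> 2, 31
#   tens, units = divmod(rest, 10)             # 31 -> 3, 1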
elif(result == 0x0005): # FXx5
result = opcode & 0x00F0
if(result == 0x0010): # FX15 : sets the delay timer to the value of V[X]
self.delay_timer = self.V[b2]
elif(result == 0x0050): # FX55 : copies the values from V[0] to V[X] (included) into memory, starting from the address I; I must be set to just after the last address modified
for i in range(0,b2+1): # For each value from 0 to X included
self.memory[self.I+i] = self.V[i] # Sets V[i] into memory[I+i]
self.I += b2 + 1 # Sets I to I + X + 1 so we are at the next cell after the last one we modified
elif(result == 0x0060): # FX65 : copies the values starting from the address I into V[0] to V[X], and sets I to just after the latest value retrieved
for i in range(0, b2+1): # The for loop works the same as before, going from 0 to X included
self.V[i] = self.memory[self.I+i] # The assignment is just reversed
self.I += b2 + 1 # Sets I to I + X + 1
elif(result == 0x0007): # FX07 : V[X] = delay timer
self.V[b2] = self.delay_timer
elif(result == 0x0008): # FX18 : sound timer = V[X]
self.sound_timer = self.V[b2]
elif(result == 0x0009): # FX29 : sets I = location of the character sprite corresponding to the value of V[X]
# Because we put our sprites starting from address 0, we just have to do I = V[X]*5.
# If we want the sprite for 0, 0*5 = address 0
# But if we want the sprite for 8, 8*5 = address 40
# If we hadn't put the sprites at 0x00, we would just have added the starting address of the sprites to I. But by putting them at 0x00 we don't even have to do that
self.I = self.V[b2]*5
elif(result == 0x000A): # FX0A : waits for a key press, and stores the pressed key in V[X]. It stops everything, so we will use an infinite loop.
# The side effect is that the whole Python script is locked until we press a key.
# I'm pretty sure we could avoid the infinite loop here and keep the rest of the code running by making the PC go back one step at the end of the instruction if no key is pressed
# This way if no key is pressed, the script still listens to events (to quit, etc.) but the next instruction is still this one
# And if we press a key, we don't decrement the PC and everything goes on
isKeyPressed = False # Boolean to get out of the loop (redundant with the break)
while not isKeyPressed: # While no key is pressed we stay inside the loop
for idx, val in enumerate(self.key): # For each available key
if(val == 1): # If the current one is pressed
isKeyPressed = True # We set the boolean to True
self.V[b2] = idx # Store the value of the key in V[X]
break # Break out of the loop
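# A non-blocking sketch of the alternative described above (assumes self.PC
# is advanced by 2 after every instruction, as done at the end of this method,
# so decrementing it re-executes FX0A until a key is pressed):
#   pressed = [idx for idx, val in enumerate(self.key) if val == 1]
#   if pressed:
#       self.V[b2] = pressed[0]
#   else:
#       self.PC -= 2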
elif(result == 0x000E): # FX1E : sets I = I + V[X]
self.I += self.V[b2]
self.PC += 2 # After we executed our instruction, we go to the next step with the PC, which is 2 memory cells later because we have 16 bits opcodes and a cell is only 8 bits
########################################################################################################
def DXYN(self, b2, b1, b0): # Function called by the instruction DXYN
self.V[0xF] = 0 # V[0xF] stores 0 by default, and 1 if during the instruction we erase a pixel (setting it to black, or 0)
for i in range(b0): # For each line of our sprite
line = self.memory[self.I+i] # We are reading the sprite starting from the I adress and increasing it for each line
currentY = self.V[b1] + i # We set the initial Y axis position to the one given by V[Y] and we add to it the i counter to get the current line position
if currentY < len(self.screen.pixels[0]): # If we are in available height of the screen
for j in range(8): # A sprite is 8 bits wide, so we go through each of the 8 pixels of the line
font(self._viewFontName, fontSize = self._viewFontSize)
if self.noteReversed:
text(chr(int('25B2', 16)), (xN + 8, Ycontrol + txtup))
else:
text(chr(int('25BC', 16)), (xN + 8, Ycontrol + txtup))
else:
fillRGB(colorBKG)
rect(xN, Ycontrol+1, wNote, self._lineSize)
fillRGB(COLOR_GREY_50)
font(self._viewFontName, fontSize = self._viewFontSize)
if self.noteReversed:
text(chr(int('25B2', 16)), (xN + 8, Ycontrol + txtup))
else:
text(chr(int('25BC', 16)), (xN + 8, Ycontrol + txtup))
fillRGB(colorTXT)
rtxt = self.noteTitle
_w, _h = textSize(rtxt)
if _w + 17 > wNote:
font(self._viewFontName, fontSize = self._viewFontSize - 2)
text(rtxt, (xN + 5, Ycontrol + txtsm+ titleY))
else:
font(self._viewFontName, fontSize = self._viewFontSize)
text(rtxt, (xN + 5, Ycontrol + txtup+ titleY))
restore()
idModeSelected = False
idModeShowAll = True
idFilterSide1 = 'side1'
idFilterBoth = 'both'
idFilterSide2 = 'side2'
class TDKernListView(VanillaBaseObject):
nsViewClass = NSView
def __init__ (self, posSize, selectionCallback=None, window=None, commandCallback = None, previewGlyph = False):
xw, yw, tx, ty = posSize
self._window = window
self._linesToDisplay = []
# self._font = None
self.font = None
self.hashKernDic = None
self.kernDB = None
self._viewArray = []
self._selectedLines = []
self._pairsList2idx = {}
self._currentKernListState = {}
self._setToView = []
self._grouppedList = []
self._idxListGroupped = {}
self._ungrouppedList = []
self._idxListUngroupped = {}
self._listKeyGlyphsLeft = {}
self._listKeyGlyphsRight = {}
self._errorpairslist = []
self._lastSelectedIdx = 0
self._positionYselected = 0
self._lineSize = 20 # 1800 # 1800=1000upm; 2400=2000upm
self._previewGlyph = previewGlyph
self._previewWidthHalf = 40
if previewGlyph:
self._lineSize = 45
self._scalefactorUI = 1
self._lineCount = 0
self._sortName = None
self._sortReverse = None
self.groupsSortedTop = False
self._viewingMode = idModeSelected
self._filterMode = idFilterBoth
self.darkmode = KERNTOOL_UI_DARKMODE
self.darkmodeWarm = KERNTOOL_UI_DARKMODE_WARMBACKGROUND
self.maxX = 0
self._selectionCallback = selectionCallback
self._commandCallback = commandCallback
self.showselection = False
self._setupView(self.nsViewClass, (xw, yw, tx, ty)) # (0, 0, -0, 106)
self.macos = MACOS_VERSION
self.canvas = Canvas((0, 0, -0, -0),
delegate = self, # canvasSize = (100, 101),
hasHorizontalScroller = False,
hasVerticalScroller = True,
autohidesScrollers = True,
# backgroundColor = NSColor.whiteColor(),
drawsBackground = False,
# acceptsMouseMoved = True
)
self.canvas.scrollView.getNSScrollView().setBorderType_(NSNoBorder)
def updatePanel(self):
self.canvas.update()
def getCorrectPreviwWidth(self):
# g = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
# list(filter(lambda n: n.name in g, self.font))
s = sorted(self.font, key = lambda w: w.width)
wp = s[-1].width * .030
self._previewWidthHalf = wp + 10
if self._previewWidthHalf > 75:
self._previewWidthHalf = 75
def setFont(self, font):
self.font = font
self.hashKernDic = TDHashKernDic(font)
self.kernDB = TDKernDB(self.font, self.hashKernDic)
# self.kernDB.makeSortedList()
self.getCorrectPreviwWidth()
self.refreshView()
def updateStatusbar(self):
self._window.menuStatusBar.setLabelValue(self._window.labelTotalPairsID, str(len(self.font.kerning)))
self._window.menuStatusBar.setLabelValue(self._window.labelShowedPairsID, str(len(self._viewArray)))
self._window.menuStatusBar.setLabelValue(self._window.labelSelectedPairsID, str(len(self._selectedLines)))
def setPreviewMode(self, previewMode = False):
self._previewGlyph = previewMode
self._lineSize = 20 # 1800 # 1800=1000upm; 2400=2000upm
if self._previewGlyph:
self._lineSize = 45
self.refreshView()
self.scrollToLine(0)
def resetView(self):
self.compileLines(self.kernDB.db, sorting = self._sortName, reverse = self._sortReverse)
self.scrollToLine(0)
def setViewingMode(self, mode = idModeShowAll, sorting = None, reverse = False, filterMode = idFilterBoth):
self._viewingMode = mode
self._filterMode = filterMode
if sorting:
self._sortName = sorting
if reverse:
self._sortReverse = reverse
else:
if reverse != self._sortReverse:
self._sortReverse = reverse
if self._viewingMode == idModeSelected:
self.setGlyphsToView(self.font.selection, filterMode = filterMode)
else:
self.compileLines(self.kernDB.db, sorting = self._sortName, reverse = self._sortReverse)
self.scrollToLine(0)
def refreshView(self, fullrefresh = True):
if self._viewingMode == idModeSelected:
self.setGlyphsToView(self.font.selection, filterMode = self._filterMode)
else:
if fullrefresh:
self.compileLines(self.kernDB.db, sorting = self._sortName, reverse = self._sortReverse)
def scrollToLine (self, linenumber):
if not self._viewArray: return
visibleWidth = self.canvas.scrollView.getNSScrollView().documentVisibleRect().size.width
visibleHeight = self.canvas.scrollView.getNSScrollView().documentVisibleRect().size.height
posXscroller = self.canvas.scrollView.getNSScrollView().documentVisibleRect().origin.x
posYscroller = self.canvas.scrollView.getNSScrollView().documentVisibleRect().origin.y
scale = self._scalefactorUI
xpos = 0
ypos = 0
if linenumber < 0 or linenumber >= len(self._viewArray): return
self._selectedLine = linenumber
self._positionYselected = self._viewArray[linenumber][idY0]
firstItemInLine = linenumber
maxY = self._viewArray[-1][idY0]
y0 = (maxY + (-1 * self._positionYselected)) * scale
y1 = y0 + (self._lineSize * scale)
if y0 < posYscroller:
ypos = y0
elif y1 - posYscroller > visibleHeight:
offset = visibleHeight - self._lineSize * scale
ypos = y0 - offset # + posYscroller
else:
return firstItemInLine
point = NSPoint(xpos, ypos)
self.canvas.scrollView.getNSScrollView().contentView().scrollToPoint_(point)
self.canvas.scrollView.getNSScrollView().reflectScrolledClipView_(
self.canvas.scrollView.getNSScrollView().contentView())
return firstItemInLine
def mouseDown (self, event):
visibleHeight = self.canvas.scrollView.getNSScrollView().documentVisibleRect().size.height
Y_local_pos = self.canvas.scrollView.getNSScrollView().documentVisibleRect().origin.y
X_window_pos = event.locationInWindow().x
Y_window_pos = event.locationInWindow().y
X_local_pos = self.canvas.scrollView.getNSScrollView().documentVisibleRect().origin.x
Y_local_pos = self.canvas.scrollView.getNSScrollView().documentVisibleRect().origin.y
xW, yW, x2W, y2W = self.getPosSize()
x = X_window_pos + X_local_pos # - self._letterStep
y = Y_window_pos + y2W + Y_local_pos
self.showselection = True
maxY = 0
if self._viewArray:
maxY = self._viewArray[-1][idY0]
yoff = ((self._lineCount - 1) * self._lineSize) # + Ygap
if yoff < visibleHeight:
yoff = visibleHeight - self._lineSize * self._lineCount
else:
yoff = 0
for idx, item in enumerate(self._viewArray):
y0 = maxY + (-1 * item[idY0]) + yoff
y1 = y0 + self._lineSize
if (y0 < y and y < y1):
if decodeModifiers(event.modifierFlags()) == 'Cmd':
if idx in self._selectedLines:
self._selectedLines.remove( idx )
else:
self._selectedLines.append( idx )
self._lastSelectedIdx = idx
elif decodeModifiers(event.modifierFlags()) == 'Shift':
if idx > self._lastSelectedIdx:
for i in range(self._lastSelectedIdx, idx+1):
if i not in self._selectedLines:
self._selectedLines.append( i )
self._lastSelectedIdx = i
elif idx < self._lastSelectedIdx:
# print ('revers')
for i in range(idx, self._lastSelectedIdx):
if i not in self._selectedLines:
self._selectedLines.append( i )
self._lastSelectedIdx = i
# print ('shift last', self._lastSelectedIdx)
else:
self._selectedLines = []
self._selectedLines.append( idx )
self._lastSelectedIdx = idx
self.canvas.update()
self.updateStatusbar()
if self._selectionCallback:
self._selectionCallback(self._selectedLines)
break
def keyDown (self, event):
keypress = decodeCanvasKeys(event.keyCode(), event.modifierFlags())
commands = translateKeyCodesToKernToolCommands(keypress)
# if commands['command'] == COMMAND_ENTER:
# self.showselection = True
# self.menuSelectedCallback()
# self.canvas.update()
if commands['command'] == COMMAND_ESCAPE:
self._selectedLines = []
self.canvas.update()
if commands['command'] == COMMAND_SELECT_ALL:
self._selectedLines = []
for i, item in enumerate(self._viewArray):
self._selectedLines.append(i)
self.updateStatusbar()
self.canvas.update()
if commands['command'] == COMMAND_DELETE_PAIR:
# self.showselection = True
if self._commandCallback:
self._commandCallback({'command': COMMAND_DELETE_PAIR})
if commands['command'] == COMMAND_SPACEKEY:
self.prepare4sendSelectedPairsToKernTool()
if commands['command'] == COMMAND_NEXT_LINE_SHORT:
n = self._lastSelectedIdx + 1
if n > len(self._viewArray)-1:
n = 0
self.scrollToLine(n)
item = self._viewArray[n]
# if decodeModifiers(event.modifierFlags()) == 'Shift':
# if item['idx'] in self._selectedLines:
# self._selectedLines.remove(item['idx'])
# else:
# self._selectedLines.append(item['idx'])
# self._lastSelectedIdx = item['idx']
# else:
self._selectedLines = []
self._selectedLines.append(n)
self._lastSelectedIdx = n
self.canvas.update()
if commands['command'] == COMMAND_PREV_LINE_SHORT:
n = self._lastSelectedIdx - 1
if n == -1:
n = len(self._viewArray)-1
self.scrollToLine(n)
item = self._viewArray[n]
# if decodeModifiers(event.modifierFlags()) == 'Shift':
# if item['idx'] in self._selectedLines:
# self._selectedLines.remove(item['idx'])
# else:
# self._selectedLines.append(item['idx'])
# self._lastSelectedIdx = item['idx']
# else:
self._selectedLines = []
self._selectedLines.append(n)
self._lastSelectedIdx = n
self.canvas.update()
self.updateStatusbar()
def refreshKernPair(self, pair):
self.kernDB.updateKernPair(pair)
# save selected pairs
pairsselected = []
for i in self._selectedLines:
item = self._viewArray[i]
pairsselected.append((item[idNameL], item[idNameR]))
self._selectedLines = []
self.refreshView()
# restore selected pairs, except deleted
for pair in pairsselected:
if pair in self._pairsList2idx:
self._selectedLines.append(self._pairsList2idx[pair])
self.canvas.update()
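# e.g. if ('A','V') and ('T','o') were selected and ('T','o') disappeared
# from the list during the update, only ('A','V') is re-selected afterwards.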
def setGlyphsToView(self, glyphlist, filterMode = idFilterBoth):
self._currentKernListState = {}
listL = []
listR = []
if not glyphlist: return
self._setToView = []
# self._setToView = list(glyphlist)
for glyphname in glyphlist:
if filterMode == idFilterSide1 or filterMode == idFilterBoth:
self._setToView.append(glyphname)
self._setToView.append(self.hashKernDic.getGroupNameByGlyph(glyphname, side = 'L'))
if filterMode == idFilterSide2 or filterMode == idFilterBoth:
self._setToView.append(glyphname)
self._setToView.append(self.hashKernDic.getGroupNameByGlyph(glyphname, side = 'R'))
for pair, item in self.kernDB.db.items():
(l,r) = pair
# if l in self._setToView or r in self._setToView:
if l in self._setToView and (filterMode == idFilterSide1 or filterMode == idFilterBoth):
self._currentKernListState[(l, r)] = item
if r in self._setToView and (filterMode == idFilterSide2 or filterMode == idFilterBoth):
self._currentKernListState[(l, r)] = item
if self._currentKernListState:
self.compileLines(self._currentKernListState, sorting = self._sortName, reverse = self._sortReverse)
else:
self._viewArray = []
# self.resetView()
self.canvas.update()
self.updateStatusbar()
def getPairByIndex(self, idx):
try:
item = self._viewArray[idx]
except:
print ('wrong index of pair',idx)
return None
l, r, n, kl, kr = item[idNameL], item[idNameR], item[idNote], self.hashKernDic.getKeyGlyphByGroupname(item[idNameL]), self.hashKernDic.getKeyGlyphByGroupname(item[idNameR])
if n == PAIR_INFO_EMPTY:
return None
if (l,r) in self.font.kerning:
return (l,r, kl,kr)
else:
print('pair not found', l, r)
def getListofSelectedPairs_KeyGlyphs(self):
pairs = []
leftlist = []
rightlist = []
pairsbyglyphkey = []
if self._selectedLines:
for idx in sorted(self._selectedLines):
p = self.getPairByIndex(idx)
if p:
pairs.append(self.getPairByIndex(idx))
# print(self.getPairByIndex(idx))
for pair in pairs:
l,r, kl, kr = pair
if l.startswith(ID_KERNING_GROUP):
if l in self.font.groups and len(self.font.groups[l])>0:
l = self.font.groups[l][0]
# else:
# leftlist.append(l)
if r.startswith(ID_KERNING_GROUP):
if r in self.font.groups and len(self.font.groups[r])>0:
r = self.font.groups[r][0]
# else:
# rightlist.append(r)
if l and r:
pairsbyglyphkey.append((l,r))
return pairsbyglyphkey
def getListOfSelectedPairs(self):
pairs = []
if self._selectedLines:
for idx in sorted(self._selectedLines):
p = self.getPairByIndex(idx)
if p:
l, r, kl, kr = self.getPairByIndex(idx)
pairs.append((l,r))
return pairs
def prepare4sendSelectedPairsToKernTool(self):
if self._commandCallback:
pairs = self.getListofSelectedPairs_KeyGlyphs()
# print (pairs)
self._commandCallback({'command':COMMAND_SPACEKEY, 'pairs': pairs})
def compileLines(self, listkern = None, sorting = 'left', reverse = False):
lineStep = self._lineSize
# if not listkern: return
self._viewArray = []
self._pairsList2idx = {}
self._sortName = sorting
self._sortReverse = reverse
Ypos = 0
idx = 0
self._currentKernListState = listkern
self.kernDB.makeSortedList(self._currentKernListState, order = sorting, reverse = reverse)
# print(self.kernDB.sortedList)
for item in self.kernDB.sortedList:
l , r = item[0]
sl, sr, gl, gr, v, n, kgl, kgr = item[1]
self._viewArray.append([
Ypos, # idY0
sl, #getDisplayNameGroup(l), # idDispL
sr, #getDisplayNameGroup(r), # idDispR
v, # idKern
n, # idNote
l, # idNameL
r, # idNameR
kgl, #self.hashKernDic.getKeyGlyphByGroupname(l), # idGlyphL
kgr, #self.hashKernDic.getKeyGlyphByGroupname(r), # idGlyphR
gl, # idGroupL
gr # idGroupR
])
self._pairsList2idx[(l, r)] = idx
Ypos += lineStep
idx += 1
self._lineCount = idx
self.recalculateFrame()
self.canvas.update()
self.updateStatusbar()
def recalculateFrame (self, canvaswidth=None):
# scalefactor = 1#self._scalefactorUI
if canvaswidth:
visibleWidth = canvaswidth
else:
visibleWidth = self.canvas.scrollView.getNSScrollView().documentVisibleRect().size.width
self.visibleWidth = visibleWidth
visibleHeight = self.canvas.scrollView.getNSScrollView().documentVisibleRect().size.height
Y_local_pos = self.canvas.scrollView.getNSScrollView().documentVisibleRect().origin.y
X_local_pos = self.canvas.scrollView.getNSScrollView().documentVisibleRect().origin.x
yoff = self._lineCount * self._lineSize #* scalefactor # + self.Ygap*2
Y_min_window = Y_local_pos
Y_max_window = Y_local_pos + visibleHeight
if yoff < visibleHeight:
yoff = visibleHeight
self.canvas._view.setFrame_(NSMakeRect(0, 0, visibleWidth, yoff))
self.maxX = visibleWidth + 60
def scrollwheel (self, event):
# print (event)
#
scaleUI = self._scalefactorUI
# deltaX = event.deltaX()
deltaY = event.deltaY()
if deltaY == 0 : return
scaleScroll = 5  # abs(deltaY)/10
# if abs(deltaY) < 3:
# scaleScroll = .2
# if abs(deltaY) > 3 and abs(deltaY) < 8:
# scaleScroll = .6
# if abs(deltaY) > 8 and abs(deltaY) < 15:
# scaleScroll = 1.1
# if abs(deltaY) > 30:
# scaleScroll = 10
visibleWidth = self.canvas.scrollView.getNSScrollView().documentVisibleRect().size.width
visibleHeight = self.canvas.scrollView.getNSScrollView().documentVisibleRect().size.height
posXscroller = self.canvas.scrollView.getNSScrollView().documentVisibleRect().origin.x
posYscroller = self.canvas.scrollView.getNSScrollView().documentVisibleRect().origin.y
xW, yW, wW, hW = self.getPosSize()
# xpoint = posXscroller - (deltaX * scaleScroll)
ypoint = posYscroller + (deltaY * scaleScroll)
# if xpoint > self.maxXX - visibleWidth: # - visibleWidth:
# xpoint = self.maxXX - visibleWidth # - self.visibleWidth #- visibleWidth
# if xpoint < xW:
# xpoint = 0
if ypoint < 0:
ypoint = 0
# return
maxY = 0
if self._viewArray:
maxY = (self._lineCount -1) * self._lineSize # self._viewArray[-1]['y0']
if posYscroller + visibleHeight - self._lineSize * scaleUI > maxY * scaleUI:
ypoint = maxY * scaleUI - visibleHeight + self._lineSize * scaleUI
elif posYscroller + visibleHeight - self._lineSize * scaleUI == maxY * scaleUI and deltaY > 0:
ypoint = maxY * scaleUI - visibleHeight + self._lineSize * scaleUI
point = NSPoint(0, ypoint)
self.canvas.scrollView.getNSScrollView().contentView().scrollToPoint_(point)
self.canvas.scrollView.getNSScrollView().reflectScrolledClipView_(
self.canvas.scrollView.getNSScrollView().contentView())
# time.sleep(0.09)
if self.macos in ('15', '16'):
self.canvas.update()
# self.canvas.update()
def draw (self):
def drawException (x, y):
s = 1.6
newPath()
moveTo((x + s * 4, y + s * 8))
lineTo((x + s * 1, y + s * 3))
lineTo((x + s * 4, y + s * 3))
lineTo((x + s * 4, y + s * 0))
lineTo((x + s * 7, y + s * 5))
lineTo((x + s * 4, y + s * 5))
closePath()
drawPath()
self.recalculateFrame()
self._viewFontName = 'Menlo'
self._viewFontSize = 12
font(self._viewFontName, fontSize = self._viewFontSize)
encapsulated" if I1iII11ii1 [ 2 ] == None else "" , IiiIIi1 , lisp_hex_string ( oOoO0O00o ) . zfill ( 4 ) ,
# iIii1I11I1II1 + i11iIiiIii / OoOoOO00
# I1ii11iIi11i % OoOoOO00 * OoOoOO00 % o0oOOo0O0Ooo * II111iiii / OoOoOO00
lisp_hex_string ( O00000OO00OO ) . zfill ( 4 ) ) )
if 73 - 73: OoOoOO00 + OOooOOo * II111iiii . OOooOOo % I1Ii111 % oO0o
if 79 - 79: I1ii11iIi11i % I11i
if 78 - 78: i11iIiiIii % I1Ii111 + iIii1I11I1II1 + iII111i
if 66 - 66: I1IiiI - o0oOOo0O0Ooo
if 67 - 67: oO0o . iII111i * Ii1I - OOooOOo / oO0o
if (iIOOo00ooO[0][0] != 0 or iIOOo00ooO[-1][3] == False): return (None)
oOoOOoO00Oo0 = iIOOo00ooO[0]
for oO in iIOOo00ooO[1::]:
O00000OO00OO = oO[0]
IiiO00o0OoO00ooo, oOooii111 = oOoOOoO00Oo0[0], oOoOOoO00Oo0[1]
if (IiiO00o0OoO00ooo + oOooii111 != O00000OO00OO): return (None)
oOoOOoO00Oo0 = oO
lisp_reassembly_queue.pop(oOoO0O00o)
packet = iIOOo00ooO[0][2]
for oO in iIOOo00ooO[1::]: packet += oO[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format(bold("All", False), lisp_hex_string(oOoO0O00o).zfill(4), len(packet)))
iiiIIiiIi = socket.htons(len(packet))
Ii1I1i1IiiI = packet[0:2] + struct.pack("H", iiiIIiiIi) + packet[4:6] + struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + packet[12:20]
Ii1I1i1IiiI = lisp_ip_checksum(Ii1I1i1IiiI)
return (Ii1I1i1IiiI + packet[20::])
def lisp_get_crypto_decap_lookup_key(addr, port):
oo0o00OO = addr.print_address_no_iid() + ":" + str(port)
if (oo0o00OO in lisp_crypto_keys_by_rloc_decap): return (oo0o00OO)
oo0o00OO = addr.print_address_no_iid()
if (oo0o00OO in lisp_crypto_keys_by_rloc_decap): return (oo0o00OO)
for oOO0O in lisp_crypto_keys_by_rloc_decap:
OO0o = oOO0O.split(":")
if (len(OO0o) == 1): continue
OO0o = OO0o[0] if len(OO0o) == 2 else ":".join(OO0o[0:-1])
if (OO0o == oo0o00OO):
oOoo0oO = lisp_crypto_keys_by_rloc_decap[oOO0O]
lisp_crypto_keys_by_rloc_decap[oo0o00OO] = oOoo0oO
return (oo0o00OO)
return (None)
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
IIIIII = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (addr in lisp_rloc_probe_list): return (addr)
for O00OOoOOO0O0O in lisp_nat_state_info.values():
for o0i1i in O00OOoOOO0O0O:
if (addr == o0i1i.address): return (IIIIII)
return (addr)
return (IIIIII)
def lisp_set_ttl(lisp_socket, ttl):
try:
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
except:
lprint("socket.setsockopt(IP_TTL) not supported")
pass
return
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return (lisp_type == 0x12)
# Calculate start and stop rows/columns for new rows/columns to add
if num_missing_rows > 0 or num_missing_cols > 0:
# Row and column offsets which indicate the difference in position of the row or column being
# added and the position of the value in the data. I.e., a row being added would have the value:
# value = get_data_point(draw_row['start'] + data_row_offset)
data_row_offset = self._settings['display_start_row']
data_column_offset = self._settings['display_start_col']
# If window has been vertically enlarged, add new rows to fit
if num_missing_rows > 0:
draw_row = {'start': self._settings['num_display_rows'],
'stop': self._settings['num_display_rows'] + num_missing_rows}
draw_col = {'start': 0, 'stop': self._settings['num_display_cols']}
# Special case where we need to add rows at the beginning because user has tried to expand
# screen after scrolling all the way down
if data_row_offset + draw_row['stop'] > self._settings['num_rows']:
# Erases all rows, except header row, from the screen
erase_row = {'start': 0,
'stop': self._settings['num_display_rows']}
self._erase_data(erase_row, draw_col)
# We start from beginning and add all requested rows
draw_row = {'start': 0,
'stop': num_display_rows}
data_row_offset = self._settings['num_rows'] - num_display_rows
# Adjust the display start row to account for the new starting value
self._settings['display_start_row'] = data_row_offset
# Draw additional data rows
self._settings['num_display_rows'] = num_display_rows
self._draw_data(draw_row, draw_col, data_row_offset, data_column_offset)
# Draw additional 'row count' rows if necessary
row_count_drawn = next((True for data_box in self._data_boxes
if data_box['col'] == -1
if data_box['row'] >= draw_row['stop'] - 1
), False)
if not row_count_drawn:
self._draw_data(draw_row, {'start': -1, 'stop': -1}, data_row_offset, 0)
# If window has been horizontally enlarged, add new columns to fit
if num_missing_cols > 0:
draw_col = {'start': self._settings['num_display_cols'],
'stop': self._settings['num_display_cols'] + num_missing_cols}
draw_row = {'start': 0, 'stop': self._settings['num_display_rows']}
# Special case where we need to add columns at the beginning because user has tried to
# expand screen after scrolling all the way to the right
if data_column_offset + draw_col['stop'] > self._settings['num_columns']:
# Erases all columns, except row count column, from the screen
erase_col = {'start': 0,
'stop': self._settings['num_display_cols']}
erase_row = {'start': -1,
'stop': self._settings['num_display_rows']}
self._erase_data(erase_row, erase_col)
# We start from beginning and add all requested columns
draw_col = {'start': 0,
'stop': num_display_cols}
data_column_offset = self._settings['num_columns'] - num_display_cols
# Adjust the display start column to account for the new starting value
self._settings['display_start_col'] = data_column_offset
# Draw additional data columns
self._settings['num_display_cols'] = num_display_cols
self._draw_data(draw_row, draw_col, data_row_offset, data_column_offset)
# Draw additional 'column names' row if necessary
column_names_drawn = next((True for data_box in self._data_boxes
if data_box['row'] == -1
if data_box['col'] >= draw_col['stop'] - 1
), False)
if not column_names_drawn:
self._draw_data({'start': -1, 'stop': -1}, draw_col, 0, data_column_offset)
# Calculate start and stop rows/columns for new rows/columns to remove (note that this should
# not be an elif with adding columns because in rare cases it is possible for the window to
# grow in one dimension and shrink in another, even though this method expects to be called
# each time the window size is modified; this can happen when the window is resized many
# times very quickly)
if num_missing_rows < 0 or num_missing_cols < 0:
# Window has been vertically shrunk, erase extra rows that are outside the screen
if num_missing_rows < 0:
draw_row = {'start': self._settings['num_display_rows'] + num_missing_rows,
'stop': self._settings['num_display_rows']}
draw_col = {'start': -1, 'stop': self._settings['num_display_cols']}
# Remove extra rows
self._settings['num_display_rows'] = num_display_rows
self._erase_data(draw_row, draw_col)
# Window has been horizontally shrunk, erase extra columns that are outside the screen
if num_missing_cols < 0:
draw_col = {'start': self._settings['num_display_cols'] + num_missing_cols,
'stop': self._settings['num_display_cols']}
draw_row = {'start': -1, 'stop': self._settings['num_display_rows']}
# Remove extra columns
self._settings['num_display_cols'] = num_display_cols
self._erase_data(draw_row, draw_col)
# Adjusts scrollbar length based on new number of rows and columns being displayed
self._set_scrollbar_length(num_display_rows, num_display_cols)
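# Worked example of the bottom-edge special case above (numbers made up):
# with 100 data rows, 20 displayed and display_start_row = 80 (scrolled to
# the bottom), growing the window by 5 rows would overflow the data
# (80 + 25 > 100), so the rows are erased and redrawn starting from
# display_start_row = 100 - 25 = 75 instead.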
def close(self):
self.data = None
super(TabularViewWindow, self).close()
class FieldDefinitionToolTip(ToolTip):
""" A tooltip that pops up on mouse-over of Field name and shows its meta data. """
def __init__(self, table_structure_window, field_entry, field_physical_idx):
super(FieldDefinitionToolTip, self).__init__(field_entry, '', delay=300)
# Set instance variables
self._structure_window = table_structure_window
self._field_short_idx = field_physical_idx
def show_contents(self):
# Determine the real index of the field this tooltip should be for
settings = self._structure_window.settings
column_idx = self._field_short_idx + settings['display_start_col']
meta_data = self._structure_window.data[column_idx].meta_data
frame = Frame(self._tip_window, relief='solid', bd=1, bg='#ffffe0')
frame.pack()
# Adds `count` method to Text widget, which is broken in Python 2
def count_bugfix(self, index1, index2, *args):
args = [self._w, "count"] + ["-" + arg for arg in args] + [index1, index2]
return self.tk.call(*args)
Text.count = count_bugfix
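# Usage sketch of the patched method (each flag becomes a Tk switch, e.g.
# 'displaylines' is passed to Tk as "-displaylines"):
#   n_display_lines = text_pad.count('1.0', 'end', 'displaylines')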
max_line_width = 80
text_pad = Text(frame, wrap='word', width=max_line_width, relief='flat',
highlightthickness=0, bd=0, bg='#ffffe0')
# Create tree view for meta data inside text_pad
TreeView(text_pad, meta_data,
header_font=Window.get_font(weight='bold'),
key_font=Window.get_font(weight='underline'),
value_font=Window.get_font(name='monospace'),
spacing_font=Window.get_font())
text_pad.config(state='disabled')
text_pad.pack(padx=5, pady=(2, 0))
# Adjust text pad size to fit its content
text_lines = text_pad.get('1.0', 'end').strip().splitlines()
width = 0
height = len(text_lines)
for i, line in enumerate(text_lines):
line_width = len(line)
if line_width > max_line_width:
width = max_line_width
height += ceil(line_width / max_line_width) - 1
elif line_width > width:
width = line_width + 1
text_pad.config(width=width, height=height)
# To set correct tooltip height (one which properly accounts for extra lines due to wordwrap),
# we must use displaylines. However displaylines is accurate only once width is correctly set
# and tooltip is drawn, and sometimes not even then.
try:
text_pad.update()
display_height = text_pad.count('1.0', 'end', 'displaylines')
if height > display_height:
text_pad.config(height=display_height)
# Likely due to the count bugfix above, sometimes text_pad is destroyed by the time
# `count` runs, and this raises an exception.
except TclError:
pass
# Transforms array-like data into a TableStructure
def array_like_to_table(array_like, name, data_type, format=None, full_label=None, array_label=None,
_copy_data=True):
# Create a dictionary for a fake Meta_Field, containing only name, data_type (the two things needed to
# display a field) and format if it exists
meta_dict = {'name': name,
'data_type': data_type}
if format is not None:
meta_dict['format'] = format
array_like = np.asanyarray(array_like)
array_depth = array_like.ndim
# Store the new table fields
fields = []
# For 1D arrays, we add them as a single field. For 3D arrays, it is likely the array has dimensions
# something like [time, x, y]. Therefore, instead of displaying [time, x] and opening a table of y for
# each [time, x], we instead display only a table for [time], and store [x,y] as a sub-table for each
# [time].
if (array_depth == 1) or (array_depth == 3):
meta_field = Meta_Field(meta_dict)
meta_field.shape = array_like.shape
fields.append(PDS_array(array_like, meta_field))
# For arrays other than 1D and 3D
else:
# Rotate the array such that the first 2 axes are in column-major order (it is assumed the array is
# coming in as row-major, since PDS4 requires that storage order)
array_like = array_like.swapaxes(0, 1)
# Add each column (potentially an array instead of single valued) to the table
for field_num, field_data in enumerate(array_like):
meta_field = Meta_Field(meta_dict)
meta_field['name'] = '{0} : {1}'.format(field_num, meta_field['name'])
meta_field.shape = field_data.shape
fields.append(PDS_array(field_data, meta_field))
# Initialize the TableStructure
kwargs = {'structure_id': name, 'structure_label': array_label, 'full_label': full_label}
if _copy_data:
table_structure = TableStructure.from_fields(fields, no_scale=True, decode_strings=False, **kwargs)
else:
# We override the TableStructure class so that some of its typical functionality is retained
# if we simply assign fields without converting them into a structured ``ndarray``. However this
# breaks a lot of other typical functionality, such as record access. This is needed to show arrays
# as tables without copying the data.
class _TableStructure(TableStructure):
@property
def fields(self):
return self.data
table_structure = _TableStructure(structure_data=fields, **kwargs)
return table_structure
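# Hypothetical usage sketch (the data_type strings are assumed PDS4 type
# names, shown for illustration only):
#   t1 = array_like_to_table(np.arange(10), 'counts', 'SignedMSB4')
#   t2 = array_like_to_table(np.ones((4, 3)), 'grid', 'IEEE754MSBDouble')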
def array_structure_to_table(array_structure, _copy_data=True):
""" Transform an Array Structure to a Table Structure.
Parameters
----------
array_structure : ArrayStructure
A PDS4 array structure.
_copy_data : bool, optional
If True, data will be input into an structured NumPy array, thus requiring a copy. If False, data
will be assigned as a list of existing data. However this results in a non-standard TableStructure
that does not have typical functionality.
Returns
-------
TableStructure
"""
table_structure = array_like_to_table(array_structure.data, array_structure.id,
array_structure.meta_data['Element_Array']['data_type'],
full_label=array_structure.full_label,
array_label=array_structure.label,
_copy_data=_copy_data)
return table_structure
def open_table(viewer, table_structure):
""" Open a table view for a TableStructure.
Parameters
----------
viewer : PDS4Viewer
An instance of PDS4Viewer.
table_structure : TableStructure
Table structure from which to display.
Returns
-------
'priority': '8',
'range': '0 to 255',
'resolution': 'binary',
'type': 'binary bit-mapped',
},
343: {
'length': '1 character',
'name': 'coolant pump differential pressure',
'period': '1.0 s',
'pid': 343,
'priority': '6',
'range': '-416 kpa to 412.75 kpa',
'resolution': Fraction(13, 4),
'type': 'signed short integer',
'units': 'kpa',
},
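# Worked example of how the range follows from the resolution for PID 343
# above: a signed byte spans -128..127, and 13/4 kpa per bit gives
# -128 * 3.25 = -416 kpa and 127 * 3.25 = 412.75 kpa.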
344: {
'length': '1 character',
'name': 'driver logon status',
'period': 'on change',
'pid': 344,
'priority': '8',
'range': '0 to 255',
'resolution': 'binary',
'type': 'binary bit-mapped',
},
345: {
'length': '1 character',
'name': 'suspension control status #1',
'period': '1.0 s',
'pid': 345,
'priority': '5',
'range': '0 to 255',
'resolution': 'binary',
'type': 'binary bit-mapped',
},
346: {
'length': '1 character',
'name': 'suspension control status #2',
'period': '1.0 s',
'pid': 346,
'priority': '5',
'range': '0 to 255',
'resolution': 'binary',
'type': 'binary bit-mapped',
},
347: {
'length': '1 character',
'name': 'farebox probe type',
'period': 'on request',
'pid': 347,
'priority': '8',
'range': '0 to 255',
'resolution': 'binary',
'type': 'binary bit-mapped',
},
348: {
'length': '1 character',
'name': 'cargo refrigeration system operating mode, zone #1',
'period': '10.0 s',
'pid': 348,
'priority': '8',
'range': '0 to 255',
'resolution': 'binary',
'type': 'unsigned short integer',
},
349: {
'length': '1 character',
'name': 'cargo refrigeration system operating mode, zone #2',
'period': '10.0 s',
'pid': 349,
'priority': '8',
'range': '0 to 255',
'resolution': 'binary',
'type': 'unsigned short integer',
},
350: {
'length': '1 character',
'name': 'cargo refrigeration system operating mode, zone #3',
'period': '10.0 s',
'pid': 350,
'priority': '8',
'range': '0 to 255',
'resolution': 'binary',
'type': 'unsigned short integer',
},
351: {
'length': '1 character',
'name': 'turbocharger compressor inlet temperature',
'period': '1.0 s',
'pid': 351,
'priority': '5',
'range': '0 to 255 °f',
'resolution': Fraction(1, 1),
'type': 'unsigned short integer',
'units': '°f',
},
352: {
'length': '1 character',
'name': 'turbocharger #2 speed',
'period': '1.0 s',
'pid': 352,
'priority': '4',
'range': '0 to 127500 rpm',
'resolution': Fraction(500, 1),
'type': 'unsigned short integer',
'units': 'rpm',
},
353: {
'length': '1 character',
'name': 'fuel leakage status',
'period': '1.0 s',
'pid': 353,
'priority': '5',
'range': '0 to 255',
'resolution': 'binary',
'type': 'binary bit-mapped',
},
354: {
'length': '1 character',
'name': 'relative humidity',
'period': '10.0 s',
'pid': 354,
'priority': '7',
'range': '0.0 to 102.0%',
'resolution': Fraction(2, 5),
'type': 'unsigned short integer',
'units': '%',
},
355: {
'length': '1 character',
'name': 'engine oil life',
'period': 'on request',
'pid': 355,
'priority': '7',
'range': '0.0 to 102.0%',
'resolution': Fraction(2, 5),
'type': 'unsigned short integer',
'units': '%',
},
356: {
'length': '1 character',
'name': 'fifth wheel coupling status',
'period': 'on change',
'pid': 356,
'priority': '4',
'range': '0 to 255',
'resolution': 'binary',
'type': 'binary bit-mapped',
},
357: {
'length': '1 character',
'name': 'ride adjustment pressure',
'period': '1.0 s',
'pid': 357,
'priority': '5',
'range': '0.0 to 1055 kpa (0.0 to 153.0 lbf/in2)',
'resolution': Fraction(207, 50),
'type': 'unsigned short integer',
'units': 'kpa',
},
358: {
'length': '1 character',
'name': 'air suspension #2 pressure',
'period': '1.0 s',
'pid': 358,
'priority': '5',
'range': '0.0 to 1055 kpa (0.0 to 153.0 lbf/in2)',
'resolution': Fraction(207, 50),
'type': 'unsigned short integer',
'units': 'kpa',
},
359: {
'length': '1 character',
'name': 'air suspension #1 pressure',
'period': '1.0 s',
'pid': 359,
'priority': '5',
'range': '0.0 to 1055 kpa (0.0 to 153.0 lbf/in2)',
'resolution': Fraction(207, 50),
'type': 'unsigned short integer',
'units': 'kpa',
},
360: {
'length': '1 character',
'name': 'axle #4 lift air pressure',
'period': '1.0 s',
'pid': 360,
'priority': '5',
'range': '0.0 to 1055 kpa (0.0 to 153.0 lbf/in2)',
'resolution': Fraction(207, 50),
'type': 'unsigned short integer',
'units': 'kpa',
},
361: {
'length': '1 character',
'name': 'axle #3 lift air pressure',
'period': '1.0 s',
'pid': 361,
'priority': '5',
'range': '0.0 to 1055 kpa (0.0 to 153.0 lbf/in2)',
'resolution': Fraction(207, 50),
'type': 'unsigned short integer',
'units': 'kpa',
},
362: {
'length': '1 character',
'name': 'percent exhaust gas recirculation valve #2 position',
'period': '1.0 s',
'pid': 362,
'priority': '4',
'range': '0.0 to 127.5%',
'resolution': Fraction(1, 2),
'type': 'unsigned short integer',
'units': '%',
},
363: {
'length': '1 character',
'name': 'hydraulic retarder control air pressure',
'period': '1.0 s',
'pid': 363,
'priority': '5',
'range': '0.0 to 1055 kpa (0.0 to 153.0 lbf/in2)',
'resolution': Fraction(207, 50),
'type': 'unsigned short integer',
'units': 'kpa',
},
364: {
'length': '1 character',
'name': 'hvac unit discharge temperature',
'period': 'on request',
'pid': 364,
'priority': '7',
'range': '–320.0 to +317.5 °f',
'resolution': Fraction(5, 2),
'type': 'signed short integer',
'units': '°f',
},
365: {
'length': '1 character',
'name': 'weighing system status command',
'period': 'on request',
'pid': 365,
'priority': '8',
'range': '0 to 255',
'resolution': 'binary',
'type': 'binary bit-mapped',
},
366: {
'length': '1 character',
'name': 'engine oil level high/low',
'period': '10.0 s',
'pid': 366,
'priority': '6',
'range': '–60.6 to 60.1 l (–128 to +127 pt)',
'resolution': Fraction(473, 1000),
'type': 'signed short integer',
'units': 'l',
},
367: {
'length': '1 character',
'name': 'lane tracking system status',
'period': '0.5 s',
'pid': 367,
'priority': '6',
'range': '0 to 255',
'resolution': 'binary',
'type': 'binary bit-mapped',
},
368: {
'length': '1 character',
'name': 'lane departure indication',
'period': '0.1 s when active, or on change of state',
'pid': 368,
'priority': '2',
'range': '0 to 255',
'resolution': 'binary',
'type': 'binary bit-mapped',
},
369: {
'length': '1 character',
'name': 'distance to rear object (reverse)',
'pid': 369,
'priority': '2',
'range': '0.0 to 25.0 m (0.0 to 82.0 ft)',
'resolution': Fraction(1, 10),
'type': 'unsigned integer',
'units': 'm',
},
370: {
'length': '1 character',
'name': 'trailer pneumatic brake control line pressure',
'period': '1.0 s',
'pid': 370,
'priority': '5',
'range': '0.0 to 1055 kpa (0.0 to 153.0 lbf/in2)',
'resolution': Fraction(207, 50),
'type': 'unsigned short integer',
'units': 'kpa',
},
371: {
'length': '1 character',
'name': 'trailer pneumatic supply line pressure',
'period': '1.0 s',
'pid': 371,
'priority': '5',
'range': '0.0 to 1055 kpa (0.0 to 153.0 lbf/in2)',
'resolution': Fraction(207, 50),
'type': 'unsigned short integer',
'units': 'kpa',
},
372: {
'length': '1 character',
'name': 'remote accelerator',
'period': '0.1 s',
'pid': 372,
'priority': '4',
'range': '0.0 to 102.0%',
'resolution': Fraction(2, 5),
'type': 'unsigned short integer',
'units': '%',
},
373: {
'length': '1 character',
'name': 'center rear drive axle temperature',
'period': '1.0 s',
'pid': 373,
'priority': '5',
'range': '0.0 to 306.0 °f',
'resolution': Fraction(6, 5),
'type': 'unsigned short integer',
'units': '°f',
},
374: {
'length': '1 character',
'name': 'alternator ac voltage',
'period': 'on request',
'pid': 374,
'priority': '7',
'range': '0 to 31.875 v',
'resolution': Fraction(1, 8),
'type': 'unsigned short integer',
'units': 'v',
},
375: {
'length': '1 character',
'name': 'fuel return pressure',
'period': 'on request',
'pid': 375,
'priority': '7',
'range': '0 to 127.5 psi',
'resolution': Fraction(1, 2),
'type': 'unsigned short integer',
'units': 'psi',
},
376: {
'length': '1 character',
'name': 'fuel pump inlet vacuum',
'period': 'on request',
'pid': 376,
'priority': '7',
'range': '0 to 51.0 in hg',
'resolution': Fraction(1, 5),
'type': 'unsigned short integer',
'units': 'in hg',
},
377: {
'length': '1 character',
'name': 'compression unbalance',
'period': 'on request',
'pid': 377,
'priority': '7',
'range': '0.0 to 127.5%',
'resolution': Fraction(1, 2),
'type': 'unsigned short integer',
'units': '%',
},
378: {
'length': '1 character',
'name': 'fare collection unit status',
'period': 'on change',
'pid': 378,
'priority': '6',
'range': '0 to 255',
'resolution': 'binary',
'type': 'binary bit-mapped',
},
379: {
'length': '1 character',
'name': 'door status',
'period': 'on change or on request',
'pid': 379,
'priority': '7',
'range': '0 to 255',
'resolution': 'binary',
'type': 'binary bit-mapped',
},
380: {
'length': '1 character',
'name': 'articulation angle',
'period': '1.0 s',
'pid': 380,
'priority': '8',
'range': '–128 to +127 degree',
'resolution': Fraction(1, 1),
'type': 'signed short integer',
'units': 'degree',
},
381: {
'length': '1 character',
'name': 'vehicle use status',
'period': 'as needed (10 s updates while an unauthorized condition exists)',
'pid': 381,
'priority': '7',
'range': '0 to 255',
'resolution': 'binary bit-mapped',
'type': 'unsigned short integer',
},
382: {
'length': '1 character',
'name': 'transit silent alarm status',
'period': 'as needed',
'pid': 382,
'priority': '7',
'range': '0 to 255',
'resolution': 'binary bit-mapped',
'type': 'unsigned short integer',
},
383: {
'length': '1 character',
'name': 'vehicle acceleration',
'period': 'as requested',
| |
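# Editor's sketch (illustrative only, not part of the original table): each
# entry above pairs a raw parameter byte with a Fraction 'resolution' giving
# engineering units per count. The table name and the helper below are
# assumptions made purely for illustration.
from fractions import Fraction

def scale_pid_value(entry, raw_count):
    """Convert a raw counter value to engineering units using the entry's resolution."""
    resolution = entry['resolution']
    if isinstance(resolution, Fraction):
        return float(raw_count * resolution)
    return raw_count  # binary / bit-mapped parameters are returned unchanged

# Example: PID 374 (alternator ac voltage) has resolution Fraction(1, 8),
# so a raw count of 212 corresponds to 212 * 1/8 = 26.5 V.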
name, returntype, args, varargs = self.parse_function_header()
return NativeFunction(name, returntype, args, varargs)
elif self.tokens[self.i] == VAR:
self.i += 1
name = self.identifier()
self.expect(COLON)
type = self.parse_type()
return NativeVariable(name, type)
else:
assert False
def parse_exception(self):
self.expect(EXCEPTION)
name = []
while True:
name.append(self.identifier())
if self.tokens[self.i] is not DOT:
break
self.i += 1
return ExceptionDeclaration(name)
def parse_interface(self):
self.expect(INTERFACE)
name = self.identifier()
while self.tokens[self.i] is FUNCTION:
self.i += 1
method = self.identifier()
args = self.parse_function_parameters()
self.expect(END)
self.expect(INTERFACE)
def parse_exit_statement(self):
self.expect(EXIT)
label = self.tokens[self.i].name
self.i += 1
arg = None
if label == "PROCESS":
arg = self.tokens[self.i].name
self.i += 1
return ExitStatement(label, arg)
def parse_export(self):
self.expect(EXPORT)
if isinstance(self.tokens[self.i], Identifier):
name = self.identifier()
else:
return self.parse_statement()
def parse_if_statement(self):
self.expect(IF)
condition_statements = []
else_statements = []
while True:
if self.tokens[self.i] is VALID:
tests = []
while True:
self.i += 1
ptr = self.parse_expression()
if self.tokens[self.i] is AS:
self.i += 1
name = self.identifier()
else:
assert isinstance(ptr, IdentifierExpression)
name = ptr.name
tests.append((ptr, name))
if self.tokens[self.i] is not COMMA:
break
cond = ValidPointerExpression(tests)
elif self.tokens[self.i] is IMPORTED:
self.i += 1
name = self.identifier()
cond = FunctionCallExpression(StringLiteralExpression(neon_runtime_isModuleImported), [("name", StringLiteralExpression(name), False)])
else:
cond = self.parse_expression()
self.expect(THEN)
statements = []
while (self.tokens[self.i] is not ELSIF
and self.tokens[self.i] is not ELSE
and self.tokens[self.i] is not END
and self.tokens[self.i] is not END_OF_FILE):
s = self.parse_statement()
if s is not None:
statements.append(s)
condition_statements.append((cond, statements))
if self.tokens[self.i] is not ELSIF:
break
self.i += 1
if self.tokens[self.i] is ELSE:
self.i += 1
while self.tokens[self.i] is not END and self.tokens[self.i] is not END_OF_FILE:
s = self.parse_statement()
if s is not None:
else_statements.append(s)
self.expect(END)
self.expect(IF)
return IfStatement(condition_statements, else_statements)
def parse_increment_statement(self):
if self.tokens[self.i] is INC:
delta = 1
elif self.tokens[self.i] is DEC:
delta = -1
else:
assert False, self.tokens[self.i]
self.i += 1
expr = self.parse_expression()
return IncrementStatement(expr, delta)
def parse_for_statement(self):
self.expect(FOR)
var = self.identifier()
self.expect(ASSIGN)
start = self.parse_expression()
self.expect(TO)
end = self.parse_expression()
step = None
if self.tokens[self.i] is STEP:
self.i += 1
step = self.parse_expression()
label = "FOR"
if self.tokens[self.i] is LABEL:
self.i += 1
label = self.identifier()
self.expect(DO)
statements = []
while self.tokens[self.i] is not END and self.tokens[self.i] is not END_OF_FILE:
s = self.parse_statement()
if s is not None:
statements.append(s)
self.expect(END)
self.expect(FOR)
return ForStatement(var, start, end, step, label, statements)
def parse_foreach_statement(self):
self.expect(FOREACH)
var = self.identifier()
self.expect(IN)
array = self.parse_expression()
index = None
if self.tokens[self.i] is INDEX:
self.i += 1
index = self.identifier()
label = "FOREACH"
if self.tokens[self.i] is LABEL:
self.i += 1
label = self.identifier()
self.expect(DO)
statements = []
while self.tokens[self.i] is not END and self.tokens[self.i] is not END_OF_FILE:
s = self.parse_statement()
if s is not None:
statements.append(s)
self.expect(END)
self.expect(FOREACH)
return ForeachStatement(var, array, index, label, statements)
def parse_let_statement(self):
self.expect(LET)
name = self.identifier()
type = None
if self.tokens[self.i] is COLON:
self.expect(COLON)
type = self.parse_type()
self.expect(ASSIGN)
expr = self.parse_expression()
if type is None:
type = infer_type(expr)
return LetDeclaration(name, type, expr)
def parse_loop_statement(self):
self.expect(LOOP)
label = "LOOP"
if self.tokens[self.i] is LABEL:
self.i += 1
label = self.identifier()
statements = []
while self.tokens[self.i] is not END and self.tokens[self.i] is not END_OF_FILE:
s = self.parse_statement()
if s is not None:
statements.append(s)
self.expect(END)
self.expect(LOOP)
return LoopStatement(label, statements)
def parse_main_statement(self):
self.expect(BEGIN)
self.expect(MAIN)
statements = []
while self.tokens[self.i] is not END and self.tokens[self.i] is not END_OF_FILE:
s = self.parse_statement()
if s is not None:
statements.append(s)
self.expect(END)
self.expect(MAIN)
return FunctionDeclaration(None, "MAIN", None, [], False, statements)
def parse_next_statement(self):
self.expect(NEXT)
label = self.tokens[self.i].name
self.i += 1
return NextStatement(label)
def parse_raise_statement(self):
self.expect(RAISE)
name = []
while True:
name.append(self.identifier())
if self.tokens[self.i] is not DOT:
break
self.i += 1
if self.tokens[self.i] is LPAREN:
class ExceptionType:
def __init__(self):
self.type = ClassRecord(
[
Field("info", TypeSimple("String")),
Field("offset", TypeSimple("Number")),
],
{}
)
def resolve(self, env):
return self.type
info = self.parse_expression()
else:
info = None
return RaiseStatement(name, info)
def parse_repeat_statement(self):
self.expect(REPEAT)
label = "REPEAT"
if self.tokens[self.i] is LABEL:
self.i += 1
label = self.identifier()
statements = []
while self.tokens[self.i] is not UNTIL and self.tokens[self.i] is not END_OF_FILE:
s = self.parse_statement()
if s is not None:
statements.append(s)
self.expect(UNTIL)
cond = self.parse_expression()
return RepeatStatement(label, cond, statements)
def parse_return_statement(self):
self.expect(RETURN)
expr = self.parse_expression()
return ReturnStatement(expr)
def parse_testcase_statement(self):
self.expect(TESTCASE)
expr = self.parse_expression()
expected_exception = None
if self.tokens[self.i] is EXPECT:
self.i += 1
expected_exception = []
while True:
expected_exception.append(self.identifier())
if self.tokens[self.i] is not DOT:
break
self.i += 1
return TestCaseStatement(expr, expected_exception)
def parse_try_statement(self):
self.expect(TRY)
statements = []
while self.tokens[self.i] is not TRAP and self.tokens[self.i] is not END and self.tokens[self.i] is not END_OF_FILE:
s = self.parse_statement()
if s is not None:
statements.append(s)
catches = []
while self.tokens[self.i] is TRAP:
self.i += 1
name = []
while True:
name.append(self.identifier())
if self.tokens[self.i] is not DOT:
break
self.i += 1
exceptions = [name]
infoname = None
if self.tokens[self.i] is AS:
self.i += 1
infoname = self.identifier()
self.expect(DO)
handler = []
while self.tokens[self.i] is not TRAP and self.tokens[self.i] is not END and self.tokens[self.i] is not END_OF_FILE:
s = self.parse_statement()
if s is not None:
handler.append(s)
catches.append((exceptions, infoname, handler))
self.expect(END)
self.expect(TRY)
return TryStatement(statements, catches)
def parse_unused_statement(self):
self.expect(UNUSED)
while True:
self.identifier()
if self.tokens[self.i] is not COMMA:
break
self.expect(COMMA)
return None
def parse_var_statement(self):
self.expect(VAR)
vars = self.parse_variable_declaration(False)
expr = None
if self.tokens[self.i] is ASSIGN:
self.i += 1
expr = self.parse_expression()
if vars[1] is None:
vars = (vars[0], infer_type(expr))
return VariableDeclaration(vars[0], vars[1], expr)
def parse_while_statement(self):
self.expect(WHILE)
if self.tokens[self.i] is VALID:
tests = []
while True:
self.i += 1
ptr = self.parse_expression()
if self.tokens[self.i] is AS:
self.i += 1
name = self.identifier()
else:
assert isinstance(ptr, IdentifierExpression)
name = ptr.name
tests.append((ptr, name))
if self.tokens[self.i] is not COMMA:
break
cond = ValidPointerExpression(tests)
else:
cond = self.parse_expression()
label = "WHILE"
if self.tokens[self.i] is LABEL:
self.i += 1
label = self.identifier()
self.expect(DO)
statements = []
while self.tokens[self.i] is not END and self.tokens[self.i] is not END_OF_FILE:
s = self.parse_statement()
if s is not None:
statements.append(s)
self.expect(END)
self.expect(WHILE)
return WhileStatement(cond, label, statements)
def parse_statement(self):
if self.tokens[self.i] is IMPORT: return self.parse_import()
if self.tokens[self.i] is TYPE: return self.parse_type_definition()
if self.tokens[self.i] is CONSTANT: return self.parse_constant_definition()
if self.tokens[self.i] is FUNCTION: return self.parse_function_definition()
if self.tokens[self.i] is DECLARE: return self.parse_declaration()
if self.tokens[self.i] is EXCEPTION:return self.parse_exception()
if self.tokens[self.i] is INTERFACE:return self.parse_interface()
if self.tokens[self.i] is EXPORT: return self.parse_export()
if self.tokens[self.i] is IF: return self.parse_if_statement()
if self.tokens[self.i] in [INC,DEC]:return self.parse_increment_statement()
if self.tokens[self.i] is RETURN: return self.parse_return_statement()
if self.tokens[self.i] is VAR: return self.parse_var_statement()
if self.tokens[self.i] is LET: return self.parse_let_statement()
if self.tokens[self.i] is WHILE: return self.parse_while_statement()
if self.tokens[self.i] is CASE: return self.parse_case_statement()
if self.tokens[self.i] is FOR: return self.parse_for_statement()
if self.tokens[self.i] is FOREACH: return self.parse_foreach_statement()
if self.tokens[self.i] is LOOP: return self.parse_loop_statement()
if self.tokens[self.i] is REPEAT: return self.parse_repeat_statement()
if self.tokens[self.i] is EXIT: return self.parse_exit_statement()
if self.tokens[self.i] is NEXT: return self.parse_next_statement()
if self.tokens[self.i] is TRY: return self.parse_try_statement()
if self.tokens[self.i] is RAISE: return self.parse_raise_statement()
if self.tokens[self.i] is ASSERT: return self.parse_assert_statement()
if self.tokens[self.i] is CHECK: return self.parse_check_statement()
if self.tokens[self.i] is UNUSED: return self.parse_unused_statement()
if self.tokens[self.i] is BEGIN: return self.parse_main_statement()
if self.tokens[self.i] is TESTCASE: return self.parse_testcase_statement()
if isinstance(self.tokens[self.i], Identifier):
expr = self.parse_expression()
if self.tokens[self.i] is ASSIGN:
self.i += 1
rhs = self.parse_expression()
return AssignmentStatement(expr, rhs)
else:
return ExpressionStatement(expr)
else:
assert False, self.tokens[self.i:self.i+10]
def parse(self):
statements = []
while self.tokens[self.i] is not END_OF_FILE:
s = self.parse_statement()
if s is not None:
statements.append(s)
return Program(statements)
def parse(tokens):
return Parser(tokens).parse()
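# Editor's sketch (illustrative only): the Parser above is a hand-written
# recursive-descent parser -- a token index (self.i), expect() to consume a
# required token, and one parse_* method per construct. The toy parser below
# shows the same pattern on a tiny grammar (integers separated by '+') and is
# independent of the classes defined in this module.
class _ToySumParser:
    def __init__(self, tokens):
        self.tokens = tokens  # e.g. ["1", "+", "2", "+", "3"]
        self.i = 0
    def expect(self, token):
        assert self.tokens[self.i] == token, (token, self.tokens[self.i])
        self.i += 1
    def parse_number(self):
        value = int(self.tokens[self.i])
        self.i += 1
        return value
    def parse_sum(self):
        total = self.parse_number()
        while self.i < len(self.tokens) and self.tokens[self.i] == "+":
            self.expect("+")
            total += self.parse_number()
        return total

# _ToySumParser(["1", "+", "2", "+", "3"]).parse_sum() == 6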
class Class:
pass
class ClassBoolean(Class):
def default(self, env):
return False
class ClassNumber(Class):
def default(self, env):
return 0
class ClassString(Class):
def default(self, env):
return ""
class ClassBytes(Class):
def default(self, env):
class Bytes:
def __init__(self):
self.a = []
def __eq__(self, rhs):
return self.a == rhs.a
def fromArray(self, env, a):
self.a = list(a)
def size(self, env):
return len(self.a)
def toArray(self, env):
return list(self.a)
def decodeToString(self, env, obj):
if isinstance(self.a, list):
# Normalize the list of ints to a bytearray before decoding.
self.a = bytearray(self.a)
return self.a.decode("utf-8")
return Bytes()
class ClassObject(Class):
def default(self, env):
return None
class ClassArray(Class):
def __init__(self, elementtype):
self.elementtype = elementtype
def default(self, env):
return []
class ClassDictionary(Class):
def __init__(self, elementtype):
self.elementtype = elementtype
def default(self, env):
return {}
class ClassRecord(Class):
class Instance:
def __init__(self, fields):
self._fields = fields
for x in fields:
setattr(self, x.name, None) # TODO: default()
def __eq__(self, other):
return all(getattr(self, x.name) == getattr(other, | |
# -*- coding: utf-8 -*-
# Apache Software License 2.0
#
# Copyright (c) 2018, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Handles configurations files for the application
"""
import os
import uuid
from datetime import datetime
from logging import DEBUG
from logging import Formatter
from logging import StreamHandler
from logging import getLogger
from logging.config import dictConfig
from platform import uname
from tempfile import mkstemp
from urllib.error import HTTPError
from urllib.error import URLError
import pyhocon
from pytz import timezone
from aiscalator import __version__
from aiscalator.core.utils import copy_replace
from aiscalator.core.utils import data_file
def _generate_global_config() -> str:
"""Generate a standard configuration file for the application in the
user's home folder ~/.aiscalator/config/aiscalator.conf from the
template file in aiscalator/config/template/aiscalator.conf
"""
logger = getLogger(__name__)
dst = os.path.join(os.path.expanduser("~"),
".aiscalator/config/aiscalator.conf")
logger.info("Generating a new configuration file for aiscalator:\n\t%s",
dst)
pattern = [
"testUserID",
"generation_date",
]
replace_value = [
generate_user_id(),
'"' + str(datetime
.utcnow()
.replace(tzinfo=timezone("UTC"))) +
'" // in UTC timezone',
]
dst_dir = os.path.dirname(dst)
if dst_dir:
os.makedirs(dst_dir, exist_ok=True)
copy_replace(data_file("../config/template/aiscalator.conf"),
dst, pattern=pattern, replace_value=replace_value)
open(os.path.join(dst_dir, "apt_packages.txt"), 'a').close()
open(os.path.join(dst_dir, "requirements.txt"), 'a').close()
open(os.path.join(dst_dir, "lab_extensions.txt"), 'a').close()
return dst
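# Editor's sketch (illustrative only): _generate_global_config() relies on
# copy_replace() to copy the template while substituting placeholders such as
# "testUserID" and "generation_date". The helper below shows that general
# pattern with plain string replacement; the real copy_replace() in
# aiscalator.core.utils may behave differently.
def _copy_with_substitutions(src, dst, pattern, replace_value):
    """Copy src to dst, replacing each pattern[i] with replace_value[i]."""
    with open(src, "r") as fin:
        text = fin.read()
    for old, new in zip(pattern, replace_value):
        text = text.replace(old, new)
    with open(dst, "w") as fout:
        fout.write(text)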
def generate_user_id() -> str:
"""
Returns
-------
str
Returns a string identifying this user when the
setup was first run
"""
return 'u' + str(uuid.getnode())
def _app_config_file() -> str:
"""Return the path to the app configuration file."""
if 'AISCALATOR_HOME' in os.environ:
home = os.environ['AISCALATOR_HOME']
file = os.path.join(home, "config", "aiscalator.conf")
if os.path.exists(file):
return file
return os.path.join(os.path.expanduser("~"), '.aiscalator',
'config', 'aiscalator.conf')
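# Editor's sketch (illustrative only): _app_config_file() prefers
# $AISCALATOR_HOME/config/aiscalator.conf when that file exists and otherwise
# falls back to ~/.aiscalator/config/aiscalator.conf. The demo helper below is
# an editor addition, not part of the aiscalator API.
def _demo_app_config_file_lookup(tmp_home):
    """Return the resolved config path with AISCALATOR_HOME pointed at tmp_home."""
    conf = os.path.join(tmp_home, "config", "aiscalator.conf")
    os.makedirs(os.path.dirname(conf), exist_ok=True)
    open(conf, 'a').close()  # ensure the file exists so the override is used
    os.environ['AISCALATOR_HOME'] = tmp_home
    try:
        return _app_config_file()  # resolves to conf
    finally:
        del os.environ['AISCALATOR_HOME']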
# TODO refactor, splitting up the Global App Config part from
# Jupyter Config (step) and Airflow config (DAG) into 3 classes
# with separate APIs.
class AiscalatorConfig:
"""
A configuration object for the Aiscalator application.
This object stores:
- global configuration for the whole application
- configuration for a particular context specified in a step
configuration file.
- In this case, we might even focus on a particular step.
...
Attributes
----------
_app_conf
global configuration object for the application
_config_path : str
path to the configuration file (or plain configuration as string)
_step_name : str
name of the currently processed step
_step
configuration object for the currently processed step
_dag_name : str
name of the currently processed dag
_dag
configuration object for the currently processed dag
"""
def __init__(self,
config=None,
step_selection=None,
dag_selection=None):
"""
Parameters
----------
config : str
path to the step configuration file (or plain configuration
string)
step_selection : str
Name of step from the configuration file to focus on
dag_selection : str
Name of dag from the configuration file to focus on
"""
self._config_path = config
self._app_conf = _setup_app_config()
self._setup_logging()
parsed_config = _parse_config(config)
self._step_name = None
self._step = None
self._dag_name = None
self._dag = None
if parsed_config:
step_sel = step_selection
if "steps" in parsed_config:
self._step_name, self._step = (
_select_config(parsed_config,
root_node='steps',
child_node='task',
selection=step_sel)
)
if "dags" in parsed_config:
self._dag_name, self._dag = (
_select_config(parsed_config,
root_node='dags',
child_node='definition',
selection=dag_selection)
)
###################################################
# Global App Config methods #
###################################################
def _setup_logging(self):
""" Setup the logging configuration of the application """
if self.app_config_has("logging"):
log_config = self.app_config()["logging"]
filename_list = [
v['filename'] for k, v in
_find_config_tree(log_config, "filename")
]
# pre-create directory in advance for all loggers
for file in filename_list:
file_dir = os.path.dirname(file)
if file_dir and not os.path.isdir(file_dir):
os.makedirs(file_dir, exist_ok=True)
dictConfig(log_config)
else:
log = getLogger()
handler = StreamHandler()
formatter = Formatter(
"%(asctime)s-%(threadName)s-%(name)s-%(levelname)s-%(message)s"
)
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(DEBUG)
msg = ("Starting " + os.path.basename(__name__) +
" version " + __version__ + " on " +
"_".join(uname()).replace(" ", "_"))
logger = getLogger(__name__)
logger.debug(msg)
def app_config_home(self) -> str:
"""Return the path to the app configuration folder."""
if self.app_config_has("app_config_home_directory"):
return self.app_config()["app_config_home_directory"]
return os.path.join(os.path.expanduser("~"), '.aiscalator')
def redefine_app_config_home(self, config_home):
"""
Modify the configuration file to change the value of the
application configuration home directory.
Parameters
----------
config_home : str
path to the new configuration home
Returns
-------
AiscalatorConfig
the new configuration object
"""
dst = _app_config_file()
new_config = (
pyhocon.ConfigFactory.parse_string(
"aiscalator.app_config_home_directory = " + config_home
)
).with_fallback(_app_config_file(), resolve=False)
with open(dst, "w") as output:
output.write(
pyhocon.converter.HOCONConverter.to_hocon(new_config)
)
self._app_conf = new_config
return new_config
def redefine_airflow_workspaces(self, workspaces):
"""
Modify the configuration file to change the value of the
airflow workspaces
Parameters
----------
workspaces : list
list of workspaces to bind to airflow
Returns
-------
AiscalatorConfig
the new configuration object
"""
dst = _app_config_file()
new_config = (
pyhocon.ConfigFactory.parse_string(
"aiscalator.airflow.setup.workspace_paths = [\n" +
"\n".join([ws for ws in workspaces]) +
"]"
)
).with_fallback(_app_config_file(), resolve=False)
with open(dst, "w") as output:
output.write(
pyhocon.converter.HOCONConverter.to_hocon(new_config)
)
self._app_conf = new_config
return new_config
def user_env_file(self, job=None) -> list:
"""
Find a list of env files to pass to docker containers
Parameters
----------
job
Optional step or dag config
Returns
-------
List
env files
"""
logger = getLogger(__name__)
result = []
# Look if any env file or variables were defined in the step/dag
if job:
(_, env_filename) = mkstemp(prefix="aiscalator_", text=True)
with open(env_filename, mode="w") as env_file:
# concatenate all the env files and variables into one
for env in job:
if isinstance(env, pyhocon.config_tree.ConfigTree):
for k in env.keys():
env_file.write(k + '=' + env.get(k) + '\n')
elif os.path.isfile(os.path.join(self.root_dir(), env)):
with open(os.path.join(self.root_dir(), env),
mode="r") as file:
for line in file:
env_file.write(line)
else:
msg = ("Undefined env " + env +
": expecting a dict of environment variables" +
" or path to environment configuration file.")
logger.warning("Warning %s", msg)
result.append(env_filename)
# TODO look in user config if env file has been redefined
result.append(
os.path.join(self.app_config_home(), "config", ".env")
)
return result
def _timestamp_now(self) -> str:
"""
Depending on how the timezone is configured, returns the
timestamp for this instant.
"""
date_now = datetime.utcnow().replace(tzinfo=timezone("UTC"))
if self._app_conf["aiscalator"]:
pst = timezone(self.app_config().timezone)
else:
pst = timezone('Europe/Paris')
return date_now.astimezone(pst).strftime("%Y%m%d%H%M%S")
def app_config(self):
"""
Returns
-------
str
the configuration object for the aiscalator application
"""
return self._app_conf["aiscalator"]
def config_path(self):
"""
Returns
-------
str
Returns the path to the step configuration file.
If it was a URL, it will return the path to the temporary
downloaded version of it.
If it was a plain string, then returns None
"""
if os.path.exists(self._config_path):
if pyhocon.ConfigFactory.parse_file(self._config_path):
return os.path.realpath(self._config_path)
# TODO if string is url/git repo, download file locally first
return None
def root_dir(self):
"""
Returns
-------
str
Returns the path to the folder containing the
configuration file
"""
path = self.config_path()
if path:
root_dir = os.path.dirname(path)
if not root_dir.endswith("/"):
root_dir += "/"
return root_dir
return None
def user_id(self) -> str:
"""
Returns
-------
str
the user id stored when the application was first setup
"""
return self.app_config()["metadata.user.id"]
def app_config_has(self, field) -> bool:
"""
Tests if the application config has a configuration
value for the field.
"""
if not self.app_config():
return False
return field in self.app_config()
def airflow_docker_compose_file(self):
"""Return the configuration file to bring airflow services up."""
if self.app_config_has("airflow.docker_compose_file"):
return self.app_config()["airflow.docker_compose_file"]
return None
def validate_config(self):
"""
Check if all the fields in the reference config are
defined in focused steps too. Otherwise
raise an Exception (either pyhocon.ConfigMissingException
or pyhocon.ConfigWrongTypeException)
"""
reference = data_file("../config/template/minimum_aiscalator.conf")
ref = pyhocon.ConfigFactory.parse_file(reference)
msg = "In Global Application Configuration file "
_validate_configs(self._app_conf, ref, msg,
missing_exception=True,
type_mismatch_exception=True)
reference = data_file("../config/template/aiscalator.conf")
ref = pyhocon.ConfigFactory.parse_file(reference)
msg = "In Global Application Configuration file "
_validate_configs(self._app_conf, ref, msg,
missing_exception=False,
type_mismatch_exception=True)
if self._step_name:
reference = data_file("../config/template/minimum_step.conf")
ref = pyhocon.ConfigFactory.parse_file(reference)
msg = "in step named " + self._step_name
_validate_configs(self._step,
ref["steps"]["Untitled"],
msg,
missing_exception=True,
type_mismatch_exception=True)
reference = data_file("../config/template/step.conf")
ref = pyhocon.ConfigFactory.parse_file(reference)
msg = "in step named " + self._step_name
_validate_configs(self._step,
ref["steps"]["Untitled"],
msg,
missing_exception=False,
type_mismatch_exception=True)
if self._dag_name:
reference = data_file("../config/template/minimum_dag.conf")
ref = pyhocon.ConfigFactory.parse_file(reference)
msg = "in dag named " + self._dag_name
_validate_configs(self._dag,
ref["dags"]["Untitled"],
msg,
missing_exception=True,
type_mismatch_exception=True)
reference = data_file("../config/template/step.conf")
ref = pyhocon.ConfigFactory.parse_file(reference)
msg = "in dag named " + self._dag_name
_validate_configs(self._dag,
ref["dags"]["Untitled"],
msg,
missing_exception=False,
type_mismatch_exception=True)
###################################################
# Step methods #
###################################################
def step_notebook_output_path(self, notebook) -> str:
"""Generates the name of the output notebook"""
return | |
s = getattr(ii,field)
if s not in master_list:
master_list.append(s)
for child in node.next.values():
# FIXME: sloppy return value handling???
collect_ifield(options,child, field,master_list)
return master_list
def collect_ofield(options, node, field, master_list):
"""Collect operand field data for enumerations"""
for ii in node.instructions:
for opnd in ii.operands:
if field_check(opnd, field):
s = getattr(opnd,field)
if s != None and s not in master_list:
master_list[s] = True
for child in node.next.values():
collect_ofield(options,child, field,master_list)
def collect_ofield_operand_type(options, node, field, master_list):
"""Collect operand type enumeration data"""
for ii in node.instructions:
for opnd in ii.operands:
if field_check(opnd, field):
s = opnd.get_type_for_emit()
#s = getattr(opnd,field)
if s != None and s not in master_list:
master_list[s] = True
for child in node.next.values():
collect_ofield_operand_type(options,child, field,master_list)
def collect_ofield_name_type(options, node, field, master_list):
"""Collect operand field data for enumerations"""
for ii in node.instructions:
for opnd in ii.operands:
if field_check(opnd, field):
s = getattr(opnd,field)
type = getattr(opnd,'type')
if s not in master_list:
master_list[s]=type
for child in node.next.values():
collect_ofield_name_type(options,child, field,master_list)
def collect_attributes_pre(options, node, master_list):
collect_attributes(options, node, master_list)
# add always-available attributes. These facilitate writing
# unconditional property-checking code in XED.
for attr in [ 'MASKOP_EVEX', 'MASK_AS_CONTROL' ]:
if attr not in master_list:
master_list.append(attr)
def collect_attributes(options, node, master_list):
"""Collect all attributes"""
for ii in node.instructions:
if field_check(ii, 'attributes'):
s = getattr(ii,'attributes')
if isinstance(s, list):
for x in s:
if x not in master_list:
master_list.append(x)
elif s != None and s not in master_list:
master_list.append(s)
for nxt in node.next.values():
collect_attributes(options,nxt, master_list)
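# Editor's sketch (illustrative only): the collect_*() helpers above all walk
# the generator graph the same way -- visit node.instructions, then recurse
# into node.next.values(). The stand-in below shows just that traversal shape;
# _Node is a hypothetical placeholder, not a XED type.
class _Node(object):
    def __init__(self, instructions, children=None):
        self.instructions = instructions
        self.next = children or {}

def _collect_field(node, field, master_list):
    """Append each distinct value of `field` found anywhere in the graph."""
    for ii in node.instructions:
        value = getattr(ii, field, None)
        if value is not None and value not in master_list:
            master_list.append(value)
    for child in node.next.values():
        _collect_field(child, field, master_list)
    return master_list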
idata_files = 0
def write_instruction_data(agi, idata_dict):
"""Write a file containing the content of the idata_dict. The keys
are iclass:extension; the values are (iclass, extension, category).
Appending to an already-written idata.txt file is not handled."""
global idata_files
idata_files += 1
if idata_files > 1:
die("Not handled: appending to idata.txt file")
fe = xed_file_emitter_t(agi.common.options.xeddir,
agi.common.options.gendir,
'idata.txt',
shell_file=True)
fe.start(full_header=False)
kys = list(idata_dict.keys())
kys.sort()
s = "#%-19s %-15s %-15s %-30s %-20s %s\n" % ("iclass",
"extension",
"category",
"iform",
"isa_set",
'attributes')
fe.write(s)
for iform in kys:
(iclass,extension,category,isa_set, plist,
iclass_string_index) = idata_dict[iform]
if plist:
attributes = ":".join(plist)
else:
attributes = 'INVALID'
s = "%-19s %-15s %-15s %-30s %-20s %s\n" % (iclass,
extension,
category,
iform,
isa_set,
attributes)
fe.write(s)
fe.close()
def attr_dict_keyfn(a):
return a[0]
def write_attributes_table(agi, odir):
fn = 'xed-attributes-init.c'
if vattr():
msgb("Writing attributes file", fn)
f = agi.common.open_file(fn, start=False)
f.add_misc_header("#include \"xed-attributes.h\"")
f.add_misc_header("#include \"xed-gen-table-defs.h\"")
f.start()
f.write("\nconst xed_attributes_t ")
f.write("xed_attributes[XED_MAX_REQUIRED_ATTRIBUTES] = {\n")
if vattr():
msgb("Unique attributes", len(agi.attributes_dict))
t = []
for s,v in agi.attributes_dict.items():
t.append((v,s))
t.sort(key=attr_dict_keyfn)
if vattr():
msgb("Sorted Unique attributes", len(t))
agi.attributes_ordered = t
# agi.attributes_ordered has tuple (i,s) where s is a comma
# separated list of attributes that we'll use to manufacture the
# initialization equations.
if len(agi.attributes_ordered) >= 65536:
die("Too many attributes combinations for the 16b index used" +
" in the xed_inst_t data structure." +
" Please report this to the SDE/XED team.")
for i,s in agi.attributes_ordered:
if s:
v = s.split(',')
struct_init = make_attributes_structure_init(agi,v)
else:
struct_init = make_attributes_structure_init(agi,None)
f.write("/* %5d */ %s,\n" % (i,struct_init))
f.write("\n};\n")
f.close()
def write_quick_iform_map(agi,odir,idata_dict):
fn = 'xed-iform-map-init.c'
f = agi.common.open_file(fn, start=False)
f.add_misc_header("#include \"xed-iform-map.h\"")
f.start()
# FIXME: declare this type
f.write("\nconst xed_iform_info_t xed_iform_db[XED_IFORM_LAST] = {\n")
first = True
for (iclass,iform_num,iform) in agi.iform_tuples:
try:
(x_iclass,extension,category,isa_set,
plist,
iclass_string_index) = idata_dict[iform]
except:
(x_iclass,extension,category,isa_set,
plist,
iclass_string_index) = ('INVALID',
'INVALID',
'INVALID',
'INVALID',
None,
0) # FIXME BADNESS
if first:
first = False
else:
f.write(",\n")
qual_iclass = "XED_ICLASS_%s" % (iclass.upper())
qual_category = "XED_CATEGORY_%s" % (category.upper())
qual_extension = "XED_EXTENSION_%s" % (extension.upper())
qual_isa_set = "XED_ISA_SET_%s" % (isa_set.upper())
t = '/* %29s */ { (xed_uint16_t) %25s, (xed_uint8_t) %22s, (xed_uint8_t)%20s, (xed_uint16_t)%25s, (xed_uint16_t)%4d }' % \
(iform,
qual_iclass,
qual_category,
qual_extension,
qual_isa_set,
iclass_string_index)
f.write(t)
f.write("\n};\n")
f.close()
def collect_graph_enum_info(agi,graph):
# we ignore the return values because we don't need them. The agi
# fields get written by the collect*() functions.
# operand fields
collect_ofield_operand_type(agi.common.options,
graph,
'type',
agi.operand_types)
collect_ofield(agi.common.options,graph, 'oc2', agi.operand_widths)
collect_ofield_name_type(agi.common.options,graph, 'name',
agi.operand_names)
collect_ifield(agi.common.options,graph, 'iclass',agi.iclasses)
collect_ifield(agi.common.options,graph, 'category', agi.categories)
collect_ifield(agi.common.options,graph, 'extension', agi.extensions)
collect_attributes_pre(agi.common.options,graph, agi.attributes)
def add_invalid(lst):
if 'INVALID' not in lst:
lst[0:0] = ['INVALID']
############################################################################
def key_invalid_first(x):
# make 'INVALID' sort to be first.
if x == 'INVALID':
# space is first printable character in ascii table and should
# not show up in our usage.
return ' '
return x
def key_invalid_tuple_element_0(x):
return key_invalid_first(x[0])
def key_tuple_element_1(x):
return x[1]
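# Editor's sketch (illustrative only): key_invalid_first() maps 'INVALID' to a
# space so that sorted() places it ahead of every real name.
def _demo_key_invalid_first():
    """Editor-added demo, not part of the generator."""
    return sorted(['XCHG', 'INVALID', 'ADD'], key=key_invalid_first)
    # -> ['INVALID', 'ADD', 'XCHG']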
class rep_obj_t(object):
def __init__(self, iclass, indx, repkind):
self.iclass = iclass
self.indx = indx
self.repkind = repkind
self.no_rep_iclass = None
self.no_rep_indx = None
def repmap_emit_code(agi, plist, kind, hash_fn):
"""Emit table that implements the required mapping of iclasses. plist
is an array of (key,value) pairs. kind is one of repe, repne, rep
or norep. The hash function maps from the keys to a unique
value. """
fo = function_object_t(name='xed_' + kind + '_map',
return_type='xed_iclass_enum_t',
dll_export=True)
fo.add_arg('xed_iclass_enum_t iclass')
t = {}
mx = 0
for (k,v) in plist:
h = hash_fn.apply(k)
t[h] = (k,v)
mx = max(mx, h)
# For nonlinear hashes, add input key validation: store the expected
# key alongside each value so the lookup can confirm that the input
# matches the slot selected by the hash. Then the functions won't
# return undefined results for unexpected inputs.
if hash_fn.kind() == 'linear':
array_limit = mx+1 # no extra room required for validation.
else:
array_limit = 2*(mx+1) # make room for input key validation
fo.add_code('const xed_uint16_t lu_table[{}] = {{'.format(array_limit))
hashes = list(t.keys())
hashes.sort()
# fill in the rows of the array
for h in range(0,mx+1):
if h in t:
(k,v) = t[h]
else:
k = "0xFFFF"
v = 0 # XED_ICLASS_INVALID
if hash_fn.kind() == 'linear':
fo.add_code( '/* {} -> {} */ {},'.format(k,h,v))
else:
fo.add_code( '/* {} -> {} */ {}, {},'.format(k,h, k,v))
fo.add_code_eol('}')
fo.add_code_eol('const xed_uint_t key = (xed_uint_t)iclass')
fo.add_code_eol('const xed_uint_t hash = {}'.format(hash_fn.emit_cexpr()))
fo.add_code( 'if (hash <= {}) {{'.format(mx))
if hash_fn.kind() == 'linear':
fo.add_code_eol(' const xed_uint_t v = lu_table[hash]')
fo.add_code_eol(' return (xed_iclass_enum_t) v')
else:
# validate the correct input mapped to the output
fo.add_code_eol(' const xed_uint_t ek = lu_table[2*hash]')
fo.add_code( ' if (ek == key) {')
fo.add_code_eol(' const xed_uint_t v = lu_table[2*hash+1]')
fo.add_code_eol(' return (xed_iclass_enum_t) v')
fo.add_code( ' }')
fo.add_code( '}')
fo.add_code_eol('return XED_ICLASS_INVALID')
return fo
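# Editor's sketch (illustrative only): a Python model of the C table emitted by
# repmap_emit_code() for a non-linear perfect hash. Each slot stores the
# expected key at index 2*hash and the mapped value at 2*hash+1, so the lookup
# can validate the input key before trusting the hash. hash_fn here is any
# callable; the real generator uses its hashlin/hashmul/hashfks objects.
def _build_validated_table(pairs, hash_fn):
    """Build a flat [key, value, key, value, ...] table indexed by hash."""
    mx = max(hash_fn(k) for k, _ in pairs)
    table = [0xFFFF, 0] * (mx + 1)  # key sentinel, XED_ICLASS_INVALID value
    for k, v in pairs:
        h = hash_fn(k)
        table[2 * h], table[2 * h + 1] = k, v
    return table, mx

def _validated_lookup(table, mx, hash_fn, key, invalid=0):
    """Return the mapped value, or `invalid` if the key is not in the table."""
    h = hash_fn(key)
    if h <= mx and table[2 * h] == key:
        return table[2 * h + 1]
    return invalid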
def emit_iclass_rep_ops(agi):
"""We want to make several functions that map (1) norep -> rep, (2)
norep -> repe, (3) norep ->repne, and (4) rep/repe/repne -> norep.
To do that, we need 2 hash functions. One hash function maps from
rep/repe/repne keys and another one maps from norep keys.
"""
import hashfks
import hashmul
import hashlin
# collect the iclasses of interest by name.
keys = []
repobjs = []
for i,iclass in enumerate(agi.iclasses_enum_order):
#msge("TTX-ICLASS: {}".format(str(iclass)))
if 'REPE_' in iclass:
keys.append(i)
repobjs.append(rep_obj_t(iclass,i,'repe'))
if 'REPNE_' in iclass:
keys.append(i)
repobjs.append(rep_obj_t(iclass,i,'repne'))
if 'REP_' in iclass:
keys.append(i)
repobjs.append(rep_obj_t(iclass,i,'rep'))
# fill in the no-rep info for each object
for o in repobjs:
o.no_rep_iclass = re.sub(r'REP(E|NE)?_', '', o.iclass)
if o.no_rep_iclass in agi.iclasses_enum_order:
o.no_rep_indx = agi.iclasses_enum_order.index(o.no_rep_iclass)
else:
o.no_rep_indx = 0 # invalid
# make a list of keys for the norep-to-whatever hash functions
no_rep_keys = uniqueify( [x.no_rep_indx for x in repobjs])
no_rep_keys.sort()
msge("NOREP KEYS: {}".format(str(no_rep_keys)))
msge("REP KEYS: {}".format(str(keys)))
# find the two required hash functions
all_fn = { 'repinst':None, 'norepinst':None }
for kind, kl in [('repinst',keys), ('norepinst',no_rep_keys)]:
hashfn = hashlin.get_linear_hash_function(kl)
if not hashfn:
hashfn = hashmul.find_perfect(kl)
if not hashfn:
hashfn = hashfks.find_fks_perfect(kl)
if hashfn:
msge('{}'.format(hashfn.emit_cexpr()))
msge('{}'.format(str(hashfn)))
msge('FOUND PERFECT HASH FUNCTION FOR {}'.format(kind))
all_fn[kind]=hashfn
else:
# If this ever happens, it is seriously bad news. We'll
# have to upgrade the perfect hash function generation so
# that this succeeds or make a fallback code path that either
# large or slow. Or one could generate a 2-level perfect hash
# but that seems like overkill for this.
die('DID NOT FIND PERFECT HASH FUNCTION FOR {}'.format(kind))
functions = []
# emit the 3 functions that map from norep -> various kinds of
# rep/repe/repne prefixes
for kind in ['repe', | |
set_data_units(self, data_units): self.data_units = data_units
def get_valid_range(self): return self.valid_range
def set_valid_range(self, valid_range): self.valid_range = valid_range
def get_radiance(self): return self.radiance
def set_radiance(self, radiance): self.radiance = radiance
def get_reflectance(self): return self.reflectance
def set_reflectance(self, reflectance): self.reflectance = reflectance
def get_thermal_const(self): return self.thermal_const
def set_thermal_const(self, thermal_const): self.thermal_const = thermal_const
def get_bitmap_description(self): return self.bitmap_description
def set_bitmap_description(self, bitmap_description): self.bitmap_description = bitmap_description
def get_class_values(self): return self.class_values
def set_class_values(self, class_values): self.class_values = class_values
def get_qa_description(self): return self.qa_description
def set_qa_description(self, qa_description): self.qa_description = qa_description
def get_percent_coverage(self): return self.percent_coverage
def set_percent_coverage(self, percent_coverage): self.percent_coverage = percent_coverage
def get_app_version(self): return self.app_version
def set_app_version(self, app_version): self.app_version = app_version
def get_level1_filename(self): return self.level1_filename
def set_level1_filename(self, level1_filename): self.level1_filename = level1_filename
def get_production_date(self): return self.production_date
def set_production_date(self, production_date): self.production_date = production_date
def get_product(self): return self.product
def set_product(self, product): self.product = product
def get_source(self): return self.source
def set_source(self, source): self.source = source
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_category(self): return self.category
def set_category(self, category): self.category = category
def get_data_type(self): return self.data_type
def set_data_type(self, data_type): self.data_type = data_type
def get_nlines(self): return self.nlines
def set_nlines(self, nlines): self.nlines = nlines
def get_nsamps(self): return self.nsamps
def set_nsamps(self, nsamps): self.nsamps = nsamps
def get_fill_value(self): return self.fill_value
def set_fill_value(self, fill_value): self.fill_value = fill_value
def get_saturate_value(self): return self.saturate_value
def set_saturate_value(self, saturate_value): self.saturate_value = saturate_value
def get_scale_factor(self): return self.scale_factor
def set_scale_factor(self, scale_factor): self.scale_factor = scale_factor
def get_add_offset(self): return self.add_offset
def set_add_offset(self, add_offset): self.add_offset = add_offset
def validate_categoryType(self, value):
# Validate type categoryType, a restriction on xs:string.
pass
def validate_dataType(self, value):
# Validate type dataType, a restriction on xs:string.
pass
def hasContent_(self):
if (
self.short_name is not None or
self.long_name is not None or
self.file_name is not None or
self.pixel_size is not None or
self.resample_method is not None or
self.data_units is not None or
self.valid_range is not None or
self.radiance is not None or
self.reflectance is not None or
self.thermal_const is not None or
self.bitmap_description is not None or
self.class_values is not None or
self.qa_description is not None or
self.percent_coverage is not None or
self.app_version is not None or
self.level1_filename is not None or
self.production_date is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='band', namespacedef_='', pretty_print=True):
# Check if we are at the root level and output the XML header
if level == 0:
outfile.write('<?xml version="1.0"?>\n')
outfile.write('\n')
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
# Check if we are at the root level and output attributes first before namespacedef
if level == 0:
outfile.write('<%s%s' % (namespace_, name_))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='band')
outfile.write('%s' % (namespacedef_ and ' ' + namespacedef_ or ''))
else:
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='band')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='band', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='band'):
if self.product is not None and 'product' not in already_processed:
already_processed.add('product')
outfile.write(' product=%s' % (self.gds_format_string(quote_attrib(self.product).encode(ExternalEncoding), input_name='product'), ))
if self.source is not None and 'source' not in already_processed:
already_processed.add('source')
outfile.write(' source=%s' % (self.gds_format_string(quote_attrib(self.source).encode(ExternalEncoding), input_name='source'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.category is not None and 'category' not in already_processed:
already_processed.add('category')
outfile.write(' category=%s' % (quote_attrib(self.category), ))
if self.data_type is not None and 'data_type' not in already_processed:
already_processed.add('data_type')
outfile.write(' data_type=%s' % (quote_attrib(self.data_type), ))
if self.nlines is not None and 'nlines' not in already_processed:
already_processed.add('nlines')
outfile.write(' nlines="%s"' % self.gds_format_integer(self.nlines, input_name='nlines'))
if self.nsamps is not None and 'nsamps' not in already_processed:
already_processed.add('nsamps')
outfile.write(' nsamps="%s"' % self.gds_format_integer(self.nsamps, input_name='nsamps'))
if self.fill_value is not None and 'fill_value' not in already_processed:
already_processed.add('fill_value')
outfile.write(' fill_value="%s"' % self.gds_format_integer(self.fill_value, input_name='fill_value'))
if self.saturate_value is not None and 'saturate_value' not in already_processed:
already_processed.add('saturate_value')
outfile.write(' saturate_value="%s"' % self.gds_format_integer(self.saturate_value, input_name='saturate_value'))
if self.scale_factor is not None and 'scale_factor' not in already_processed:
already_processed.add('scale_factor')
outfile.write(' scale_factor="%s"' % self.gds_format_float(self.scale_factor, input_name='scale_factor'))
if self.add_offset is not None and 'add_offset' not in already_processed:
already_processed.add('add_offset')
outfile.write(' add_offset="%s"' % self.gds_format_float(self.add_offset, input_name='add_offset'))
def exportChildren(self, outfile, level, namespace_='', name_='band', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.short_name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sshort_name>%s</%sshort_name>%s' % (namespace_, self.gds_format_string(quote_xml(self.short_name).encode(ExternalEncoding), input_name='short_name'), namespace_, eol_))
if self.long_name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%slong_name>%s</%slong_name>%s' % (namespace_, self.gds_format_string(quote_xml(self.long_name).encode(ExternalEncoding), input_name='long_name'), namespace_, eol_))
if self.file_name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfile_name>%s</%sfile_name>%s' % (namespace_, self.gds_format_string(quote_xml(self.file_name).encode(ExternalEncoding), input_name='file_name'), namespace_, eol_))
if self.pixel_size is not None:
self.pixel_size.export(outfile, level, namespace_, name_='pixel_size', pretty_print=pretty_print)
if self.resample_method is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sresample_method>%s</%sresample_method>%s' % (namespace_, self.gds_format_string(quote_xml(self.resample_method).encode(ExternalEncoding), input_name='resample_method'), namespace_, eol_))
if self.data_units is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdata_units>%s</%sdata_units>%s' % (namespace_, self.gds_format_string(quote_xml(self.data_units).encode(ExternalEncoding), input_name='data_units'), namespace_, eol_))
if self.valid_range is not None:
self.valid_range.export(outfile, level, namespace_, name_='valid_range', pretty_print=pretty_print)
if self.radiance is not None:
self.radiance.export(outfile, level, namespace_, name_='radiance', pretty_print=pretty_print)
if self.reflectance is not None:
self.reflectance.export(outfile, level, namespace_, name_='reflectance', pretty_print=pretty_print)
if self.thermal_const is not None:
self.thermal_const.export(outfile, level, namespace_, name_='thermal_const', pretty_print=pretty_print)
if self.bitmap_description is not None:
self.bitmap_description.export(outfile, level, namespace_, name_='bitmap_description', pretty_print=pretty_print)
if self.class_values is not None:
self.class_values.export(outfile, level, namespace_, name_='class_values', pretty_print=pretty_print)
if self.qa_description is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sqa_description>%s</%sqa_description>%s' % (namespace_, self.gds_format_string(quote_xml(self.qa_description).encode(ExternalEncoding), input_name='qa_description'), namespace_, eol_))
if self.percent_coverage is not None:
self.percent_coverage.export(outfile, level, namespace_, name_='percent_coverage', pretty_print=pretty_print)
if self.app_version is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sapp_version>%s</%sapp_version>%s' % (namespace_, self.gds_format_string(quote_xml(self.app_version).encode(ExternalEncoding), input_name='app_version'), namespace_, eol_))
if self.level1_filename is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%slevel1_filename>%s</%slevel1_filename>%s' % (namespace_, self.gds_format_string(quote_xml(self.level1_filename).encode(ExternalEncoding), input_name='level1_filename'), namespace_, eol_))
if self.production_date is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sproduction_date>%s</%sproduction_date>%s' % (namespace_, self.gds_format_datetime(self.production_date, input_name='production_date'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='band'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.product is not None and 'product' not in already_processed:
already_processed.add('product')
showIndent(outfile, level)
outfile.write('product="%s",\n' % (self.product,))
if self.source is not None and 'source' not in already_processed:
already_processed.add('source')
showIndent(outfile, level)
outfile.write('source="%s",\n' % (self.source,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
if self.category is not None and 'category' not in already_processed:
already_processed.add('category')
showIndent(outfile, level)
outfile.write('category="%s",\n' % (self.category,))
if self.data_type is not None and 'data_type' not in already_processed:
already_processed.add('data_type')
showIndent(outfile, level)
outfile.write('data_type="%s",\n' % (self.data_type,))
if self.nlines is not None and 'nlines' not in already_processed:
already_processed.add('nlines')
showIndent(outfile, level)
outfile.write('nlines=%d,\n' % (self.nlines,))
if self.nsamps is not None and 'nsamps' not in already_processed:
already_processed.add('nsamps')
showIndent(outfile, level)
outfile.write('nsamps=%d,\n' % (self.nsamps,))
if self.fill_value is not None and 'fill_value' not in already_processed:
already_processed.add('fill_value')
showIndent(outfile, level)
outfile.write('fill_value=%d,\n' % (self.fill_value,))
if self.saturate_value is not None and 'saturate_value' not in already_processed:
already_processed.add('saturate_value')
showIndent(outfile, level)
outfile.write('saturate_value=%d,\n' % (self.saturate_value,))
if self.scale_factor is not None and 'scale_factor' not in already_processed:
already_processed.add('scale_factor')
showIndent(outfile, level)
outfile.write('scale_factor=%f,\n' % (self.scale_factor,))
if self.add_offset is not None and 'add_offset' not in already_processed:
already_processed.add('add_offset')
showIndent(outfile, level)
outfile.write('add_offset=%f,\n' % (self.add_offset,))
def exportLiteralChildren(self, outfile, level, name_):
if self.short_name is not None:
showIndent(outfile, level)
outfile.write('short_name=%s,\n' % quote_python(self.short_name).encode(ExternalEncoding))
if self.long_name is not None:
showIndent(outfile, level)
outfile.write('long_name=%s,\n' % quote_python(self.long_name).encode(ExternalEncoding))
if self.file_name is not None:
showIndent(outfile, level)
outfile.write('file_name=%s,\n' % quote_python(self.file_name).encode(ExternalEncoding))
if self.pixel_size is not None:
showIndent(outfile, level)
outfile.write('pixel_size=model_.pixel_size(\n')
self.pixel_size.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.resample_method is not None:
showIndent(outfile, level)
outfile.write('resample_method=%s,\n' % quote_python(self.resample_method).encode(ExternalEncoding))
if self.data_units is not None:
showIndent(outfile, level)
outfile.write('data_units=%s,\n' % quote_python(self.data_units).encode(ExternalEncoding))
if self.valid_range is not None:
showIndent(outfile, level)
outfile.write('valid_range=model_.valid_range(\n')
self.valid_range.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.radiance is not None:
showIndent(outfile, level)
outfile.write('radiance=model_.radiance(\n')
self.radiance.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.reflectance is not None:
showIndent(outfile, level)
outfile.write('reflectance=model_.reflectance(\n')
self.reflectance.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.thermal_const is not None:
showIndent(outfile, level)
outfile.write('thermal_const=model_.thermal_const(\n')
self.thermal_const.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.bitmap_description is not None:
showIndent(outfile, level)
outfile.write('bitmap_description=model_.bitmap_description(\n')
self.bitmap_description.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.class_values is not None:
showIndent(outfile, level)
outfile.write('class_values=model_.class_values(\n')
self.class_values.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.qa_description is not None:
showIndent(outfile, level)
outfile.write('qa_description=%s,\n' % quote_python(self.qa_description).encode(ExternalEncoding))
if self.percent_coverage is not None:
showIndent(outfile, level)
outfile.write('percent_coverage=model_.percent_coverage(\n')
self.percent_coverage.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.app_version is not None:
showIndent(outfile, level)
outfile.write('app_version=%s,\n' % quote_python(self.app_version).encode(ExternalEncoding))
if self.level1_filename is not None:
showIndent(outfile, level)
outfile.write('level1_filename=%s,\n' | |
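# Editor's sketch (illustrative only): the band class above follows the usual
# generateDS export layout -- export() writes the element tag, delegates
# attributes to exportAttributes() and nested elements to exportChildren(),
# and hasContent_() chooses between "<tag>...</tag>" and a self-closing tag.
# The minimal class below reproduces that shape without any generateDS
# dependencies; all names are editor inventions.
class _MiniBand(object):
    def __init__(self, name=None, short_name=None):
        self.name = name
        self.short_name = short_name
    def has_content(self):
        return self.short_name is not None
    def export(self, outfile, level, name_='band'):
        indent = '    ' * level
        outfile.write('%s<%s' % (indent, name_))
        if self.name is not None:
            outfile.write(' name="%s"' % self.name)
        if self.has_content():
            outfile.write('>\n')
            outfile.write('%s    <short_name>%s</short_name>\n'
                          % (indent, self.short_name))
            outfile.write('%s</%s>\n' % (indent, name_))
        else:
            outfile.write('/>\n')

# import io; buf = io.StringIO(); _MiniBand(name='b1', short_name='L8').export(buf, 0)
# buf.getvalue() == '<band name="b1">\n    <short_name>L8</short_name>\n</band>\n'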
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021-2022, <NAME>
# All rights reserved.
#
# Licensed under the BSD 3-Clause License:
# http://opensource.org/licenses/BSD-3-Clause
#
from __future__ import annotations
import asyncio
import collections
import datetime
import time
import textwrap
from typing import Callable, Deque, Dict, Iterable, List, Optional
from .terminal import stderr_term
from .code_chunks import CodeChunk, CodeKey
from .code_collections import Code, Session, Source
from . import util
class Progress(object):
'''
Centralized tracking of Codebraid progress, from parsing original document
source files to executing code to postprocessing. This enables a
terminal progress display with customizable levels of detail and also
allows arbitrary tasks to consume code output as soon as it
becomes available.
'''
def __init__(self):
self._last_error_counts: Dict[CodeKey, int] = {}
self._error_count: int = 0
self._last_warning_counts: Dict[CodeKey, int] = {}
self._warning_count: int = 0
self._session_total_chunks_count: int = 0
self._session_exec_chunks_count: int = 0
self._session_exec_completed_chunks_count: int = 0
self._session_exec_last_completed_chunk_count: Dict[CodeKey, int] = collections.defaultdict(int)
self._current_task: Optional[str] = None
self._current_subtask: Optional[str] = None
self._last_task: Optional[str] = None
self._last_subtask: Optional[str] = None
self._last_progress_time: float = time.monotonic()
if self.term.isatty:
ellipsis_sequence = [' ', '. ', '.. ', '...']
else:
ellipsis_sequence = [' ']
self._progress_ellipsis_deque: Deque = collections.deque(ellipsis_sequence)
self._in_live_output: bool = False
self._last_live_output: Optional[str] = None
self._last_live_output_stream: Optional[str] = None
self._live_output_backlog: List[str] = []
term = stderr_term
_first_textwrappers = util.KeyDefaultDict(
lambda columns: textwrap.TextWrapper(width=columns, initial_indent=' * ', subsequent_indent=' '*4)
)
_subsequent_textwrappers = util.KeyDefaultDict(
lambda columns: textwrap.TextWrapper(width=columns, initial_indent=' '*4, subsequent_indent=' '*4)
)
def _register_codes(self, codes: Iterable[Code]):
for code in codes:
self._last_error_counts[code.key] = code.status.error_count
self._error_count += code.status.error_count
self._last_warning_counts[code.key] = code.status.warning_count
self._warning_count += code.status.warning_count
def register_sources(self, sources: Iterable[Source]):
self._register_codes(sources)
def register_sessions(self, sessions: Iterable[Session]):
self._register_codes(sessions)
self._session_total_chunks_count = sum(len(s.code_chunks) for s in sessions)
self._session_exec_chunks_count = self._session_total_chunks_count
def _code_messages_to_summary_list(self, code: Code, *, msg_type: str, columns: int) -> List[str]:
first_textwrapper = self._first_textwrappers[columns]
subsequent_textwrapper = self._subsequent_textwrappers[columns]
if msg_type == 'errors':
fmt_msg_type = self.term.fmt_error
elif msg_type == 'warnings':
fmt_msg_type = self.term.fmt_warning
else:
raise ValueError
summary_list = []
for msg in getattr(code, msg_type):
if msg.is_refed:
continue
if summary_list:
summary_list.append('')
for line in first_textwrapper.wrap(f'"{code.code_chunks[0].origin_name}":'):
summary_list.append(fmt_msg_type(line))
for line in msg.message:
summary_list.extend(subsequent_textwrapper.wrap(line))
for cc in code.code_chunks:
for msg in getattr(cc, msg_type):
if msg.is_refed:
continue
if summary_list:
summary_list.append('')
for line in first_textwrapper.wrap(f'"{cc.origin_name}", line {cc.origin_start_line_number}:'):
summary_list.append(fmt_msg_type(line))
for line in msg.message:
summary_list.extend(subsequent_textwrapper.wrap(line))
return summary_list
def _summarize_code_messages(self, code: Code, *, columns: int) -> str:
summary_list = []
if code.status.has_errors:
summary_list.append(self.term.fmt_error('Errors:'))
summary_list.extend(self._code_messages_to_summary_list(code, msg_type='errors', columns=columns))
if code.status.has_warnings:
summary_list.append(self.term.fmt_warning('Warnings:'))
summary_list.extend(self._code_messages_to_summary_list(code, msg_type='warnings', columns=columns))
summary_list.append('')
return '\n'.join(summary_list)
def _update_progress(self):
'''
Update terminal progress summary, which includes current task, current
        error and warning counts, and code execution progress.
'''
if self._current_task is None:
return
if self._last_live_output is not None and not self._last_live_output.endswith('\n'):
return
time_now = time.monotonic()
if (not self._in_live_output and
self._current_task == self._last_task and self._current_subtask == self._last_subtask and
time_now - self._last_progress_time < 1.0):
return
error_status = f'Errors: {self._error_count}'
if self._error_count > 0:
error_status_fmted = self.term.fmt_error(error_status)
else:
error_status_fmted = self.term.fmt_ok(error_status)
warning_status = f'Warnings: {self._warning_count}'
if self._warning_count > 0:
warning_status_fmted = self.term.fmt_warning(warning_status)
else:
warning_status_fmted = self.term.fmt_ok(warning_status)
if self._current_task == self._last_task:
self._progress_ellipsis_deque.rotate(-1)
else:
while self._progress_ellipsis_deque[0] != ' ':
self._progress_ellipsis_deque.rotate(-1)
ellipsis = self._progress_ellipsis_deque[0]
if self._current_subtask is None:
task_w_subtask = self._current_task
if self._current_task != 'Complete':
task_w_subtask_fmted = self._current_task
elif self._error_count > 0:
task_w_subtask_fmted = self.term.fmt_error(self._current_task)
elif self._warning_count > 0:
task_w_subtask_fmted = self.term.fmt_warning(self._current_task)
else:
task_w_subtask_fmted = self.term.fmt_ok(self._current_task)
else:
task_w_subtask = f'{self._current_task}: {self._current_subtask}'
task_w_subtask_fmted = task_w_subtask
general_status = f'{task_w_subtask}{ellipsis} {error_status} {warning_status}'
general_status_fmted = f'{task_w_subtask_fmted}{ellipsis} {error_status_fmted} {warning_status_fmted}'
if self._current_task == 'Exec' and self._current_subtask == 'run':
exec_status = f' code chunk {self._session_exec_completed_chunks_count}/{self._session_exec_chunks_count}'
bar_width = self.term.columns() - len(general_status) - len(exec_status) - 3
finished_ratio = self._session_exec_completed_chunks_count / self._session_exec_chunks_count
finished = round(finished_ratio*bar_width)
unfinished = bar_width - finished
bar = f''' [{'#'*finished}{'.'*unfinished}]'''
filler = ''
else:
exec_status = ''
bar = ''
filler = ' '*(self.term.columns() - len(general_status))
if self.term.isatty:
progress_text = f'\r{general_status_fmted}{exec_status}{bar}{filler}'
else:
progress_text = f'PROGRESS: {general_status_fmted}{exec_status}{bar}\n'
self._last_task = self._current_task
self._last_subtask = self._current_subtask
self._last_progress_time = time_now
print(progress_text, file=self.term.stream, end='', flush=True)
async def ticktock(self):
'''
Update terminal progress summary at 1 second intervals.
'''
if self.term.isatty:
while True:
try:
await asyncio.sleep(1)
self._update_progress()
except asyncio.CancelledError:
break
def _print_live_heading(self, code: Code, *, notification_type: str, title: str,
columns: int, flush: bool, clearline: bool,
content_sep: bool=True, chunk: Optional[CodeChunk]=None):
output_list = []
if clearline:
output_list.append(self.term.clearline(columns))
output_list.append(self.term.fmt_delim('='*columns))
datetime_now = datetime.datetime.now()
timestamp = f'[{datetime_now.hour:02d}:{datetime_now.minute:02d}:{datetime_now.second:02d}]'
sep = ' '*(columns-len(notification_type)-2-len(title)-len(timestamp))
output_list.append(self.term.fmt_notify(f'{notification_type}: {title}{sep}{timestamp}'))
if chunk is None:
chunk_progress = ''
chunk_traceback = ''
else:
chunk_progress = f', code chunk {chunk.index+1}/{len(code.code_chunks)}'
chunk_traceback = f'\n"{chunk.origin_name}", line {chunk.origin_start_line_number}'
output_list.append(self.term.fmt_notify(f'{code.lang}, {code.type} "{code.name or "<default>"}"{chunk_progress}{chunk_traceback}'))
if content_sep:
output_list.append(self.term.fmt_delim('.'*columns))
print('\n'.join(output_list), file=self.term.stream, flush=flush)
def _print_live_closing(self, code: Code, *, notification_type: str, title: str,
columns: int, flush: bool, clearline: bool, chunk: Optional[CodeChunk]=None):
output_list = []
if self._last_live_output is not None and not self._last_live_output.endswith('\n'):
output_list.append('\n')
if clearline:
output_list.append(self.term.clearline(columns))
output_list.append(self.term.fmt_delim('-'*columns))
output_list.append('\n')
print(''.join(output_list), file=self.term.stream, flush=flush)
def _print_live_notification(self, code: Code, *, notification_type: str, title: str,
columns: Optional[int]=None, text: Optional[str]=None):
if columns is None:
columns = self.term.columns()
self._print_live_heading(code, notification_type=notification_type, title=title,
columns=columns, flush=False, clearline=self.term.isatty,
content_sep=text is not None)
if text is not None:
if text.endswith('\n'):
print(text, file=self.term.stream, end='')
else:
print(text, file=self.term.stream)
self._print_live_closing(code, notification_type=notification_type, title=title,
columns=columns, flush=True, clearline=False)
def _print_live_output(self, output: str, *, stream: str, fmter: Optional[Callable[[str], str]]=None):
if self._last_live_output is not None and not self._last_live_output.endswith('\n'):
if stream == self._last_live_output_stream:
if fmter is None:
print(output, file=self.term.stream, end='', flush=True)
else:
print(fmter(output), file=self.term.stream, end='', flush=True)
self._last_live_output = output
self._last_live_output_stream = stream
elif stream == 'stderr':
if '\r' in output and ('\n' not in output or output.find('\r') < output.find('\n')):
print('\n', file=self.term.stream, end='')
if fmter is None:
print(output, file=self.term.stream, end='', flush=True)
else:
print(fmter(output), file=self.term.stream, end='', flush=True)
self._last_live_output = output
self._last_live_output_stream = stream
else:
self._live_output_backlog.append((output, stream, fmter))
return
if self.term.isatty:
print(self.term.clearline(), file=self.term.stream, end='')
if not self._live_output_backlog:
if fmter is None:
print(output, file=self.term.stream, end='', flush=True)
else:
print(fmter(output), file=self.term.stream, end='', flush=True)
self._last_live_output = output
self._last_live_output_stream = stream
return
new_backlog = []
for backlog_output, backlog_stream, backlog_fmter in self._live_output_backlog:
if self._last_live_output.endswith('\n') or backlog_stream == self._last_live_output_stream:
if backlog_fmter is None:
print(backlog_output, file=self.term.stream, end='')
else:
print(backlog_fmter(backlog_output), file=self.term.stream, end='')
self._last_live_output = backlog_output
self._last_live_output_stream = backlog_stream
else:
new_backlog.append((backlog_output, backlog_stream, backlog_fmter))
self._live_output_backlog = new_backlog
if self._last_live_output.endswith('\n'):
if fmter is None:
print(output, file=self.term.stream, end='')
else:
print(fmter(output), file=self.term.stream, end='')
self._last_live_output = output
self._last_live_output_stream = stream
elif stream == 'stderr':
if '\r' in output and ('\n' not in output or output.find('\r') < output.find('\n')):
print('\n', file=self.term.stream, end='')
if fmter is None:
print(output, file=self.term.stream, end='')
else:
print(fmter(output), file=self.term.stream, end='')
self._last_live_output = output
self._last_live_output_stream = stream
else:
self._live_output_backlog.append((output, stream, fmter))
self.term.stream.flush()
def _update_message_count(self, code: Code):
self._error_count += code.status.error_count - self._last_error_counts[code.key]
self._last_error_counts[code.key] = code.status.error_count
self._warning_count += code.status.warning_count - self._last_warning_counts[code.key]
self._last_warning_counts[code.key] = code.status.warning_count
def parse_start(self):
self._current_task = 'Parse'
self._current_subtask = None
self._update_progress()
def parse_end(self):
pass
def process_start(self):
self._current_task = 'Process'
self._current_subtask = None
self._update_progress()
def process_end(self):
pass
def exec_start(self):
self._current_task = 'Exec'
self._current_subtask = None
self._update_progress()
def exec_end(self):
pass
def postprocess_start(self):
self._current_task = 'Postprocess'
self._current_subtask = None
self._update_progress()
def postprocess_end(self):
pass
def convert_start(self):
self._current_task = 'Convert'
self._current_subtask = None
self._update_progress()
def convert_end(self):
pass
def complete(self):
self._current_task = 'Complete'
self._update_progress()
def session_load_cache(self, session: Session):
self._update_message_count(session)
self._session_exec_chunks_count -= len(session.code_chunks)
if session.live_output:
self._in_live_output = True
self._print_live_notification(session, notification_type='SESSION', title='LOADED CACHE')
self._update_progress()
def session_exec_stage_start(self, session: Session, *, stage: str):
self._current_subtask = stage
if session.live_output:
self._in_live_output = True
self._print_live_notification(session, notification_type='SESSION', title=f'START {stage.upper()}')
self._update_progress()
def session_exec_stage_output(self, session: Session, *, output: str):
if session.live_output:
self._print_live_output(output, stream='stdout')
def session_exec_stage_end(self, session: Session, *, stage: str):
self._update_message_count(session)
self._update_progress()
def _finished(self, code: Code):
self._update_message_count(code)
if code.live_output and (code.status.has_errors or code.status.has_warnings):
columns = self.term.columns()
self._print_live_notification(code, notification_type=f'{code.type.upper()}', title='SUMMARY',
columns=columns, text=self._summarize_code_messages(code, columns=columns))
self._update_progress()
self._in_live_output = False
def source_finished(self, source: Source):
self._finished(source)
def session_finished(self, session: Session):
if session.did_exec:
# If session exec is interrupted by errors, code chunk count may
# be off because per-chunk progress wasn't registered
delta = len(session.code_chunks) - self._session_exec_last_completed_chunk_count[session.key]
self._session_exec_completed_chunks_count += delta
self._finished(session)
def chunk_start(self, session: Session, *, chunk: CodeChunk):
if session.live_output:
self._print_live_heading(session, chunk=chunk, notification_type='CODE CHUNK', title='LIVE OUTPUT',
columns=self.term.columns(), flush=True, clearline=self.term.isatty)
self._update_progress()
def chunk_end(self, session: Session, *, chunk: CodeChunk):
self._update_message_count(session)
self._session_exec_completed_chunks_count += 1 + (chunk.output_index - chunk.index)
self._session_exec_last_completed_chunk_count[session.key] = chunk.output_index + 1
if session.live_output:
self._print_live_closing(session, chunk=chunk, notification_type='CODE CHUNK', title='LIVE OUTPUT',
columns=self.term.columns(), flush=True, clearline=self.term.isatty)
self._last_live_output = None
self._last_live_output_stream = None
self._update_progress()
def chunk_stdout(self, session: Session, *, chunk: Optional[CodeChunk], output: str):
if session.live_output:
self._print_live_output(output, stream='stdout')
def chunk_stderr(self, session: Session, *, chunk: Optional[CodeChunk], output: str):
        if session.live_output:
            self._print_live_output(output, stream='stderr')
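# Illustrative sketch only -- not part of the Codebraid module above. It shows
# how a hypothetical driver loop could report execution progress through the
# Progress tracker; the `sessions` argument (an iterable of Session objects
# built elsewhere) and the exact call order are assumptions, while every
# Progress method used here is defined above.
def _example_exec_with_progress(sessions):
    progress = Progress()
    progress.register_sessions(sessions)
    progress.exec_start()
    for session in sessions:
        for chunk in session.code_chunks:
            progress.chunk_start(session, chunk=chunk)
            # ... execute the chunk here and forward its streams ...
            progress.chunk_stdout(session, chunk=chunk, output='')
            progress.chunk_end(session, chunk=chunk)
        progress.session_finished(session)
    progress.exec_end()
    progress.complete()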
# coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class SharedImage(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'created': 'datetime',
'entrypoint': 'list[str]',
'env': 'list[str]',
'healthcheck': 'bool',
'history': 'list[SharedImageHistory]',
'id': 'str',
'labels': 'dict(str, str)',
'layers': 'list[str]',
'os': 'str',
'repo_digest': 'list[str]',
'repo_tags': 'list[str]',
'user': 'str',
'working_dir': 'str'
}
attribute_map = {
'created': 'created',
'entrypoint': 'entrypoint',
'env': 'env',
'healthcheck': 'healthcheck',
'history': 'history',
'id': 'id',
'labels': 'labels',
'layers': 'layers',
'os': 'os',
'repo_digest': 'repoDigest',
'repo_tags': 'repoTags',
'user': 'user',
'working_dir': 'workingDir'
}
def __init__(self, created=None, entrypoint=None, env=None, healthcheck=None, history=None, id=None, labels=None, layers=None, os=None, repo_digest=None, repo_tags=None, user=None, working_dir=None, local_vars_configuration=None): # noqa: E501
"""SharedImage - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._created = None
self._entrypoint = None
self._env = None
self._healthcheck = None
self._history = None
self._id = None
self._labels = None
self._layers = None
self._os = None
self._repo_digest = None
self._repo_tags = None
self._user = None
self._working_dir = None
self.discriminator = None
if created is not None:
self.created = created
if entrypoint is not None:
self.entrypoint = entrypoint
if env is not None:
self.env = env
if healthcheck is not None:
self.healthcheck = healthcheck
if history is not None:
self.history = history
if id is not None:
self.id = id
if labels is not None:
self.labels = labels
if layers is not None:
self.layers = layers
if os is not None:
self.os = os
if repo_digest is not None:
self.repo_digest = repo_digest
if repo_tags is not None:
self.repo_tags = repo_tags
if user is not None:
self.user = user
if working_dir is not None:
self.working_dir = working_dir
@property
def created(self):
"""Gets the created of this SharedImage. # noqa: E501
Date/time when the image was created. # noqa: E501
:return: The created of this SharedImage. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this SharedImage.
Date/time when the image was created. # noqa: E501
:param created: The created of this SharedImage. # noqa: E501
:type created: datetime
"""
self._created = created
@property
def entrypoint(self):
"""Gets the entrypoint of this SharedImage. # noqa: E501
Combined entrypoint of the image (entrypoint + CMD). # noqa: E501
:return: The entrypoint of this SharedImage. # noqa: E501
:rtype: list[str]
"""
return self._entrypoint
@entrypoint.setter
def entrypoint(self, entrypoint):
"""Sets the entrypoint of this SharedImage.
Combined entrypoint of the image (entrypoint + CMD). # noqa: E501
:param entrypoint: The entrypoint of this SharedImage. # noqa: E501
:type entrypoint: list[str]
"""
self._entrypoint = entrypoint
@property
def env(self):
"""Gets the env of this SharedImage. # noqa: E501
Image environment variables. # noqa: E501
:return: The env of this SharedImage. # noqa: E501
:rtype: list[str]
"""
return self._env
@env.setter
def env(self, env):
"""Sets the env of this SharedImage.
Image environment variables. # noqa: E501
:param env: The env of this SharedImage. # noqa: E501
:type env: list[str]
"""
self._env = env
@property
def healthcheck(self):
"""Gets the healthcheck of this SharedImage. # noqa: E501
Indicates if health checks are enabled (true) or not (false). # noqa: E501
:return: The healthcheck of this SharedImage. # noqa: E501
:rtype: bool
"""
return self._healthcheck
@healthcheck.setter
def healthcheck(self, healthcheck):
"""Sets the healthcheck of this SharedImage.
Indicates if health checks are enabled (true) or not (false). # noqa: E501
:param healthcheck: The healthcheck of this SharedImage. # noqa: E501
:type healthcheck: bool
"""
self._healthcheck = healthcheck
@property
def history(self):
"""Gets the history of this SharedImage. # noqa: E501
Holds the image history. # noqa: E501
:return: The history of this SharedImage. # noqa: E501
:rtype: list[SharedImageHistory]
"""
return self._history
@history.setter
def history(self, history):
"""Sets the history of this SharedImage.
Holds the image history. # noqa: E501
:param history: The history of this SharedImage. # noqa: E501
:type history: list[SharedImageHistory]
"""
self._history = history
@property
def id(self):
"""Gets the id of this SharedImage. # noqa: E501
ID of the image. # noqa: E501
:return: The id of this SharedImage. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SharedImage.
ID of the image. # noqa: E501
:param id: The id of this SharedImage. # noqa: E501
:type id: str
"""
self._id = id
@property
def labels(self):
"""Gets the labels of this SharedImage. # noqa: E501
Image labels. # noqa: E501
:return: The labels of this SharedImage. # noqa: E501
:rtype: dict(str, str)
"""
return self._labels
@labels.setter
def labels(self, labels):
"""Sets the labels of this SharedImage.
Image labels. # noqa: E501
:param labels: The labels of this SharedImage. # noqa: E501
:type labels: dict(str, str)
"""
self._labels = labels
@property
def layers(self):
"""Gets the layers of this SharedImage. # noqa: E501
Image filesystem layers. # noqa: E501
:return: The layers of this SharedImage. # noqa: E501
:rtype: list[str]
"""
return self._layers
@layers.setter
def layers(self, layers):
"""Sets the layers of this SharedImage.
Image filesystem layers. # noqa: E501
:param layers: The layers of this SharedImage. # noqa: E501
:type layers: list[str]
"""
self._layers = layers
@property
def os(self):
"""Gets the os of this SharedImage. # noqa: E501
Image os type. # noqa: E501
:return: The os of this SharedImage. # noqa: E501
:rtype: str
"""
return self._os
@os.setter
def os(self, os):
"""Sets the os of this SharedImage.
Image os type. # noqa: E501
:param os: The os of this SharedImage. # noqa: E501
:type os: str
"""
self._os = os
@property
def repo_digest(self):
"""Gets the repo_digest of this SharedImage. # noqa: E501
Image repo digests. # noqa: E501
:return: The repo_digest of this SharedImage. # noqa: E501
:rtype: list[str]
"""
return self._repo_digest
@repo_digest.setter
def repo_digest(self, repo_digest):
"""Sets the repo_digest of this SharedImage.
Image repo digests. # noqa: E501
:param repo_digest: The repo_digest of this SharedImage. # noqa: E501
:type repo_digest: list[str]
"""
self._repo_digest = repo_digest
@property
def repo_tags(self):
"""Gets the repo_tags of this SharedImage. # noqa: E501
Image repo tags. # noqa: E501
:return: The repo_tags of this SharedImage. # noqa: E501
:rtype: list[str]
"""
return self._repo_tags
@repo_tags.setter
def repo_tags(self, repo_tags):
"""Sets the repo_tags of this SharedImage.
Image repo tags. # noqa: E501
:param repo_tags: The repo_tags of this SharedImage. # noqa: E501
:type repo_tags: list[str]
"""
self._repo_tags = repo_tags
@property
def user(self):
"""Gets the user of this SharedImage. # noqa: E501
Image user. # noqa: E501
:return: The user of this SharedImage. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this SharedImage.
Image user. # noqa: E501
:param user: The user of this SharedImage. # noqa: E501
:type user: str
"""
self._user = user
@property
def working_dir(self):
"""Gets the working_dir of this SharedImage. # noqa: E501
Base working directory of the image. # noqa: E501
:return: The working_dir of this SharedImage. # noqa: E501
:rtype: str
"""
return self._working_dir
@working_dir.setter
def working_dir(self, working_dir):
"""Sets the working_dir of this SharedImage.
Base working directory of the image. # noqa: E501
:param working_dir: The working_dir of this SharedImage. # noqa: E501
:type working_dir: str
"""
self._working_dir = working_dir
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
                result[attr] = list(map(convert, value))
            elif isinstance(value, dict):
                result[attr] = dict(
                    (item[0], convert(item[1])) for item in value.items()
                )
            else:
                result[attr] = convert(value)
        return result
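# Illustrative sketch only -- not part of the generated client above. It
# builds a SharedImage with a few optional fields (the values are made up) and
# shows that to_dict(serialize=True) emits the JSON-style keys from
# attribute_map (e.g. 'repoTags' instead of 'repo_tags').
def _example_shared_image_to_dict():
    image = SharedImage(
        id='sha256:0123abcd',
        os='linux',
        repo_tags=['registry.example.com/app:1.0'],
        entrypoint=['/bin/sh', '-c', 'run'],
    )
    return image.to_dict(serialize=True)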
Retrieve validation information from the CMS object, using Adobe's
revocation info archival attribute.
"""
PADES_LT = 'pades'
"""
Retrieve validation information from the DSS, and require the signature's
embedded timestamp to still be valid.
"""
PADES_LTA = 'pades-lta'
"""
Retrieve validation information from the DSS, but read & validate the chain
of document timestamps leading up to the signature to establish the
integrity of the validation information at the time of signing.
"""
@classmethod
def as_tuple(cls):
return tuple(m.value for m in cls)
def _strict_vc_context_kwargs(timestamp, validation_context_kwargs):
# create a new validation context using the timestamp value as the time
# of evaluation, turn off fetching and load OCSP responses / CRL data
# from the DSS / revocation info object
validation_context_kwargs['allow_fetching'] = False
validation_context_kwargs['moment'] = timestamp
# Certs with OCSP/CRL endpoints should have the relevant revocation data
# embedded, if no stricter revocation_mode policy is in place already
rm = validation_context_kwargs.get('revocation_mode', None)
if not rm or rm == 'soft-fail':
validation_context_kwargs['revocation_mode'] = 'hard-fail'
def _validate_timestamp(tst_signed_data, validation_context,
expected_tst_imprint):
assert expected_tst_imprint is not None
tst_info = tst_signed_data['encap_content_info']['content'].parsed
assert isinstance(tst_info, tsp.TSTInfo)
# compare the expected TST digest against the message imprint
# inside the signed data
tst_imprint = tst_info['message_imprint']['hashed_message'].native
if expected_tst_imprint != tst_imprint:
logger.warning(
f"Timestamp token imprint is {tst_imprint.hex()}, but expected "
f"{expected_tst_imprint.hex()}."
)
encap_data_invalid = True
else:
encap_data_invalid = False
timestamp = tst_info['gen_time'].native
return _validate_cms_signature(
tst_signed_data, status_cls=TimestampSignatureStatus,
validation_context=validation_context,
status_kwargs={'timestamp': timestamp},
encap_data_invalid=encap_data_invalid
)
def _establish_timestamp_trust(tst_signed_data, bootstrap_validation_context,
expected_tst_imprint):
timestamp_status_kwargs = _validate_timestamp(
tst_signed_data, bootstrap_validation_context, expected_tst_imprint
)
timestamp_status = TimestampSignatureStatus(**timestamp_status_kwargs)
if not timestamp_status.valid or not timestamp_status.trusted:
logger.warning(
"Could not validate embedded timestamp token: %s.",
timestamp_status.summary()
)
raise SignatureValidationError(
"Could not establish time of signing, timestamp token did not "
"validate with current settings."
)
return timestamp_status
def get_timestamp_chain(reader: PdfFileReader) \
-> Iterator[EmbeddedPdfSignature]:
"""
Get the document timestamp chain of the associated reader, ordered
from new to old.
:param reader:
A :class:`.PdfFileReader`.
:return:
An iterable of :class:`.EmbeddedPdfSignature` objects representing
document timestamps.
"""
return filter(
lambda sig: sig.sig_object.get('/Type', None) == '/DocTimeStamp',
reversed(reader.embedded_signatures)
)
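# Illustrative sketch only -- not part of the module above. It walks the
# document timestamp chain from newest to oldest using get_timestamp_chain()
# as defined above; the file name is made up and PdfFileReader is assumed to
# be importable from pyhanko.pdf_utils.reader.
def _example_list_document_timestamps(pdf_path='signed-lta.pdf'):
    from pyhanko.pdf_utils.reader import PdfFileReader
    with open(pdf_path, 'rb') as inf:
        reader = PdfFileReader(inf)
        for emb_ts in get_timestamp_chain(reader):
            # signed_revision identifies the document revision each DTS covers
            print(emb_ts.signed_revision)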
def _establish_timestamp_trust_lta(reader, bootstrap_validation_context,
validation_context_kwargs, until_revision):
timestamps = get_timestamp_chain(reader)
validation_context_kwargs = dict(validation_context_kwargs)
current_vc = bootstrap_validation_context
ts_status = None
ts_count = -1
emb_timestamp = None
for ts_count, emb_timestamp in enumerate(timestamps):
if emb_timestamp.signed_revision < until_revision:
break
emb_timestamp.compute_digest()
ts_status = _establish_timestamp_trust(
emb_timestamp.signed_data, current_vc, emb_timestamp.external_digest
)
# set up the validation kwargs for the next iteration
_strict_vc_context_kwargs(
ts_status.timestamp, validation_context_kwargs
)
# read the DSS at the current revision into a new
# validation context object
try:
current_vc = DocumentSecurityStore.read_dss(
reader.get_historical_resolver(emb_timestamp.signed_revision)
).as_validation_context(validation_context_kwargs)
except NoDSSFoundError:
current_vc = ValidationContext(**validation_context_kwargs)
return emb_timestamp, ts_status, ts_count + 1, current_vc
# TODO verify formal PAdES requirements for timestamps
# TODO verify other formal PAdES requirements (coverage, etc.)
# TODO signature/verification policy-based validation! (PAdES-EPES-* etc)
# (this is a different beast, though)
# TODO "tolerant" timestamp validation, where we tolerate problems in the
# timestamp chain provided that newer timestamps are "strong" enough to
# cover the gap.
def validate_pdf_ltv_signature(embedded_sig: EmbeddedPdfSignature,
validation_type: RevocationInfoValidationType,
validation_context_kwargs=None,
bootstrap_validation_context=None,
force_revinfo=False,
diff_policy: DiffPolicy = None,
key_usage_settings: KeyUsageConstraints = None,
skip_diff: bool = False) -> PdfSignatureStatus:
"""
Validate a PDF LTV signature according to a particular profile.
:param embedded_sig:
Embedded signature to evaluate.
:param validation_type:
Validation profile to use.
:param validation_context_kwargs:
Keyword args to instantiate
:class:`.pyhanko_certvalidator.ValidationContext` objects needed over
the course of the validation.
:param bootstrap_validation_context:
Validation context used to validate the current timestamp.
:param force_revinfo:
Require all certificates encountered to have some form of live
revocation checking provisions.
:param diff_policy:
Policy to evaluate potential incremental updates that were appended
to the signed revision of the document.
Defaults to
:const:`~pyhanko.sign.diff_analysis.DEFAULT_DIFF_POLICY`.
:param key_usage_settings:
A :class:`.KeyUsageConstraints` object specifying which key usage
extensions must or must not be present in the signer's certificate.
:param skip_diff:
If ``True``, skip the difference analysis step entirely.
:return:
The status of the signature.
"""
# create a fresh copy of the validation_kwargs
validation_context_kwargs: dict = dict(validation_context_kwargs or {})
# To validate the first timestamp, allow fetching by default
# we'll turn it off later
validation_context_kwargs.setdefault('allow_fetching', True)
# same for revocation_mode: if force_revinfo is false, we simply turn on
# hard-fail by default for now. Once the timestamp is validated,
# we switch to hard-fail forcibly.
if force_revinfo:
validation_context_kwargs['revocation_mode'] = 'require'
else:
validation_context_kwargs.setdefault('revocation_mode', 'hard-fail')
reader = embedded_sig.reader
if validation_type == RevocationInfoValidationType.ADOBE_STYLE:
dss = None
current_vc = bootstrap_validation_context or ValidationContext(
**validation_context_kwargs
)
else:
# If there's a DSS, there's no harm in reading additional certs from it
dss = DocumentSecurityStore.read_dss(reader)
if bootstrap_validation_context is None:
current_vc = dss.as_validation_context(
validation_context_kwargs, include_revinfo=False
)
else:
current_vc = bootstrap_validation_context
# add the certs from the DSS
for cert in dss._load_certs():
current_vc.certificate_registry.add_other_cert(cert)
embedded_sig.compute_digest()
embedded_sig.compute_tst_digest()
# If the validation profile is PAdES-type, then we validate the timestamp
# chain now.
# This is bootstrapped using the current validation context.
# If successful, we obtain a new validation context set to a new
# "known good" verification time. We then repeat the process using this
# new validation context instead of the current one.
earliest_good_timestamp_st = None
ts_chain_length = 0
# also record the embedded sig object assoc. with the oldest applicable
# DTS in the timestamp chain
latest_dts = None
if validation_type != RevocationInfoValidationType.ADOBE_STYLE:
latest_dts, earliest_good_timestamp_st, ts_chain_length, current_vc = \
_establish_timestamp_trust_lta(
reader, current_vc, validation_context_kwargs,
until_revision=embedded_sig.signed_revision
)
# In PAdES-LTA, we should only rely on DSS information that is covered
# by an appropriate document timestamp.
# If the validation profile is PAdES-LTA, then we must have seen
    # at least one document timestamp pass by, i.e. earliest_good_timestamp_st
# must be non-None by now.
if earliest_good_timestamp_st is None \
and validation_type == RevocationInfoValidationType.PADES_LTA:
raise SignatureValidationError(
"Purported PAdES-LTA signature does not have a timestamp chain."
)
# if this assertion fails, there's a bug in the validation code
assert validation_type == RevocationInfoValidationType.PADES_LT \
or ts_chain_length >= 1
# now that we have arrived at the revision with the signature,
# we can check for a timestamp token attribute there
# (This is allowed, regardless of whether we use Adobe-style LTV or
# a PAdES validation profile)
tst_signed_data = embedded_sig.attached_timestamp_data
if tst_signed_data is not None:
earliest_good_timestamp_st = _establish_timestamp_trust(
tst_signed_data, current_vc, embedded_sig.tst_signature_digest
)
elif validation_type == RevocationInfoValidationType.PADES_LTA \
and ts_chain_length == 1:
# TODO Pretty sure that this is the spirit of the LTA profile,
# but are we being too harsh here? I don't think so, but it's worth
# revisiting later
# For later review: I believe that this check is appropriate, because
# the timestamp that protects the signature should be verifiable
# using only information from the next DSS, which should in turn
# also be protected using a DTS. This requires at least two timestamps.
raise SignatureValidationError(
"PAdES-LTA signature requires separate timestamps protecting "
"the signature & the rest of the revocation info."
)
# if, by now, we still don't have a trusted timestamp, there's a problem
# regardless of the validation profile in use.
if earliest_good_timestamp_st is None:
raise SignatureValidationError(
'LTV signatures require a trusted timestamp.'
)
_strict_vc_context_kwargs(
earliest_good_timestamp_st.timestamp, validation_context_kwargs
)
if validation_type == RevocationInfoValidationType.ADOBE_STYLE:
ocsps, crls = retrieve_adobe_revocation_info(
embedded_sig.signer_info
)
validation_context_kwargs['ocsps'] = ocsps
validation_context_kwargs['crls'] = crls
stored_vc = ValidationContext(**validation_context_kwargs)
elif validation_type == RevocationInfoValidationType.PADES_LT:
# in this case, we don't care about whether the information
# in the DSS is protected by any timestamps, so just ingest everything
stored_vc = dss.as_validation_context(validation_context_kwargs)
else:
# in the LTA profile, we should use only DSS information covered
# by the last relevant timestamp, so the correct VC is current_vc
current_vc.moment = earliest_good_timestamp_st.timestamp
stored_vc = current_vc
# Now, we evaluate the validity of the timestamp guaranteeing the signature
# *within* the LTV context.
# (i.e. we check whether there's enough revinfo to keep tabs on the
# timestamp's validity)
# If the last timestamp comes from a timestamp token attached to the
# signature, it should be possible to validate it using only data from the
# DSS / revocation info store, so validate the timestamp *again*
# using those settings.
    if tst_signed_data is not None:
        _validate_timestamp(
            tst_signed_data, stored_vc, embedded_sig.tst_signature_digest
        )
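# Illustrative sketch only -- not part of the module above. It validates the
# newest non-timestamp signature in a document against the PAdES-LTA profile,
# using only names defined or referenced above; the file name is made up and
# status.summary() is assumed to be available, as it is for timestamp statuses.
def _example_validate_lta(pdf_path='signed-lta.pdf'):
    from pyhanko.pdf_utils.reader import PdfFileReader
    with open(pdf_path, 'rb') as inf:
        reader = PdfFileReader(inf)
        for emb_sig in reader.embedded_signatures:
            if emb_sig.sig_object.get('/Type', None) != '/DocTimeStamp':
                status = validate_pdf_ltv_signature(
                    emb_sig, RevocationInfoValidationType.PADES_LTA
                )
                print(status.summary())
                break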
'third',
'this',
'thorough',
'thoroughly',
'those',
'though',
'three',
'through',
'throughout',
'thru',
'thus',
'to',
'together',
'too',
'took',
'toward',
'towards',
'tried',
'tries',
'truly',
'try',
'trying',
'twice',
'two',
'u',
'un',
'under',
'unfortunately',
'unless',
'unlikely',
'until',
'unto',
'up',
'upon',
'us',
'use',
'used',
'useful',
'uses',
'using',
'usually',
'uucp',
'v',
'value',
'various',
'very',
'via',
'viz',
'vs',
'w',
'want',
'wants',
'was',
"wasn't",
'way',
'we',
"we'd",
"we'll",
"we're",
"we've",
'website',
'welcome',
'well',
'went',
'were',
"weren't",
'what',
"what's",
'whatever',
'when',
'whence',
'whenever',
'where',
"where's",
'whereafter',
'whereas',
'whereby',
'wherein',
'whereupon',
'wherever',
'whether',
'which',
'while',
'whither',
'who',
"who's",
'whoever',
'whole',
'whom',
'whose',
'why',
'will',
'willing',
'wish',
'with',
'within',
'without',
"won't",
'would',
"wouldn't",
'x',
'y',
'yes',
'yet',
'you',
"you'd",
"you'll",
"you're",
"you've",
'your',
'yours',
'yourself',
'yourselves',
'z',
'zero',
},
'es': {
'a',
'á',
'acerca',
'además',
'adonde',
'al',
'algo',
'algún',
'alguna',
'algunas',
'alguno',
'algunos',
'allende',
'ambos',
'amén',
'ampleamos',
'ante',
'antes',
'aquel',
'aquellas',
'aquellos',
'aqui',
'arriba',
'atras',
'aun',
'bajo',
'bastante',
'bien',
'cabe',
'cabo',
'cada',
'cierta',
'ciertas',
'cierto',
'ciertos',
'circa',
'como',
'con',
'conmigo',
'connosco',
'conseguimos',
'conseguir',
'consigo',
'consigue',
'consiguen',
'consigues',
'contigo',
'contra',
'convosco',
'convusco',
'cual',
'cuando',
'de',
'dejante',
'del',
'delas',
'dentro',
'desde',
'después',
'donde',
'dos',
'durante',
'e',
'el',
'él',
'ella',
'ellas',
'ellos',
'empleais',
'emplean',
'emplear',
'empleas',
'empleo',
'en',
'encima',
'entonces',
'entre',
'era',
'erais',
'eramos',
'éramos',
'eran',
'erar',
'eras',
'eres',
'es',
'esa',
'esas',
'ese',
'eso',
'esos',
'esta',
'está',
'estaba',
'estabais',
'estábamos',
'estaban',
'estabas',
'estad',
'estada',
'estadas',
'estado',
'estados',
'estais',
'estáis',
'estamos',
'estan',
'están',
'estando',
'estar',
'estará',
'estarán',
'estarás',
'estaré',
'estaréis',
'estaremos',
'estaría',
'estaríais',
'estaríamos',
'estarían',
'estarías',
'estas',
'estás',
'este',
'esté',
'estéis',
'estemos',
'estén',
'estés',
'esto',
'estos',
'estoy',
'estuve',
'estuviera',
'estuvierais',
'estuviéramos',
'estuvieran',
'estuvieras',
'estuvieron',
'estuviese',
'estuvieseis',
'estuviésemos',
'estuviesen',
'estuvieses',
'estuvimos',
'estuviste',
'estuvisteis',
'estuvo',
'excepto',
'fin',
'fue',
'fuera',
'fuerais',
'fuéramos',
'fueran',
'fueras',
'fueron',
'fuerza',
'fuese',
'fueseis',
'fuésemos',
'fuesen',
'fueses',
'fui',
'fuimos',
'fuiste',
'fuisteis',
'gueno',
'ha',
'habéis',
'haber',
'había',
'habíais',
'habíamos',
'habían',
'habías',
'habida',
'habidas',
'habido',
'habidos',
'habiendo',
'habrá',
'habrán',
'habrás',
'habré',
'habréis',
'habremos',
'habría',
'habríais',
'habríamos',
'habrían',
'habrías',
'hace',
'haceis',
'hacemos',
'hacen',
'hacer',
'haces',
'hacia',
'hago',
'han',
'has',
'hasta',
'hay',
'haya',
'hayáis',
'hayamos',
'hayan',
'hayas',
'haz',
'he',
'hemo',
'hemos',
'hube',
'hubiera',
'hubierais',
'hubiéramos',
'hubieran',
'hubieras',
'hubieron',
'hubiese',
'hubieseis',
'hubiésemos',
'hubiesen',
'hubieses',
'hubimos',
'hubiste',
'hubisteis',
'hubo',
'incluso',
'intenta',
'intentais',
'intentamos',
'intentan',
'intentar',
'intentas',
'intento',
'ir',
'la',
'largo',
'las',
'le',
'les',
'lo',
'los',
'más',
'me',
'mediante',
'menos',
'mi',
'mí',
'mía',
'miar',
'mías',
'mientras',
'mio',
'mío',
'míos',
'mis',
'modode',
'mucho',
'muchos',
'muy',
'na',
'nada',
'ni',
'no',
'nos',
'nosotras',
'nosotros',
'nuestra',
'nuestras',
'nuestro',
'nuestros',
'nunca',
'o',
'os',
'otra',
'otras',
'otro',
'otros',
'pa',
'pa\'',
'par',
'para',
'pero',
'poco',
'podeis',
'podemos',
'poder',
'podria',
'podriais',
'podriamos',
'podrian',
'podrias',
'por',
'porque',
'primero',
'pro',
'puede',
'pueden',
'puedo',
'pues',
'que',
'qué',
'quien',
'quienes',
'sabe',
'sabeis',
'sabemos',
'saben',
'saber',
'sabes',
'salvo',
'se',
'sea',
'seáis',
'seamos',
'sean',
'seas',
'según',
'sentid',
'sentida',
'sentidas',
'sentido',
'sentidos',
'sentir',
'ser',
'será',
'serán',
'serás',
'seré',
'seréis',
'seremos',
'sería',
'seríais',
'seríamos',
'serían',
'serías',
'si',
'sí',
'sido',
'siendo',
'siente',
'sin',
'sintiendo',
'so',
'sobre',
'sois',
'solamente',
'solo',
'somos',
'son',
'soy',
'su',
'sus',
'suya',
'suyas',
'suyo',
'suyos',
'también',
'tanto',
'te',
'tendrá',
'tendrán',
'tendrás',
'tendré',
'tendréis',
'tendremos',
'tendría',
'tendríais',
'tendríamos',
'tendrían',
'tendrías',
'tened',
'teneis',
'tenéis',
'tenemos',
'tener',
'tenga',
'tengáis',
'tengamos',
'tengan',
'tengas',
'tengo',
'tenía',
'teníais',
'teníamos',
'tenían',
'tenías',
'tenida',
'tenidas',
'tenido',
'tenidos',
'teniendo',
'ti',
'tiempo',
'tiene',
'tienen',
'tienes',
'todo',
'todos',
'trabaja',
'trabajais',
'trabajamos',
'trabajan',
'trabajar',
'trabajas',
'trabajo',
'tras',
'tu',
'tú',
'tus',
'tuve',
'tuviera',
'tuvierais',
'tuviéramos',
'tuvieran',
'tuvieras',
'tuvieron',
'tuviese',
'tuvieseis',
'tuviésemos',
'tuviesen',
'tuvieses',
'tuvimos',
'tuviste',
'tuvisteis',
'tuvo',
'tuya',
'tuyas',
'tuyo',
'tuyos',
'ultimar',
'ultimo',
'un',
'una',
'unas',
'uno',
'unos',
'usa',
'usais',
'usamos',
'usan',
'usar',
'usas',
'uso',
'va',
'vais',
'valor',
'vamos',
'van',
'vaya',
'verdad',
'verdadera',
'verdadero',
'versus',
'vía',
'vosostras',
'vosostros',
'vosotras',
'vosotros',
'voy',
'vuestra',
'vuestras',
'vuestro',
'vuestros',
'vusco',
'y',
'ya',
'yo',
},
'fi': {
'aiemmin',
'aika',
'aikaa',
'aikaan',
'aikaisemmin',
'aikaisin',
'aikajen',
'aikana',
'aikoina',
'aikoo',
'aikovat',
'aina',
'ainakaan',
'ainakin',
'ainoa',
'ainoat',
'aiomme',
'aion',
'aiotte',
'aist',
'aivan',
'ajan',
'älä',
'alas',
'alemmas',
'älköön',
'alkuisin',
'alkuun',
'alla',
'alle',
'aloitamme',
'aloitan',
'aloitat',
'aloitatte',
'aloitattivat',
'aloitettava',
'aloitettevaksi',
'aloitettu',
'aloitimme',
'aloitin',
'aloitit',
'aloititte',
'aloittaa',
'aloittamatta',
'aloitti',
'aloittivat',
'alta',
'aluksi',
'alussa',
'alusta',
'annettavaksi',
'annetteva',
'annettu',
'antaa',
'antamatta',
'antoi',
'aoua',
'apu',
'asia',
'asiaa',
'asian',
'asiasta',
'asiat',
'asioiden',
'asioihin',
'asioita',
'asti',
'avuksi',
'avulla',
'avun',
'avutta',
'edellä',
'edelle',
'edelleen',
'edeltä',
'edemmäs',
'edes',
'edessä',
'edestä',
'ehkä',
'ei',
'eikä',
'eilen',
'eivät',
'eli',
'ellei',
'elleivät',
'ellemme',
'ellen',
'ellet',
'ellette',
'emme',
'en',
'enää',
'enemmän',
'eniten',
'ennen',
'ensi',
'ensimmäinen',
'ensimmäiseksi',
'ensimmäisen',
'ensimmäisenä',
'ensimmäiset',
'ensimmäisiä',
'ensimmäisiksi',
'ensimmäisinä',
'ensimmäistä',
'ensin',
'entinen',
'entisen',
'entisiä',
'entistä',
'entisten',
'eräät',
'eräiden',
'eräs',
'eri',
'erittäin',
'erityisesti',
'esi',
'esiin',
'esillä',
'esimerkiksi',
'et',
'eteen',
'etenkin',
'että',
'ette',
'ettei',
'halua',
'haluaa',
'haluamatta',
'haluamme',
'haluan',
'haluat',
'haluatte',
'haluavat',
'halunnut',
'halusi',
'halusimme',
'halusin',
'halusit',
'halusitte',
'halusivat',
'halutessa',
'haluton',
'hän',
'häneen',
'hänellä',
'hänelle',
'häneltä',
'hänen',
'hänessä',
'hänestä',
'hänet',
'he',
'hei',
'heidän',
'heihin',
'heille',
'heiltä',
'heissä',
'heistä',
'heitä',
'helposti',
'heti',
'hetkellä',
'hieman',
'huolimatta',
'huomenna',
'hyvä',
'hyvää',
'hyvät',
'hyviä',
'hyvien',
'hyviin',
'hyviksi',
'hyville',
'hyviltä',
'hyvin',
'hyvinä',
'hyvissä',
'hyvistä',
'ihan',
'ilman',
'ilmeisesti',
'itse',
'itseään',
'itsensä',
'ja',
'jää',
'jälkeen',
'jälleen',
'jo',
'johon',
'joiden',
'joihin',
'joiksi',
'joilla',
'joille',
'joilta',
'joissa',
'joista',
'joita',
'joka',
'jokainen',
'jokin',
'joko',
'joku',
'jolla',
'jolle',
'jolloin',
'jolta',
'jompikumpi',
'jonka',
'jonkin',
'jonne',
'joo',
'jopa',
'jos',
'joskus',
'jossa',
'josta',
'jota',
'jotain',
'joten',
'jotenkin',
'jotenkuten',
'jotka',
'jotta',
'jouduimme',
'jouduin',
'jouduit',
'jouduitte',
'joudumme',
'joudun',
'joudutte',
'joukkoon',
'joukossa',
'joukosta',
'joutua',
'joutui',
'joutuivat',
'joutumaan',
'joutuu',
'joutuvat',
'juuri',
'kahdeksan',
'kahdeksannen',
'kahdella',
'kahdelle',
'kahdelta',
'kahden',
'kahdessa',
'kahdesta',
'kahta',
'kahteen',
'kai',
'kaiken',
'kaikille',
'kaikilta',
'kaikkea',
'kaikki',
'kaikkia',
'kaikkiaan',
'kaikkialla',
'kaikkialle',
'kaikkialta',
'kaikkien',
'kaikkin',
'kaksi',
'kannalta',
'kannattaa',
'kanssa',
'kanssaan',
'kanssamme',
'kanssani',
'kanssanne',
'kanssasi',
'kauan',
'kauemmas',
'kautta',
'kehen',
'keiden',
'keihin',
'keiksi',
'keillä',
'keille',
'keiltä',
'keinä',
'keissä',
'keistä',
'keitä',
'keittä',
'keitten',
'keneen',
'keneksi',
'kenellä',
'kenelle',
'keneltä',
'kenen',
'kenenä',
'kenessä',
'kenestä',
'kenet',
'kenettä',
'kennessästä',
'kerran',
'kerta',
'kertaa',
'kesken',
'keskimäärin',
'ketä',
'ketkä',
'kiitos',
'kohti',
'koko',
'kokonaan',
'kolmas',
'kolme',
'kolmen',
'kolmesti',
'koska',
'koskaan',
'kovin',
'kuin',
'kuinka',
'kuitenkaan',
'kuitenkin',
'kuka',
'kukaan',
'kukin',
'kumpainen',
'kumpainenkaan',
'kumpi',
'kumpikaan',
'kumpikin',
'kun',
'kuten',
'kuuden',
'kuusi',
'kuutta',
'kyllä',
'kymmenen',
'kyse',
'lähekkäin',
'lähellä',
'lähelle',
'läheltä',
'lähemmäs',
'lähes',
'lähinnä',
'lähtien',
'läpi',
'liian',
'liki',
'lisää',
'lisäksi',
'luo',
'mahdollisimman',
'mahdollista',
'me',
'meidän',
'meillä',
'meille',
'melkein',
'melko',
'menee',
'meneet',
'menemme',
'menen',
'menet',
'menette',
'menevät',
'meni',
'menimme',
'menin',
'menit',
'menivät',
'mennessä',
'mennyt',
'menossa',
'mihin',
'mikä',
'mikään',
'mikäli',
'mikin',
'miksi',
'milloin',
'minä',
'minne',
'minun',
'minut',
'missä',
'mistä',
'mitä',
'mitään',
'miten',
'moi',
'molemmat',
'mones',
'monesti',
'monet',
'moni',
'moniaalla',
'moniaalle',
'moniaalta',
'monta',
'muassa',
'muiden',
'muita',
'muka',
'mukaan',
'mukaansa',
'mukana',
'mutta',
'muu',
'muualla',
'muualle',
'muualta',
'muuanne',
'muulloin',
'muun',
'muut',
'muuta',
'muutama',
'muutaman',
'muuten',
'myöhemmin',
'myös',
'myöskään',
'myöskin',
'myötä',
'näiden',
'näin',
'näissä',
'näissähin',
'näissälle',
'näissältä',
'näissästä',
'näitä',
'nämä',
'ne',
'neljä',
'neljää',
'neljän',
'niiden',
'niin',
'niistä',
'niitä',
'noin',
'nopeammin',
'nopeasti',
'nopeiten',
'nro',
'nuo',
'nyt',
'ohi',
'oikein',
'ole',
'olemme',
'olen',
'olet',
'olette',
'oleva',
'olevan',
'olevat',
'oli',
'olimme',
'olin',
'olisi',
'olisimme',
'olisin',
'olisit',
'olisitte',
'olisivat',
'olit',
'olitte',
'olivat',
'olla',
'olleet',
'olli',
'ollut',
'oma',
'omaa',
'omaan',
'omaksi',
'omalle',
'omalta',
'oman',
'omassa',
'omat',
'omia',
'omien',
'omiin',
'omiksi',
'omille',
'omilta',
'omissa',
'omista',
'on',
'onkin',
'onko',
'ovat',
'päälle',
'paikoittain',
'paitsi',
'pakosti',
'paljon',
'paremmin',
'parempi',
'parhaillaan',
'parhaiten',
'peräti',
'perusteella',
'pian',
'pieneen',
'pieneksi',
'pienellä',
'pienelle',
'pieneltä',
'pienempi',
'pienestä',
'pieni',
'pienin',
'puolesta',
'puolestaan',
'runsaasti',
'saakka',
'sadam',
'sama',
'samaa',
'samaan',
'samalla',
'samallalta',
'samallassa',
'samallasta',
'saman',
'samat',
'samoin',
'sata',
'sataa',
'satojen',
'se',
'seitsemän',
'sekä',
'sen',
'seuraavat',
'siellä',
'sieltä',
'siihen',
'siinä',
'siis',
'siitä',
'sijaan',
'siksi',
'sillä',
'silloin',
'silti',
'sinä',
'sinne',
'sinua',
'sinulle',
'sinulta',
'sinun',
'sinussa',
'sinusta',
'sinut',
'sisäkkäin',
'sisällä',
'sitä',
'siten',
'sitten',
'suoraan',
'suuntaan',
'suuren',
'suuret',
'suuri',
'suuria',
'suurin',
'suurten',
'taa',
'täällä',
'täältä',
'taas',
'taemmas',
'tähän',
'tahansa',
'tai',
'takaa',
'takaisin',
'takana',
'takia',
'tällä',
'tällöin',
'tämä',
'tämän',
'tänä',
'tänään',
'tänne',
'tapauksessa',
'tässä',
'tästä',
'tätä',
'täten',
'tavalla',
'tavoitteena',
'täysin',
'täytyvät',
'täytyy',
'te',
'tietysti',
'todella',
'toinen',
'toisaalla',
'toisaalle',
'toisaalta',
'toiseen',
'toiseksi',
'toisella',
'toiselle',
'toiselta',
'toisemme',
'toisen',
'toisensa',
'toisessa',
'toisesta',
'toista',
'toistaiseksi',
'toki',
'tosin',
'tuhannen',
'tuhat',
'tule',
'tulee',
'tulemme',
'tulen',
'tulet',
'tulette',
'tulevat',
'tulimme',
'tulin',
'tulisi',
'tulisimme',
'tulisin',
'tulisit',
'tulisitte',
'tulisivat',
'tulit',
'tulitte',
'tulivat',
'tulla',
'tulleet',
'tullut',
'tuntuu',
'tuo',
'tuolla',
'tuolloin',
'tuolta',
'tuonne',
'tuskin',
'tykö',
'usea',
'useasti',
'useimmiten',
'usein',
'useita',
'uudeksi',
'uudelleen',
'uuden',
'uudet',
'uusi',
'uusia',
'uusien',
'uusinta',
'uuteen',
'uutta',
'vaan',
'vähän',
'vähemmän',
'vähintään',
'vähiten',
'vai',
'vaiheessa',
'vaikea',
'vaikean',
#!/usr/bin/env python
# Source repo: sjhloco/asa_acl_report
# This script goes through ASA access-lists (read from the device or a file) and produces a human-readable CSV report.
import csv
import os
import re
from datetime import datetime
from os.path import expanduser
from sys import exit
import ipaddress
from ipaddress import IPv4Network
from getpass import getpass
from netmiko import Netmiko
################# Variables to change dependant on environment #################
# Set to the user's home directory
directory = expanduser("~")
# To change the default header values in the CSV file
csv_columns = ['ACL Name', 'Line Number', 'Access', 'Protocol', 'Source Address', 'Source Port',
'Destination Address', 'Destination Port', 'Hit Count', 'Date Last Hit', 'Time Last Hit']
################################## Gather information from user ##################################
# 1. Welcome and informational screen
def start():
global against_asa
print()
print('=' * 30, 'ASA ACL Auditer v0.2 (tested 9.6)', '=' * 30)
print('This tool can be used to search specific IP addresses or all IPs in specific or all ACLs')
print('If filtering IP addresses leave a blank space between entries')
    print('If filtering ACLs leave a blank space between entries and ensure capitalization is correct')
    print('The output will be stored in a CSV file saved in your home directory')
    print()
    print('If searching against a file put all the ACLs in one file. They must be expanded access-lists (show access-list)')
    print('To get the timestamp of the last hit you must have a second file with show access-list <name> brief for the ACLs (optional)')
    print('Both the ACL and ACL brief files should be stored in your home directory')
print()
# Options of whether to test against an ASA or a static file.
while True:
print('Do you want to grab the ACL config from an ASA or use a file?')
print('1. Search against a ASA')
print('2. Search against a file')
answer = input('Type 1 or 2> ')
if answer == '1':
against_asa = True
test_login() # Test log into ASA
break
elif answer == '2':
against_asa = False
gather_info()
break
else:
print('\n!!! ERROR - Response not understood, please try again !!!\n')
# 2. Gets username/password and checks connectivity
def test_login():
    global net_conn  # Make connection variable global so it can be used in all functions
while True:
try:
device = input("Enter IP of the ASA firewall: ")
username = input("Enter your username: ")
            password = getpass()
net_conn = Netmiko(host=device, username=username, password=password, device_type='cisco_asa')
            net_conn.find_prompt()  # Expects to receive a prompt back from the ASA
break
        except Exception as e:  # If login fails, loops back to the beginning with the error message
print(e)
gather_info() # Runs next function
# 3. Gathers the IP addresses and ACL names to be filtered, as well as the name to use for the CSV file
def gather_info():
global filename
# Prompts user to enter the IPs to be searched and makes a list of them.
print("\nIP: Enter the IPs you want to search for in the ACLs seperated by a space. Leave blank to search all IPs.")
ips_entered = input('> ')
search_ips = []
if len(ips_entered) != 0:
search_ips = ips_entered.split(' ')
# Prompts user to enter the ACLs to be searched and makes a list of them.
print("\nACL: Enter the names of the ACLs you want to search in seperated by a space. Leave blank to search all ACLs.")
acls_entered = input('> ')
acl_names = []
if len(acls_entered) != 0:
acl_names = acls_entered.split(' ')
    # Prompts the user to enter the name of the file to be created. If it already exists, prompts the user to confirm they want to overwrite it.
while True:
print("\nFILE: Enter the name of the file to save the results to.")
filename = input('> ')
filename = os.path.join(directory, filename + ".csv")
if os.path.exists(filename):
print("The filename already exists, do you want to overwrite this?")
print("Type y if this is correct, or n to re-enter the file name.")
answer = input('> ')
if answer == 'y':
break
else:
break
# Run next function
verify(search_ips, acl_names)
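# Illustrative sketch only -- the script's real CSV write-out happens further
# down and is not shown here. It shows how parsed ACE rows (assumed to be
# dicts keyed by the csv_columns header defined at the top) could be written
# to the report file chosen in gather_info().
def example_write_report(acl_entries):
    with open(filename, 'w', newline='') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=csv_columns)
        writer.writeheader()
        for entry in acl_entries:
            writer.writerow(entry)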
################################## Validates entered information and gets ACLs/ ACL brief from ASA or file ##################################
# 4. Verifies that the entered filter information is of a valid format
def verify(search_ips, acl_names):
global acl_brief
ip_error = []
acl_error = []
    # Checks to make sure that the IP addresses are valid; if not, exits the script
if len(search_ips) != 0:
for ip in search_ips:
# Checks if IPs are valid, gathers list of non-valid IPs
try:
ipaddress.ip_address(ip)
except ValueError as errorCode:
ip_error.append(str(errorCode))
# Exits script if there was an ip address error (list not empty)
if len(ip_error) != 0:
print("!!!ERROR - Invalid IP addresses entered !!!")
for x in ip_error:
print(x)
exit()
    # ASA ONLY - Checks to make sure that the ACL names are on the ASA; if not, exits the script
if against_asa is True:
# Gathers list of access-group ACLs and group policy ACLs
asa_acls = []
acl = net_conn.send_command('show run access-group')
vpn_acl = net_conn.send_command('show run | in split-tunnel-network-list')
for x in acl.splitlines():
asa_acls.append(x.split(' ')[1])
for x in vpn_acl.splitlines():
asa_acls.append(x.split('value ')[1])
# If user entered a list of ACLs checks to make sure they are on the ASA
if len(acl_names) != 0:
# Converts to a set to remove duplicates, then finds any element from acl_names not in acls
acl_error = list(set(acl_names) - set(asa_acls))
# Exits script if there was an acl name error (list not empty)
if len(acl_error) != 0:
print("!!! ERROR - Invalid ACL names entered !!!")
for x in acl_error:
print("'{}' does not appear to be an ACL on the ASA".format(x))
exit()
# Gather the hashes for all ACLs from show access-list brief
asa_acls = set(asa_acls)
acl_brief1 = ''
acl_brief = []
for x in asa_acls:
acl_brief1 = acl_brief1 + net_conn.send_command('show access-list {} brief'.format(x))
# Filter the output so only contains the hashes
for x in acl_brief1.splitlines():
if re.match(r"^\S{8}\s\S{8}\s", x):
acl_brief.append(x)
# Run next function
get_acl(search_ips, acl_names)
# FILE ONLY - Prompts user to enter the name of the files to be loaded. If cant find them prompts user to re-enter
elif against_asa is False:
acl_file_exists = False
acl_brief_file_exists = False
print("\nThe results of show access-list and show access-list <name> brief must be in seperate files.")
print("Make sure that both files are already in your home directory before continuing.")
while acl_file_exists is False:
print("\nACL_FILE: Enter the full filename (including extension) of the file containing all the ACLs output.")
filename = input('> ')
filename = os.path.join(directory, filename)
if not os.path.exists(filename):
                print('!!! ERROR - Cannot find the file, was looking for {} !!!'.format(filename))
print('Make sure it is in home directory and named correctly before trying again.')
else:
acl_file_exists = True
while acl_brief_file_exists is False:
print("\n-ACL_BR_FILE: Enter the full filename (including extension) of the file containing the ACL brief output (optional).")
acl_brief2 = input('> ')
if len(acl_brief2) != 0:
acl_brief2 = os.path.join(directory, acl_brief2)
if not os.path.exists(acl_brief2):
                    print('!!! ERROR - Cannot find the file, was looking for {} !!!'.format(acl_brief2))
print('Make sure it is in home directory and named correctly before trying again.')
else:
acl_brief_file_exists = True
else:
acl_brief_file_exists = True
    # Runs checks against the ACL file to make sure it is valid and normalizes it
with open(filename) as var:
acl1 = var.read().splitlines()
        # Remove all lines that aren't ACEs
for x in list(acl1):
if (len(x) == 0) or ('show' in x) or ('access-list' not in x) or ('elements' in x) or ('cached' in x) or ('remark' in x):
acl1.remove(x)
        # Exits the script if there is no hitcnt, as this means the user has done show run access-list
for x in acl1:
if 'hitcnt' not in x:
print('!!! ERROR - No hitcnt in {}, the file is incompatible with this script !!!'.format(filename))
print('Check the file and make sure you have done "show access-list" and NOT "show RUN access-list"')
exit()
        # Creates a list of ACL names from the ACL file to compare against the user-entered ACL names
file_acls = []
for x in acl1:
y = x.lstrip()
y = y.split(' ')
file_acls.append(y[1])
# Converts to a set to remove duplicates, then finds any element from acl_names not in acls
        acl_error = list(set(acl_names) - set(file_acls))
def __ne__(self, other):
return not (self == other)
class KeyNotFoundException(TException):
"""
Attributes:
- msg
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'msg', None, None, ), # 1
)
def __init__(self, msg=None,):
self.msg = msg
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.msg = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('KeyNotFoundException')
if self.msg is not None:
oprot.writeFieldBegin('msg', TType.STRING, 1)
oprot.writeString(self.msg.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.msg is None:
raise TProtocol.TProtocolException(message='Required field msg is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.msg)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class KeyAlreadyExistsException(TException):
"""
Attributes:
- msg
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'msg', None, None, ), # 1
)
def __init__(self, msg=None,):
self.msg = msg
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.msg = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('KeyAlreadyExistsException')
if self.msg is not None:
oprot.writeFieldBegin('msg', TType.STRING, 1)
oprot.writeString(self.msg.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.msg is None:
raise TProtocol.TProtocolException(message='Required field msg is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.msg)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
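# Hedged usage sketch (added for illustration; not part of the generated code):
# round-tripping one of these exception structs through the binary protocol,
# assuming the standard Thrift Python runtime already imported by this module.
#   buf = TTransport.TMemoryBuffer()
#   KeyNotFoundException(msg=u'missing key').write(TBinaryProtocol.TBinaryProtocol(buf))
#   received = KeyNotFoundException()
#   received.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
#   received.validate()  # raises TProtocolException only if 'msg' were unset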
class TopologySummary:
"""
Attributes:
- id
- name
- num_tasks
- num_executors
- num_workers
- uptime_secs
- status
- sched_status
- owner
- replication_count
- requested_memonheap
- requested_memoffheap
- requested_cpu
- assigned_memonheap
- assigned_memoffheap
- assigned_cpu
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
(2, TType.STRING, 'name', None, None, ), # 2
(3, TType.I32, 'num_tasks', None, None, ), # 3
(4, TType.I32, 'num_executors', None, None, ), # 4
(5, TType.I32, 'num_workers', None, None, ), # 5
(6, TType.I32, 'uptime_secs', None, None, ), # 6
(7, TType.STRING, 'status', None, None, ), # 7
None, # 8
None, # 9
None, # 10
None, # 11
None, # 12
None, # 13
None, # 14
None, # 15
None, # 16
None, # 17
None, # 18
None, # 19
None, # 20
None, # 21
None, # 22
None, # 23
None, # 24
None, # 25
None, # 26
None, # 27
None, # 28
None, # 29
None, # 30
None, # 31
None, # 32
None, # 33
None, # 34
None, # 35
None, # 36
None, # 37
None, # 38
None, # 39
None, # 40
None, # 41
None, # 42
None, # 43
None, # 44
None, # 45
None, # 46
None, # 47
None, # 48
None, # 49
None, # 50
None, # 51
None, # 52
None, # 53
None, # 54
None, # 55
None, # 56
None, # 57
None, # 58
None, # 59
None, # 60
None, # 61
None, # 62
None, # 63
None, # 64
None, # 65
None, # 66
None, # 67
None, # 68
None, # 69
None, # 70
None, # 71
None, # 72
None, # 73
None, # 74
None, # 75
None, # 76
None, # 77
None, # 78
None, # 79
None, # 80
None, # 81
None, # 82
None, # 83
None, # 84
None, # 85
None, # 86
None, # 87
None, # 88
None, # 89
None, # 90
None, # 91
None, # 92
None, # 93
None, # 94
None, # 95
None, # 96
None, # 97
None, # 98
None, # 99
None, # 100
None, # 101
None, # 102
None, # 103
None, # 104
None, # 105
None, # 106
None, # 107
None, # 108
None, # 109
None, # 110
None, # 111
None, # 112
None, # 113
None, # 114
None, # 115
None, # 116
None, # 117
None, # 118
None, # 119
None, # 120
None, # 121
None, # 122
None, # 123
None, # 124
None, # 125
None, # 126
None, # 127
None, # 128
None, # 129
None, # 130
None, # 131
None, # 132
None, # 133
None, # 134
None, # 135
None, # 136
None, # 137
None, # 138
None, # 139
None, # 140
None, # 141
None, # 142
None, # 143
None, # 144
None, # 145
None, # 146
None, # 147
None, # 148
None, # 149
None, # 150
None, # 151
None, # 152
None, # 153
None, # 154
None, # 155
None, # 156
None, # 157
None, # 158
None, # 159
None, # 160
None, # 161
None, # 162
None, # 163
None, # 164
None, # 165
None, # 166
None, # 167
None, # 168
None, # 169
None, # 170
None, # 171
None, # 172
None, # 173
None, # 174
None, # 175
None, # 176
None, # 177
None, # 178
None, # 179
None, # 180
None, # 181
None, # 182
None, # 183
None, # 184
None, # 185
None, # 186
None, # 187
None, # 188
None, # 189
None, # 190
None, # 191
None, # 192
None, # 193
None, # 194
None, # 195
None, # 196
None, # 197
None, # 198
None, # 199
None, # 200
None, # 201
None, # 202
None, # 203
None, # 204
None, # 205
None, # 206
None, # 207
None, # 208
None, # 209
None, # 210
None, # 211
None, # 212
None, # 213
None, # 214
None, # 215
None, # 216
None, # 217
None, # 218
None, # 219
None, # 220
None, # 221
None, # 222
None, # 223
None, # 224
None, # 225
None, # 226
None, # 227
None, # 228
None, # 229
None, # 230
None, # 231
None, # 232
None, # 233
None, # 234
None, # 235
None, # 236
None, # 237
None, # 238
None, # 239
None, # 240
None, # 241
None, # 242
None, # 243
None, # 244
None, # 245
None, # 246
None, # 247
None, # 248
None, # 249
None, # 250
None, # 251
None, # 252
None, # 253
None, # 254
None, # 255
None, # 256
None, # 257
None, # 258
None, # 259
None, # 260
None, # 261
None, # 262
None, # 263
None, # 264
None, # 265
None, # 266
None, # 267
None, # 268
None, # 269
None, # 270
None, # 271
None, # 272
None, # 273
None, # 274
None, # 275
None, # 276
fmt = "%%%ss:" % width
fmteval = fmt[:-1]+"="
line = ( fmt % ('-'*(width-2)) ) + ( '-'*(min(40,width*5)) )
print(line)
for key,value in self.__dict__.items():
if key not in self._excludedattr:
if isinstance(value,(int,float,str,list,tuple,np.ndarray,np.generic)):
if isinstance(value,pstr):
print(fmt % key,'p"'+self.dispmax(value)+'"')
else:
print(fmt % key,self.dispmax(value))
elif isinstance(value,struct):
print(fmt % key,self.dispmax(value.__str__()))
elif isinstance(value,type):
print(fmt % key,self.dispmax(str(value)))
else:
print(fmt % key,type(value))
if self._evalfeature:
if isinstance(value,pstr):
print(fmteval % "",'p"'+self.dispmax(tmp.getattr(key))+'"')
elif isinstance(value,str):
print(fmteval % "",self.dispmax(tmp.getattr(key)))
print(line)
return f"{self._fulltype} ({self._type} object) with {len(self)} {self._ftype}s"
def disp(self):
""" display method """
self.__repr__()
def __str__(self):
return f"{self._fulltype} ({self._type} object) with {len(self.__dict__)} {self._ftype}s"
def format(self,s,escape=False):
"""
format a string with fields (use ${field} as placeholders)
s.format(string), s.format(string,escape=True)
where:
s is a struct object
string is a string possibly containing ${variable1} placeholders
escape is a flag to prevent ${} being replaced by {}
"""
try:
if escape:
return s.format(**self.__dict__)
else:
return s.replace("${","{").format(**self.__dict__)
except KeyError:
print(f'\n Missing {self._ftype}: unable to interpret the expression:\n\t"{s}"')
raise
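# Illustrative example (added; assumes the kwargs constructor shown in the class docstring):
#   struct(name="case1", n=3).format("run ${name} with ${n} steps")
#   # -> 'run case1 with 3 steps'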
def fromkeys(self,keys):
""" returns a structure from keys """
return self+struct(**dict.fromkeys(keys,None))
@staticmethod
def scan(s):
""" scan a string for variables """
if not isinstance(s,str):
raise TypeError("scan() requires a string")
tmp = struct()
#return tmp.fromkeys(set(re.findall(r"\$\{(.*?)\}",s)))
found = re.findall(r"\$\{(.*?)\}",s);
uniq = []
for x in found:
if x not in uniq: uniq.append(x)
return tmp.fromkeys(uniq)
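# Illustrative example (added): scan() collects the unique ${...} placeholders
# of a string into a new struct whose fields are initialized to None.
#   struct.scan("${width} x ${height} = ${area}")  # struct with fields width, height, area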
def generator(self):
""" generate Python code of the equivalent structure """
nk = len(self)
if nk==0:
print("X = struct()")
else:
ik = 0
fmt = "%%%ss=" % max(10,max([len(k) for k in self.keys()])+2)
print("\nX = struct(")
for k in self.keys():
ik += 1
end = ",\n" if ik<nk else "\n"+(fmt[:-1] % ")")+"\n"
v = getattr(self,k)
if isinstance(v,(int,float)) or v is None:
print(fmt % k,v,end=end)
elif isinstance(v,str):
print(fmt % k,f'"{v}"',end=end)
elif isinstance(v,(list,tuple)):
print(fmt % k,v,end=end)
else:
print(fmt % k,"/* unsupported type */",end=end)
# copy and deep copy methods for the class
def __copy__(self):
""" copy method """
cls = self.__class__
copie = cls.__new__(cls)
copie.__dict__.update(self.__dict__)
return copie
def __deepcopy__(self, memo):
""" deep copy method """
cls = self.__class__
copie = cls.__new__(cls)
memo[id(self)] = copie
for k, v in self.__dict__.items():
setattr(copie, k, duplicatedeep(v, memo))
return copie
# write a file
def write(self,file):
"""
write the equivalent structure (not recursive for nested struct)
write(filename)
"""
f = open(file,mode="w",encoding='utf-8')
print(f"# {self._fulltype} with {len(self)} {self._ftype}s\n",file=f)
for k,v in self.items():
if v is None:
print(k,"=None",file=f,sep="")
elif isinstance(v,(int,float)):
print(k,"=",v,file=f,sep="")
elif isinstance(v,str):
print(k,'="',v,'"',file=f,sep="")
else:
print(k,"=",str(v),file=f,sep="")
f.close()
# write a file
@staticmethod
def read(file):
"""
read the equivalent structure
read(filename)
"""
f = open(file,mode="r",encoding="utf-8")
s = struct()
while 1:
line = f.readline()
if not line: break
line = line.strip()
expr = line.split(sep="=")
if len(line)>0 and line[0]!="#" and len(expr)>0:
lhs = expr[0]
rhs = "".join(expr[1:]).strip()
if len(rhs)==0 or rhs=="None":
v = None
else:
v = eval(rhs)
s.setattr(lhs,v)
f.close()
return s
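# Illustrative round trip (added; the file name is hypothetical): write() dumps
# "key=value" lines that read() evaluates back into an equivalent struct.
#   s = struct(a=1, b="text"); s.write("tmp_struct.txt")
#   t = struct.read("tmp_struct.txt")  # t.a == 1, t.b == "text"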
# argcheck
def check(self,default):
"""
populate fields from a default structure
check(defaultstruct)
missing field, None and [] values are replaced by default ones
Note: a.check(b) is equivalent to b+a except for [] and None values
"""
if not isinstance(default,struct):
raise TypeError("the first argument must be a structure")
for f in default.keys():
ref = default.getattr(f)
if f not in self:
self.setattr(f, ref)
else:
current = self.getattr(f)
if ((current is None) or (current==[])) and \
((ref is not None) and (ref!=[])):
self.setattr(f, ref)
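# Illustrative example (added): missing or None fields are filled from the defaults.
#   cfg = struct(tol=None)
#   cfg.check(struct(tol=1e-6, maxiter=100))  # cfg.tol -> 1e-6, cfg.maxiter -> 100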
# %% param class with scripting and evaluation capabilities
class param(struct):
"""
class parameters derived from struct() with dynamic evaluation
container obj.param = value
Example:
s=param(a=1,b=2,c='${a}+${b} # evaluate me if you can',
d="$this is a string",e="1000 # this is my number")
returns
--------:----------------------------------------
a: 1
b: 2
c: ${a}+${b} # evaluate me if you can
= 3
d: $this is a string
= this is a string
e: 1000 # this is my number
= 1000
--------:----------------------------------------
Out: parameter list (param object) with 5 definitions
s.a=10
produces
--------:----------------------------------------
a: 10
b: 2
c: ${a}+${b} # evaluate me if you can
= 12
d: $this is a string
= this is a string
e: 1000 # this is my number
= 1000
--------:----------------------------------------
Out: parameter list (param object) with 5 definitions
Other example with text parameters
s = param()
s.mypath = "$/this/folder"
s.myfile = "$file"
s.myext = "$ext"
s.fullfile = "$${mypath}/${myfile}.${myext}"
generates
--------:----------------------------------------
mypath: $/this/folder
= /this/folder
myfile: $file
= file
myext: $ext
= ext
fullfile: $${mypath}/${myfile}.${myext}
= /this/folder/file.ext
--------:----------------------------------------
Out: parameter list (param object) with 4 definitions
Evaluate a string with variables define in s
s.eval("this a string with ${variable1}, ${variable2}")
note: \${variable} prevents the evaluation
note: use s.eval("...$variable",protection=True) to automatically add {}
Examples:
definitions = param(a=1,b="${a}*10+${a}",c="\${a}+10",d='\${myparam}')
text = definitions.formateval("this my text ${a}, ${b}, \${myvar}=${c}+${d}")
print(text)
definitions = param(a=1,b="$a*10+$a",c="\$a+10",d='\$myparam')
text = definitions.formateval("this my text $a, $b, \$myvar=$c+$d",protection=True)
print(text)
s = struct(a=1,b=2)
s[1] = 3
s.disp()
s = {"a":1, "b":2}
t=struct.dict2struct(s)
t.disp()
sback = t.struct2dict()
sback.__repr__()
p=struct.fromkeysvalues(["a","b","c","d"],[1,2,3]).struct2param()
ptxt = p.protect("$c=$a+$b")
"""
# override
_type = "param"
_fulltype = "parameter list"
_ftype = "definition"
_evalfeature = True # This class can be evaluated with .eval()
# magic constructor
def __init__(self,_protection=False,_evaluation=True,**kwargs):
""" constructor """
super().__init__(**kwargs)
self._protection = _protection
self._evaluation = _evaluation
# escape definitions if needed
@staticmethod
def escape(s):
"""
escape \${} as ${{}} --> keep variable names
convert ${} as {} --> prepare Python replacement
Examples:
escape("\${a}")
returns ('${{a}}', True)
escape(" \${abc} ${a} \${bc}")
returns (' ${{abc}} {a} ${{bc}}', True)
escape("${a}")
Out[94]: ('{a}', False)
escape("${tata}")
returns ('{tata}', False)
"""
se, start, found = "", 0, True
while found:
pos0 = s.find("\${",start)
found = pos0>=0
if found:
pos1 = s.find("}",pos0)
found = pos1>=0
if found:
se += s[start:pos0].replace("${","{")+"${{"+s[pos0+3:pos1]+"}}"
start=pos1+1
result = se+s[start:].replace("${","{")
if isinstance(s,pstr): result = pstr(result)
return result,start>0
# protect variables in a string
def protect(self,s=""):
""" protect $variable as ${variable} """
if isinstance(s,str):
t = s.replace("\$","££") # ££ is a placeholder
escape = t!=s
for k in self.keyssorted():
t = t.replace("$"+k,"${"+k+"}")
if escape: t = t.replace("££","\$")
if isinstance(s,pstr): t = pstr(t)
return t, escape
raise ValueError('the argument must be string')
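# Illustrative example (added; taken from the usage shown in the class docstring above):
#   p = struct.fromkeysvalues(["a","b","c","d"],[1,2,3]).struct2param()
#   p.protect("$c=$a+$b")  # -> ("${c}=${a}+${b}", False)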
# lines starting with # (hash) are interpreted as comments
# ${variable} or {variable} are substituted by variable.value
# any line starting with $ is assumed to be a string (no interpretation)
# ^ is accepted in formulas (replaced by **)
def eval(self,s="",protection=False):
"""
Eval method for structure such as MS.alias
s = p.eval() or s = p.eval(string)
where :
p is a param object
s is a structure with evaluated fields
string is only used when definitions have been forgotten
"""
# Evaluate all DEFINITIONS
# the argument s is only used by formateval() for error management
tmp = struct()
for key,value in self.items():
# strings are assumed to be expressions on one single line
if isinstance(value,str):
# replace ${variable} (Bash, Lammps syntax) by {variable} (Python syntax)
# use \${variable} to prevent replacement (escape with \)
# Protect variables if required
ispstr = isinstance(value,pstr)
valuesafe = pstr.eval(value,ispstr=ispstr) # value.strip()
if protection or self._protection:
valuesafe, escape0 = self.protect(valuesafe)
else:
escape0 = False
valuesafe, escape = param.escape(valuesafe)
escape = escape or escape0
# replace "^" (Matlab, Lammps exponent) by "**" (Python syntax)
valuesafe = pstr.eval(valuesafe.replace("^","**"),ispstr=ispstr)
# Remove all content after #
# if the first character is '#', it is not a comment (e.g. MarkDown titles)
poscomment = valuesafe.find("#")
if poscomment>0: valuesafe = valuesafe[0:poscomment].strip()
# Literal string starts with $
if not self._evaluation:
tmp.setattr(key, pstr.eval(tmp.format(valuesafe,escape),ispstr=ispstr))
elif valuesafe.startswith("$") and not escape:
tmp.setattr(key,tmp.format(valuesafe[1:].lstrip())) # discard $
else: # string empty or which can be evaluated
if valuesafe=="":
tmp.setattr(key,valuesafe) # empty content
else:
if isinstance(value,pstr): # keep path
tmp.setattr(key, pstr.topath(tmp.format(valuesafe,escape=escape)))
elif escape: # partial evaluation
tmp.setattr(key, tmp.format(valuesafe,escape=True))
else: # full evaluation
tmp.setattr(key, eval(tmp.format(valuesafe)))
elif isinstance(value,(int,float,list,tuple)): # already a number
tmp.setattr(key, value) # store the value with the key
else: # unsupported types
if s.find("{"+key+"}")>=0:
print(f'*** WARNING ***\n\tIn the
<reponame>bthornton191/Adams_Modules<filename>adamspy/adripy/string.py
"""A module that contains the :class:`DrillString` class
"""
import os
import copy
import re
import shutil
import thornpy
from . import TMPLT_ENV
from .tool import DrillTool
from .utilities import read_TO_file, get_cdb_location, get_cdb_path, get_full_path, TO_LENGTH_PARAM, isabs
from .constants import DATABASE_INFO
class DrillString():
"""
An object with all data necessary to write a drill string.
Parameters in the string file can be specified when the DrillString is instanced using kwargs or they can be set later using:
>>> drill_string.parameters[parameter] = value
Note
----
Once the :class:`DrillString` is instanced, tools within the string must be defined as :class:`DrillTool` objects before the string is written to a string file. Use the :meth:`add_tool()` method to add :class:`DrillTool` objects to the string.
Attributes
----------
parameters : dict
Dictionary of parameters that make up an Adams Drill string and would be found in an Adams Drill string file (.str). The keys of the dictionary are the parameter names that would be seen in the string file and the values of the dictionary are the values that would be seen in the string file.
tools : list
List of tools in the drill string. Each element of the list is a dictionary with the keys 'DrillTool', 'Name', 'Property_File', 'Measure', 'Color', 'Number_of_Joints', and 'Stack_Order'.
top_drive : dict
Describes the top drive. The keys are 'DrillTool', 'Type', 'Name', and 'Property_File'.
"""
_SCALAR_PARAMETERS = [
'Units',
'ModelName',
'OutputName',
'Gravity',
'Deviation_Deg',
'Adams_Results',
'Adams_Requests',
'SolverDLL',
'Contact_Method',
'Cyl_Drag_Coeff',
'Hole_Color'
]
_FILENAME_PARAMETERS = [
'Hole_Property_File',
'Event_Property_File'
]
_DEFAULT_PARAMETER_SCALARS = {
'Units': 'Imperial',
'Gravity': 32.187,
'Deviation_Deg': 0.0,
'Adams_Results': 'animation',
'Adams_Requests': 'on',
'Adams_Graphics': 'off',
'SolverDLL': 'adrill_solver',
'Contact_Method': 'Subroutine',
'Cyl_Drag_Coeff': 1.2,
'Hole_Color': 'LtGray'
}
_TABLE_PARAMETERS = [
'Distance_from_Bit'
]
_DEFAULT_PARAMETER_TABLES = {
'Distance_from_Bit': ()
}
_CDB_TABLE = 'drill_strings.tbl'
_EXT = 'str'
_MULTI_JOINT_TOOLS = ['hw_pipe', 'drillpipe', 'equivalent_upper_string', 'drill_collar']
_DRILL_TOOL_PATTERN = re.compile('^DRILL_TOOL_[0-9]{2}$')
def __init__(self, string_name, hole_file, event_file, **kwargs):
"""Initializes the :class:`DrillString` object.
Parameters
----------
string_name : str
Name of the string.
hole_file : str
Filename of a hole file.
event_file : str
Filename of an event file.
"""
self.parameters = kwargs
self.parameters['ModelName'] = string_name
self.parameters['OutputName'] = string_name
self.parameters['Hole_Property_File'] = get_cdb_path(hole_file)
self.parameters['Event_Property_File'] = get_cdb_path(event_file)
self._apply_defaults()
self.tools = []
self.top_drive = {}
def add_tool(self, drill_tool, joints=1, measure=False, stack_order=None, color='Default', group_name=None, equivalent=False):
"""
Adds a :class:`DrillTool` object to the :class:`DrillString`.
Note
----
You cannot add the same :class:`DrillTool` object to the string multiple times. If you want to add multiple instances of the same tool you must create two separate :class:`DrillTool` objects from the same Tiem Orbit property file.
Parameters
----------
drill_tool : DrillTool
:class:`DrillTool` object representing the tool to be added
joints : int
Number of Joints. Note that this only applies for certain tool types. (default is 1)
measure : bool
If TRUE indicates that output requests should be generated for this tool. (default is FALSE)
stack_order : int
If an integer is given the tool will be inserted into the string at that point. (default is None which means the tool will be appended to the end)
color : str
The color used to render the tool in an Adams Drill animation. (default is 'Default')
group_name : str
This argument is required if the tool being added is a tool that comes in multiple joints (e.g. drill_pipe). NOTE: Tools that come in multiple joints are listed in :attr:`DrillTool._MULTI_JOINT_TOOLS`.
equivalent : bool
Set this to `True` if the tool being added is drill_pipe and you want to use the equivalent upper string formulation.
"""
# Check that the group_name argument is given
if drill_tool.tool_type.lower() in self._MULTI_JOINT_TOOLS and group_name is None:
raise ValueError('group_name is required for tools of type {}.'.format('{}'.format(self._MULTI_JOINT_TOOLS)[1:-1].replace("'",'')))
if drill_tool.tool_type.lower() != 'top_drive':
# If the tool added IS NOT a top_drive, check that the tool is not already in the tools list
for existing_tool in self.tools:
if drill_tool is existing_tool:
raise DrillStringError('You cannot add the same DrillTool object to a DrillString multiple times!')
# Create a dictionary describing the tool
tool = {
'DrillTool': drill_tool,
'Name': None,
'Property_File': drill_tool.property_file,
'Measure': 'yes' if measure else 'no',
'Color': color,
'Number_of_Joints': joints,
'Stack_Order': None
}
# Set tool['Type'] to equivalent_upper_string if equivalent=True
if drill_tool.tool_type.lower() == 'drillpipe' and equivalent is True:
tool['Type'] = 'equivalent_upper_string'
else:
tool['Type'] = drill_tool.tool_type
# Set tool['Name'] equal to the group name if this is a multi joint tool
if drill_tool.tool_type.lower() in self._MULTI_JOINT_TOOLS:
tool['Name'] = group_name
else:
tool['Name'] = drill_tool.name
# If the stack order is not given append to the end otherwise insert accordingly
if stack_order is None:
self.tools.append(tool)
else:
self.tools.insert(stack_order-1, tool)
# Set Stack Orders equal to place in list
for order, tool in enumerate(self.tools):
tool['Stack_Order'] = order + 1
else:
# If the tool added IS a top_drive
self.top_drive = {
'DrillTool': drill_tool,
'Type': drill_tool.tool_type,
'Name': drill_tool.name,
'Property_File': drill_tool.property_file
}
# This binds drill_tool.name to tool['Name'] and tool['Property_File']
# Now if drill_tool is renamed by another process, the tools list won't need to be updated.
drill_tool.bind_name_to(self.tool_renamed)
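# Hedged usage sketch (added; paths and tool names below are hypothetical, and
# DrillTool is assumed to be built from a Tiem Orbit property file as noted above):
#   bit = DrillTool('<cdb>/bits.tbl/example_bit.pdc')
#   string = DrillString('example_string',
#                        '<cdb>/holes.tbl/example_hole.hol',
#                        '<cdb>/events.tbl/example_event.evt')
#   string.add_tool(bit, measure=True)   # appended at the end of the stack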
def set_pipe_joints(self, joints, equivalent=False):
"""Sets the number of joints in the upper most section of drill pipe. Set `equivalent=True` to adjust equivalent upper string joints.
Parameters
----------
joints : int
Number of physical drill pipe joints.
equivalent : str
False for physical string. True for equivalent upper string. (Default is False.)
Raises
------
DrillStringError
Raised if the drill string doesn't have drill pipe of the specified type.
"""
# Check that the pipe type argument was passed correctly
tool_type = 'equivalent_upper_string' if equivalent else 'drillpipe'
found = False
for tool in self.tools[::-1]:
if tool['Type'] == tool_type:
# set the number of joints
tool['Number_of_Joints'] = joints
found = True
break
if not found:
raise DrillStringError(f'There is no {tool_type} in this string!')
def get_tool(self, tool_type, index=0):
"""Returns a DrillTool object of type `tool_type` in the :class:`DrillString` object's tools list.
Parameters
----------
tool_type : str
Desired tool type.
index : int
Index of the tool to return (the default is 0)
Raises
------
DrillStringError
Raised if a tool of the specified type does not exist in the drill string.
Returns
-------
DrillTool
:class:`DrillTool` object
"""
tools_found = []
for tool in self.tools:
# For each tool in self.tools if this tool matches the requested tool type
if tool['Type']==tool_type:
tools_found.append(tool['DrillTool'])
# If the requested instance is positive and we've found enough tools to stop
if index >= 0 and len(tools_found) == index+1:
break
# Raise an error if no tools were found
if tools_found == []:
raise DrillStringError(f'No tool of type {tool_type} was found!')
if index >= len(tools_found):
n_tools_found = len(tools_found)
raise DrillStringError(f'Not enough tools of type {tool_type} were found. Number of tools found was {n_tools_found}. Requested index was {index}.')
return tools_found[index]
def tool_renamed(self, renamed_tool):
"""Updates the 'name' and 'Property_File' entries in the :attr:`tools` to match `renamed_tool`.name.
Parameters
----------
renamed_tool : DrillTool
:class:`DrillTool` object that has been renamed and needs to have its 'Name' and 'Property_File' entries updated.
"""
for tool in self.tools + [self.top_drive]:
if tool['DrillTool'] is renamed_tool:
tool['Name'] = renamed_tool.name
ext = tool['DrillTool'].extension
tool['Property_File'] = os.path.join(os.path.split(tool['Property_File'])[0], f'{renamed_tool.name}.{ext}')
break
def get_bha_length(self):
"""Gets the total length of the BHA defined in string_file
Note
----
The BHA is all the tools EXCEPT the equivalent upper string and the uppermost section of physical drill pipe.
Returns
-------
float : Cumulative length of the bha
"""
# Get a list of the drill pipe tools in the string
drill_pipe_tools = [tool for tool in self.tools if tool['Type'] == 'drillpipe']
# Initialize a list of tool lengths
tool_lengths = []
for tool in self.tools:
# for each tool in the tools list
if tool['Type'] != 'equivalent_upper_string' and tool is not drill_pipe_tools[-1]:
# If this isn't equivalent upper string or the last drill pipe tool
lnth_params = TO_LENGTH_PARAM[tool['Type']]
lengths = []
for lnth_param in lnth_params:
length
"""
This is the main file for ParEx, a suite of parallel extrapolation solvers for
initial value problems. It includes explicit, implicit, and semi-implicit (linearly
implicit) solvers. The code is based largely on material from the
following two volumes:
- *Solving Ordinary Differential Equations I: Nonstiff Problems*,
by Hairer, Norsett and Wanner
- *Solving Ordinary Differential Equations II: Stiff and
Differential-Algebraic Problems*, by Hairer and Wanner
The calling interface follows this pattern:
def solve_implicit/explicit/semi-implicit(ode_fun, jac_fun, y_olds, t_old,
f_old, dt, args)
@param ode_fun (callable ode_fun(y,t,args)): derivative of u(t) (ODE RHS, ode_fun(u,t))
@param jac_fun (callable jac_fun(y,t,args)): Jacobian of ode_fun.
@param y_olds (2-tuple) : previous solution values (two previous values, at t_n-2 and t_n-1)
used to obtain the next point (using two previous values because
midpoint explicit method needs the previous two points, otherwise
the t_n-2 value can remain unused)
@param t_old (float): time at which previous solution is found
@param f_old (array): function evaluation at y_old (at t_n-1), so that
it can be reused if it was already calculated for other purposes
(Jacobian estimation or dense output interpolation)
@param dt (float): step length
@param args (tuple): extra arguments
@param extra arguments to be passed to some of the methods:
@param J00 (2D array): (only for semi-implicit) Jacobian estimation at
the previous stage value (at each extrapolation stage
different number of steps are taken, depending on the step
sequence chosen).
@return (yj, f_yj, fe_tot, je_tot):
@return yj (array): solution calculated at t_old+dt
@return f_yj (array): function evaluation value at y_old, t_old
@return fe_tot (int): number of function evaluations done
@return je_tot (int): number of Jacobian evaluations done
"""
from __future__ import division
import numpy as np
import multiprocessing as mp
import math
from scipy import optimize
import scipy
from . import forward_diff
gmres = scipy.sparse.linalg.gmres
# NUM_WORKERS is a global variable, but it does not affect multiprocessing performance
# as it is used only in the sequential parts of the code.
# The same goes for jacobian_old.
NUM_WORKERS = None
jacobian_old = 0
def _set_NUM_WORKERS(nworkers):
"""Set number of parallel workers.
Used to determine how many processes (maximum) will be used by
multiprocessing when building the extrapolation table.
Parameters
----------
nworkers : int
Number of processes to allow. If not specified, use value returned by
mp.cpu_count().
"""
global NUM_WORKERS
if nworkers is None:
try:
NUM_WORKERS = mp.cpu_count()
except NotImplementedError:
NUM_WORKERS = 4
else:
NUM_WORKERS = max(nworkers, 1)
def _error_norm(y1, y2, atol, rtol):
"""
Compute Euclidean grid-norm of difference between vectors y1 and y2,
scaled based on relative and absolute tolerances. Tolerances are
satisfied if the return value is smaller than unity.
Based on II.4.11 (ref I).
Parameters
----------
y1, y2 : array_like
Vectors whose difference will be used.
atol : float
Absolute tolerance.
rtol : float
Relative tolerance.
Returns
-------
float
Scaled norm of y1-y2.
"""
tol = atol + np.maximum(np.abs(y1), np.abs(y2))*rtol
return np.linalg.norm((y1-y2)/tol)/(len(y1)**0.5)
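# Illustrative check (added): identical vectors give a scaled norm of 0, and a step
# is accepted when this value is below 1, as stated in the docstring above.
#   _error_norm(np.ones(3), np.ones(3), atol=1e-6, rtol=1e-3)  # -> 0.0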
def linear_solve(A, b, iterative=False, tol=1.e-8, x0=None):
"""Solve Ax=b for x. Used in semi-implicit method.
Parameters
----------
A : n x n array
b : length n array
iterative : boolean
If true, use an iterative solver.
tol : float
Error tolerance if using an iterative solver.
x0 : length n array
Initial guess if using an iterative solver.
Returns
-------
x : length n array
Approximate solution of Ax = b.
"""
if iterative:
# TODO: choose an appropriate value of maxiter to distribute work
# between taking more steps and having a more accurate solution
dy, info = gmres(A, b, tol=tol, x0=x0, maxiter=100)
if info >0:
print("Info: maximum iterations reached for sparse system solver (GMRES).")
return dy
else:
if(scipy.sparse.issparse(A)):
return scipy.sparse.linalg.spsolve(A, b)
else:
return np.linalg.solve(A, b)
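# Illustrative use (added): dense direct solve of a small system.
#   A = np.array([[2.0, 0.0], [0.0, 4.0]]); b = np.array([2.0, 8.0])
#   linear_solve(A, b)  # -> array([1., 2.])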
def _solve_implicit_step(f, jacobian, initial_guess, tol):
"""
Find the root of the function f() using as initial approximation
initial_guess. Used in fully implicit methods.
Parameters
----------
f : callable f(y,t,args)
Residual function.
jacobian : callable jacobian(y,t,args)
Jacobian of f.
initial_guess : array
Estimated value of f to use as initial guess
tol : dict
Error tolerance passed to fsolve
Returns
-------
yj : array
Solution calculated at t_old+dt (root of f)
fe_tot : int
Number of function evaluations performed
je_tot : int
Number of Jacobian evaluations performed
"""
# TODO: Problem -> fsolve doesn't seem to work well with xtol parameter
# TODO: pass to the zero finder solver the tolerance required to the
# overall problem. Then the error by the solver won't limit the global
# error of the ODE solution.
# TODO: change solver so it doesn't do the 2 extra unnecessary function
# evaluations
# https://github.com/scipy/scipy/issues/5369
# TODO: add extra 2 function evaluations
x, infodict, _, _ = optimize.fsolve(f, initial_guess, fprime=jacobian,
full_output=True, xtol=tol)
if("njev" in infodict):
return (x, infodict["nfev"], infodict["njev"])
else:
return (x, infodict["nfev"], 0)
def _semi_implicit_midpoint(ode_fun, jac_fun, y_olds, t_old, f_old, dt, args,
solver_parameters, J00, I):
"""
Calculate solution at t_old+dt using the semi-implicit midpoint
formula. Based on equations IV.9.16a-b of Ref II.
"""
y_older, y_old = y_olds
je_tot=0
if(y_older is None): # Use Euler to get starting value
return _semi_implicit_euler(ode_fun, jac_fun, y_olds, t_old,
f_old, dt, args, solver_parameters,
J00, I)
if(f_old is None):
f_yj = ode_fun(*(y_old,t_old)+args)
fe_tot = 1
else: # We already computed it and can just reuse it
f_yj = f_old
fe_tot=0
b = np.dot(-(I+dt*J00),(y_old-y_older)) + 2*dt*f_yj
A = I-dt*J00
if(solver_parameters['initialGuess']): # Use Euler for initial guess
x0, f_yj, fe_tot_,je_tot=_explicit_euler(ode_fun, jac_fun, y_olds,
t_old, f_yj, dt,
args, solver_parameters)
fe_tot += fe_tot_
else:
x0=None
dy = linear_solve(A, b, iterative=solver_parameters['iterative'],
tol=solver_parameters['min_tol'], x0=x0)
y_new = y_old + dy
return (y_new, f_yj, fe_tot, je_tot)
def _semi_implicit_euler(ode_fun, jac_fun, y_olds, t_old,
f_old,dt, args, solver_parameters, J00, I):
"""
Calculate solution at t_old+dt using the semi-implicit Euler method.
Based on Section IV.9.25 of Ref II.
"""
y_older, y_old = y_olds
je_tot = 0
if(f_old is None):
f_yj = ode_fun(*(y_old, t_old)+args)
fe_tot = 1
else:
f_yj = f_old
fe_tot = 0
b = dt*f_yj
A = I-dt*J00
if(solver_parameters['initialGuess']):
# TODO: Using explicit Euler as a predictor doesn't seem to be
# effective (maybe because with extrapolation we are taking too big
# steps for the predictor to be close to the solution).
# x0, f_yj, fe_tot_,je_tot=_explicit_euler(ode_fun, jac_fun,
# y_olds, t_old, f_yj, dt, args, solver_parameters)
# fe_tot += fe_tot_
x0 = y_old
else:
x0 = None
dy = linear_solve(A, b, iterative=solver_parameters['iterative'],
tol=solver_parameters['min_tol'], x0=x0)
y_new = y_old + dy
return (y_new, f_yj, fe_tot, je_tot)
def _implicit_midpoint(ode_fun, jac_fun, y_olds, t_old, f_old,
dt, args, solver_parameters):
"""
Calculate solution at t_old+dt using the implicit midpoint method.
Based on IV.9.2 (ref II).
"""
y_older, y_old = y_olds
def zero_func(x):
fval = ode_fun(*((y_old+x)/2, t_old+dt/2) + args)
return x - y_old - dt * fval
def jacobian(x):
II = np.identity(len(x), dtype=float)
return np.matrix(II - dt*jac_fun(x,t_old+dt/2))
if(jac_fun is None):
jacobian = None
if(not solver_parameters['initialGuess']):
initial_guess = y_old
fe_tot = 0
je_tot = 0
else:
# Estimation of the value as the starting point for the zero solver
initial_guess, f_yj, fe_tot, je_tot = \
_explicit_euler(ode_fun, jac_fun, y_olds, t_old, f_old, dt, args,
solver_parameters)
y_new, fe_tot_, je_tot_ = _solve_implicit_step(zero_func, jacobian,
initial_guess,
solver_parameters['min_tol'])
fe_tot += fe_tot_
je_tot += je_tot_
f_yj = ode_fun(*(y_old,t_old)+args)
fe_tot += 1
return (y_new, f_yj, fe_tot, je_tot)
def _explicit_midpoint(ode_fun, jac_fun, y_olds, t_old, f_old,
dt, args, solver_parameters):
"""
Calculate solution at t_old+dt using the explicit midpoint method.
Based on II.9.13b of Ref I.
"""
y_older, y_old = y_olds
if(y_older is None): # Use Euler to get additional starting value
return _explicit_euler(ode_fun, jac_fun, y_olds, t_old,
f_old, dt, args, solver_parameters)
f_yj = ode_fun(*(y_old, t_old)+args)
fe_tot = 1
return (y_older + (2*dt)*f_yj, f_yj, fe_tot,0)
def _explicit_euler(ode_fun, jac_fun, y_olds, t_old, f_old,
dt, args, solver_parameters):
"""
Calculate solution at t_old+dt doing one step with explicit Euler.
Based on II.9.13a of Ref I.
"""
y_older, y_old = y_olds
if(f_old is None):
f_yj = ode_fun(*(y_old, t_old)+args)
fe_tot = 1
else:
f_yj = f_old
fe_tot = 0
return (y_old + dt*f_yj, f_yj, fe_tot,0)
# End of solver definitions
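# Hedged usage sketch (added for illustration; _demo_explicit_euler_step is not part
# of the original module): one explicit Euler step for y' = -y, following the calling
# interface described in the module docstring. _explicit_euler ignores
# solver_parameters, so an empty dict is passed.
def _demo_explicit_euler_step():
    ode_fun = lambda y, t: -y
    y_new, f_yj, fe_tot, je_tot = _explicit_euler(
        ode_fun, None, (None, np.array([1.0])), 0.0, None, 0.1, (), {})
    # y_new is approximately [0.9] and exactly one function evaluation was needed
    return y_new, fe_tot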
def _compute_stages(job):
"""
Compute extrapolation tableau values with the order specified and number of
steps specified in j_nj_list.
Calculate the T_{k,1} values for the k's in j_nj_list.
Based on II.9.2 (Definition of
self.setWindowIcon(QIcon('..\\img\\icon_16px.ico'))
self.setFixedSize(self.size())
self.show()
# Set up trigger and queue to update dialog GUI during approach:
self.progress_trigger = Trigger()
self.progress_trigger.s.connect(self.update_progress)
self.finish_trigger = Trigger()
self.finish_trigger.s.connect(self.finish_approach)
self.spinBox_numberSlices.setRange(1, 100)
self.spinBox_numberSlices.setSingleStep(1)
self.spinBox_numberSlices.setValue(5)
self.spinBox_numberSlices.valueChanged.connect(self.update_progress)
self.pushButton_startApproach.clicked.connect(self.start_approach)
self.pushButton_abortApproach.clicked.connect(self.abort_approach)
self.pushButton_startApproach.setEnabled(True)
self.pushButton_abortApproach.setEnabled(False)
self.slice_counter = 0
self.approach_in_progress = False
self.aborted = False
self.z_mismatch = False
self.max_slices = self.spinBox_numberSlices.value()
self.update_progress()
def add_to_log(self, msg):
self.main_window_queue.put(utils.format_log_entry(msg))
self.main_window_trigger.s.emit()
def update_progress(self):
self.max_slices = self.spinBox_numberSlices.value()
if self.slice_counter > 0:
remaining_time_str = (
' ' + str(int((self.max_slices - self.slice_counter)*12))
+ ' seconds left')
else:
remaining_time_str = ''
self.label_statusApproach.setText(str(self.slice_counter) + '/'
+ str(self.max_slices)
+ remaining_time_str)
self.progressBar_approach.setValue(
int(self.slice_counter/self.max_slices * 100))
def start_approach(self):
self.pushButton_startApproach.setEnabled(False)
self.pushButton_abortApproach.setEnabled(True)
self.buttonBox.setEnabled(False)
self.spinBox_thickness.setEnabled(False)
self.spinBox_numberSlices.setEnabled(False)
self.main_window_queue.put('APPROACH BUSY')
self.main_window_trigger.s.emit()
thread = threading.Thread(target=self.approach_thread)
thread.start()
def finish_approach(self):
# Clear knife
self.add_to_log('3VIEW: Clearing knife.')
self.microtome.clear_knife()
if self.microtome.get_error_state() > 0:
self.add_to_log('CTRL: Error clearing knife.')
self.microtome.reset_error_state()
QMessageBox.warning(self, 'Error',
'Warning: Clearing the knife failed. '
'Try to clear manually.', QMessageBox.Ok)
self.main_window_queue.put('STATUS IDLE')
self.main_window_trigger.s.emit()
# Show message box to user and reset counter and progress bar:
if not self.aborted:
QMessageBox.information(
self, 'Approach finished',
str(self.max_slices) + ' slices have been cut successfully. '
'Total sample depth removed: '
+ str(self.max_slices * self.thickness / 1000) + ' µm.',
QMessageBox.Ok)
self.slice_counter = 0
self.update_progress()
elif self.z_mismatch:
# Show warning message if Z mismatch detected
self.microtome.reset_error_state()
QMessageBox.warning(
self, 'Z position mismatch',
'The current Z position does not match the last known '
'Z position in SBEMimage. Have you manually changed Z? '
'Make sure that the Z position is correct before cutting.',
QMessageBox.Ok)
else:
QMessageBox.warning(
self, 'Approach aborted',
str(self.slice_counter) + ' slices have been cut. '
'Total sample depth removed: '
+ str(self.slice_counter * self.thickness / 1000) + ' µm.',
QMessageBox.Ok)
self.slice_counter = 0
self.update_progress()
self.pushButton_startApproach.setEnabled(True)
self.pushButton_abortApproach.setEnabled(False)
self.buttonBox.setEnabled(True)
self.spinBox_thickness.setEnabled(True)
self.spinBox_numberSlices.setEnabled(True)
self.approach_in_progress = False
def approach_thread(self):
self.approach_in_progress = True
self.aborted = False
self.z_mismatch = False
self.slice_counter = 0
self.max_slices = self.spinBox_numberSlices.value()
self.thickness = self.spinBox_thickness.value()
self.progress_trigger.s.emit()
# Get current z position of stage:
z_position = self.microtome.get_stage_z(wait_interval=1)
if z_position is None or z_position < 0:
# Try again:
z_position = self.microtome.get_stage_z(wait_interval=2)
if z_position is None or z_position < 0:
self.add_to_log(
'CTRL: Error reading Z position. Approach aborted.')
self.microtome.reset_error_state()
self.aborted = True
if self.microtome.get_error_state() == 206:
self.microtome.reset_error_state()
self.z_mismatch = True
self.aborted = True
self.add_to_log(
'CTRL: Z position mismatch. Approach aborted.')
self.main_window_queue.put('UPDATE Z')
self.main_window_trigger.s.emit()
if not self.aborted:
self.microtome.near_knife()
self.add_to_log('3VIEW: Moving knife to near position.')
if self.microtome.get_error_state() > 0:
self.add_to_log(
'CTRL: Error moving knife to near position. '
'Approach aborted.')
self.aborted = True
self.microtome.reset_error_state()
# ====== Approach loop =========
while (self.slice_counter < self.max_slices) and not self.aborted:
# Move to new z position:
z_position = z_position + (self.thickness / 1000)
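# (thickness is entered in nm, while the stage Z position is handled in µm, hence /1000)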
self.add_to_log(
'3VIEW: Move to new Z: ' + '{0:.3f}'.format(z_position))
self.microtome.move_stage_to_z(z_position)
# Show new Z position in main window:
self.main_window_queue.put('UPDATE Z')
self.main_window_trigger.s.emit()
# Check if there were microtome problems:
if self.microtome.get_error_state() > 0:
self.add_to_log(
'CTRL: Z stage problem detected. Approach aborted.')
self.aborted = True
self.microtome.reset_error_state()
break
self.add_to_log('3VIEW: Cutting in progress ('
+ str(self.thickness) + ' nm cutting thickness).')
# Do the approach cut (cut, retract, in near position)
self.microtome.do_full_approach_cut()
sleep(self.microtome.get_full_cut_duration() - 5)
if self.microtome.get_error_state() > 0:
self.add_to_log(
'CTRL: Cutting problem detected. Approach aborted.')
self.aborted = True
self.microtome.reset_error_state()
break
else:
self.add_to_log('3VIEW: Approach cut completed.')
self.slice_counter += 1
# Update progress bar and slice counter
self.progress_trigger.s.emit()
# ====== End of approach loop =========
# Signal that thread is done:
self.finish_trigger.s.emit()
def abort_approach(self):
self.aborted = True
self.pushButton_abortApproach.setEnabled(False)
def closeEvent(self, event):
if not self.approach_in_progress:
event.accept()
else:
event.ignore()
def accept(self):
if not self.approach_in_progress:
super().accept()
#------------------------------------------------------------------------------
class GrabFrameDlg(QDialog):
"""Acquires or saves a single frame from SmartSEM."""
def __init__(self, config, sem, main_window_queue, main_window_trigger):
super().__init__()
self.cfg = config
self.sem = sem
self.main_window_queue = main_window_queue
self.main_window_trigger = main_window_trigger
self.finish_trigger = Trigger()
self.finish_trigger.s.connect(self.scan_complete)
loadUi('..\\gui\\grab_frame_dlg.ui', self)
self.setWindowModality(Qt.ApplicationModal)
self.setWindowIcon(QIcon('..\\img\\icon_16px.ico'))
self.setFixedSize(self.size())
self.show()
timestamp = str(datetime.datetime.now())
# Remove some characters from timestamp to get valid file name:
timestamp = timestamp[:19].translate({ord(c): None for c in ' :-.'})
self.file_name = 'image_' + timestamp
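# e.g. 'image_YYYYMMDDHHMMSS' once the separators are stripped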
self.lineEdit_filename.setText(self.file_name)
frame_size, pixel_size, dwell_time = self.sem.get_grab_settings()
store_res_list = [
'%d × %d' % (res[0], res[1]) for res in self.sem.STORE_RES]
self.comboBox_frameSize.addItems(store_res_list)
self.comboBox_frameSize.setCurrentIndex(frame_size)
self.doubleSpinBox_pixelSize.setValue(pixel_size)
self.comboBox_dwellTime.addItems(map(str, self.sem.DWELL_TIME))
self.comboBox_dwellTime.setCurrentIndex(
self.sem.DWELL_TIME.index(dwell_time))
self.pushButton_scan.clicked.connect(self.scan_frame)
self.pushButton_save.clicked.connect(self.save_frame)
def scan_frame(self):
"""Scan and save a single frame using the current grab settings."""
self.file_name = self.lineEdit_filename.text()
# Save and apply grab settings:
selected_dwell_time = self.sem.DWELL_TIME[
self.comboBox_dwellTime.currentIndex()]
self.sem.set_grab_settings(self.comboBox_frameSize.currentIndex(),
self.doubleSpinBox_pixelSize.value(),
selected_dwell_time)
self.sem.apply_grab_settings()
self.pushButton_scan.setText('Wait')
self.pushButton_scan.setEnabled(False)
self.pushButton_save.setEnabled(False)
QApplication.processEvents()
thread = threading.Thread(target=self.perform_scan)
thread.start()
def perform_scan(self):
"""Acquire a new frame. Executed in a thread because it may take some
time and GUI should not freeze.
"""
self.scan_success = self.sem.acquire_frame(
self.cfg['acq']['base_dir'] + '\\' + self.file_name + '.tif')
self.finish_trigger.s.emit()
def scan_complete(self):
"""This function is called when the scan is complete.
Reset the GUI and show result of grab command.
"""
self.pushButton_scan.setText('Scan and grab')
self.pushButton_scan.setEnabled(True)
self.pushButton_save.setEnabled(True)
if self.scan_success:
self.add_to_log('CTRL: Single frame acquired by user.')
QMessageBox.information(
self, 'Frame acquired',
'The image was acquired and saved as '
+ self.file_name +
'.tif in the current base directory.',
QMessageBox.Ok)
else:
QMessageBox.warning(
self, 'Error',
'An error occurred while attempting to acquire the frame: '
+ self.sem.get_error_cause(),
QMessageBox.Ok)
self.sem.reset_error_state()
def save_frame(self):
"""Save the image currently visible in SmartSEM."""
self.file_name = self.lineEdit_filename.text()
success = self.sem.save_frame(os.path.join(
self.cfg['acq']['base_dir'], self.file_name + '.tif'))
if success:
self.add_to_log('CTRL: Single frame saved by user.')
QMessageBox.information(
self, 'Frame saved',
'The current image shown in SmartSEM was saved as '
+ self.file_name + '.tif in the current base directory.',
QMessageBox.Ok)
else:
QMessageBox.warning(
self, 'Error',
'An error occurred while attempting to save the current '
'SmartSEM image: '
+ self.sem.get_error_cause(),
QMessageBox.Ok)
self.sem.reset_error_state()
def add_to_log(self, msg):
"""Use trigger and queue to add an entry to the main log."""
self.main_window_queue.put(utils.format_log_entry(msg))
self.main_window_trigger.s.emit()
#------------------------------------------------------------------------------
class EHTDlg(QDialog):
"""Show EHT status and let user switch beam on or off."""
def __init__(self, sem):
super().__init__()
self.sem = sem
loadUi('..\\gui\\eht_dlg.ui', self)
self.setWindowModality(Qt.ApplicationModal)
self.setWindowIcon(QIcon('..\\img\\icon_16px.ico'))
self.setFixedSize(self.size())
self.show()
self.pushButton_on.clicked.connect(self.turn_on)
self.pushButton_off.clicked.connect(self.turn_off)
self.update_status()
def update_status(self):
if self.sem.is_eht_on():
pal = QPalette(self.label_EHTStatus.palette())
pal.setColor(QPalette.WindowText, QColor(Qt.red))
self.label_EHTStatus.setPalette(pal)
self.label_EHTStatus.setText('ON')
self.pushButton_on.setEnabled(False)
self.pushButton_off.setEnabled(True)
else:
pal = QPalette(self.label_EHTStatus.palette())
pal.setColor(QPalette.WindowText, QColor(Qt.black))
self.label_EHTStatus.setPalette(pal)
self.label_EHTStatus.setText('OFF')
self.pushButton_on.setEnabled(True)
self.pushButton_off.setEnabled(False)
def turn_on(self):
self.pushButton_on.setEnabled(False)
self.pushButton_on.setText('Wait')
thread = threading.Thread(target=self.send_on_cmd_and_wait)
thread.start()
def turn_off(self):
self.pushButton_off.setEnabled(False)
self.pushButton_off.setText('Wait')
QApplication.processEvents()
thread = threading.Thread(target=self.send_off_cmd_and_wait)
thread.start()
def send_on_cmd_and_wait(self):
self.sem.turn_eht_on()
max_wait_time = 15
while not self.sem.is_eht_on() and max_wait_time > 0:
sleep(1)
max_wait_time -= 1
self.pushButton_on.setText('ON')
self.update_status()
def send_off_cmd_and_wait(self):
self.sem.turn_eht_off()
max_wait_time = 15
while not self.sem.is_eht_off() and max_wait_time > 0:
sleep(1)
max_wait_time -= 1
self.pushButton_off.setText('OFF')
self.update_status()
#------------------------------------------------------------------------------
class FTSetParamsDlg(QDialog):
"""Read working distance and stigmation parameters from user input or
from SmartSEM for setting WD/STIG for individual tiles/OVs in
focus tool.
"""
def __init__(self, sem, current_wd, current_stig_x, current_stig_y,
simulation_mode=False):
super().__init__()
self.sem = sem
loadUi('..\\gui\\focus_tool_set_params_dlg.ui', self)
self.setWindowModality(Qt.ApplicationModal)
self.setWindowIcon(QIcon('..\\img\\icon_16px.ico'))
self.setFixedSize(self.size())
self.show()
if simulation_mode:
self.pushButton_getFromSmartSEM.setEnabled(False)
self.pushButton_getFromSmartSEM.clicked.connect(self.get_from_sem)
if current_wd is not None:
self.doubleSpinBox_currentFocus.setValue(1000 * current_wd)
else:
self.doubleSpinBox_currentFocus.setValue(0)
if current_stig_x is not None:
self.doubleSpinBox_currentStigX.setValue(current_stig_x)
else:
self.doubleSpinBox_currentStigX.setValue(0)
if current_stig_y is not None:
self.doubleSpinBox_currentStigY.setValue(current_stig_y)
else:
self.doubleSpinBox_currentStigY.setValue(0)
def get_from_sem(self):
self.doubleSpinBox_currentFocus.setValue(1000 * self.sem.get_wd())
self.doubleSpinBox_currentStigX.setValue(self.sem.get_stig_x())
self.doubleSpinBox_currentStigY.setValue(self.sem.get_stig_y())
def return_params(self):
return (self.new_wd, self.new_stig_x, self.new_stig_y)
def accept(self):
self.new_wd = self.doubleSpinBox_currentFocus.value() / 1000
self.new_stig_x = self.doubleSpinBox_currentStigX.value()
self.new_stig_y = self.doubleSpinBox_currentStigY.value()
super().accept()
#------------------------------------------------------------------------------
class FTMoveDlg(QDialog):
"""Move the stage to the selected tile or OV position."""
def __init__(self, microtome, coordinate_system, grid_manager,
grid_number, tile_number, ov_number):
super().__init__()
self.microtome = microtome
self.cs = coordinate_system
self.gm = grid_manager
self.ov_number = ov_number
self.grid_number = grid_number
self.tile_number = tile_number
self.error = False
self.finish_trigger = Trigger()
self.finish_trigger.s.connect(self.move_completed)
loadUi('..\\gui\\focus_tool_move_dlg.ui', self)
self.setWindowModality(Qt.ApplicationModal)
self.setWindowIcon(QIcon('..\\img\\icon_16px.ico'))
self.setFixedSize(self.size())
self.show()
self.pushButton_move.clicked.connect(self.start_move)
if ov_number >= 0:
self.label_moveTarget.setText('OV ' + str(ov_number))
elif (grid_number >= 0) and (tile_number >= 0):
self.label_moveTarget.setText(
'Grid: %d, Tile: %d' % (grid_number, tile_number))
def start_move(self):
self.error = False
self.pushButton_move.setText('Busy... please wait.')
self.pushButton_move.setEnabled(False)
thread = threading.Thread(target=self.move_and_wait)
thread.start()
def move_and_wait(self):
# Load target coordinates
if self.ov_number >= 0:
stage_x, stage_y = self.cs.get_ov_centre_s(self.ov_number)
elif self.tile_number >= 0:
stage_x, stage_y = self.gm.get_tile_coordinates_s(
self.grid_number, self.tile_number)
# Now move the stage
self.microtome.move_stage_to_xy((stage_x, stage_y))
if self.microtome.get_error_state() > 0:
self.error = True
self.microtome.reset_error_state()
# Signal that move complete
self.finish_trigger.s.emit()
def move_completed(self):
if self.error:
QMessageBox.warning(self, 'Error',
'An error was detected during the move. '
'Please try again.',
QMessageBox.Ok)
else:
QMessageBox.information(self, 'Move complete',
'The stage has been moved to the selected position. '
'The Viewport will be updated after pressing OK.',
QMessageBox.Ok)
super().accept()
# Enable button again:
self.pushButton_move.setText('Move again')
self.pushButton_move.setEnabled(True)
#------------------------------------------------------------------------------
class MotorTestDlg(QDialog):
"""Perform a random-walk-like XYZ motor test. Experimental, only for
testing/debugging. Only works with a
<reponame>anthonyhu/TrackR-CNN
# a lot of stuff is copied over from savitar1, but we still need to port more of it
import numpy as np
from scipy.special import expit as sigmoid
from collections import namedtuple
import munkres
from scipy.spatial.distance import cdist
import pycocotools.mask as cocomask
from cv2 import remap, INTER_NEAREST
TrackElement_ = namedtuple("TrackElement", ["t", "box", "reid", "track_id", "class_", "mask", "score"])
TrackElement = namedtuple("TrackElement", ["box", "track_id", "class_", "mask", "score"])
munkres_obj = munkres.Munkres()
def track_single_sequence(tracker_options, boxes, scores, reids, classes, masks, optical_flow=None):
# perform tracking per class and in the end combine the results
classes_flat = [c for cs in classes for c in cs]
unique_classes = np.unique(classes_flat)
start_track_id = 1
class_tracks = []
tracker_options_class = {"tracker": tracker_options["tracker"], "reid_comp": tracker_options["reid_comp"],
"box_offset": tracker_options["box_offset"],
"box_scale": tracker_options["box_scale"]}
for class_ in unique_classes:
if class_ == 1:
tracker_options_class["detection_confidence_threshold"] = tracker_options["detection_confidence_threshold_car"]
tracker_options_class["reid_weight"] = tracker_options["reid_weight_car"]
tracker_options_class["mask_iou_weight"] = tracker_options["mask_iou_weight_car"]
tracker_options_class["bbox_iou_weight"] = tracker_options["bbox_iou_weight_car"]
tracker_options_class["bbox_center_weight"] = tracker_options["bbox_center_weight_car"]
tracker_options_class["association_threshold"] = tracker_options["association_threshold_car"]
tracker_options_class["keep_alive"] = tracker_options["keep_alive_car"]
tracker_options_class["new_reid_threshold"] = tracker_options["new_reid_threshold_car"]
tracker_options_class["reid_euclidean_offset"] = tracker_options["reid_euclidean_offset_car"]
tracker_options_class["reid_euclidean_scale"] = tracker_options["reid_euclidean_scale_car"]
elif class_ == 2:
tracker_options_class["detection_confidence_threshold"] = tracker_options[
"detection_confidence_threshold_pedestrian"]
tracker_options_class["reid_weight"] = tracker_options["reid_weight_pedestrian"]
tracker_options_class["mask_iou_weight"] = tracker_options["mask_iou_weight_pedestrian"]
tracker_options_class["bbox_iou_weight"] = tracker_options["bbox_iou_weight_pedestrian"]
tracker_options_class["bbox_center_weight"] = tracker_options["bbox_center_weight_pedestrian"]
tracker_options_class["association_threshold"] = tracker_options["association_threshold_pedestrian"]
tracker_options_class["keep_alive"] = tracker_options["keep_alive_pedestrian"]
tracker_options_class["new_reid_threshold"] = tracker_options["new_reid_threshold_pedestrian"]
tracker_options_class["reid_euclidean_offset"] = tracker_options["reid_euclidean_offset_pedestrian"]
tracker_options_class["reid_euclidean_scale"] = tracker_options["reid_euclidean_scale_pedestrian"]
else:
assert False, "unknown class"
if tracker_options["new_reid"]:
tracks = tracker_per_class_new_reid(tracker_options_class, boxes, scores, reids, classes, masks, class_,
start_track_id, optical_flow=optical_flow)
else:
tracks = tracker_per_class(tracker_options_class, boxes, scores, reids, classes, masks, class_, start_track_id,
optical_flow=optical_flow)
class_tracks.append(tracks)
track_ids_flat = [track.track_id for tracks_t in tracks for track in tracks_t]
track_ids_flat.append(start_track_id)
start_track_id = max(track_ids_flat) + 1
n_timesteps = len(boxes)
tracks_combined = [[] for _ in range(n_timesteps)]
for tracks_c in class_tracks:
for t, tracks_c_t in enumerate(tracks_c):
tracks_combined[t].extend(tracks_c_t)
return tracks_combined
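# Hedged sketch (added; values are illustrative placeholders, not tuned settings):
# the top-level and per-class keys consumed above, shown for the car class.
# The *_pedestrian variants follow the same pattern.
#   tracker_options = {
#       "tracker": "hungarian", "reid_comp": "euclidean", "new_reid": False,
#       "box_offset": 50.0, "box_scale": 0.02,
#       "detection_confidence_threshold_car": 0.5, "reid_weight_car": 1.0,
#       "mask_iou_weight_car": 1.0, "bbox_iou_weight_car": 0.0,
#       "bbox_center_weight_car": 0.0, "association_threshold_car": 0.3,
#       "keep_alive_car": 0, "new_reid_threshold_car": 2.0,
#       "reid_euclidean_offset_car": 5.0, "reid_euclidean_scale_car": 1.0,
#       # ...plus the matching *_pedestrian keys
#   }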
def tracker_per_class(tracker_options, boxes, scores, reids, classes, masks, class_to_track, start_track_id,
optical_flow=None):
max_track_id = start_track_id
all_tracks = []
active_tracks = []
if optical_flow is None:
optical_flow = [None for _ in masks]
else:
optical_flow = [None] + optical_flow
assert len(boxes) == len(scores) == len(reids) == len(classes) == len(masks) == len(optical_flow)
for t, (boxes_t, scores_t, reids_t, classes_t, masks_t, flow_tm1_t) in enumerate(zip(boxes, scores, reids,
classes, masks, optical_flow)):
detections_t = []
for box, score, reid, class_, mask in zip(boxes_t, scores_t, reids_t, classes_t, masks_t):
if class_ != class_to_track:
continue
if mask is not None and cocomask.area(mask) <= 10:
continue
if score >= tracker_options["detection_confidence_threshold"]:
detections_t.append((box, reid, mask, class_, score))
else:
continue
if len(detections_t) == 0:
curr_tracks = []
elif len(active_tracks) == 0:
curr_tracks = []
for det in detections_t:
curr_tracks.append(TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
track_id=max_track_id, score=det[4]))
max_track_id += 1
else:
association_similarities = np.zeros((len(detections_t), len(active_tracks)))
if tracker_options["reid_weight"] != 0:
curr_reids = np.array([x[1] for x in detections_t], dtype="float64")
last_reids = np.array([x.reid for x in active_tracks], dtype="float64")
if tracker_options["reid_comp"] == "sigmoid_dot":
reid_similarities = sigmoid(np.dot(curr_reids, last_reids.T))
elif tracker_options["reid_comp"] == "cosine":
reid_similarities = np.dot(curr_reids/np.linalg.norm(curr_reids, axis=1, ord=2)[:, np.newaxis],
(last_reids/np.linalg.norm(last_reids, axis=1, ord=2)[:, np.newaxis]).T)
elif tracker_options["reid_comp"] == "euclidean":
reid_dists = cdist(curr_reids, last_reids, "euclidean")
reid_similarities = tracker_options["reid_euclidean_scale"] *\
(tracker_options["reid_euclidean_offset"] - reid_dists)
elif tracker_options["reid_comp"] == "normalized_euclidean":
reid_dists = cdist(curr_reids/np.linalg.norm(curr_reids, axis=1, ord=2)[:, np.newaxis],
last_reids/np.linalg.norm(last_reids, axis=1, ord=2)[:, np.newaxis], "euclidean")
reid_similarities = 1 - reid_dists
else:
assert False
association_similarities += tracker_options["reid_weight"] * reid_similarities
if tracker_options["mask_iou_weight"] != 0:
# Prepare flow
#h, w = flow_tm1_t.shape[:2]
#flow_tm1_t = -flow_tm1_t
#flow_tm1_t[:, :, 0] += np.arange(w)
#flow_tm1_t[:, :, 1] += np.arange(h)[:, np.newaxis]
masks_t = [v[2] for v in detections_t]
masks_tm1 = [v.mask for v in active_tracks]
#masks_tm1_warped = [warp_flow(mask, flow_tm1_t) for mask in masks_tm1]
masks_tm1_warped = masks_tm1
mask_ious = cocomask.iou(masks_t, masks_tm1_warped, [False] * len(masks_tm1_warped))
association_similarities += tracker_options["mask_iou_weight"] * mask_ious
if tracker_options["bbox_center_weight"] != 0:
centers_t = [v[0][0:2] + (v[0][2:4] - v[0][0:2]) / 2 for v in detections_t]
centers_tm1 = [v.box[0:2] + (v.box[2:4] - v.box[0:2]) / 2 for v in active_tracks]
box_dists = cdist(np.array(centers_t), np.array(centers_tm1), "euclidean")
box_similarities = tracker_options["box_scale"] *\
(tracker_options["box_offset"] - box_dists)
association_similarities += tracker_options["bbox_center_weight"] * box_similarities
if tracker_options["bbox_iou_weight"] != 0:
bboxes_t = [v[0] for v in detections_t]
bboxes_tm1 = [v.box for v in active_tracks]
bboxes_tm1_warped = [warp_box(box, flow_tm1_t) for box in bboxes_tm1]
bbox_ious = np.array([[bbox_iou(box1, box2) for box1 in bboxes_tm1_warped] for box2 in bboxes_t])
assert (0 <= bbox_ious).all() and (bbox_ious <= 1).all()
association_similarities += tracker_options["bbox_iou_weight"] * bbox_ious
curr_tracks = []
detections_assigned = [False for _ in detections_t]
if tracker_options["tracker"] == "greedy":
while True:
idx = association_similarities.argmax()
idx = np.unravel_index(idx, association_similarities.shape)
val = association_similarities[idx]
if val < tracker_options["association_threshold"]:
break
det = detections_t[idx[0]]
te = TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
track_id=active_tracks[idx[1]].track_id, score=det[4])
curr_tracks.append(te)
detections_assigned[idx[0]] = True
association_similarities[idx[0], :] = -1e10
association_similarities[:, idx[1]] = -1e10
elif tracker_options["tracker"] == "hungarian":
cost_matrix = munkres.make_cost_matrix(association_similarities)
disallow_indices = np.argwhere(association_similarities <= tracker_options["association_threshold"])
for ind in disallow_indices:
cost_matrix[ind[0]][ind[1]] = 1e9
indexes = munkres_obj.compute(cost_matrix)
for row, column in indexes:
value = cost_matrix[row][column]
if value == 1e9:
continue
det = detections_t[row]
te = TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
track_id=active_tracks[column].track_id, score=det[4])
curr_tracks.append(te)
detections_assigned[row] = True
else:
assert False
for det, assigned in zip(detections_t, detections_assigned):
if not assigned:
curr_tracks.append(TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
track_id=max_track_id, score=det[4]))
max_track_id += 1
all_tracks.append(curr_tracks)
newly_active_ids = {track.track_id for track in curr_tracks}
active_tracks = [track for track in active_tracks
if track.track_id not in newly_active_ids and track.t >= t - tracker_options["keep_alive"]]
active_tracks.extend(curr_tracks)
# remove the reid values, since they are an implementation detail of the tracker and should not be part of the result
result = [[TrackElement(box=track.box, track_id=track.track_id, mask=track.mask, class_=track.class_, score=track.score)
for track in tracks_t] for tracks_t in all_tracks]
return result
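# sigmoid, warp_box and bbox_iou are helpers defined elsewhere in this module. For reference,
# the pairwise IoU used above is assumed to be the standard one for [x1, y1, x2, y2] boxes,
# roughly equivalent to this sketch:
# def bbox_iou(box1, box2):
#     ix1, iy1 = max(box1[0], box2[0]), max(box1[1], box2[1])
#     ix2, iy2 = min(box1[2], box2[2]), min(box1[3], box2[3])
#     inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
#     area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
#     area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
#     union = area1 + area2 - inter
#     return inter / union if union > 0 else 0.0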
def tracker_per_class_new_reid(tracker_options, boxes, scores, reids, classes, masks, class_to_track, start_track_id,
optical_flow=None):
assert tracker_options["reid_comp"] == "euclidean"
assert tracker_options["tracker"] == "hungarian"
max_track_id = start_track_id
all_tracks = []
last_tracks = []
if optical_flow is None:
optical_flow = [None for _ in masks]
else:
optical_flow = [None] + optical_flow
assert len(boxes) == len(scores) == len(reids) == len(classes) == len(masks) == len(optical_flow)
for t, (boxes_t, scores_t, reids_t, classes_t, masks_t, flow_tm1_t) in enumerate(zip(boxes, scores, reids,
classes, masks, optical_flow)):
curr_tracks = []
assigned_track_ids = []
all_detections_t = []
### build all_detections_t
for box, score, reid, class_, mask in zip(boxes_t, scores_t, reids_t, classes_t, masks_t):
if class_ != class_to_track:
continue
if mask is not None and cocomask.area(mask) <= 10:
continue
all_detections_t.append((box, reid, mask, class_, score))
# assign high confidence dets by association scores
high_confidence_detections_t = [d for d in all_detections_t if
d[4] >= tracker_options["detection_confidence_threshold"]]
detections_assigned = [False for _ in high_confidence_detections_t]
if len(high_confidence_detections_t) > 0 and len(last_tracks) > 0:
association_similarities = calculate_association_similarities(high_confidence_detections_t, last_tracks,
flow_tm1_t, tracker_options)
cost_matrix = munkres.make_cost_matrix(association_similarities)
disallow_indices = np.argwhere(association_similarities <= tracker_options["association_threshold"])
for ind in disallow_indices:
cost_matrix[ind[0]][ind[1]] = 1e9
indexes = munkres_obj.compute(cost_matrix)
for row, column in indexes:
value = cost_matrix[row][column]
if value == 1e9:
continue
det = high_confidence_detections_t[row]
track_id = last_tracks[column].track_id
te = TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
track_id=track_id, score=det[4])
assigned_track_ids.append(track_id)
curr_tracks.append(te)
detections_assigned[row] = True
#### begin reid stuff ####
old_tracks = []
for tracks_in_time_step in all_tracks:
for track_obj in tracks_in_time_step:
if track_obj.track_id not in assigned_track_ids:
old_tracks.append(track_obj)
old_reids = np.array([x.reid for x in old_tracks], dtype="float32")
# low conf dets
dets_for_reid = [d for d in all_detections_t if d[4] < tracker_options["detection_confidence_threshold"]]
# use unassigned high conf dets as well?
for det, assigned in zip(high_confidence_detections_t, detections_assigned):
if not assigned:
dets_for_reid.append(det)
curr_reids = np.array([d[1] for d in dets_for_reid], dtype="float32")
reided_dets = []
if old_reids.size > 0 and curr_reids.size > 0:
reid_dists = cdist(curr_reids, old_reids, "euclidean")
while True:
idx = reid_dists.argmin()
idx = np.unravel_index(idx, reid_dists.shape)
val = reid_dists[idx]
if val > tracker_options["new_reid_threshold"]:
break
#print("reided", class_to_track, val)
det = dets_for_reid[idx[0]]
reided_dets.append(det)
track = old_tracks[idx[1]]
te = TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
track_id=track.track_id, score=det[4])
curr_tracks.append(te)
reid_dists[idx[0], :] = 1e10
for col, track2 in enumerate(old_tracks):
if track.track_id == track2.track_id:
reid_dists[:, col] = 1e10
### end reid stuff ###
# assign every high confidence det which has neither been propagated nor reided to a new track
for det, assigned in zip(high_confidence_detections_t, detections_assigned):
if not assigned:
curr_tracks.append(TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
track_id=max_track_id, score=det[4]))
max_track_id += 1
all_tracks.append(curr_tracks)
last_tracks = curr_tracks
# remove the reid values, since they are an implementation detail of the tracker and should not be part of the result
result = [[TrackElement(box=track.box, track_id=track.track_id, mask=track.mask, class_=track.class_, score=track.score)
for track in tracks_t] for tracks_t in all_tracks]
return result
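# TrackElement_ and TrackElement are defined elsewhere in this module; judging from the keyword
# arguments used above they are assumed to be simple records along the lines of:
# TrackElement_ = namedtuple("TrackElement_", ["t", "box", "reid", "mask", "class_", "track_id", "score"])
# TrackElement = namedtuple("TrackElement", ["box", "track_id", "mask", "class_", "score"])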
def calculate_association_similarities(detections_t, last_tracks, flow_tm1_t, tracker_options):
association_similarities = np.zeros((len(detections_t), len(last_tracks)))
if tracker_options["reid_weight"] != 0:
curr_reids = np.array([x[1] for x in detections_t], dtype="float64")
last_reids = np.array([x.reid | |
to MoMA",
"<NAME> - Meet the dazzling flying machines of the future",
"<NAME> - The business logic of sustainability",
"<NAME> - The quantified self",
"<NAME> - Visualizing ourselves ... with crowd-sourced data",
"<NAME> - The danger of hiding who you are",
"<NAME> - A robot that flies like a bird",
"<NAME> - The fastest ambulance A motorcycle",
"<NAME> - The psychology of your future self",
"<NAME> - Treat design as art",
"<NAME> - How to go to space without having to go to space",
"<NAME> - The oceans glory -- and horror",
"<NAME> - Homeopathy quackery and fraud",
"<NAME> - Glowing life in an underwater world",
"<NAME> - The surprisingly logical minds of babies",
"<NAME> - Why we need strangeness",
"<NAME> - How to step up in the face of disaster",
"<NAME> - Why we need gender-neutral bathrooms",
"<NAME> - How state budgets are breaking US schools",
"<NAME> - My obsession with objects and the stories they tell",
"<NAME> - All your devices can be hacked",
"<NAME> - The 4 commandments of cities",
"<NAME> - Understanding cancer through proteomics",
"<NAME> - Dont misrepresent Africa",
"<NAME> - Were covered in germs. Lets design for that.",
"<NAME> - The oil spills unseen villains -- and victims",
"<NAME> - Are mushrooms the new plastic",
"<NAME> - Looks arent everything. Believe me Im a model.",
"<NAME> - The mathematics of love",
"<NAME> - How we found hundreds of potential Earth-like planets",
"<NAME> - Why the world needs WikiLeaks",
"<NAME> - Animations of unseeable biology",
"<NAME> - Why you will fail to have a great career",
"<NAME> - 5 ways to lead in an era of constant change",
"<NAME> - Why ordinary people need to understand power",
"<NAME> - How the worst moments in our lives make us who we are",
"<NAME> - A new kind of music video",
"<NAME> - The optimism bias",
"<NAME> - The day I turned down <NAME>",
"<NAME> - Pay attention to nonviolence",
"<NAME> - How photography connects us",
"<NAME> - Telling stories from Africa",
"<NAME> - Design and discovery",
"<NAME> - My library of human imagination",
"<NAME> - Learning from Sherman the shark",
"<NAME> - What happens when a city runs out of room for its dead",
"<NAME> - The hidden power of smiling",
"<NAME> - The Philosophical Breakfast Club",
"<NAME> - Social services are broken. How we can fix them",
"<NAME> - Keep your goals to yourself",
"<NAME> - The secret structure of great talks",
"<NAME> - Great design is serious not solemn",
"<NAME> - Why the world needs charter cities",
"<NAME> - Breakthrough designs for ultra-low-cost products",
"<NAME> - Mosquitos malaria and education",
"<NAME> - How well resurrect the gastric brooding frog the Tasmanian tiger",
"<NAME> - Are we in control of our own decisions",
"<NAME> - A letter to all who have lost in this era",
"<NAME> - Its time to question bio-engineering",
"<NAME> - Can we create new senses for humans",
"<NAME> - A map of the brain",
"<NAME> - Can we eat to starve cancer",
"<NAME> - Be an artist right now",
"<NAME> - DNA folding in detail",
"<NAME> - Weaving narratives in museum galleries",
"<NAME> - We can start winning the war against cancer",
"<NAME> - In praise of slowness",
"<NAME> - Three types of online attack",
"<NAME> - Why are these 32 symbols found in ancient caves all over Europe",
"<NAME> - How do you save a shark you know nothing about",
"<NAME> - Are we born to run",
"<NAME> - Legos for grownups",
"<NAME> - Listening to global voices",
"<NAME> - Why the universe seems so strange",
"<NAME> - How to grow a forest in your backyard",
"<NAME> - A new way to fight corruption",
"<NAME> - Machine intelligence makes human morals more important",
"<NAME> - In search of the man who broke my neck",
"<NAME> - Human-centered design",
"<NAME> - Can we build AI without losing control over it",
"<NAME> - A cinematic journey through visual effects",
"<NAME> - How women wage conflict without violence",
"<NAME> - Fighting cancer with dance",
"<NAME> - The birth of Wikipedia",
"<NAME> - Why we shouldnt trust markets with our civic life",
"<NAME> - Experiments that point to a new understanding of cancer",
"<NAME> - Luke a new prosthetic arm for soldiers",
"<NAME> - Teachers need real feedback",
"<NAME> - The big idea my brother inspired",
"<NAME> - The music of a war child",
"<NAME> - The inside story of the Paris climate agreement",
"<NAME> - Inside the Egyptian revolution",
"<NAME> - Cradle to cradle design",
"<NAME> - Could the sun be good for your heart",
"<NAME> - Why 30 is not the new 20",
"<NAME> - A magical tale with augmented reality",
"<NAME> <NAME> <NAME> - Award-winning teenage science in action",
"<NAME> - The electric rise and fall of Nikola Tesla",
"<NAME> - A tool to fix one of the most dangerous moments in surgery",
"<NAME> - Why city flags may be the worst-designed thing youve never noticed",
"<NAME> - Healthier men one moustache at a time",
"<NAME> - Whats so funny about mental illness",
"<NAME> - The future of cars",
"<NAME> - How to make stress your friend",
"<NAME> - Silicon-based comedy",
"Skylar Tibbits - Can we make things that make themselves",
"<NAME> - A dance in a hurricane of paper wind and light",
"<NAME> - To upgrade is human",
"<NAME> - The first 21 days of a bees life",
"<NAME> - The family I lost in North Korea. And the family I gained.",
"Mundano - Pimp my ... trash cart",
"<NAME> - A 50-cent microscope that folds like origami",
"<NAME> - Transplant cells not organs",
"<NAME> - The pattern behind self-deception",
"<NAME> - Who controls the world",
"<NAME> - Fashion and creativity",
"<NAME> - My life in typefaces",
"<NAME> - The birth of a word",
"<NAME> - Is Pivot a turning point for web exploration",
"<NAME> - The roots of plant intelligence",
"<NAME> - Back to the future of 1994",
"<NAME> - How virtual reality can create the ultimate empathy machine",
"<NAME> - The first secret of design is ... noticing",
"<NAME> - Designers -- think big",
"<NAME> - Biomimicrys surprising lessons from natures engineers",
"<NAME> - A plane you can drive",
"<NAME> - A complicated hero in the war on dictatorship",
"<NAME> - The next age of government",
"<NAME> - Why smart statistics are the key to fighting crime",
"<NAME> - My architectural philosophy Bring the community into the process",
"<NAME> - The wonderful and terrifying implications of computers that can learn",
"Hans and <NAME> - How not to be ignorant about the world",
"<NAME> - The Earth is full",
"<NAME> - The linguistic genius of babies",
"<NAME> - Economic growth has stalled. Lets fix it",
"<NAME> - Soon well cure diseases with a cell not a pill",
"<NAME> - Science versus wonder",
"<NAME> - Why open a school To close a prison",
"<NAME> - Let the environment guide our development",
"<NAME> - Drawing on humor for change",
"<NAME> - Change our culture change our world",
"<NAME> - This new telescope might show us the beginning of the universe",
"<NAME> - Cloudy with a chance of joy",
"<NAME> - A comic sendup of TED2006",
"<NAME> - Kids need structure",
"<NAME> - How to fight desertification and reverse climate change",
"<NAME> - Join the SETI search",
"<NAME> - 3 lessons on success from an Arab businesswoman",
"<NAME> - A theory of everything",
"<NAME> - Why not make video games for girls",
"<NAME> - Art that lets you talk back to NSA spies",
"<NAME> - How the news distorts our worldview",
"<NAME> - The mathematics of history",
"<NAME> - How to revive a neighborhood with imagination beauty and art",
"<NAME> - The | |
range(len(cList)):
x, y, z = cList[num]
polyline.points[num].co = (x, y, z, weight)
return polyline
def execute(self, context):
#update selection
if bpy.context.object.type != "EMPTY":
if bpy.context.mode == 'OBJECT':
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.object.mode_set(mode = 'OBJECT')
elif bpy.context.mode == 'EDIT_MESH':
bpy.ops.object.mode_set(mode = 'OBJECT')
bpy.ops.object.mode_set(mode = 'EDIT')
if self.getEmptyPositions() is not None:
# if edit mode
if bpy.context.object.type == "MESH":
if bpy.context.mode == "EDIT_MESH":
bpy.ops.object.mode_set(mode = 'OBJECT')
# get curve name
name = bpy.context.scene.curve_name
if name:
if name in bpy.context.scene.objects:
while name in bpy.context.scene.objects:
# bump a trailing digit, or append one if the name does not end in a digit
if name[-1].isdigit():
name = name[:-1] + str(int(name[-1]) + 1)
else:
name = name + "1"
# create line
if bpy.context.scene.curve_type == 'Line':
if bpy.context.mode == 'OBJECT':
listOfVectors = self.getEmptyPositions()
if listOfVectors is not None:
self.MakePolyLine(name, listOfVectors, 0.01)
bpy.ops.object.select_all(action = 'DESELECT')
bpy.data.objects[name].select_set(True)
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
bpy.context.view_layer.objects.active = bpy.data.objects[name]
# create bezier
if bpy.context.scene.curve_type == 'Bezier':
if bpy.context.mode == 'OBJECT':
listOfVectors = self.getEmptyPositions()
if listOfVectors is not None:
self.MakePolyLine(name, listOfVectors, 0.01)
bpy.ops.object.select_all(action = 'DESELECT')
bpy.data.objects[name].select_set(True)
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
bpy.context.view_layer.objects.active = bpy.data.objects[name]
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.curve.select_all(action='SELECT')
bpy.ops.curve.spline_type_set(type='BEZIER')
bpy.ops.curve.handle_type_set(type='AUTOMATIC')
bpy.ops.object.mode_set(mode = 'OBJECT')
#assign color
bpy.context.object.color = (0, 0, 1, 1)
# cleanup
data = bpy.data.objects
for o in data:
if 'VertexEmpty' in o.name:
data.remove(o, do_unlink=True)
else:
self.report({'WARNING'}, "Curve is not named")
else:
self.report({'WARNING'}, "Select 2 objects or 2 vertices!")
return {'FINISHED'}
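# getEmptyPositions is defined elsewhere in this add-on. A hypothetical sketch of what it is
# assumed to return, based on how it is used above (the 'VertexEmpty' helper objects are
# created by another operator and cleaned up after the curve is built):
# def getEmptyPositions(self):
#     positions = [o.location.copy() for o in bpy.data.objects if 'VertexEmpty' in o.name]
#     return positions if len(positions) >= 2 else None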
class EdgeToCurve(Operator):
bl_idname = "object.edge_to_curve"
bl_label = "Edges to Curve"
bl_description = "Convert Edges to Curve"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return bpy.context.active_object
def execute(self, context):
o = bpy.ops.object
m = bpy.ops.mesh
obj = bpy.context.active_object
# reset selection
bpy.ops.mesh.reset_vertex_selection()
m.select_mode(type="EDGE")
bpy.context.scene.tool_settings.transform_pivot_point = 'BOUNDING_BOX_CENTER'
# get selected edges
edges = [v for v in obj.data.edges if v.select]
if len(edges):
if bpy.context.scene.curve_type == 'Bezier':
# detach edges
bpy.ops.mesh.duplicate_detach_faces()
# if modifiers
if len(obj.modifiers[:]):
for mod in obj.modifiers[:]:
bpy.ops.object.modifier_remove(modifier=mod.name)
# make conversion
bpy.ops.object.convert(target='CURVE')
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.curve.select_all(action='SELECT')
bpy.ops.curve.spline_type_set(type='BEZIER')
bpy.ops.curve.handle_type_set(type='AUTOMATIC')
bpy.ops.curve.smooth()
bpy.ops.curve.smooth()
#assign color
bpy.context.object.color = (1, 0, 0, 1)
bpy.ops.curve.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode = 'OBJECT')
elif bpy.context.scene.curve_type == 'Line':
bpy.ops.mesh.duplicate_detach_faces()
bpy.ops.object.mode_set(mode = 'OBJECT')
else:
self.report({'WARNING'}, "Nothing selected")
return {'FINISHED'}
class VertexPaintrFill(Operator):
bl_idname = "mesh.fill_vertex_color"
bl_label = "Fill Vertex Color"
bl_description = "Fill Vertex Color"
bl_options = {'REGISTER', 'UNDO'}
color_new: bpy.props.StringProperty(options = {'HIDDEN'})
@classmethod
def poll(cls, context):
return context.active_object and context.mode == 'EDIT_MESH'
def _fill_polygon_(self, obj, replace_with):
obj.use_paint_mask = True
if bpy.context.tool_settings.vertex_paint.brush != bpy.data.brushes['Draw']:
bpy.context.tool_settings.vertex_paint.brush = bpy.data.brushes['Draw']
bpy.data.brushes['Draw'].color = replace_with[:3]
bpy.ops.paint.vertex_color_set()
def replace_vertex_color(self, context):
sel = [obj for obj in bpy.context.selected_objects if obj.type != "EMPTY"]
mode = bpy.context.mode
if len(sel) > 0:
for o in sel:
bpy.context.view_layer.objects.active = o
obj = o.data
bpy.ops.object.mode_set(mode = 'VERTEX_PAINT')
if_replace = bpy.context.scene.replace_vertex_paint_value
fill_polygon = bpy.context.scene.fill_vertex_paint
alpha_value = bpy.context.scene.vertex_color_alpha_value
replace = bpy.context.scene.color_replace
if if_replace and fill_polygon:
bpy.context.scene.fill_vertex_paint = False
fill_polygon = bpy.context.scene.fill_vertex_paint
replace_with = None
if self.color_new == "Red":
replace_with = (1.0, 0.0, 0.0, 0.0)
elif self.color_new == "Green":
replace_with = (0.0, 1.0, 0.0, 0.0)
elif self.color_new == "Blue":
replace_with = (0.0, 0.0, 1.0, 0.0)
elif self.color_new == "A":
replace_with = (0.0, 0.0, 0.0, alpha_value)
elif self.color_new == "White":
replace_with = (1.0, 1.0, 1.0, 0.0)
elif self.color_new == "Black":
replace_with = (0.0, 0.0, 0.0, 0.0)
to_replace = None
if replace == 'R':
to_replace = (1.0, 0.0, 0.0)
elif replace == 'G':
to_replace = (0.0, 1.0, 0.0)
elif replace == 'B':
to_replace = (0.0, 0.0, 1.0)
elif replace == 'White':
to_replace = (1.0, 1.0, 1.0)
elif replace == 'Black':
to_replace = (0.0, 0.0, 0.0)
if if_replace == False:
verts = [v for v in obj.vertices if v.select]
else:
verts = [v for v in obj.vertices]
if if_replace:
if fill_polygon is False:
for polygon in obj.polygons:
for v in verts:
for i, index in enumerate(polygon.vertices):
if v.index == index:
loop_index = polygon.loop_indices[i]
# paint
if obj.vertex_colors.active.data[loop_index].color[:3] == to_replace:
obj.vertex_colors.active.data[loop_index].color = replace_with
else:
# clamp and paint
for s in range(4):
obj.vertex_colors.active.data[loop_index].color[s] = round(obj.vertex_colors.active.data[loop_index].color[s], 0)
if to_replace is not None:
if obj.vertex_colors.active.data[loop_index].color[:3] == to_replace:
obj.vertex_colors.active.data[loop_index].color = replace_with
else:
obj.vertex_colors.active.data[loop_index].color = replace_with
else:
self._fill_polygon_(obj, replace_with)
else:
# alpha
if fill_polygon is False:
for polygon in obj.polygons:
for v in verts:
for i, index in enumerate(polygon.vertices):
if v.index == index:
loop_index = polygon.loop_indices[i]
if self.color_new != "A":
obj.vertex_colors.active.data[loop_index].color = replace_with
else:
obj.vertex_colors.active.data[loop_index].color[3] = alpha_value
else:
self._fill_polygon_(obj, replace_with)
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.mesh.select_all(action='DESELECT')
else:
self.report({'WARNING'}, "Active object is hidden or not found!")
go_back_to_initial_mode(self, mode)
def execute(self, context):
self.replace_vertex_color(context)
return {'FINISHED'}
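# Example invocation (e.g. from the Python console), assuming a mesh in Edit Mode with some
# vertices selected; "Red" is one of the values handled by replace_vertex_color above:
# bpy.ops.mesh.fill_vertex_color(color_new="Red")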
############
class VertexColorChannelOnOff(Operator):
bl_idname = "mesh.channel_on_off"
bl_label = "Channel On/Off"
bl_description = "Channel On/Off"
bl_options = {'REGISTER', 'UNDO'}
action: bpy.props.FloatProperty(options = {'HIDDEN'})
@classmethod
def poll(cls, context):
return context.active_object and context.mode == 'EDIT_MESH'
def execute(self, context):
sel = [obj for obj in bpy.context.selected_objects if obj.type != "EMPTY"]
mode = bpy.context.mode
if len(sel) > 0:
for o in sel:
bpy.context.view_layer.objects.active = o
obj = o.data
bpy.ops.object.mode_set(mode = 'VERTEX_PAINT')
if_replace = bpy.context.scene.replace_vertex_paint_value
fill_polygon = bpy.context.scene.fill_vertex_paint
alpha_value = bpy.context.scene.vertex_color_alpha_value
replace = bpy.context.scene.color_replace
if if_replace and fill_polygon:
bpy.context.scene.fill_vertex_paint = False
fill_polygon = bpy.context.scene.fill_vertex_paint
to_replace = None
if replace == 'R':
to_replace = 0
elif replace == 'G':
to_replace = 1
elif replace == 'B':
to_replace = 2
if if_replace == False:
verts = [v for v in obj.vertices if v.select]
else:
verts = [v for v in obj.vertices]
if if_replace:
if fill_polygon is False:
for polygon in obj.polygons:
for v in verts:
for i, index in enumerate(polygon.vertices):
if v.index == index:
loop_index = polygon.loop_indices[i]
#zero out channel
if to_replace is not None:
r = obj.vertex_colors.active.data[loop_index].color[0]
g = obj.vertex_colors.active.data[loop_index].color[1]
b = obj.vertex_colors.active.data[loop_index].color[2]
obj.vertex_colors.active.data[loop_index].color[to_replace] = self.action
else:
# fill mode: write the chosen channel value on every loop of the mesh
if to_replace is not None:
for datum in obj.vertex_colors.active.data:
datum.color[to_replace] = self.action
else:
# alpha
if fill_polygon is False:
for polygon in obj.polygons:
for v in verts:
for i, index in enumerate(polygon.vertices):
if v.index == index:
loop_index = polygon.loop_indices[i]
if to_replace is not None:
obj.vertex_colors.active.data[loop_index].color[to_replace] = self.action
else:
obj.vertex_colors.active.data[loop_index].color[3] = alpha_value
else:
# fill mode: write the chosen channel value on every loop of the mesh
if to_replace is not None:
for datum in obj.vertex_colors.active.data:
datum.color[to_replace] = self.action
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.mesh.select_all(action='DESELECT')
else:
self.report({'WARNING'}, "Nothing changed!")
go_back_to_initial_mode(self, mode)
return {'FINISHED'}
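# Example invocation: switch the channel chosen in scene.color_replace fully off or on:
# bpy.ops.mesh.channel_on_off(action=0.0)
# bpy.ops.mesh.channel_on_off(action=1.0)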
############
class CopyObjectName(Operator):
bl_idname = "object.copy_object_name"
bl_label = "Copy Object Name"
bl_options = {'REGISTER', 'UNDO'}
bl_description = "Copy Object Name"
@classmethod
def poll(cls, context):
return context.object is not None
def execute(self, context):
obj = bpy.context.active_object
if obj is not None:
bpy.context.window_manager.clipboard = obj.name
else:
self.report({'WARNING'}, "Incorrect source object!")
return {'FINISHED'}
class PasteObjectName(Operator):
bl_idname = "object.paste_object_name"
bl_label = "Paste Object Name"
bl_options = {'REGISTER', 'UNDO'}
bl_description = "Paste Object Name"
@classmethod
def poll(cls, context):
return context.object is not None
def execute(self, context):
sel = bpy.context.selected_objects
if len(sel):
for i in sel:
i.name = bpy.context.window_manager.clipboard
return {'FINISHED'}
class NameForBake(Operator):
# select 2 meshes and call the command. Highest will get suffix/name "_high", lowest "_low"
bl_idname = "object.rename_lp_hp"
bl_label = "Name For Bake"
bl_options = {'REGISTER', 'UNDO'}
bl_description = "Add *_low and *_high at the ends of low/high poly mesh names. Select 2 meshes and run the script"
@classmethod
def poll(cls, context):
return context.object is not None
def execute(self, context):
sel = bpy.context.selected_objects
if len(sel) == 2:
if sel[0].type == 'MESH' and sel[1].type == 'MESH':
if len(sel[0].data.vertices) > len(sel[1].data.vertices):
sel[1].name = sel[0].name + "_low"
sel[0].name = sel[0].name + "_high"
else:
sel[1].name = sel[0].name + "_high"
sel[0].name = sel[0].name + "_low"
return {'FINISHED'}
class CreateGroup(Operator):
#all LODs must be properly named before use
bl_idname = "object.create_group"
bl_label = "Create Group"
bl_options = {'REGISTER', 'UNDO'}
bl_description = "Add a new Group(Empty) and parent selected objects inside"
@classmethod
def poll(cls, context):
return context.object is not None
def execute(self, context):
sel = bpy.context.selected_objects
ao = bpy.context.active_object
bpy.ops.object.select_all(action='DESELECT')
# drop any empties from the selection; re-select only the non-empty objects
for i in sel:
if i.type != "EMPTY":
i.select_set(True)
sel = bpy.context.selected_objects
bpy.context.view_layer.objects.active = ao
# if the objects have a parent
parent_empty = bpy.context.active_object.parent
#select the collection where the active object is located
bpy.ops.object.select_act_obj_collection()
if len(sel):
# get collection
col = sel[0].users_collection[0].name
# if not Scene Collection
bpy.ops.collection.objects_add_active(collection = col)
#add empty
bpy.ops.object.empty_add(type='PLAIN_AXES', radius=0.00001, location = (0,0,0))
new_empty = bpy.context.active_object
# parent if parent exists
if parent_empty is not None:
new_empty.parent = parent_empty
#parenting
for i in sel:
i.parent = new_empty
new_empty.name = i.name
return {'FINISHED'}
class CreateCollection(Operator):
#all LODs must be properly named before use
bl_idname = "object.create_collection_with_objects"
bl_label = "Create Collection"
bl_options = {'REGISTER', 'UNDO'}
bl_description = "Add a new Collection and parent selected objects inside"
@classmethod
def poll(cls, context):
return context.object is not None
def find_collection(self, ao):
#find the active object's parent collection
collections = bpy.data.collections
if len(collections) > 0:
for collection in collections:
if len(collection.objects) > 0:
if ao.name in collection.objects:
return collection
else:
continue
else:
return bpy.context.view_layer.active_layer_collection.collection
def execute(self, context):
sel = bpy.context.selected_objects
ao = bpy.context.active_object
parent_collection = self.find_collection(ao)
new_collection = bpy.context.blend_data.collections.new(name= ao.name)
if parent_collection is not None:
parent_collection.children.link(new_collection)
for obj in sel:
new_collection.objects.link(obj)
parent_collection.objects.unlink(obj)
else:
parent_collection = bpy.context.view_layer.active_layer_collection.collection
parent_collection.children.link(new_collection)
for obj in sel:
new_collection.objects.link(obj)
parent_collection.objects.unlink(obj)
return {'FINISHED'}
class MoveToSceneCenter(Operator):
bl_idname = "object.move_to_scene_center"
bl_label = "Move to Scene Center"
bl_options = {'REGISTER', 'UNDO'}
bl_description = "Move to Scene Centre"
@classmethod
def poll(cls, context):
return context.object is not None
def execute(self, context):
sel = bpy.context.selected_objects
if len(sel):
for i in sel:
if i.data.users == 1:
i.location = (0,0,0)
else:
self.report({'WARNING'}, "Nothing changed. The object has instances")
return {'FINISHED'}
class SocketInVertexSelectionCentre(Operator):
bl_idname = "mesh.socket_in_vertex_selection_centre"
bl_label = "Socket In Vertex Selection Centre"
bl_description = "Create a Socket in the vertex selection centre"
@classmethod
def poll(cls, context):
return context.area.type == "VIEW_3D" and context.mode == "EDIT_MESH" and context.object.type == "MESH"
def execute(self, context):
obj = bpy.context.object
bm = bmesh.from_edit_mesh(obj.data)
verts = []
v_pos = Vector()
for vert in bm.verts:
if vert.select == True:
verts.append(vert)
if len(verts) > 0:
for vert in verts:
v_pos += vert.co
v_pos_avg = v_pos / len(verts)
orient = bpy.context.scene.transform_orientation_slots[0].type
new_socket = bpy.data.objects.new("SOCKET_", None)
scene_collection = context.layer_collection.collection
scene_collection.objects.link(new_socket)
if orient == "LOCAL":
new_matrix = obj.matrix_world @ Matrix.Translation(v_pos_avg)
new_socket.matrix_world = new_matrix
elif orient == "GLOBAL":
new_matrix = obj.matrix_world @ Matrix.Translation(v_pos_avg)
new_socket.matrix_world = new_matrix
new_socket.rotation_euler = ( 0, 0, 0 )
bpy.context.view_layer.objects.active = obj
obj.select_set(True)
bpy.ops.mesh.select_all(action='DESELECT')
else:
self.report({'WARNING'}, self.bl_idname + ": "+ "Nothing selected!")
return {'FINISHED'}
class SocketInObjectPivotPosition(Operator):
bl_idname = "object.socket_in_pivot"
bl_label = "Socket in Object Pivot"
bl_description = "Create Sockets in the objects' pivot positions"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
sel = bpy.context.selected_objects
if len(sel):
for i in sel:
#get pivot position
bpy.context.view_layer.objects.active = i
pos = i.location
#create empty
bpy.ops.object.empty_add(type='PLAIN_AXES', radius=0.5, location = (pos))
bpy.context.active_object.name = 'SOCKET_'
else:
self.report({'WARNING'}, self.bl_idname + ": " + "Nothing selected!")
return {'FINISHED'}
# Functions
def duplicate(cls, context, obj):
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.duplicate()
bpy.ops.object.select_all(action='DESELECT')
return context.active_object
def get_faces_indicies(cls, obj):
# called by the class only
bpy.ops.object.mode_set(mode = 'EDIT')
bm = bmesh.from_edit_mesh(obj.data)
pos = [f.index for f in bm.faces]
bpy.ops.object.mode_set(mode = 'OBJECT')
return pos
def select_mirrored_faces(cls, obj, indicies):
bm = bmesh.from_edit_mesh(obj.data)
bpy.ops.mesh.select_mode(type='FACE')
mirrored_faces = [f for f in bm.faces]
for f in mirrored_faces:
for match in indicies:
if f.index == match:
f.select = True
bm.select_flush(True)
bmesh.update_edit_mesh(obj.data)
# invert
bpy.ops.mesh.select_all(action='INVERT')
def fix_mirrored_half_triangulation(cls, obj, indicies):
if 'Triangulate' in obj.modifiers:
if obj.modifiers['Triangulate'].quad_method != 'FIXED':
obj.modifiers['Triangulate'].quad_method = 'FIXED'
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.mesh.select_all(action='DESELECT')
select_mirrored_faces(cls, obj, indicies)
bpy.ops.mesh.rotate_edge_triangulation_quads(quad_method="FIXED_ALTERNATE")
bpy.ops.object.mode_set(mode = 'OBJECT')
def generate_collections(cls, context, list, parent_index, children_count):
_collection_ = bpy.context.collection
_collections_= bpy.data.collections
# generate
if | |
password_change_is_valid = True
userobject.change_password_is_valid = password_change_is_valid
userobject.password_attempted_change_date = datetime.datetime.now()
#only overwrite the password if the post was a valid password change
if password_change_is_valid:
userobject.password_salt = uuid.uuid4().hex
userobject.password = utils.new_passhash(new_password, userobject.password_salt)
else:
logging.info("unable to set password to %s. Current password %s appears not to match" % (new_password, current_password))
# write the object in all cases, because state information about the attempted password
# change is written into the userobject. We use the attempted_password_change time in the
# ajax function "load_change_password_fields" -- we should fix this in the future and
# NOT write the userobject unless a successful password change has occurred -- we could easily
# pass the status back to the client and process it with javascript instead of tracking this
# status server-side (it is written like this because when I first started coding I didn't know
# any javascript).
put_userobject(userobject)
return HttpResponse('Success')
except:
error_reporting.log_exception(logging.critical)
return HttpResponse('Error')
###########################################################################
def reset_new_contact_or_mail_counter_notification_settings(object_ref_key):
# resets the date_of_last_notification on the object to the current time. This can be used for both "new_contact_counter" and "unread_mail_count"
try:
def txn(object_ref_key):
counter_obj = object_ref_key.get()
counter_obj.date_of_last_notification = datetime.datetime.now()
counter_obj.num_new_since_last_notification = 0
counter_obj.when_to_send_next_notification = datetime.datetime.max
counter_obj.when_to_send_next_notification_string = str(counter_obj.when_to_send_next_notification)
counter_obj.put()
ndb.transaction(lambda: txn(object_ref_key))
except:
error_reporting.log_exception(logging.critical)
###########################################################################
def modify_new_contact_counter(new_contact_counter_ref_key, action_type, action_prefix,
action_postfix, value,
hours_between_notifications, update_notification_times,
):
# updates the new_contact_counter_obj value based on the value passed in. Must
# be run in a transaction to ensure that only a single update can take place at a time.
# This value is used for tracking number of kisses, winks, keys, received by a user since
# the last time they viewed their contacts.
try:
def txn(new_contact_counter_ref_key, action_type, action_prefix, action_postfix, value):
action_field_for_contact_count = action_prefix + action_type + action_postfix
new_contact_counter_obj = new_contact_counter_ref_key.get()
current_count = getattr(new_contact_counter_obj, action_field_for_contact_count)
current_count += value
setattr(new_contact_counter_obj, action_field_for_contact_count, current_count)
if update_notification_times:
new_contact_counter_obj.num_new_since_last_notification += value
if value > 0:
# only update notification settings if the user has received a new
# contact (not one that is deleted)
if hours_between_notifications != None:
new_contact_counter_obj.when_to_send_next_notification = \
new_contact_counter_obj.date_of_last_notification + \
datetime.timedelta(hours = hours_between_notifications)
else:
new_contact_counter_obj.when_to_send_next_notification = datetime.datetime.max
if new_contact_counter_obj.num_new_since_last_notification <= 0:
# This number may go negative if someone gives a kiss, and then takes it away.
# Or if a notification is sent out about a new contact, and then that contact is removed
# before the user has checked their contacts.
# Not an exception,
# but it is good to keep it non-negative so that if new contacts are received after old contacts
# are taken away, this number will still be positive.
new_contact_counter_obj.num_new_since_last_notification = 0
# don't send a notification if there are no new contacts
new_contact_counter_obj.when_to_send_next_notification = datetime.datetime.max
new_contact_counter_obj.when_to_send_next_notification_string = str(new_contact_counter_obj.when_to_send_next_notification)
new_contact_counter_obj.put()
return new_contact_counter_obj
new_contact_counter_obj = ndb.transaction(lambda: txn(new_contact_counter_ref_key, action_type, action_prefix, action_postfix, value))
return new_contact_counter_obj
except:
error_reporting.log_exception(logging.critical)
###########################################################################
# New HR Datastore
def update_users_have_sent_messages_object_favorite_val(userobject, other_userobject, bool_val):
try:
users_have_sent_messages_object = utils.get_have_sent_messages_object(userobject.key, other_userobject.key)
if users_have_sent_messages_object:
users_have_sent_messages_object.other_is_favorite = bool_val
users_have_sent_messages_object.put()
else:
# if they have not yet sent messages between them, then do not create this object
pass
except:
error_reporting.log_exception(logging.critical)
def toggle_chat_friend_status(initiate_contact_object):
# chat_friend_stored will contain a string that indicates the following possible conditions:
# None: Neither the viewer or displayed profile have made any request to add to each others chat list
# "request_sent": the displayed profile has been sent a request to add to chat contacts
# "request_received": the viewer profile has been sent a chat request from the users whose profile is being viewed
# "connected": the viewer and displayed profile have agreed to add each other to their chat contacts.
# we are essentially toggling the chat_friend status, based on the current setting
try:
if not initiate_contact_object.chat_friend_stored:
# store a totally new request
initiate_contact_object.chat_friend_stored = "request_sent"
chat_request_action_on_receiver = "friend_request"
counter_modify = 1
elif initiate_contact_object.chat_friend_stored == "request_sent":
# remove the request
initiate_contact_object.chat_friend_stored = None
chat_request_action_on_receiver = "friend_request"
counter_modify = -1
elif initiate_contact_object.chat_friend_stored == "request_received":
# user is responding to a request to "connect" for chat and therefore a click will make them connected
initiate_contact_object.chat_friend_stored = "connected"
chat_request_action_on_receiver = "connected"
counter_modify = 1
elif initiate_contact_object.chat_friend_stored == "connected":
# the user has decided to disconnect from the other user -- however the other users request still remains
initiate_contact_object.chat_friend_stored = "request_received"
chat_request_action_on_receiver = "connected"
counter_modify = -1
else:
assert(False)
return (counter_modify, chat_request_action_on_receiver)
except:
error_reporting.log_exception(logging.critical)
return (0, None)
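# Illustrative walk-through of the toggle above (the stub object is hypothetical and only
# serves to show the state transitions of chat_friend_stored):
# class _FakeInitiateContact(object):
#     chat_friend_stored = None
# ico = _FakeInitiateContact()
# toggle_chat_friend_status(ico)  # -> (1, "friend_request"); state becomes "request_sent"
# toggle_chat_friend_status(ico)  # -> (-1, "friend_request"); state back to None
# ico.chat_friend_stored = "request_received"
# toggle_chat_friend_status(ico)  # -> (1, "connected"); state becomes "connected"
# toggle_chat_friend_status(ico)  # -> (-1, "connected"); state back to "request_received"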
###########################################################################
def modify_passive_initiate_contact_object(chat_request_action_on_receiver, add_or_remove, userobject_key, other_userobject_key):
# chat-request_action_on_receiver is either "friend_request" or "connected",
# and add_or_remove is either +1 or -1 respectively
#
# "passive" initiate contact object means the initiate contact object referring to "other_userobject" kisses,keys,etc.
# sent to "userobject". (this is the "opposite" of the "active" userobject).
def txn(initiate_contact_object_key):
# use a transaction to ensure that only a single update to this object will happen at a time.
initiate_contact_object = initiate_contact_object_key.get()
if chat_request_action_on_receiver == "friend_request":
if add_or_remove == +1:
initiate_contact_object.chat_friend_stored = "request_received"
if add_or_remove == -1:
initiate_contact_object.chat_friend_stored = None
if chat_request_action_on_receiver == "connected":
if add_or_remove == +1:
initiate_contact_object.chat_friend_stored = "connected"
if add_or_remove == -1:
initiate_contact_object.chat_friend_stored = "request_sent"
initiate_contact_object.chat_friend_stored_date = datetime.datetime.now()
# NOTE: reversed userobjects in the following call to get the "passive" object (the user receiving
# the chat request)
utils.put_initiate_contact_object(initiate_contact_object, other_userobject_key, userobject_key)
# currently this function should only be called for chat_friend requests.
assert(chat_request_action_on_receiver == "friend_request" or chat_request_action_on_receiver == "connected")
assert(add_or_remove == +1 or add_or_remove == -1)
# NOTE: reversed userobjects in the following call to get the "passive" object (the user receiving
# the chat request)
initiate_contact_object = utils.get_initiate_contact_object(other_userobject_key, userobject_key, create_if_does_not_exist=True)
initiate_contact_object_key = initiate_contact_object.key
try:
ndb.transaction(lambda: txn(initiate_contact_object_key))
update_bool = True
except:
# transaction failed -- object not modified
update_bool = False
return update_bool
def invalidate_memcache_for_friends_lists(owner_uid):
# invalidate memcache for chat structures for the user indicated in owner_uid - this ensures that they will
# receive a fresh friend list immediately.
all_friends_dict_memcache_key = constants.ALL_CHAT_FRIENDS_DICT_MEMCACHE_PREFIX + owner_uid
memcache.delete(all_friends_dict_memcache_key)
check_friends_online_last_update_memcache_key = constants.CHECK_CHAT_FRIENDS_ONLINE_LAST_UPDATE_MEMCACHE_PREFIX + owner_uid
memcache.delete(check_friends_online_last_update_memcache_key)
online_contacts_names_dict_memcache_key = constants.ONLINE_CHAT_CONTACTS_INFO_MEMCACHE_PREFIX + owner_uid
memcache.delete(online_contacts_names_dict_memcache_key)
def modify_active_initiate_contact_object(action, initiate_contact_object, userobject_key, other_userobject_key, override_minimum_delay = False):
# modifys the initiate_contact_object based on the "action" that the user has taken
# userobject refers to the user taking the action
# other_userobject refers to the profile that is being viewed when the action is taken
#
# "active" initiate contact object means the initiate contact object referring to "userobject" kisses,keys,etc.
# sent to "other_userobject".
try:
def txn(initiate_contact_object_key, action):
# use a transaction to ensure that only a single update to this object will happen at a time.
counter_modify = 0
chat_request_action_on_receiver = None
initiate_contact_object = initiate_contact_object_key.get()
action_stored_str = action + "_stored"
action_stored_date_str = action + "_stored_date"
if action != "chat_friend":
# Toggle the value, based on the current setting.
if not getattr(initiate_contact_object, action_stored_str):
setattr(initiate_contact_object, action_stored_str, True)
counter_modify = 1
else:
setattr(initiate_contact_object, action_stored_str, False)
counter_modify = -1
else:
# this is a chat_friend request, which requires more complex processing due to
# the different states that are possible.
(counter_modify, chat_request_action_on_receiver) = \
toggle_chat_friend_status(initiate_contact_object)
# invalidate memcache for chat_friend_tracker so that online users will
# immediately see their new friends online
invalidate_memcache_for_friends_lists(userobject_key.urlsafe())
invalidate_memcache_for_friends_lists(other_userobject_key.urlsafe())
# Update the stored time for both setting and removing the action
setattr(initiate_contact_object, action_stored_date_str, datetime.datetime.now())
utils.put_initiate_contact_object(initiate_contact_object, userobject_key, other_userobject_key)
return (counter_modify, chat_request_action_on_receiver, initiate_contact_object)
initiate_contact_object_modified = False
action_stored_date_str = action + "_stored_date"
previous_chat_friend_stored_value = initiate_contact_object.chat_friend_stored
initiate_contact_object_key = initiate_contact_object.key
action_stored_date = getattr(initiate_contact_object, action_stored_date_str)
# prevent double submission (rapid clicking) -- check when the value was last set before writing
if action_stored_date: # make sure not None
time_since_stored = datetime.datetime.now() - action_stored_date
else:
# this is the first time the action is stored, so an infinite amount of | |
<reponame>josepablocam/common-code-extraction
# Function 0
def cleaning_func_0(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data.earliest_cr_line = pd.to_datetime(data.earliest_cr_line)
return data
#=============
# Function 1
def cleaning_func_1(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data['term'] = data['term'].apply((lambda x: x.lstrip()))
return data
#=============
# Function 2
def cleaning_func_2(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data.earliest_cr_line = pd.to_datetime(data.earliest_cr_line)
data['earliest_cr_line_year'] = data['earliest_cr_line'].dt.year
return data
#=============
# Function 3
def cleaning_func_3(data):
# additional context code from user definitions
def impute_missing_algo(df, target, cats, cols, algo):
y = pd.DataFrame(df[target])
X = df[cols].copy()
X.drop(cats, axis=1, inplace=True)
cats = pd.get_dummies(df[cats])
X = pd.concat([X, cats], axis=1)
y['null'] = y[target].isnull()
y['null'] = y.loc[:, target].isnull()
X['null'] = y[target].isnull()
y_missing = y[(y['null'] == True)]
y_notmissing = y[(y['null'] == False)]
X_missing = X[(X['null'] == True)]
X_notmissing = X[(X['null'] == False)]
y_missing.loc[:, target] = ''
dfs = [y_missing, y_notmissing, X_missing, X_notmissing]
for df in dfs:
df.drop('null', inplace=True, axis=1)
y_missing = y_missing.values.ravel(order='C')
y_notmissing = y_notmissing.values.ravel(order='C')
# .as_matrix() was removed in newer pandas; .values is the equivalent
X_missing = X_missing.values
X_notmissing = X_notmissing.values
algo.fit(X_notmissing, y_notmissing)
y_missing = algo.predict(X_missing)
y.loc[((y['null'] == True), target)] = y_missing
y.loc[((y['null'] == False), target)] = y_notmissing
return y[target]
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(max_depth=5, n_estimators=100, max_features=1)
catiables = ['term', 'purpose', 'grade']
columns = ['loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'int_rate', 'grade', 'purpose', 'term']
data['earliest_cr_line_year'] = impute_missing_algo(data, 'earliest_cr_line_year', catiables, columns, rf)
return data
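# Minimal usage sketch for the cleaning functions in this file (assumes a local copy of the
# Lending Club loan.csv with the referenced columns):
# import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
# data = cleaning_func_0(data)   # parse earliest_cr_line as datetime
# data = cleaning_func_2(data)   # derive earliest_cr_line_year
# data = cleaning_func_3(data)   # impute missing earliest_cr_line_year with a random forest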
#=============
# Function 4
def cleaning_func_4(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data['emp_length'] = data['emp_length'].astype(int)
return data
#=============
# Function 5
def cleaning_func_5(data):
# additional context code from user definitions
def impute_missing_algo(df, target, cats, cols, algo):
y = pd.DataFrame(df[target])
X = df[cols].copy()
X.drop(cats, axis=1, inplace=True)
cats = pd.get_dummies(df[cats])
X = pd.concat([X, cats], axis=1)
y['null'] = y[target].isnull()
y['null'] = y.loc[:, target].isnull()
X['null'] = y[target].isnull()
y_missing = y[(y['null'] == True)]
y_notmissing = y[(y['null'] == False)]
X_missing = X[(X['null'] == True)]
X_notmissing = X[(X['null'] == False)]
y_missing.loc[:, target] = ''
dfs = [y_missing, y_notmissing, X_missing, X_notmissing]
for df in dfs:
df.drop('null', inplace=True, axis=1)
y_missing = y_missing.values.ravel(order='C')
y_notmissing = y_notmissing.values.ravel(order='C')
# .as_matrix() was removed in newer pandas; .values is the equivalent
X_missing = X_missing.values
X_notmissing = X_notmissing.values
algo.fit(X_notmissing, y_notmissing)
y_missing = algo.predict(X_missing)
y.loc[((y['null'] == True), target)] = y_missing
y.loc[((y['null'] == False), target)] = y_notmissing
return y[target]
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(max_depth=5, n_estimators=100, max_features=1)
catiables = ['term', 'purpose', 'grade']
columns = ['loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'int_rate', 'grade', 'purpose', 'term']
data['emp_length'] = impute_missing_algo(data, 'emp_length', catiables, columns, rf)
return data
#=============
# Function 6
def cleaning_func_6(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data.issue_d = pd.Series(data.issue_d).str.replace('-2015', '')
return data
#=============
# Function 7
def cleaning_func_8(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data.earliest_cr_line = pd.to_datetime(data.earliest_cr_line)
s = pd.value_counts(data['earliest_cr_line']).to_frame().reset_index()
s.columns = ['date', 'count']
return s
#=============
# Function 8
def cleaning_func_9(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data.earliest_cr_line = pd.to_datetime(data.earliest_cr_line)
s = pd.value_counts(data['earliest_cr_line']).to_frame().reset_index()
s['year'] = s['date'].dt.year
return s
#=============
# Function 9
def cleaning_func_11(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data['emp_length'] = data['emp_length'].astype(int)
s = pd.value_counts(data['emp_length']).to_frame().reset_index()
s.columns = ['type', 'count']
return s
#=============
# Function 10
def cleaning_func_12(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', low_memory=False)
data.earliest_cr_line = pd.to_datetime(data.earliest_cr_line)
s = pd.value_counts(data['earliest_cr_line']).to_frame().reset_index()
s['month'] = s['date'].dt.month
return s
#=============
# Function 11
def cleaning_func_0(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['term'] = dataset['term'].astype('category').cat.codes
return dataset
#=============
# Function 12
def cleaning_func_1(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['verification_status'] = dataset['verification_status'].astype('category').cat.codes
return dataset
#=============
# Function 13
def cleaning_func_2(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['purpose'] = dataset['purpose'].astype('category').cat.codes
return dataset
#=============
# Function 14
def cleaning_func_3(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['application_type'] = dataset['application_type'].astype('category').cat.codes
return dataset
#=============
# Function 15
def cleaning_func_4(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['addr_state'] = dataset['addr_state'].astype('category').cat.codes
return dataset
#=============
# Function 16
def cleaning_func_5(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['sub_grade'] = dataset['sub_grade'].astype('category').cat.codes
return dataset
#=============
# Function 17
def cleaning_func_6(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['loan_status'] = dataset['loan_status'].astype('category').cat.codes
return dataset
#=============
# Function 18
def cleaning_func_7(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['initial_list_status'] = dataset['initial_list_status'].astype('category').cat.codes
return dataset
#=============
# Function 19
def cleaning_func_8(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['emp_length'] = dataset['emp_length'].astype('category').cat.codes
return dataset
#=============
# Function 20
def cleaning_func_9(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['verification_status_joint'] = dataset['verification_status_joint'].astype('category').cat.codes
return dataset
#=============
# Function 21
def cleaning_func_10(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['home_ownership'] = dataset['home_ownership'].astype('category').cat.codes
return dataset
#=============
# Function 22
def cleaning_func_11(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['pymnt_plan'] = dataset['pymnt_plan'].astype('category').cat.codes
return dataset
#=============
# Function 23
def cleaning_func_12(dataset):
# core cleaning code
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['grade'] = dataset['grade'].astype('category').cat.codes
return dataset
#=============
# Function 24
def cleaning_func_13(dataset):
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['earliest_cr_line'] = pd.to_datetime(dataset['earliest_cr_line'])
dataset['earliest_cr_line'] = ((dataset['earliest_cr_line'] - dataset['earliest_cr_line'].min()) / np.timedelta64(1, 'D'))
return dataset
#=============
# Function 25
def cleaning_func_14(dataset):
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['last_pymnt_d'] = pd.to_datetime(dataset['last_pymnt_d'])
dataset['last_pymnt_d'] = ((dataset['last_pymnt_d'] - dataset['last_pymnt_d'].min()) / np.timedelta64(1, 'D'))
return dataset
#=============
# Function 26
def cleaning_func_15(dataset):
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['last_credit_pull_d'] = pd.to_datetime(dataset['last_credit_pull_d'])
dataset['last_credit_pull_d'] = ((dataset['last_credit_pull_d'] - dataset['last_credit_pull_d'].min()) / np.timedelta64(1, 'D'))
return dataset
#=============
# Function 27
def cleaning_func_16(dataset):
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['issue_d'] = pd.to_datetime(dataset['issue_d'])
dataset['issue_d'] = ((dataset['issue_d'] - dataset['issue_d'].min()) / np.timedelta64(1, 'D'))
return dataset
#=============
# Function 28
def cleaning_func_17(dataset):
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['next_pymnt_d'] = pd.to_datetime(dataset['next_pymnt_d'])
dataset['next_pymnt_d'] = ((dataset['next_pymnt_d'] - dataset['next_pymnt_d'].min()) / np.timedelta64(1, 'D'))
return dataset
#=============
# Function 29
def cleaning_func_18(dataset):
# additional context code from user definitions
def LoanResult(status):
if ((status == 5) or (status == 1) or (status == 7)):
return 1
else:
return 0
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['loan_status'] = dataset['loan_status'].astype('category').cat.codes
non_numerics = [x for x in dataset.columns if (not ((dataset[x].dtype == np.float64) or (dataset[x].dtype == np.int8) or (dataset[x].dtype == np.int64)))]
df = dataset
df = df.drop(non_numerics, 1)
df['loan_status'] = df['loan_status'].apply(LoanResult)
return df
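# Note: loan_status was label-encoded with .astype('category').cat.codes above, so the integer
# codes checked in LoanResult (1, 5, 7) depend on the alphabetical ordering of the status
# strings present in this particular dataset.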
#=============
# Function 30
def cleaning_func_19(dataset):
# core cleaning code
import numpy as np
import pandas as pd
# dataset = pd.read_csv('../input/loan.csv', low_memory=False)
dataset = dataset.fillna(0)
dataset['loan_status'] = dataset['loan_status'].astype('category').cat.codes
non_numerics = [x for x in dataset.columns if (not ((dataset[x].dtype == np.float64) or (dataset[x].dtype == np.int8) or (dataset[x].dtype == np.int64)))]
df = dataset
return df
#=============
# Function 31
def cleaning_func_0(df):
# additional context code from user definitions
def status_class(text):
if (text == 'Fully Paid'):
return 'Fully Paid'
elif ((text == 'Charged Off') or (text == 'Default')):
return 'Default'
elif ((text == 'Current') or (text == 'Issued')):
return 'Current'
else:
return 'Late'
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df['status_class'] = df['loan_status'].apply(status_class)
return df
#=============
# Function 32
def cleaning_func_1(df):
# additional context code from user definitions
def emp_length_class(text):
if ((text == '< 1 year') or (text == '1 year') or (text == '2 years') or (text == '3 years')):
return '<=3 years'
elif ((text == '4 years') or (text == '5 years') or (text == '6 years')):
return '4-6 years'
elif ((text == '7 years') or (text == '8 years') or (text == '9 years')):
return '7-9 years'
elif (text == '10+ years'):
return '>=10 years'
else:
return None
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df['emp_length_class'] = df['emp_length'].apply(emp_length_class)
return df
#=============
# Function 33
def cleaning_func_2(df):
# additional context code from user definitions
def inc_class(num):
if (num <= 50000):
return '<=50000'
elif (num <= 75000):
return '50000-75000'
elif (num <= 100000):
return '75000-100000'
elif (num <= 125000):
return '100000-125000'
elif (num <= 150000):
return '125000-150000'
else:
return '>150000'
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df['inc_class'] = df['annual_inc'].apply(inc_class)
return df
#=============
# Function 34
def cleaning_func_3(df):
# additional context code from user definitions
def loan_class(num):
if (num <= 10000):
return '<=10000'
elif (num <= 20000):
return '10000-20000'
elif (num <= 30000):
return '20000-30000'
else:
return '>30000'
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df['loan_class'] = df['loan_amnt'].apply(loan_class)
return df
#=============
# Function 35
def cleaning_func_4(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line', 'last_credit_pull_d', 'next_pymnt_d', 'addr_state'], axis=1)
df = df.dropna(thresh=(len(df) / 2), axis=1)
return df
#=============
# Function 36
def cleaning_func_5(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv')
df = df.drop(['id', 'member_id', 'funded_amnt', 'funded_amnt_inv', 'sub_grade', 'emp_title', 'issue_d', 'zip_code', 'out_prncp', 'out_prncp_inv', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int', 'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_d', 'last_pymnt_amnt', 'desc', 'url', 'title', 'initial_list_status', 'pymnt_plan', 'policy_code', 'application_type', 'earliest_cr_line',
<filename>plasticc/get_data.py
# -*- coding: UTF-8 -*-
"""
Get PLASTICC data from SQL database
"""
import sys
import os
import numpy as np
import warnings
import argparse
import pandas as pd
import astropy.table as at
import astropy.io.fits as afits
from collections import OrderedDict
import database
import helpers
ROOT_DIR = os.getenv('PLASTICC_DIR')
DATA_DIR = os.path.join(ROOT_DIR, 'plasticc_data')
def parse_getdata_options(argv=None):
if argv is None:
argv = sys.argv[1:]
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def_sql_wildcard = '%'
parser = argparse.ArgumentParser(description="Get options to the GetData structure")
group = parser.add_mutually_exclusive_group(required=False)
parser.register('type', 'bool', str2bool)
parser.add_argument('--data_release', required=True, help='PLAsTiCC data release index table to process')
field_choices = ('WFD', 'DDF', '%')
parser.add_argument('--field', required=False, default='DDF', type=str.upper, choices=field_choices, \
help='PLAsTiCC field to process')
type_mapping = GetData.get_sntypes()
inverse_mapping = {v: k for k, v in type_mapping.items()}
model_choices = list(type_mapping.keys()) + ['%']
model_name_choices = list(type_mapping.values()) + ['%']
group.add_argument('--model', required=False, action="store", default=def_sql_wildcard, choices=model_choices, \
help='PLAsTiCC model to process')
group.add_argument('--model_name', required=False, action="store", default=def_sql_wildcard,
choices=model_name_choices, \
help='PLAsTiCC model name to process')
parser.add_argument('--base', required=False, default=def_sql_wildcard,
help='PLAsTiCC model base filename (probably not a good idea to touch this)')
parser.add_argument('--snid', required=False, default=def_sql_wildcard,
help='PLAsTiCC object ID number (useful for debugging/testing)')
parser.add_argument('--limit', required=False, type=int, default=None,
help='Limit the number of returned results from the MySQL index')
parser.add_argument('--shuffle', required=False, type="bool", default="False",
help='Shuffle the returned results from the MySQL index')
parser.add_argument('--sort', required=False, type="bool", default="True",
help='Sort the returned results from the MySQL index')
parser.add_argument('--survey', required=False, default="LSST",
help="Specify the survey to process")
parser.add_argument('--offset', required=False, default=None, type=int,
help='Return the MySQL results AFTER offset rows')
parser.add_argument('--extrasql', required=False, default=None,
help='Extra SQL for the selection function - enter as quoted string - used as is')
args = parser.parse_args(args=argv)
out = vars(args)
model_name = out.pop('model_name')
model = out.pop('model')
if model_name == '%':
if model == '%':
out['model'] = '%'
else:
out['model'] = model
else:
this_model = inverse_mapping.get(model_name)
if model == '%':
out['model'] = this_model
else:
if this_model == model:
out['model'] = this_model
else:
out['model'] = model
return out
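def _example_parse_getdata_options():
    # Illustrative usage sketch only (not part of the original module): drive the
    # parser with an explicit argv list instead of sys.argv; '20180221' is a
    # hypothetical data release tag.
    return parse_getdata_options(['--data_release', '20180221',
                                  '--field', 'DDF', '--limit', '10'])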
class GetData(object):
"""
Class to access the ANTARES parsed PLaSTiCC index and light curve data
"""
def __init__(self, data_release):
self.data_release = "release_{}".format(data_release)
self.phot_fields = ['MJD', 'FLT', 'FLUXCAL', 'FLUXCALERR', 'ZEROPT', 'PHOTFLAG']
self.phot_fields_dtypes = {'FLT': np.str_, 'PHOTFLAG': np.int_}
def get_phot_fields(self):
"""
list of the photometry column names and a dictionary of NON-FLOAT
columns
For the default columns, this is only FLT
"""
return list(self.phot_fields), dict(self.phot_fields_dtypes)
def set_phot_fields(self, fields, dtypes):
"""
set the list of photometry column fields to retrieve and a dictionary
of data types for NON-FLOAT columns hashed by column name in the PHOT.FITS
i.e. if your column is a float, don't even bother listing it
Can be used to retrieve custom columns from the PHOT.FITS
Kinda kludgey - use with caution
"""
self.phot_fields = list(fields)
self.phot_fields_dtypes = dict(dtypes)
def get_object_ids(self):
""" Get list of all object ids """
obj_ids = database.exec_sql_query("SELECT objid FROM {0};".format(self.data_release))
return obj_ids
def get_column_for_sntype(self, column_name, sntype, field='%'):
""" Get an sql column for a particular sntype class
Parameters
----------
column_name : str
column name. E.g. column_name='peakmjd'
sntype : int
sntype number. E.g. sntype=4
field : str, optional
The field name. E.g. field='DDF' or field='WFD'. The default is '%' indicating that all fields will be included.
Return
-------
column_out: list
A list containing all the entire column for a particular sntype class
"""
try:
column_out = database.exec_sql_query(
"SELECT {0} FROM {1} WHERE objid LIKE '{2}%' AND sntype={3};".format(column_name, self.data_release,
field, sntype))
column_out = np.array(column_out)[:, 0]
except IndexError:
print("No data in the database satisfy the given arguments. field: {}, sntype: {}".format(field, sntype))
return []
return column_out
def get_photfile_for_objid(self, objid):
"""
Returns the phot file for the object ID
"""
field, model, base, snid = objid.split('_')
if field == 'IDEAL':
filename = "{0}_MODEL{1}/{0}_{2}_PHOT.FITS".format(field, model, base)
elif field == 'MSIP':
filename = "ZTF_{0}_MODEL{1}/ZTF_{0}_{2}_PHOT.FITS".format(field, model, base)
else:
filename = "LSST_{0}_MODEL{1}/LSST_{0}_{2}_PHOT.FITS".format(field, model, base)
phot_file = os.path.join(DATA_DIR, self.data_release.replace('release_', ''), filename)
if not os.path.exists(phot_file):
phot_file = phot_file + '.gz'
return phot_file
def get_light_curve(self, objid, ptrobs_min, ptrobs_max, standard_zpt=27.5):
""" Get lightcurve from fits file
Parameters
----------
objid : str
The object ID. E.g. objid='DDF_04_NONIa-0004_87287'
ptrobs_min : int
Min index of object in _PHOT.FITS.
ptrobs_max : int
Max index of object in _PHOT.FITS.
Return
-------
phot_out: pandas DataFrame
A DataFrame containing the MJD, FLT, FLUXCAL, FLUXCALERR, ZEROPT separated by each filter.
E.g. Access the magnitude in the z filter with phot_out['z']['MAG'].
"""
phot_file = self.get_photfile_for_objid(objid)
try:
phot_HDU = afits.open(phot_file, memmap=True)
except Exception as e:
message = f'Could not open photometry file {phot_file}'
raise RuntimeError(message)
phot_data = phot_HDU[1].data[ptrobs_min - 1:ptrobs_max]
phot_dict = OrderedDict()
filters = list(set(phot_data['FLT'])) # e.g. ['i', 'r', 'Y', 'u', 'g', 'z']
dtypes = dict(self.phot_fields_dtypes)
for f in filters:
fIndexes = np.where(phot_data['FLT'] == f)[0]
phot_dict[f] = OrderedDict()
for pfield in self.phot_fields:
if pfield == 'ZEROPT':
phot_dict[f][pfield] = np.repeat(standard_zpt, len(fIndexes))
elif pfield == 'FLT':
true_zpt = phot_data['ZEROPT'][fIndexes]
nobs = len(true_zpt)
phot_dict[f][pfield] = np.repeat(f.strip(), nobs)
else:
phot_dict[f][pfield] = phot_data[pfield][fIndexes]
if not pfield in dtypes:
dtypes[pfield] = np.float64
phot_out = pd.DataFrame(phot_dict)
phot_HDU.close()
del phot_HDU[1].data
return phot_out
def get_light_curve_array(self, objid, ptrobs_min, ptrobs_max, standard_zpt=27.5):
""" Get lightcurve from fits file as an array - avoid some Pandas overhead
Parameters
----------
objid : str
The object ID. E.g. objid='DDF_04_NONIa-0004_87287'
ptrobs_min : int
Min index of object in _PHOT.FITS.
ptrobs_max : int
Max index of object in _PHOT.FITS.
Return
-------
phot_data: astropy Table
A Table containing the MJD, FLT, FLUXCAL, FLUXCALERR, ZEROPT columns for the object,
without the per-filter splitting done by get_light_curve.
"""
phot_file = self.get_photfile_for_objid(objid)
try:
phot_HDU = afits.open(phot_file, memmap=True)
except Exception as e:
message = f'Could not open photometry file {phot_file}'
raise RuntimeError(message)
phot_data = phot_HDU[1].data[ptrobs_min - 1:ptrobs_max]
phot_data = at.Table(phot_data)
return phot_data
@staticmethod
def convert_pandas_lc_to_recarray_lc(phot, passbands=('u', 'g', 'r', 'i', 'z', 'Y')):
"""
Convert a light curve from the per-passband Pandas format into a single recarray (ANTARES_object style)
TODO: This is ugly - just have an option for get_lcs_data to return one or the other
"""
pbs = passbands
# name mapping for the defaults in phot_fields
# any other column just becomes the lowercase of its current name
name_map = {'FLUXCAL': 'flux', 'FLUXCALERR': 'dflux', 'ZEROPT': 'zpt',\
'FLT': 'pb', 'MJD': 'mjd', 'PHOTFLAG': 'photflag'}
out = None
out_names = None
for this_pb in phot:
# do we know what this passband is
# this is just a sanity test in case we accidentally retrieve dummy entries with passband = -9
if this_pb not in pbs:
continue
this_pb_lc = phot.get(this_pb)
if this_pb_lc is None:
continue
if out is None:
out = np.rec.fromarrays(list(this_pb_lc))
if out_names is None:
out_names = list(this_pb_lc.axes[0])
out_names = [name_map.get(x, x.lower()) for x in out_names]
else:
temp = np.rec.fromarrays(list(this_pb_lc))
out = np.concatenate((out, temp), axis=-1)
out.dtype.names = out_names
return out
@staticmethod
def get_sntypes():
return helpers.get_sntypes()
@staticmethod
def aggregate_sntypes(reverse=False):
return helpers.aggregate_sntypes(reverse=reverse)
def get_avail_sntypes(self):
""" Returns a list of the different transient classes in the database. """
sntypes = database.exec_sql_query("SELECT DISTINCT sntype FROM {};".format(self.data_release))
sntypes_map = self.get_sntypes()
return sorted([sntype[0] for sntype in sntypes]), sntypes_map
def get_lcs_headers(self, columns=None, field='%', model='%', base='%', snid='%', extrasql='', survey='LSST',
get_num_lightcurves=False, limit=None, shuffle=False, sort=True, offset=0, big=False):
""" Gets the header data given specific conditions.
Parameters
----------
columns : list
A list of strings of the names of the columns you want to retrieve from the database.
You must at least include ['objid', 'ptrobs_min', 'ptrobs_max'] at the beginning of the input list.
E.g. columns=['objid', 'ptrobs_min', 'ptrobs_max', 'sntype', 'peakmjd'].
field : str, optional
The field name. E.g. field='DDF' or field='WFD'. The default is '%' indicating that all fields will be included.
model : str, optional
The model number. E.g. model='04'. The default is '%' indicating that all model numbers will be included.
base : str, optional
The base name. E.g. base='NONIa'. The default is '%' indicating that all base names will be included.
snid : str, optional
The transient id. E.g. snid='87287'. The default is '%' indicating that all snids will be included.
get_num_lightcurves : boolean, optional
If this is True, then the return value is just a single iteration generator stating the number of
light curves that satisfied the given conditions.
limit : int, optional
Limit the results to this number (> 0)
shuffle : bool, optional
Randomize
open(pathstr + '/Diabetic_Retinopathy_transformed_numperclass_' + all_label_number_map_str\
+ choose_indices_str + '_seed_' + str(seed) + '.pkl','wb') as g:
pkl.dump((finaldata, finallabels), g)
def gen_kmeansplusplus_CelebAdataset(pathstr, inputfile, choose_indices = True,\
num_images_per_class=1000, num_classes=5, seed=0):
f = open(pathstr + '/' + inputfile, 'rb')
data, (indices, target) = pkl.load(f)
print("Starting Kmeans++")
finaldata = None
finallabels = None
choose_indices_str = None
if choose_indices:
finaldata, finallabels = choose_cluster_image_indices(data, target, indices, num_images_per_class, seed=seed, dimension=3)
choose_indices_str = '_indices_'
else:
finaldata, finallabels = choose_cluster_images_3D(data, target, num_images_per_class, seed=seed)
choose_indices_str = '_images_'
with open(pathstr + '/CelebA_numperclass_' + str(num_images_per_class) + '_numclasses_' +\
str(num_classes) + choose_indices_str + '_seed_' + str(seed) + '.pkl','wb') as f:
pkl.dump((finaldata, finallabels), f)
def transform_labels_2_zero_idx(inputfile):
f = open(inputfile, 'rb')
data, labels = pkl.load(f)
unique_labels = np.unique(np.array(labels))
map = {}
for i in range(unique_labels.shape[0]):
map[unique_labels[i]] = i
transformed_labels = torch.tensor([map[x] for x in np.array(labels)]).to(torch.float32)
outputfile = inputfile[:-4] + "_zero_index_labels.pkl"
g = open(outputfile, 'wb')
pkl.dump((data, transformed_labels), g)
g.close()
f.close()
def add_noise_and_save_again(pathstr, dataset, num_images_per_digit, num_digits, seed, mean, sigma, noise_to_all_channels=False):
torch.manual_seed(seed)
dtype = torch.double
if dataset == 'CelebA':
file_name = dataset + '_numperclass_' + str(num_images_per_digit) + '_numclasses_' +\
str(num_digits) + '_images_' '_seed_' + str(int(seed)) + '.pkl'
else:
file_name = dataset + '_numperdigit_' + str(num_images_per_digit) + '_numdigits_' +\
str(num_digits) + '_seed_' + str(seed) + '.pkl'
f = open(pathstr + '/' + file_name, 'rb')
finaldata, finallabels = pkl.load(f)
noise = np.zeros(finaldata.shape).astype('double')
batch_size = finaldata.shape[0]
num_channels = finaldata.shape[1]
########
# Important!
# Note: cv2.randn(x, mean, sigma) only fills the first 2 dimensions of x with noise, not the third.
# Hence, here we are adding noise only to the first 2 dimensions of the RGB image.
# Ideally, loop over the first dimension of the RGB image and add noise to the remaining 2 dimensions.
#######
if noise_to_all_channels:
for batch_no in range(batch_size):
for channel in range(num_channels):
cv2.randn(noise[batch_no][channel], mean, sigma)
else:
for batch_no in range(batch_size):
cv2.randn(noise[batch_no], mean, sigma)
finalnoisydata = finaldata.clone() + torch.tensor(noise).to(dtype)
if dataset == 'CelebA':
finalnoisydata = torch.clamp(finalnoisydata, -1, 1).clone()
if noise_to_all_channels:
with open(pathstr + '/noisy_sigma_all_chan_' +str(sigma) + '_' + file_name, 'wb') as newf:
pkl.dump((finalnoisydata, finallabels), newf)
else:
with open(pathstr + '/noisy_sigma_' +str(sigma) + '_' + file_name, 'wb') as newf:
pkl.dump((finalnoisydata, finallabels), newf)
# with open(pathstr + '/noisy_sigma_' +str(sigma[0]) + '_' + file_name, 'wb') as newf:
# pkl.dump((finalnoisydata, finallabels), newf)
def semisupervised_Kmeans(datafile_path, distance_metric, num_clusters, log_final_results, supervision_level=0, seed=0):
dtype = torch.double
with open(datafile_path, 'rb') as f:
data, labels = pkl.load(f)
torch.manual_seed(seed)
unique_cluster_labels = np.unique(np.array(labels))
labels = labels.to(dtype)
num_images = data.shape[0]
clustering = (torch.zeros(num_images) - 1).to(torch.double)
cluster_centers = torch.zeros(num_clusters, data.shape[1], data.shape[2]).to(torch.double)
fixed_indices = torch.zeros(num_images).to(dtype)
if supervision_level > 0:
assert(supervision_level <= 1.0)
unique_cluster_labels = torch.tensor(np.unique(np.array(labels))).to(dtype)
assert(num_clusters == unique_cluster_labels.shape[0])
for i in range(unique_cluster_labels.shape[0]):
idx = labels == unique_cluster_labels[i]
temp_fixed_indices = torch.zeros(idx[idx == 1].shape[0])
num_images_to_select = int(temp_fixed_indices.shape[0] * supervision_level)
# temp_fixed_indices[torch.randperm(temp_fixed_indices.shape[0])[:num_images_to_select]] = 1
temp_fixed_indices[:num_images_to_select] = 1
idx[idx==1] = temp_fixed_indices.to(torch.bool)
fixed_indices += idx.to(dtype)
clustering[idx] = unique_cluster_labels[i]
cluster_centers[i] = torch.mean(data[idx], 0)
else:
assert(supervision_level == 0)
cluster_centers, dummy_cluster_center_labels = Kmeans_plus_plus(data, labels, num_clusters, distance_metric, seed)
# Kmeans algorithm
indices_to_update = (1 - fixed_indices).to(torch.long)
indices_to_update = indices_to_update == 1
finalvalues = data[indices_to_update]
old_clustering = clustering.clone()
temp_distance_arr = distance_metric(finalvalues.view(finalvalues.shape[0], 1, finalvalues.shape[1], finalvalues.shape[2]).repeat(1, num_clusters, 1, 1), cluster_centers.repeat(finalvalues.shape[0], 1, 1, 1), 2).to(dtype)
label_indices = torch.min(temp_distance_arr, 1)[1]
clustering[indices_to_update] = torch.tensor([unique_cluster_labels[j] for j in label_indices]).to(dtype)
while not torch.prod((old_clustering == clustering)):
old_clustering = clustering.clone()
# Update cluster centers
for i in range(num_clusters):
if torch.sum(clustering == unique_cluster_labels[i]) == 0:
print("cluster", i, ": No point in this cluster!")
continue
cluster_centers[i] = torch.mean(data[clustering == unique_cluster_labels[i]], 0)
# Update clustering
label_indices = torch.min(distance_metric(finalvalues.view(finalvalues.shape[0], 1, finalvalues.shape[1], finalvalues.shape[2]).repeat(1, num_clusters, 1, 1), cluster_centers.repeat(finalvalues.shape[0], 1, 1, 1), 2).to(dtype), 1)[1]
clustering[indices_to_update] = torch.tensor([unique_cluster_labels[j] for j in label_indices]).to(dtype)
nmi = NMI(np.array(labels), np.array(clustering))
ari = ARI(np.array(labels), np.array(clustering))
acc = ACC(np.array(labels.to(torch.long)), np.array(clustering.to(torch.long)))
print("NMI : ", nmi)
print("ARI : ", ari)
print("ACC : ", acc)
log_final_results.write("kmeans_metrics_NMI: " + str(nmi) + " \n")
log_final_results.write("kmeans_metrics_ARI: " + str(ari) + " \n")
log_final_results.write("kmeans_metrics_ACC: " + str(acc[0]) + " \n")
return indices_to_update, clustering.to(torch.long), nmi, ari, acc[0]
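def _example_semisupervised_kmeans():
    # Illustrative usage sketch only: the pickle path, log file name and cluster
    # count are made up, and a plain squared-L2 distance over the trailing image
    # dimensions is assumed for distance_metric (matching the
    # (points, clusters, H, W) broadcasting used above).
    import torch
    def sq_l2(a, b, start_dim):
        return torch.sum((a - b) ** 2, dim=tuple(range(start_dim, a.dim())))
    with open('kmeans_results.log', 'w') as log:
        return semisupervised_Kmeans('MNIST_numperdigit_100_numdigits_5_seed_0.pkl',
                                     sq_l2, num_clusters=5, log_final_results=log,
                                     supervision_level=0.1, seed=0)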
def semisupervised_Kmeans_3D(datafile_path, distance_metric, num_clusters, log_final_results, supervision_level=0, seed=0):
dtype = torch.double
with open(datafile_path, 'rb') as f:
data, labels = pkl.load(f)
torch.manual_seed(seed)
unique_cluster_labels = np.unique(np.array(labels))
labels = labels.to(dtype)
num_images = data.shape[0]
clustering = (torch.zeros(num_images) - 1).to(torch.double)
cluster_centers = torch.zeros(num_clusters, data.shape[1], data.shape[2], data.shape[3]).to(torch.double)
fixed_indices = torch.zeros(num_images).to(dtype)
if supervision_level > 0:
assert(supervision_level <= 1.0)
unique_cluster_labels = torch.tensor(np.unique(np.array(labels))).to(dtype)
assert(num_clusters == unique_cluster_labels.shape[0])
for i in range(unique_cluster_labels.shape[0]):
idx = labels == unique_cluster_labels[i]
temp_fixed_indices = torch.zeros(idx[idx == 1].shape[0])
num_images_to_select = int(temp_fixed_indices.shape[0] * supervision_level)
# temp_fixed_indices[torch.randperm(temp_fixed_indices.shape[0])[:num_images_to_select]] = 1
temp_fixed_indices[:num_images_to_select] = 1
idx[idx==1] = temp_fixed_indices.to(torch.bool)
fixed_indices += idx.to(dtype)
clustering[idx] = unique_cluster_labels[i]
cluster_centers[i] = torch.mean(data[idx], 0)
else:
assert(supervision_level == 0)
cluster_centers, dummy_cluster_center_labels = Kmeans_plus_plus_3D(data, labels, num_clusters, distance_metric, seed)
# Kmeans algorithm
indices_to_update = (1 - fixed_indices).to(torch.long)
indices_to_update = indices_to_update == 1
finalvalues = data[indices_to_update]
old_clustering = clustering.clone()
temp_distance_arr = distance_metric(finalvalues.view(finalvalues.shape[0], 1, finalvalues.shape[1], finalvalues.shape[2], finalvalues.shape[3]).repeat(1, num_clusters, 1, 1, 1), cluster_centers.repeat(finalvalues.shape[0], 1, 1, 1, 1), 3).to(dtype)
label_indices = torch.min(temp_distance_arr, 1)[1]
clustering[indices_to_update] = torch.tensor([unique_cluster_labels[j] for j in label_indices]).to(dtype)
while not torch.prod((old_clustering == clustering)):
old_clustering = clustering.clone()
# Update cluster centers
for i in range(num_clusters):
if torch.sum(clustering == unique_cluster_labels[i]) == 0:
print("cluster", i, ": No point in this cluster!")
continue
cluster_centers[i] = torch.mean(data[clustering == unique_cluster_labels[i]], 0)
# Update clustering
label_indices = torch.min(distance_metric(finalvalues.view(finalvalues.shape[0], 1, finalvalues.shape[1], finalvalues.shape[2], finalvalues.shape[3]).repeat(1, num_clusters, 1, 1, 1), cluster_centers.repeat(finalvalues.shape[0], 1, 1, 1, 1), 3).to(dtype), 1)[1]
clustering[indices_to_update] = torch.tensor([unique_cluster_labels[j] for j in label_indices]).to(dtype)
nmi = NMI(np.array(labels), np.array(clustering))
ari = ARI(np.array(labels), np.array(clustering))
acc = ACC(np.array(labels.to(torch.long)), np.array(clustering.to(torch.long)))
print("NMI : ", nmi)
print("ARI : ", ari)
print("ACC : ", acc)
log_final_results.write("kmeans_metrics_NMI: " + str(nmi) + " \n")
log_final_results.write("kmeans_metrics_ARI: " + str(ari) + " \n")
log_final_results.write("kmeans_metrics_ACC: " + str(acc[0]) + " \n")
return indices_to_update, clustering.to(torch.long), nmi, ari, acc[0]
def Kernel_Kmeans_plus_plus(data, labels, cluster_num, Kernel_distance_array, seed):
torch.manual_seed(seed)
dtype = torch.double
data = data.to(dtype)
unique_cluster_labels = torch.tensor(np.unique(np.array(labels))).to(dtype)
cumulative_prob = torch.cumsum(torch.ones(data.shape[0]) / data.shape[0], dim=0)
cluster_centers = torch.zeros(cluster_num).to(dtype)
cluster_center_labels = torch.zeros(cluster_num)
#distance array
self_distance = (torch.tensor([Kernel_distance_array[i][i] for i in range(data.shape[0])]).to(dtype)).view(data.shape[0], 1).repeat(1, data.shape[0])
pair_wise_distance = self_distance + torch.transpose(self_distance, 0, 1) - (2*Kernel_distance_array)
#first center
index = binarysearch(cumulative_prob, torch.rand(1))
cluster_centers[0] = index
cluster_center_labels[0] = labels[index]
distance_square_array = pair_wise_distance[index][:].clone().to(dtype)
#Kmeans++
for i in range(1, cluster_num):
#Next center
cumulative_prob = torch.cumsum(distance_square_array / torch.sum(distance_square_array), dim=0).to(dtype)
index = binarysearch(cumulative_prob, torch.rand(1).to(dtype))
cluster_centers[i] = index
cluster_center_labels[i] = labels[index]
#update distance matrix
torch.min(input = distance_square_array, other = pair_wise_distance[index][:].clone().to(dtype), out = distance_square_array)
clustering = unique_cluster_labels[torch.min(pair_wise_distance[:, cluster_centers.to(torch.long)], 1)[1]]
assert(clustering.shape[0]==data.shape[0])
return clustering, cluster_centers, cluster_center_labels
def semisupervised_Kernel_Kmeans(datafile_path, num_clusters, log_final_results, Kernel=RBF_Kernel, sigma=100, supervision_level=0, seed=0):
dtype = torch.double
with open(datafile_path, 'rb') as f:
data, labels = pkl.load(f)
torch.manual_seed(seed)
unique_cluster_labels = torch.tensor([i for i in range(num_clusters)]).to(dtype)
labels = labels.to(dtype)
num_images = data.shape[0]
# Precompute pairwise kernel function
Kernel_distance_array = torch.zeros([num_images, num_images]).to(dtype)
for i in range(num_images):
for j in range(num_images):
Kernel_distance_array[i][j] = Kernel(data[i], data[j], sigma, 2)
clustering = (torch.zeros(num_images) - 1).to(torch.double)
fixed_indices = torch.zeros(num_images).to(dtype)
indices_to_update = (1 - fixed_indices).to(torch.long)
indices_to_update = indices_to_update == 1
if supervision_level > 0:
assert(supervision_level <= 1.0)
unique_cluster_labels = torch.tensor(np.unique(np.array(labels))).to(dtype)
assert(num_clusters == unique_cluster_labels.shape[0])
for i in range(unique_cluster_labels.shape[0]):
idx = labels == unique_cluster_labels[i]
temp_fixed_indices = torch.zeros(idx[idx == 1].shape[0])
num_images_to_select = int(temp_fixed_indices.shape[0] * supervision_level)
# temp_fixed_indices[torch.randperm(temp_fixed_indices.shape[0])[:num_images_to_select]] = 1
temp_fixed_indices[:num_images_to_select] = 1
idx[idx==1] = temp_fixed_indices.to(torch.bool)
fixed_indices += idx.to(dtype)
clustering[idx] = unique_cluster_labels[i]
clustering = clustering.to(dtype)
old_clustering = clustering.clone()
indices_to_update = (1 - fixed_indices).to(torch.long)
indices_to_update = indices_to_update == 1
for i in range(num_images):
if clustering[i] != -1:
assert(indices_to_update[i] == 0)
continue
correct_cluster_label = None
min_distance_i_cluster_r = None
for r in range(unique_cluster_labels.shape[0]):
cluster_indices = old_clustering == unique_cluster_labels[r]
size_of_cluster = torch.sum(cluster_indices).to(dtype)
distance_i_cluster_r = Kernel_distance_array[i][i] + (-2.00 * torch.sum(Kernel_distance_array[i][cluster_indices]) / size_of_cluster) + (torch.sum(Kernel_distance_array[cluster_indices ,:][:, cluster_indices]) / (size_of_cluster**2))
if r == 0:
min_distance_i_cluster_r = distance_i_cluster_r
correct_cluster_label = unique_cluster_labels[r]
else:
if min_distance_i_cluster_r > distance_i_cluster_r:
min_distance_i_cluster_r = distance_i_cluster_r
correct_cluster_label = unique_cluster_labels[r]
clustering[i] = correct_cluster_label
else:
assert(supervision_level == 0)
old_clustering = clustering.clone()
clustering, _, _ = Kernel_Kmeans_plus_plus(data, labels, num_clusters, Kernel_distance_array, seed)
clustering = clustering.to(dtype)
unique_cluster_labels = np.unique(np.array(labels))
# Kmeans algorithm
while not torch.prod((old_clustering == clustering)):
old_clustering = clustering.clone()
# Update clustering
for i in range(num_images):
if indices_to_update[i] == 0:
continue
correct_cluster_label = None
min_distance_i_cluster_r = None
for r in range(unique_cluster_labels.shape[0]):
cluster_indices = old_clustering == unique_cluster_labels[r]
size_of_cluster = torch.sum(cluster_indices).to(dtype)
distance_i_cluster_r = Kernel_distance_array[i][i] + (-2.00 * torch.sum(Kernel_distance_array[i][cluster_indices]) / size_of_cluster) + (torch.sum(Kernel_distance_array[cluster_indices ,:][:, cluster_indices]) / (size_of_cluster**2))
if r == 0:
min_distance_i_cluster_r = distance_i_cluster_r
correct_cluster_label = unique_cluster_labels[r]
else:
if min_distance_i_cluster_r > distance_i_cluster_r:
min_distance_i_cluster_r = distance_i_cluster_r
correct_cluster_label = unique_cluster_labels[r]
clustering[i] = correct_cluster_label
nmi = NMI(np.array(labels), np.array(clustering))
ari = ARI(np.array(labels), np.array(clustering))
acc = ACC(np.array(labels.to(torch.long)), np.array(clustering.to(torch.long)))
print("NMI : ", nmi)
print("ARI : ", ari)
print("ACC : ", acc)
log_final_results.write("kernel_kmeans_metrics_NMI: " + str(nmi) + " \n")
log_final_results.write("kernel_kmeans_metrics_ARI: " + str(ari) + " \n")
log_final_results.write("kernel_kmeans_metrics_ACC: " + str(acc[0]) + " \n")
return clustering.to(torch.long), nmi, ari, acc[0]
# ################################################################
# ################################################################
def create_and_store_MNISTdataset_indexfile(pathstr, digits, img_size=64, num_images_per_digit=100, seed=0):
torch.manual_seed(seed)
transform = transforms.Compose([
transforms.Resize(img_size), #Used transforms.Resize() instead of transforms.Scale()
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])
])
dataMNIST = datasets.MNIST('data', train=True, download=True, transform=transform)
# Initializing dataset points
idx = dataMNIST.targets == digits[0]
target = dataMNIST.targets[idx]
data = dataMNIST.data[idx]
index = (idx.nonzero().reshape(-1))
for j in range(1, digits.shape[0]):
idx = dataMNIST.targets == digits[j]
target = torch.cat((target, dataMNIST.targets[idx]), 0)
data = torch.cat((data, dataMNIST.data[idx]), 0)
index = torch.cat((index, (idx.nonzero().reshape(-1))), 0)
finalindices, finallabels = choose_cluster_image_indices(data, target, index, num_images_per_digit, seed=seed, dimension=2)
# with open(pathstr + '/MNIST_' + str(num_images_per_digit) + '_' + str(unique_index) + '_seed_' + str(seed) + '.pkl','wb') as f:
# pkl.dump((finaldata, finallabels), f)
with open(pathstr + '/MNIST_indices_numperdigit_' + str(num_images_per_digit) + '_numdigits_' + str(digits.shape[0]) + '_seed_' + str(seed) + '.pkl','wb') as f:
pkl.dump((finalindices, finallabels), f)
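def _example_store_mnist_indices():
    # Illustrative usage sketch only: './data_out' and the digit list are made up.
    # The digits argument must support indexing and .shape, so a torch tensor is
    # used here.
    import torch
    create_and_store_MNISTdataset_indexfile('./data_out', torch.tensor([0, 1, 2, 3, 4]),
                                            img_size=64, num_images_per_digit=100, seed=0)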
def create_and_store_CIFARdataset_indexfile(pathstr, class_labels, img_size=32, num_images_per_digit=1000, seed=0, datapath=None):
torch.manual_seed(seed)
transform = transforms.Compose([
transforms.Resize(img_size), #Used transforms.Resize() instead of transforms.Scale()
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])
])
dataCIFAR = None
if datapath is None:
dataCIFAR = datasets.CIFAR10('data', train=True, download=True, transform=transform)
else:
dataCIFAR = datasets.CIFAR10(datapath, train=True, download=False, transform=transform)
originaltargets = torch.tensor(dataCIFAR.targets)
originaldata = torch.tensor(dataCIFAR.data)
# Initializing dataset points
idx = originaltargets == class_labels[0]
target = originaltargets[idx]
data = originaldata[idx]
index = (idx.nonzero().reshape(-1))
for j in range(1, class_labels.shape[0]):
idx = originaltargets == class_labels[j]
target = torch.cat((target, originaltargets[idx]), 0)
data = torch.cat((data, originaldata[idx]), 0)
index = torch.cat((index, (idx.nonzero().reshape(-1))), 0)
finalindices, finallabels = choose_cluster_image_indices(data, target, index, num_images_per_digit, seed=seed, dimension=3)
with open(pathstr + '/CIFAR10_indices_numperdigit_' + str(num_images_per_digit) + '_numdigits_' + str(class_labels.shape[0]) + '_seed_' + str(seed) + '.pkl','wb') as f:
pkl.dump((finalindices, finallabels), f)
def Kmeans_plus_plus_indices(data, labels, indices, cluster_num, sq_distance_function, seed, dimension):
torch.manual_seed(seed)
dtype = torch.double
data = data.to(dtype)
cumulative_prob = torch.cumsum(torch.ones(data.shape[0]) / data.shape[0], dim=0)
cluster_center_labels = torch.zeros(cluster_num)
cluster_center_indices = torch.zeros(cluster_num)
#first
# contains 1 or multiple text files
>>> dataset = ds.TextFileDataset(dataset_files=text_file_dataset_dir)
"""
@check_textfiledataset
def __init__(self, dataset_files, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()
def parse(self, children=None):
return cde.TextFileNode(self.dataset_files, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class UDPOSDataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses UDPOS dataset.
The generated dataset has three columns: :py:obj:`[word, universal, stanford]`.
The tensor of column :py:obj:`word` is of the string type.
The tensor of column :py:obj:`universal` is of the string type.
The tensor of column :py:obj:`stanford` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be 'train', 'test', 'valid' or 'all'. 'train' will read from
12,543 train samples, 'test' will read from 2,077 test samples, 'valid' will read from 2,002 valid samples,
'all' will read from all 16,622 samples (default=None, all samples).
num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the max sample number of per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If `dataset_dir` does not contain data files.
ValueError: If `num_parallel_workers` exceeds the max thread numbers.
RuntimeError: If `num_shards` is specified but `shard_id` is None.
RuntimeError: If `shard_id` is specified but `num_shards` is None.
Examples:
>>> udpos_dataset_dir = "/path/to/udpos_dataset_dir"
>>> dataset = ds.UDPOSDataset(dataset_dir=udpos_dataset_dir, usage='all')
"""
@check_udpos_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, shuffle=Shuffle.GLOBAL, num_shards=None,
shard_id=None, num_parallel_workers=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, 'all')
def parse(self, children=None):
return cde.UDPOSNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class WikiTextDataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses WikiText2 and WikiText103 datasets.
The generated dataset has one column :py:obj:`[text]`.
The tensor of column :py:obj:`text` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Acceptable usages include 'train', 'test', 'valid' and 'all' (default=None, all samples).
num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, 'num_samples' reflects the max sample number of per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Examples:
>>> wiki_text_dataset_dir = "/path/to/wiki_text_dataset_directory"
>>> dataset = ds.WikiTextDataset(dataset_dir=wiki_text_dataset_dir, usage='all')
About WikiTextDataset dataset:
The WikiText Long Term Dependency Language Modeling Dataset is an English lexicon containing 100 million words.
These terms are drawn from Wikipedia's premium and benchmark articles, including versions of Wikitext2 and
Wikitext103. For WikiText2, it has 36718 lines in wiki.train.tokens, 4358 lines in wiki.test.tokens and
3760 lines in wiki.valid.tokens. For WikiText103, it has 1801350 lines in wiki.train.tokens, 4358 lines in
wiki.test.tokens and 3760 lines in wiki.valid.tokens.
Here is the original WikiText dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── WikiText2/WikiText103
├── wiki.train.tokens
├── wiki.test.tokens
├── wiki.valid.tokens
Citation:
.. code-block::
@article{merity2016pointer,
title={Pointer sentinel mixture models},
author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},
journal={arXiv preprint arXiv:1609.07843},
year={2016}
}
"""
@check_wiki_text_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.WikiTextNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class YahooAnswersDataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses the YahooAnswers dataset.
The generated dataset has four columns :py:obj:`[class, title, content, answer]`.
The tensor of column :py:obj:`class` is of the string type.
The tensor of column :py:obj:`title` is of the string type.
The tensor of column :py:obj:`content` is of the string type.
The tensor of column :py:obj:`answer` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be 'train', 'test' or 'all'. 'train' will read
from 1,400,000 train samples, 'test' will read from 60,000 test samples, 'all' will read from
all 1,460,000 samples (default=None, all samples).
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all text).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing
(default=None, which means no cache is used).
Raises:
RuntimeError: If `dataset_dir` does not contain data files.
ValueError: If `num_parallel_workers` exceeds the max thread numbers.
RuntimeError: If `num_shards` is specified but `shard_id` is None.
RuntimeError: If `shard_id` is specified but `num_shards` is None.
ValueError: If `shard_id` is invalid (< 0 or >= `num_shards`).
Examples:
>>> yahoo_answers_dataset_dir = "/path/to/yahoo_answers_dataset_directory"
>>>
>>> # 1) Read 3 samples from YahooAnswers dataset
>>> dataset = ds.YahooAnswersDataset(dataset_dir=yahoo_answers_dataset_dir, num_samples=3)
>>>
>>> # 2) Read train samples from YahooAnswers dataset
>>> dataset = ds.YahooAnswersDataset(dataset_dir=yahoo_answers_dataset_dir, usage="train")
About YahooAnswers dataset:
The YahooAnswers dataset consists of 1,460,000 text samples in 10 classes.
There are 1,400,000 samples in the train.csv and 60,000 samples in the test.csv.
The 10 different classes represent Society & Culture, Science & Mathematics, Health, Education & Reference,
Computers & Internet, Sports, Business & Finance, Entertainment & Music, Family & Relationships,
Politics & Government.
Here is the original YahooAnswers dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── yahoo_answers_dataset_dir
├── train.csv
├── test.csv
├── classes.txt
└── readme.txt
.. code-block::
@article{YahooAnswers,
title = {Yahoo! Answers Topic Classification Dataset},
author = {<NAME>},
year = {2015},
howpublished = {}
}
"""
@check_yahoo_answers_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.YahooAnswersNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag,
self.num_shards, self.shard_id)
class YelpReviewDataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses Yelp Review Polarity and Yelp Review Full dataset.
The generated dataset has
c_dgcdist(rlat1,rlon1,rlat2,rlon2,2)
################################################################
def gc_interp(rlat1,rlon1,rlat2,rlon2,numi):
"""
Interpolates points along a great circle between two specified points
on the globe. The latitudes and longitudes are returned as NumPy arrays
in degrees, in the interval [0.,360.) if npts is positive and in the
interval [-180.,180.) if npts is negative.
lat,lon = Ngl.gc_interp(lat1, lon1, lat2, lon2, npts)
lat1, lon1 -- Latitude and longitude, in degrees, of the first point
on the globe.
lat2, lon2 -- Latitude and longitude, in degrees, of second point on
the globe.
npts -- The number of equally-spaced points you want to interpolate to.
"""
num = abs(numi)
if (abs(num) < 2):
print("gc_interp: the number of points must be at least two.")
elif (num == 2):
lat = numpy.array([rlat1,rlat2],'f')
lon = numpy.array([rlon1,rlon2],'f')
return [lat,lon]
else:
lat_tmp = numpy.zeros(num,'f')
lon_tmp = numpy.zeros(num,'f')
lat,lon = mapgci(rlat1,rlon1,rlat2,rlon2,num-2)
lon0_tmp = rlon1
lon1_tmp = rlon2
#
# Adjust points to be in the desired range.
#
for i in range(0,num-2):
if (numi > 0):
lon[i] = normalize_angle(lon[i],0)
else:
lon[i] = normalize_angle(lon[i],1)
if (numi > 0):
lon0_tmp = normalize_angle(lon0_tmp,0)
lon1_tmp = normalize_angle(lon1_tmp,0)
else:
lon0_tmp = normalize_angle(lon0_tmp,1)
lon1_tmp = normalize_angle(lon1_tmp,1)
#
# Set up return arrays.
#
lat_tmp[1:num-1] = lat[0:num-2]
lon_tmp[1:num-1] = lon[0:num-2]
lat_tmp[0] = rlat1
lat_tmp[num-1] = rlat2
lon_tmp[0] = lon0_tmp
lon_tmp[num-1] = lon1_tmp
del lat,lon
return lat_tmp,lon_tmp
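def _example_gc_interp():
    # Illustrative usage sketch only (coordinates are made up): five points along
    # the great circle from (40N, 100W) to (50N, 10E); a positive npts keeps the
    # returned longitudes in [0., 360.).
    return gc_interp(40., -100., 50., 10., 5)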
################################################################
def gc_inout(plat,plon,lat,lon):
"""
Determines if a set of lat/lon points are inside or outside of a spherical polygon.
inout = Ngl.gc_inout(plat, plon, lat, lon)
plat, plon -- Latitude and longitude, in degrees, of a set
of points on the globe.
lat, lon -- Latitude and longitude, in degrees, of the vertices
of a spherical polygon.
"""
#
# Promote plat and plon to numpy arrays that have at least a dimension of 1.
#
plat2 = _promote_scalar(plat)
plon2 = _promote_scalar(plon)
return(fplib.gc_inout(plat2,plon2,lat,lon))
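def _example_gc_inout():
    # Illustrative usage sketch only: checks whether (5N,5E) and (20N,5E) fall
    # inside a small spherical "square" whose vertices are listed in order.
    poly_lat = numpy.array([0., 0., 10., 10.])
    poly_lon = numpy.array([0., 10., 10., 0.])
    return gc_inout(numpy.array([5., 20.]), numpy.array([5., 5.]), poly_lat, poly_lon)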
################################################################
def gc_tarea(lat1, lon1, lat2, lon2, lat3, lon3, radius=1.):
"""
Finds the area of a triangular patch on a sphere whose vertices
are given in degrees as lat/lon pairs.
area = Ngl.gc_tarea(lat1, lon1, lat2, lon2, lat3, lon3, radius=1.)
lat1, lon1 -- Latitude and longitude, in degrees, of the first vertex.
These can be scalars, lists, or NumPy arrays.
lat2, lon2 -- Latitude and longitude, in degrees, of the second vertex.
These can be scalars, lists, or NumPy arrays.
lat3, lon3 -- Latitude and longitude, in degrees, of the third vertex.
These can be scalars, lists, or NumPy arrays.
radius -- An optional argument specifying the radius of the sphere.
The returned object is a scalar if the arguments are scalars,
or a NumPy array of the same size as the input arrays otherwise.
Any area returned is that bounded by the arcs of great circles
connecting the vertices.
"""
lat1t = numpy.atleast_1d(numpy.array(lat1)).astype(float)
lon1t = numpy.atleast_1d(numpy.array(lon1)).astype(float)
lat2t = numpy.atleast_1d(numpy.array(lat2)).astype(float)
lon2t = numpy.atleast_1d(numpy.array(lon2)).astype(float)
lat3t = numpy.atleast_1d(numpy.array(lat3)).astype(float)
lon3t = numpy.atleast_1d(numpy.array(lon3)).astype(float)
rtn = numpy.zeros(len(lat1t),'f')
pi = 4.*math.atan(1.)
d2r = pi/180.
tol = 1.e-7
for i in range(len(lat1t)):
a = d2r*gc_dist(lat1t[i], lon1t[i], lat2t[i], lon2t[i])
b = d2r*gc_dist(lat2t[i], lon2t[i], lat3t[i], lon3t[i])
c = d2r*gc_dist(lat3t[i], lon3t[i], lat1t[i], lon1t[i])
sa, sb, sc = math.sin(a), math.sin(b), math.sin(c)
if (abs(sa) < tol or abs(sb) < tol or abs(sc) < tol):
print("gc_tarea: input vertices must be distinct and not be polar opposites.")
sys.exit()
ca, cb, cc = math.cos(a), math.cos(b), math.cos(c)
sang1 = math.acos( (ca-cb*cc)/(sb*sc) )
sang2 = math.acos( (cb-ca*cc)/(sa*sc) )
sang3 = math.acos( (cc-ca*cb)/(sa*sb) )
rtn[i] = radius*radius*(sang1 + sang2 + sang3 - pi)
del lat1t,lon1t,lat2t,lon2t,lat3t,lon3t,a,b,c,sa,sb,sc,ca,cb,cc, \
sang1,sang2,sang3,tol
if (_is_scalar(lat1)):
return rtn[0]
else:
return rtn
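def _example_gc_tarea():
    # Illustrative usage sketch only: the spherical triangle with vertices at
    # (0N,0E), (0N,90E) and the north pole covers one octant of the unit sphere,
    # so the returned area should be close to pi/2, about 1.5708.
    return gc_tarea(0., 0., 0., 90., 90., 0.)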
################################################################
def gc_qarea(lat1, lon1, lat2, lon2, lat3, lon3, lat4, lon4, radius=1.):
"""
Finds the area of a convex quadrilateral patch on a sphere whose vertices
are given in degrees as lat/lon pairs.
area = Ngl.gc_qarea(lat1, lon1, lat2, lon2, lat3, lon3, lat4, lon4, radius=1.)
lat1, lon1 -- Latitude and longitude, in degrees, of the first vertex.
These can be scalars, lists, or NumPy arrays.
lat2, lon2 -- Latitude and longitude, in degrees, of the second vertex.
These can be scalars, lists, or NumPy arrays.
lat3, lon3 -- Latitude and longitude, in degrees, of the third vertex.
These can be scalars, lists, or NumPy arrays.
lat4, lon4 -- Latitude and longitude, in degrees, of the fourth vertex.
These can be scalars, lists, or NumPy arrays.
radius -- An optional argument specifying the radius of the sphere.
The returned spherical area is a scalar if the arguments are scalars
or a NumPy array of the same size as the input arrays otherwise.
The vertices must be entered in either clockwise or counter-clockwise order.
A returned area is that bounded by arcs of great circles connecting
the vertices.
"""
return gc_tarea(lat1, lon1, lat2, lon2, lat3, lon3, radius=radius) + \
gc_tarea(lat1, lon1, lat3, lon3, lat4, lon4, radius=radius)
################################################################
def generate_2d_array(dims, num_low, num_high, minv, maxv, seed=0, \
highs_at=None, lows_at=None):
"""
Generates smooth 2D arrays primarily for use in examples.
array = generate_2d_array(dims, num_low, num_high, minv, maxv, seed=0,
highs_at=None, lows_at=None)
dims -- a list (or array) containing the dimensions of the
two-dimensional array to be returned.
num_low, num_high -- Integers representing the approximate minimum
and maximum number of highs and lows that the
output array will have. They must be in the
range 1 to 25. If not, then they will be set to
either 1 or 25.
minv, maxv -- The exact minimum and maximum values that the output array
will have.
seed -- an optional argument specifying a seed for the random number
generator. If seed is outside the range 0 to 99, it will
be set to 0.
lows_at -- an optional argument that is a list of coordinate
pairs specifying where the lows will occur. If this
argument appears, then its length must equal num_low and
the coordinates must be in the ranges specified in dims.
highs_at -- an optional argument that is a list of coordinate
pairs specifying where the highs will occur. If this
argument appears, then its length must equal num_high and
the coordinates must be in the ranges specified in dims.
"""
#
# Globals for random numbers.
#
global dfran_iseq
dfran_iseq = seed
#
# Check arguments.
#
try:
alen = len(dims)
except:
print("generate_2d_array: first argument must be a list, tuple, or array having two elements specifying the dimensions of the output array.")
return None
if (alen != 2):
print("generate_2d_array: first argument must have two elements specifying the dimensions of the output array.")
return None
if (int(dims[0]) <=1 and int(dims[1]) <=1):
print("generate_2d_array: array must have at least two elements.")
return None
if (num_low < 1):
print("generate_2d_array: number of lows must be at least 1 - defaulting to 1.")
num_low = 1
if (num_low > 25):
print("generate_2d_array: number of lows must be at most 25 - defaulting to 25.")
num_low = 25
if (num_high < 1):
print("generate_2d_array: number of highs must be at least 1 - defaulting to 1.")
num_high = 1
if (num_high > 25):
print("generate_2d_array: number of highs must be at most 25 - defaulting to 25.")
num_high =25
if (seed > 100 or seed < 0):
print("generate_2d_array: seed must be in the interval [0,100] - seed set to 0.")
seed = 0
if not lows_at is None:
if (len(lows_at) != num_low):
print("generate_2d_array: the list of positions for the lows must be the same size as num_low.")
if not highs_at is None:
if (len(highs_at) != num_high):
print("generate_2d_array: the list of positions for the highs must be the same size as num_high.")
#
# Dims are reversed in order to get the same results as the NCL function.
#
nx = int(dims[1])
ny = int(dims[0])
out_array = numpy.zeros([nx,ny],'f')
tmp_array = numpy.zeros([3,51],'f')
fovm = 9./float(nx)
fovn = 9./float(ny)
nlow = max(1,min(25,num_low))
nhgh = max(1,min(25,num_high))
ncnt = nlow + nhgh
#
for k in range(num_low):
if not lows_at is None:
tmp_array[0,k] = float(lows_at[k][1]) # lows at specified locations.
tmp_array[1,k] = float(lows_at[k][0])
tmp_array[2,k] = -1.
else:
tmp_array[0,k] = 1.+(float(nx)-1.)*_dfran() # lows at random locations.
tmp_array[1,k] = 1.+(float(ny)-1.)*_dfran() # lows at random locations.
tmp_array[2,k] = -1.
for k in range(num_low,num_low+num_high):
if not highs_at is None:
tmp_array[0,k] = float(highs_at[k-num_low][1]) # highs locations
tmp_array[1,k] = float(highs_at[k-num_low][0]) # highs locations
tmp_array[2,k] = 1.
else:
tmp_array[0,k] = 1.+(float(nx)-1.)*_dfran() # highs at random locations.
tmp_array[1,k] = 1.+(float(ny)-1.)*_dfran() # highs at random locations.
tmp_array[2,k] = 1.
start)
return prices, rets
def simulate_gbm_from_prices(n_years=10, n_scenarios=20, mu=0.07, sigma=0.15, periods_per_year=12, start=100.0):
'''
Evolution of an initial stock price under a geometric Brownian motion (GBM):
S_t = S_0 exp( (mu-sigma^2/2)*dt + sigma*sqrt(dt)*xi ),
where xi are standard normal random variables N(0,1).
The equation for (log-)returns above is used to generate the prices and then log-returns are
computed by definition of log(S_{t+dt}/S_t).
Note that default periods_per_year=12 means that the method generates monthly prices (and returns):
change to 52 or 252 for weekly or daily prices and returns, respectively.
The method returns a dataframe of prices and the dataframe of returns.
'''
dt = 1 / periods_per_year
n_steps = int(n_years * periods_per_year)
# from GBM equation for log-prices:
prices_dt = np.exp( np.random.normal(loc=(mu - 0.5*sigma**2)*dt, scale=sigma*(dt**(0.5)), size=(n_steps, n_scenarios)) )
# equivalent (but faster) to:
# prices_dt = np.exp( (mu - 0.5*sigma**2)*dt + sigma*np.random.normal(loc=0, scale=(dt)**(0.5), size=(n_steps, n_scenarios)) )
prices = start * pd.DataFrame(prices_dt).cumprod()
prices = insert_first_row_df(prices, start)
# compute log-returns from generated prices
rets = compute_logreturns(prices).dropna()
return prices, rets
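def _example_simulate_gbm():
    # Illustrative usage sketch only: 5 years of weekly scenarios with made-up
    # drift and volatility; returns the simulated price paths and their log-returns.
    prices, rets = simulate_gbm_from_prices(n_years=5, n_scenarios=100, mu=0.06,
                                            sigma=0.2, periods_per_year=52, start=100.0)
    return prices, rets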
def show_gbm(n_years=10, n_scenarios=10, mu=0.05, sigma=0.15, periods_per_year=12, start=100):
'''
Plot the evolution of prices generated by a GBM.
The method simply calls the *simulate_gbm_from_returns* function and plots the generated prices.
This method is designed to be used together with the *interact* method from *ipywidgets*.
'''
prices, rets = simulate_gbm_from_returns(n_years=n_years, n_scenarios=n_scenarios,
mu=mu, sigma=sigma, periods_per_year=periods_per_year, start=start)
ax = prices.plot(figsize=(12,5), grid=True, legend=False, color="sandybrown", alpha=0.7, linewidth=2)
ax.axhline(y=start, ls=":", color="black")
if periods_per_year == 12:
xlab = "months"
elif periods_per_year == 52:
xlab = "weeks"
elif periods_per_year == 252:
xlab = "days"
ax.set_xlabel(xlab)
ax.set_ylabel("price")
ax.set_title("Prices generated by GBM")
def show_cppi(n_years=10, n_scenarios=50, m=3, floor=0, mu=0.04, sigma=0.15,
risk_free_rate=0.03, periods_per_year=12, start=100, ymax=100):
'''
CPPI simulation using Brownian Motion generated returns with mean mu and std sigma.
The method will plot the simulated CPPI wealths as well as a histogram of the
CPPI wealths at the end of the given period (n_years).
'''
# generate returns using geometric Brownian motion
_, risky_rets = simulate_gbm_from_returns(n_years=n_years, n_scenarios=n_scenarios, mu=mu, sigma=sigma,
periods_per_year=periods_per_year, start=start)
# run the CPPI strategy with fixed floor (i.e., with no drawdown constraint)
cppiw = cppi(risky_rets, start_value=start, floor=floor, m=m, drawdown=None,
risk_free_rate=risk_free_rate, periods_per_year=periods_per_year )["CPPI wealth"]
# make sure that start price is included
cols = [i for i in range(0,cppiw.shape[1])]
row = {}
for col in cols:
row[col] = start
cppiw = insert_first_row_df(cppiw, row)
# Plot parameters
fig, (wealth_ax, hist_ax) = plt.subplots(figsize=(20,7), nrows=1,ncols=2,sharey=True, gridspec_kw={"width_ratios":[3,2]} )
plt.subplots_adjust(wspace=0.005)
simclr = "sandybrown"
floorclr = "red"
startclr = "black"
ymax = (cppiw.values.max() - start)/100*ymax + start
# Plot the random walks
cppiw.plot(ax=wealth_ax, grid=True, legend=False, color=simclr, alpha=0.5, linewidth=2)
wealth_ax.axhline(y=start, ls=":", color=startclr)
wealth_ax.axhline(y=start*floor, ls=":", color=floorclr, linewidth=2)
if periods_per_year == 12:
xlab = "months"
elif periods_per_year == 52:
xlab = "weeks"
elif periods_per_year == 252:
xlab = "days"
wealth_ax.set_xlabel(xlab)
wealth_ax.set_ylim(top=ymax)
wealth_ax.set_title("CPPI wealths due to brownian motion generated returns", fontsize=14)
# Plot the histogram
violations_per_scenarios = (cppiw < start*floor).sum() # number of CPPI wealth violations of the floor per each scenario
total_violations = violations_per_scenarios.sum() # overall number of CPPI wealth violations during the entire period
terminal_wealth = cppiw.iloc[-1] # CPPI wealth at the end of the period
tw_mean = terminal_wealth.mean()
tw_median = terminal_wealth.median()
failure_mask = np.less(terminal_wealth, start*floor)
n_failures = failure_mask.sum()
p_fail = n_failures / n_scenarios
e_shorfall = np.dot(terminal_wealth - start*floor,failure_mask) / n_failures if n_failures > 0.0 else 0.0
terminal_wealth.hist(grid=False, ax=hist_ax, bins=50, ec="white", fc=simclr, orientation="horizontal")
hist_ax.axhline(y=start, ls=":", color=startclr)
hist_ax.axhline(y=start*floor, ls=":", color=floorclr, linewidth=2)
hist_ax.axhline(y=tw_mean, ls=":", color=simclr)
hist_ax.axhline(y=tw_median, ls=":", color=simclr)
hist_ax.annotate("Mean: ${:.2f}".format(tw_mean), xy=(0.5, 0.9), xycoords="axes fraction", fontsize=15)
hist_ax.annotate("Median: ${:.2f}".format(tw_mean), xy=(0.5, 0.85), xycoords="axes fraction", fontsize=15)
if floor > 0.0:
hist_ax.annotate("Violations (overall): {}".format(total_violations), xy=(0.5, 0.75), xycoords="axes fraction", fontsize=15)
hist_ax.annotate("Violations (end period): {} ({:.1f}%)".format(n_failures, p_fail*100), xy=(0.5, 0.7), xycoords="axes fraction", fontsize=15)
hist_ax.annotate("E(shortfall) (end period): ${:.2f}".format(e_shorfall), xy=(0.5, 0.65), xycoords="axes fraction", fontsize=15)
hist_ax.set_title("Histogram of the CPPI wealth at the end of the period", fontsize=14)
# ---------------------------------------------------------------------------------
# Securities
# ---------------------------------------------------------------------------------
def discount(t, r):
'''
Compute the price of a pure discount bond that pays 1 at time t (in years),
given an interest rate (return) r. That is, considering FV = 1 at time t,
we want to obtain the PV given r, i.e., PV = FV/(1+r)^t = 1/(1+r)^t.
Note that t can be a scalar, a list, or a pd.Series of times, and r can be a single rate or a list of rates.
'''
if not isinstance(t, pd.Series):
t = pd.Series(t)
if not isinstance(r, list):
r = [r]
ds = pd.DataFrame( [1/(1+rate)**(t) for rate in r] ).T
ds.index = t
return ds
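# Quick check of the discount factors (a minimal sketch; the _demo_* name is
# hypothetical):
def _demo_discount():
    # Present value of 1 received in 1, 5 and 10 years at a flat 3% rate.
    ds = discount([1, 5, 10], 0.03)
    # Each entry equals 1/(1.03)**t, e.g. roughly 0.9709 for t=1.
    print(ds)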
def present_value(L, r):
'''
Computes the (cumulative) present value PV of a DataFrame
of liabilities L at a given interest rate r.
Liabilities L has to be a pd.DataFrame
'''
if not isinstance(L, pd.DataFrame):
raise TypeError("Expected pd.DataFrame")
dates = pd.Series(L.index)
ds = discount(dates, r) # this is the series of present values of future cashflows
return (ds * L).sum()
def funding_ratio(asset_value, liabilities, r):
'''
Computes the funding ratio between the value of holding assets and the present
value of the liabilities given an interest rate r (or a list of)
'''
return asset_value / present_value(liabilities, r)
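# Worked example of the funding ratio (a hedged sketch with made-up numbers;
# it relies on discount() and present_value() defined above):
def _demo_funding_ratio():
    # Liabilities of 1.0 due in 3 years and 1.5 due in 5 years, assets of 2.3.
    liabilities = pd.DataFrame([1.0, 1.5], index=[3, 5])
    fr = funding_ratio(asset_value=2.3, liabilities=liabilities, r=0.03)
    # A ratio above 1 means the assets cover the discounted liabilities
    # (roughly 1.04 in this example).
    print(fr)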
def compounding_rate(r, periods_per_year=None):
'''
Given a nominal rate r, it returns the continuously compounded rate R = e^r - 1 if periods_per_year is None.
If periods_per_year is not None, then returns the discrete compounded rate R = (1+r/N)**N-1.
'''
if periods_per_year is None:
return np.exp(r) - 1
else:
return (1 + r/periods_per_year)**periods_per_year - 1
def compounding_rate_inv(R, periods_per_year=None):
'''
Given a compounded rate, it returns the nominal rate from continuous
compounding, r = log(1+R), if periods_per_year is None.
If periods_per_year is not None, then it returns the nominal rate from discrete
compounding, r = N*((1+R)^(1/N) - 1), where N = periods_per_year.
'''
if periods_per_year is None:
return np.log(1+R)
else:
return periods_per_year * ( (1+R)**(1/periods_per_year) - 1 )
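# Round-trip sanity check of the two compounding helpers (a minimal sketch):
def _demo_compounding_round_trip():
    nominal = 0.05
    # Continuous compounding: R = e^r - 1 and back with r = log(1 + R).
    R_cont = compounding_rate(nominal)
    assert abs(compounding_rate_inv(R_cont) - nominal) < 1e-10
    # Discrete monthly compounding: R = (1 + r/12)**12 - 1 and back.
    R_month = compounding_rate(nominal, periods_per_year=12)
    assert abs(compounding_rate_inv(R_month, periods_per_year=12) - nominal) < 1e-10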
def simulate_cir(n_years=10, n_scenarios=10, a=0.05, b=0.03, sigma=0.05, periods_per_year=12, r0=None):
'''
Evolution of (instantaneous) interest rates and of the corresponding zero-coupon bond prices using the CIR model:
dr_t = a*(b - r_t)*dt + sigma*sqrt(r_t)*dW_t,
where dW_t = sqrt(dt)*xi_t and the xi_t are standard normal N(0,1) random variables.
The analytical solution for the zero-coupon bond price is also computed.
The method returns a dataframe of interest rates and a dataframe of zero-coupon bond prices.
if r0 is None:
# Assign the long-term mean interest rate as initial rate
r0 = b
# Compute the price of a ZCB
def zcbprice(ttm,r,h):
A = ( ( 2*h*np.exp(0.5*(a+h)*ttm) ) / ( 2*h + (a+h)*(np.exp(h*ttm)-1) ) )**(2*a*b/(sigma**2))
B = ( 2*(np.exp(h*ttm)-1) ) / ( 2*h + (a+h)*(np.exp(h*ttm)-1) )
return A * np.exp(-B * r)
dt = 1 / periods_per_year
n_steps = int(n_years * periods_per_year) + 1
# get the nominal (instantaneous) rate
r0 = compounding_rate_inv(r0)
# the shock is sqrt(dt)*xi_t, with xi_t being a standard normal r.v.
shock = np.random.normal(loc=0, scale=(dt)**(0.5), size=(n_steps, n_scenarios))
# Rates initialization
rates = np.zeros_like(shock)
rates[0] = r0
# Price initialization and parameters
zcb_prices = np.zeros_like(shock)
h = np.sqrt(a**2 + 2*sigma**2)
zcb_prices[0] = zcbprice(n_years,r0,h)
for step in range(1,n_steps):
# previous interest rate
r_t = rates[step-1]
# Current (updated) interest rate: CIR equation
rates[step] = r_t + a*(b - r_t)*dt + sigma*np.sqrt(r_t)*shock[step]
# Current (updated) ZCB price, computed with the updated rate
zcb_prices[step] = zcbprice(n_years - dt*step, rates[step], h)
# the rates generated (according to the periods_per_year) are transformed back to annual rates
rates = pd.DataFrame( compounding_rate(rates) )
zcb_prices = pd.DataFrame( zcb_prices )
return rates, zcb_prices
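# Illustrative usage of the CIR simulation (a hedged sketch; the parameter
# values are made up for the example):
def _demo_simulate_cir():
    rates, zcb_prices = simulate_cir(n_years=5, n_scenarios=4, a=0.5, b=0.03,
                                     sigma=0.05, periods_per_year=12, r0=0.03)
    # Both outputs have one column per scenario; the zero-coupon bond prices
    # approach 1 as the remaining time to maturity shrinks to zero.
    print(rates.tail(1))
    print(zcb_prices.tail(1))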
def bond_cash_flows(principal=100, maturity=10, coupon_rate=0.03, coupons_per_year=2):
'''
Generates a pd.DataFrame of cash flows for a regular coupon-bearing bond.
'''
# total number of coupons
n_coupons = round(maturity * coupons_per_year)
# coupon amount
coupon_amount = (coupon_rate / coupons_per_year) * principal
# Cash flows
cash_flows = pd.DataFrame(coupon_amount, index = np.arange(1,n_coupons+1), columns=[0])
cash_flows.iloc[-1] = cash_flows.iloc[-1] + principal
return cash_flows
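# Example cash-flow schedule (a minimal sketch): a 3-year bond with a 3%
# annual coupon paid semi-annually on a principal of 100 produces six
# coupons of 1.5, with the principal repaid together with the last one.
def _demo_bond_cash_flows():
    cf = bond_cash_flows(principal=100, maturity=3, coupon_rate=0.03,
                         coupons_per_year=2)
    print(cf)  # rows indexed 1..6, last row equal to 101.5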
def bond_price(principal=100, maturity=10, coupon_rate=0.02, coupons_per_year=2, ytm=0.03, cf=None):
'''
Return the price of regular coupon-bearing bonds
Note that:
- the maturity is intended as an annual variable (e.g., for | |
* I1Ii111 * I11i - I1ii11iIi11i + I1Ii111
if 50 - 50: OoooooooOO * II111iiii
if 7 - 7: ooOoO0o / I11i * iII111i
if 17 - 17: O0 % I1Ii111
if 28 - 28: i1IIi * ooOoO0o
lisp_remove_eid_from_map_notify_queue ( oOoOOo . eid_list )
if ( lisp_map_notify_queue . has_key ( ii1i1I1111ii ) ) :
oOoOOo = lisp_map_notify_queue [ ii1i1I1111ii ]
lprint ( "Map-Notify with nonce 0x{} pending for ITR {}" . format ( oOoOOo . nonce , red ( xtr . print_address_no_iid ( ) , False ) ) )
if 14 - 14: II111iiii + II111iiii - I11i / I11i . OoOoOO00 + OoO0O00
return
if 92 - 92: II111iiii - II111iiii % IiII
if 48 - 48: oO0o / II111iiii + oO0o
if 16 - 16: o0oOOo0O0Ooo % II111iiii - i11iIiiIii - IiII + O0 - i11iIiiIii
if 58 - 58: OoooooooOO / I1ii11iIi11i - Oo0Ooo / II111iiii
if 13 - 13: o0oOOo0O0Ooo + OoOoOO00 * ooOoO0o % IiII
lisp_map_notify_queue [ ii1i1I1111ii ] = oOoOOo
if 18 - 18: I1IiiI . I1ii11iIi11i + Oo0Ooo - iII111i
if 53 - 53: ooOoO0o / IiII
if 36 - 36: iIii1I11I1II1
if 78 - 78: II111iiii * I11i
iIIi1IIiiI1 = site_eid . rtrs_in_rloc_set ( )
if ( iIIi1IIiiI1 ) :
if ( site_eid . is_rtr_in_rloc_set ( xtr ) ) : iIIi1IIiiI1 = False
if 66 - 66: i11iIiiIii / OoOoOO00
if 100 - 100: o0oOOo0O0Ooo . iIii1I11I1II1 . Oo0Ooo . O0 - OOooOOo
if 8 - 8: ooOoO0o % o0oOOo0O0Ooo
if 22 - 22: O0 * IiII . OoO0O00
if 63 - 63: oO0o % Oo0Ooo * OoO0O00 / II111iiii / Ii1I - ooOoO0o
iiI = lisp_eid_record ( )
iiI . record_ttl = 1440
iiI . eid . copy_address ( site_eid . eid )
iiI . group . copy_address ( site_eid . group )
iiI . rloc_count = 0
for iIII in site_eid . registered_rlocs :
if ( iIIi1IIiiI1 ^ iIII . is_rtr ( ) ) : continue
iiI . rloc_count += 1
if 14 - 14: ooOoO0o . o0oOOo0O0Ooo + II111iiii
IIii1i = iiI . encode ( )
if 50 - 50: Ii1I - i1IIi * oO0o
if 52 - 52: I11i / oO0o - oO0o
if 84 - 84: iIii1I11I1II1 - o0oOOo0O0Ooo
if 37 - 37: iII111i * o0oOOo0O0Ooo
oOoOOo . print_notify ( )
iiI . print_record ( " " , False )
if 23 - 23: ooOoO0o + OoooooooOO * iII111i . I11i
if 2 - 2: iIii1I11I1II1 * I1ii11iIi11i - OoooooooOO
if 93 - 93: iII111i % ooOoO0o * Oo0Ooo
if 34 - 34: O0 * oO0o
for iIII in site_eid . registered_rlocs :
if ( iIIi1IIiiI1 ^ iIII . is_rtr ( ) ) : continue
iIii1IiIiI = lisp_rloc_record ( )
iIii1IiIiI . store_rloc_entry ( iIII )
IIii1i += iIii1IiIiI . encode ( )
iIii1IiIiI . print_record ( " " )
if 58 - 58: OOooOOo . iII111i - Oo0Ooo / iII111i . I11i
if 86 - 86: iIii1I11I1II1 - iII111i % Ii1I
if 18 - 18: oO0o / IiII - OOooOOo % Ii1I
if 88 - 88: i11iIiiIii
if 13 - 13: I1IiiI
IIii1i = oOoOOo . encode ( IIii1i , "" )
if ( IIii1i == None ) : return
if 52 - 52: Ii1I * oO0o / I1Ii111 . IiII
if 84 - 84: OoooooooOO - oO0o - I1Ii111
if 69 - 69: OoOoOO00 * Ii1I % OoooooooOO % OOooOOo * OoOoOO00
if 20 - 20: IiII
lisp_send_map_notify ( lisp_sockets , IIii1i , xtr , LISP_CTRL_PORT )
if 17 - 17: o0oOOo0O0Ooo % iIii1I11I1II1
if 66 - 66: OoooooooOO + IiII . II111iiii
if 66 - 66: iIii1I11I1II1 % I11i
if 38 - 38: I1ii11iIi11i * ooOoO0o
oOoOOo . retransmit_timer = threading . Timer ( LISP_MAP_NOTIFY_INTERVAL ,
lisp_retransmit_map_notify , [ oOoOOo ] )
oOoOOo . retransmit_timer . start ( )
return
if 77 - 77: OOooOOo - i11iIiiIii - I1ii11iIi11i
if 94 - 94: OoO0O00 % iII111i - I1Ii111 + OoO0O00 - I1IiiI
if 65 - 65: OOooOOo
if 90 - 90: O0
if 91 - 91: O0 * OoOoOO00 - OoOoOO00 * II111iiii - iII111i
if 38 - 38: oO0o * I11i % OOooOOo
if 80 - 80: O0 % II111iiii / O0 . Oo0Ooo * OoOoOO00 + OOooOOo
def lisp_queue_multicast_map_notify ( lisp_sockets , rle_list ) :
i11IIiiII = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 31 - 31: OoO0O00 + i11iIiiIii / I11i % O0 / Ii1I
for iiI1 in rle_list :
oo0O0000O0 = lisp_site_eid_lookup ( iiI1 [ 0 ] , iiI1 [ 1 ] , True )
if ( oo0O0000O0 == None ) : continue
if 90 - 90: i11iIiiIii * i1IIi
if 88 - 88: i11iIiiIii - OoOoOO00
if 53 - 53: iIii1I11I1II1 % I1Ii111 / Oo0Ooo % Oo0Ooo
if 6 - 6: iII111i
if 44 - 44: oO0o
if 23 - 23: I1IiiI + iIii1I11I1II1 . iII111i + OOooOOo - OoO0O00 + i1IIi
if 60 - 60: i11iIiiIii + Oo0Ooo * OoOoOO00 . iII111i - iIii1I11I1II1 * IiII
oOO0OOOOoo = oo0O0000O0 . registered_rlocs
if ( len ( oOO0OOOOoo ) == 0 ) :
iI11IiI1 = { }
for III1i1IIi111i in oo0O0000O0 . individual_registrations . values ( ) :
for iIII in III1i1IIi111i . registered_rlocs :
if ( iIII . is_rtr ( ) == False ) : continue
iI11IiI1 [ iIII . rloc . print_address ( ) ] = iIII
if 46 - 46: OOooOOo
if 7 - 7: I11i + ooOoO0o
oOO0OOOOoo = iI11IiI1 . values ( )
if 28 - 28: OoooooooOO * iII111i / oO0o / iII111i
if 80 - 80: OoO0O00 - I1IiiI + OOooOOo - iII111i / i1IIi
if 11 - 11: i1IIi + O0 * IiII / O0 % I11i . I11i
if 39 - 39: II111iiii . i11iIiiIii + I1IiiI + I1ii11iIi11i
if 6 - 6: O0 % Ii1I . oO0o
if 91 - 91: O0 - oO0o * O0
oOoO0O0O0O0 = [ ]
o0III = False
if ( oo0O0000O0 . eid . address == 0 and oo0O0000O0 . eid . mask_len == 0 ) :
Ii1I1 = [ ]
o0o = [ ]
if ( len ( oOO0OOOOoo ) != 0 and oOO0OOOOoo [ 0 ] . rle != None ) :
o0o = oOO0OOOOoo [ 0 ] . rle . rle_nodes
if 63 - 63: I1ii11iIi11i % I11i % OoooooooOO
for iIIII1iiIII in o0o :
oOoO0O0O0O0 . append ( iIIII1iiIII . address )
Ii1I1 . append ( iIIII1iiIII . address . print_address_no_iid ( ) )
if 100 - 100: O0
lprint ( "Notify existing RLE-nodes {}" . format ( Ii1I1 ) )
else :
if 9 - 9: Ii1I
if 87 - 87: I1IiiI
if 56 - 56: OOooOOo % oO0o - OoOoOO00
if 27 - 27: I1ii11iIi11i - IiII * OoooooooOO * I1ii11iIi11i + i11iIiiIii . IiII
if 81 - 81: oO0o / iIii1I11I1II1
for iIII in oOO0OOOOoo :
if ( iIII . is_rtr ( ) ) : oOoO0O0O0O0 . append ( iIII . rloc )
if 15 - 15: Ii1I + I1IiiI . OOooOOo / OoooooooOO + I11i - I11i
if 27 - 27: Ii1I / o0oOOo0O0Ooo . iIii1I11I1II1 . I1IiiI - OoO0O00
if 28 - 28: ooOoO0o
if 88 - 88: oO0o
if 77 - 77: ooOoO0o + I1Ii111 . OoOoOO00
o0III = ( len ( oOoO0O0O0O0 ) != 0 )
if ( o0III == False ) :
oO00Oooo0o0o0 = lisp_site_eid_lookup ( iiI1 [ 0 ] , i11IIiiII , False )
if ( oO00Oooo0o0o0 == None ) : continue
if 2 - 2: i1IIi - IiII + iIii1I11I1II1 % i1IIi * II111iiii
for iIII in oO00Oooo0o0o0 . registered_rlocs :
if ( iIII . rloc . is_null ( ) ) : continue
oOoO0O0O0O0 . append ( iIII | |
import sys, string, math, types
from pandac.PandaModules import *
import direct.gui.DirectGuiGlobals as DGG
from direct.gui.DirectGui import *
from PieMenu import *
from ScrollMenu import *
dnaDirectory = Filename.expandFrom(base.config.GetString("dna-directory", "leveleditor"))
# Colors used by all color menus
DEFAULT_COLORS = [
Vec4(1, 1, 1, 1),
Vec4(0.75, 0.75, 0.75, 1.0),
Vec4(0.5, 0.5, 0.5, 1.0),
Vec4(0.25, 0.25, 0.25, 1.0)
]
# The list of items with color attributes
COLOR_TYPES = ['wall_color', 'window_color',
'window_awning_color', 'sign_color', 'door_color',
'door_awning_color', 'cornice_color',
'prop_color']
# The list of dna components maintained in the style attribute dictionary
DNA_TYPES = ['wall', 'window', 'sign', 'door_double', 'door_single', 'cornice', 'toon_landmark',
'prop', 'street']
BUILDING_TYPES = ['10_10', '20', '10_20', '20_10', '10_10_10',
'4_21', '3_22', '4_13_8', '3_13_9', '10',
'12_8', '13_9_8', '4_10_10', '4_10', '4_20',
]
BUILDING_HEIGHTS = [10, 14, 20, 24, 25, 30]
NUM_WALLS = [1, 2, 3]
LANDMARK_SPECIAL_TYPES = ['', 'hq', 'gagshop', 'clotheshop', 'petshop', 'kartshop']
OBJECT_SNAP_POINTS = {
'street_5x20': [(Vec3(5.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_10x20': [(Vec3(10.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_20x20': [(Vec3(20.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_30x20': [(Vec3(30.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_40x20': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_80x20': [(Vec3(80.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_5x40': [(Vec3(5.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_10x40': [(Vec3(10.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_20x40': [(Vec3(20.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_20x40_15': [(Vec3(20.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_30x40': [(Vec3(30.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_40x40': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_20x60': [(Vec3(20.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_40x60': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_40x40_15': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_80x40': [(Vec3(80.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_angle_30': [(Vec3(0), Vec3(-30, 0, 0)),
(Vec3(0), Vec3(0))],
'street_angle_45': [(Vec3(0), Vec3(-45, 0, 0)),
(Vec3(0), Vec3(0))],
'street_angle_60': [(Vec3(0), Vec3(-60, 0, 0)),
(Vec3(0), Vec3(0))],
'street_inner_corner': [(Vec3(20.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_outer_corner': [(Vec3(20.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_full_corner': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_tight_corner': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_tight_corner_mirror': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_double_corner': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_curved_corner': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_curved_corner_15': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_t_intersection': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_y_intersection': [(Vec3(30.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_street_20x20': [(Vec3(20.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_street_40x40': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_sidewalk_20x20': [(Vec3(20.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_sidewalk_40x40': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_divided_transition': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_divided_40x70': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_divided_transition_15': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_divided_40x70_15': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_stairs_40x10x5': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_4way_intersection': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_incline_40x40x5': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_square_courtyard': [(Vec3(0.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_courtyard_70': [(Vec3(0.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_courtyard_70_exit': [(Vec3(0.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_courtyard_90': [(Vec3(0.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_courtyard_90_exit': [(Vec3(0.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_courtyard_70_15': [(Vec3(0.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_courtyard_70_15_exit': [(Vec3(0.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_courtyard_90_15': [(Vec3(0.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_courtyard_90_15_exit': [(Vec3(0.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_50_transition': [(Vec3(10.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_20x50': [(Vec3(20.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_40x50': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_keyboard_10x40': [(Vec3(10.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_keyboard_20x40': [(Vec3(20.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_keyboard_40x40': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
'street_sunken_40x40': [(Vec3(40.0, 0, 0), Vec3(0)),
(Vec3(0), Vec3(0))],
}
# Precompute class types for type comparisons
DNA_CORNICE = DNACornice.getClassType()
DNA_DOOR = DNADoor.getClassType()
DNA_FLAT_DOOR = DNAFlatDoor.getClassType()
DNA_FLAT_BUILDING = DNAFlatBuilding.getClassType()
DNA_NODE = DNANode.getClassType()
DNA_GROUP = DNAGroup.getClassType()
DNA_VIS_GROUP = DNAVisGroup.getClassType()
DNA_LANDMARK_BUILDING = DNALandmarkBuilding.getClassType()
DNA_NODE = DNANode.getClassType()
DNA_PROP = DNAProp.getClassType()
DNA_SIGN = DNASign.getClassType()
DNA_SIGN_BASELINE = DNASignBaseline.getClassType()
DNA_SIGN_TEXT = DNASignText.getClassType()
DNA_SIGN_GRAPHIC = DNASignGraphic.getClassType()
DNA_STREET = DNAStreet.getClassType()
DNA_WALL = DNAWall.getClassType()
DNA_WINDOWS = DNAWindows.getClassType()
# DNA Utility functions (possible class extensions?)
def DNARemoveChildren(dnaObject):
""" Utility function to delete all the children of a DNANode """
children = []
for i in range(dnaObject.getNumChildren()):
children.append(dnaObject.at(i))
for child in children:
dnaObject.remove(child)
DNASTORE.removeDNAGroup(child)
def DNARemoveChildOfClass(dnaNode, classType, childNum = 0):
""" Remove the nth object of that type you come across """
childCount = 0
for i in range(dnaNode.getNumChildren()):
child = dnaNode.at(i)
if DNAClassEqual(child, classType):
if childCount == childNum:
dnaNode.remove(child)
DNASTORE.removeDNAGroup(child)
return 1
childCount = childCount + 1
# None found
return 0
def DNARemoveAllChildrenOfClass(dnaNode, classType):
""" Remove the objects of that type """
children = []
for i in range(dnaNode.getNumChildren()):
child=dnaNode.at(i)
if DNAClassEqual(child, classType):
children.append(child)
for child in children:
dnaNode.remove(child)
DNASTORE.removeDNAGroup(child)
def DNAGetChildren(dnaNode, classType=None):
""" Return the objects of that type """
children = []
for i in range(dnaNode.getNumChildren()):
child=dnaNode.at(i)
if ((not classType)
or DNAClassEqual(child, classType)):
children.append(child)
return children
def DNAGetChild(dnaObject, type = DNA_NODE, childNum = 0):
childCount = 0
for i in range(dnaObject.getNumChildren()):
child = dnaObject.at(i)
if DNAClassEqual(child, type):
if childCount == childNum:
return child
childCount = childCount + 1
# Not found
return None
def DNAGetChildRecursive(dnaObject, type = DNA_NODE, childNum = 0):
childCount = 0
for i in range(dnaObject.getNumChildren()):
child = dnaObject.at(i)
if DNAClassEqual(child, type):
if childCount == childNum:
return child
childCount = childCount + 1
else:
child = DNAGetChildRecursive(child, type, childNum-childCount)
if child:
return child
# Not found
return None
def DNAGetChildOfClass(dnaNode, classType):
for i in range(dnaNode.getNumChildren()):
child = dnaNode.at(i)
if DNAClassEqual(child, classType):
return child
# Not found
return None
def DNAGetClassType(dnaObject):
return dnaObject.__class__.getClassType()
def DNAClassEqual(dnaObject, classType):
return DNAGetClassType(dnaObject).eq(classType)
def DNAIsDerivedFrom(dnaObject, classType):
return DNAGetClassType(dnaObject).isDerivedFrom(classType)
def DNAGetWallHeights(aDNAFlatBuilding):
""" Get a list of wall heights for a given flat building """
# Init variables
heightList = []
offsetList = []
offset = 0.0
# Compute wall heights
for i in range(aDNAFlatBuilding.getNumChildren()):
child = aDNAFlatBuilding.at(i)
if DNAClassEqual(child, DNA_WALL):
height = child.getHeight()
heightList.append(height)
offsetList.append(offset)
offset = offset + height
return heightList, offsetList
def DNAGetBaselineString(baseline):
s=""
for i in range(baseline.getNumChildren()):
child = baseline.at(i)
if DNAClassEqual(child, DNA_SIGN_TEXT):
s=s+child.getLetters()
elif DNAClassEqual(child, DNA_SIGN_GRAPHIC):
s=s+'['+child.getCode()+']'
return s
def DNASetBaselineString(baseline, text):
# TODO: Instead of removing all the text and replacing it,
# replace each text item and then add or remove at the end.
# This should allow inlined graphics to stay in place.
# end of todo.
DNARemoveAllChildrenOfClass(baseline, DNA_SIGN_TEXT)
# We can't just blindly iterate through the text, because it might
# be utf-8 encoded, meaning some characters are represented using
# multi-byte sequences. Instead, create a TextNode and use it to
# iterate through the characters of the text.
t = TextNode('')
t.setText(text)
for i in range(t.getNumChars()):
ch = t.getEncodedChar(i)
text=DNASignText("text")
text.setLetters(ch)
baseline.add(text)
class LevelStyleManager:
"""Class which reads in style files and manages class variables"""
def __init__(self, NEIGHBORHOODS = [], NEIGHBORHOOD_CODES = {} ):
self.NEIGHBORHOODS = NEIGHBORHOODS
self.NEIGHBORHOOD_CODES = NEIGHBORHOOD_CODES
# The main dictionary holding all attribute objects
self.attributeDictionary = {}
# Create the style samples
self.createBaselineStyleAttributes()
self.createWallStyleAttributes()
self.createBuildingStyleAttributes()
self.createColorAttributes()
self.createDNAAttributes()
self.createMiscAttributes()
# BASELINE STYLE FUNCTIONS
def createBaselineStyleAttributes(self):
"""
Create a baselineStyle entry in the attribute dictionary
This will be a dictionary of style attributes, one per neighborhood
"""
# First create an empty dictionary
dict = self.attributeDictionary['baseline_style'] = {}
# Create a attribute object for each neighborhood
for neighborhood in self.NEIGHBORHOODS:
attribute = LevelAttribute('baseline_style')
attribute.setDict(
# Create a baseline style dictionary for each neighborhood
self.createBaselineStyleDictionary(neighborhood))
# Using this dictionary, create style pie menus
attribute.setMenu(
self.createBaselineStyleMenu(neighborhood, attribute.getDict()))
dict[neighborhood] = attribute
def createBaselineStyleDictionary(self, neighborhood):
"""
Create a dictionary of baseline styles for a neighborhood
"""
filename = neighborhood + '_baseline_styles.txt'
print 'Loading baseline styles from: ' + filename
styleData = self.getStyleFileData(filename)
return self.initializeBaselineStyleDictionary(styleData, neighborhood)
def initializeBaselineStyleDictionary(self, styleData, neighborhood):
"""
Fill in the baseline style dictionary with data from the style file
"""
styleDictionary = {}
styleCount = 0
code = self.NEIGHBORHOOD_CODES[neighborhood]
while styleData:
l = styleData[0]
if l == 'baselineStyle':
# Start of new style, strip off first line then extract style
style, styleData = self.extractBaselineStyle(styleData)
style.name = code + '_baseline_style_' + `styleCount`
# Store style in dictionary
styleDictionary[style.name] = style
styleCount = styleCount + 1
# Move to next line
styleData = styleData[1:]
return styleDictionary
def extractBaselineStyle(self, styleData):
"""
Pull out one style from a list of style data. Will keep
processing data until endBaselineStyle or the end of the data is reached.
Returns a baseline style and remaining styleData.
"""
# Create default style
style = DNABaselineStyle()
# Strip off first line
styleData = styleData[1:]
while styleData:
l = styleData[0]
if l == 'endBaselineStyle':
# End of style found, break out of while | |
<gh_stars>1-10
#
# Copyright (c) 2017, 2019 Oracle and/or its affiliates. All rights reserved.
# Licensed under the Universal Permissive License v 1.0 as shown
# at http://oss.oracle.com/licenses/upl.
"""
This utility assists with configuring network interfaces on Oracle Cloud
Infrastructure instances. See the manual page for more information.
"""
import argparse
import logging
import os
import sys
import time
import oci_utils.oci_api
from oci_utils.exceptions import OCISDKError
from oci_utils.vnicutils import VNICUtils
__logger = logging.getLogger("oci-utils.oci-network-config")
def parse_args():
"""
Parse the command line arguments and return an object representing the
command line (as returned by argparse's parse_args()).
Returns
-------
The argparse namespace.
"""
parser = argparse.ArgumentParser(description='Utility for configuring '
'network interfaces on an '
'instance running in the '
'Oracle Cloud '
'Infrastructure.')
parser.add_argument('-s', '--show', action='store_true',
help='Show information on all provisioning and '
'interface configuration. This is the default '
'action if no options are given.')
parser.add_argument('--create-vnic', action='store_true',
help='Create a new VNIC and attach it to '
'this instance')
parser.add_argument('--nic-index', action='store', metavar='INDEX',
type=int, default=0,
help='physical NIC card index. When used with '
'the --create-vnic option, assign the new VNIC '
'to the specified physical NIC card.')
parser.add_argument('--detach-vnic', action='store', metavar='VNIC',
help='Detach and delete the VNIC with the given OCID'
' or primary IP address')
parser.add_argument('--add-private-ip', action='store_true',
help='Add a secondary private IP to an existing VNIC')
parser.add_argument('--del-private-ip', action='store', metavar='ADDR',
help='delete the secondary private IP address with '
'the given IP address')
parser.add_argument('--private-ip', action='store', metavar='ADDR',
help='When used with the --create-vnic or '
'add-private-ip options, '
'assign the given private IP address to the VNIC')
parser.add_argument('--subnet', action='store',
help='When used with the --create-vnic option, '
'connect the new VNIC to the given subnet.')
parser.add_argument('--vnic-name', action='store', metavar='NAME',
help='When used with the --create-vnic option, '
'use NAME as the display name of the new VNIC')
parser.add_argument('--assign-public-ip', action='store_true',
help='When used with the --create-vnic option, '
'assign a public IP address to the new VNIC.')
parser.add_argument('--vnic', action='store', metavar='OCID',
help='When used with the --add-private-ip option, '
'assign the private IP to the given VNIC')
parser.add_argument('-a', '--auto', '-c', '--configure',
action='store_true',
help='Add IP configuration for VNICs that are not '
'configured and delete for VNICs that are no '
'longer provisioned.')
parser.add_argument('-d', '--deconfigure', action='store_true',
help='Deconfigure all VNICs (except the primary). If '
'a -e option is also present only the secondary '
'IP address(es) are deconfigured.')
parser.add_argument('-e', nargs=2, metavar=('IP_ADDR', 'VNIC_OCID'),
dest='sec_ip', action='append',
help='Secondary private IP address to configure or '
'deconfigure. Use in conjunction with -c or -d.')
parser.add_argument('-n', '--ns', action='store', metavar='FORMAT',
help='When configuring, place interfaces in namespace '
'identified by the given format. Format can '
'include $nic and $vltag variables.')
parser.add_argument('-r', '--sshd', action='store_true',
help='Start sshd in namespace (if -n is present)')
parser.add_argument('-X', '--exclude', metavar='ITEM', action='append',
type=str, dest='exclude',
help='Persistently exclude ITEM from automatic '
'configuration/deconfiguration. Use the '
'--include option to include the ITEM again.')
parser.add_argument('-I', '--include', metavar='ITEM', action='append',
type=str, dest='include',
help='Include an ITEM that was previously excluded '
'using the --exclude option in automatic '
'configuration/deconfiguration.')
parser.add_argument('--quiet', '-q', action='store_true',
help='Suppress information messages')
args = parser.parse_args()
return args
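# Illustrative invocations (a hedged sketch; the command name is inferred from
# the logger name above, only options defined in this parser are used, and the
# OCIDs are placeholders):
#   oci-network-config --show
#   oci-network-config --create-vnic --subnet ocid1.subnet.oc1..example --assign-public-ip
#   oci-network-config --add-private-ip --vnic ocid1.vnic.oc1..example
#   oci-network-config --detach-vnic ocid1.vnic.oc1..example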
def get_oci_api_session(opt_name=None):
"""
Ensure the OCI SDK is available if the option is not None.
Parameters
----------
opt_name : str
Operation name currently being executed (used for logging).
Returns
-------
OCISession
The session or None if cannot get one
"""
sess = None
if oci_utils.oci_api.HAVE_OCI_SDK:
try:
sess = oci_utils.oci_api.OCISession()
except Exception as e:
sdk_error = str(e)
if opt_name is not None:
__logger.error("To use the %s option, you need to "
"install and configure the OCI Python SDK "
"(python-oci-sdk)\n" % opt_name)
__logger.error(sdk_error)
else:
__logger.error("Failed to access OCI services: %s" % sdk_error)
return sess
def api_show_network_config():
"""
Show the current network configuration of the instance based on
information obtained through OCI API calls, if the OCI SDK is
configured.
Returns
-------
No return value.
"""
sess = get_oci_api_session()
if sess is None:
__logger.error("Failed to get API session.")
return
inst = sess.this_instance()
if inst is None:
__logger.error("Failed to get information from OCI.")
return
vnics = inst.all_vnics()
i = 1
print "VNIC configuration for instance %s" % inst.get_display_name()
print
for vnic in vnics:
primary = ""
if vnic.is_primary():
primary = " (primary)"
print "VNIC %d%s: %s" % (i, primary, vnic.get_display_name())
print " Hostname: %s" % vnic.get_hostname()
print " OCID: %s" % vnic.get_ocid()
print " MAC address: %s" % vnic.get_mac_address()
print " Public IP address: %s" % vnic.get_public_ip()
print " Private IP address: %s" % vnic.get_private_ip()
_subn = vnic.get_subnet()
if _subn is not None:
print " Subnet: %s (%s)" % (_subn.get_display_name(), _subn)
else:
print " Subnet: Not found"
privips = vnic.all_private_ips()
if len(privips) > 0:
print " Private IP addresses:"
for privip in privips:
print " IP address: %s" % privip.get_address()
print " OCID: %s" % privip.get_ocid()
print " Hostname: %s" % privip.get_hostname()
print " Subnet: %s (%s)" % \
(privip.get_subnet().get_display_name(),
privip.get_subnet().get_cidr_block())
print
else:
print
i += 1
def do_show_network_config(vnic_utils):
"""
Display the current network interface configuration as well as the
VNIC configuration from OCI.
Parameters
----------
vnic_utils :
The VNIC configuration instance.
Returns
-------
No return value.
"""
if oci_utils.oci_api.HAVE_OCI_SDK:
api_show_network_config()
__logger.info("Operating System level network configuration")
(ret, out) = vnic_utils.get_network_config()
if ret:
__logger.error("Failed to execute the VNIC configuration script.")
else:
print "%s" % out.decode('utf-8')
def do_detach_vnic(detach_options, vnic_utils):
"""
Detach and delete the VNIC with the given ocid or primary ip address
Parameters
----------
detach_options : namespace
The argparse namespace.
vnic_utils :
The VNIC configuration instance.
Returns
-------
The shape of the instance (sess.this_shape()) on success.
Raises
------
StandardError
if session cannot be acquired
if the VNIC cannot be detached
"""
# needs the OCI SDK installed and configured
sess = get_oci_api_session(opt_name="--detach-vnic")
if sess is None:
raise StandardError("Failed to get API session.")
vnics = sess.this_instance().all_vnics()
for vnic in vnics:
if vnic.get_ocid() == detach_options.detach_vnic or \
vnic.get_private_ip() == detach_options.detach_vnic:
if not vnic.is_primary():
vnic_utils.delete_all_private_ips(vnic.get_ocid())
vnic.detach()
break
else:
raise StandardError("The primary VNIC cannot be detached.")
return sess.this_shape()
def do_create_vnic(create_options):
"""
Create and attach a VNIC to this instance.
Parameters
----------
create_options :
The VNIC configuration instance.
Returns
-------
No return value on success; errors out with return value 1 otherwise.
Raises
------
StandardError
if session cannot be acquired
"""
# needs the OCI SDK installed and configured
sess = get_oci_api_session(opt_name="--create-vnic")
if sess is None:
raise StandardError("Failed to get API session.")
subnet_id = None
if create_options.subnet:
if create_options.subnet.startswith('ocid1.subnet.'):
subnet = sess.get_subnet(create_options.subnet)
if subnet is None:
raise StandardError(
"Subnet not found: %s\n" % create_options.subnet)
else:
subnet_id = subnet.get_ocid()
else:
subnets = sess.find_subnets(create_options.subnet)
if len(subnets) == 0:
raise StandardError(
"No subnet matching %s found\n" % create_options.subnet)
elif len(subnets) > 1:
__logger.error("More than one subnet matching %s found:\n"
% create_options.subnet)
for sn in subnets:
__logger.error(" %s\n" % sn.get_display_name())
raise StandardError("More than one subnet matching")
subnet_id = subnets[0].get_ocid()
try:
vnic = sess.this_instance().attach_vnic(
private_ip=create_options.private_ip,
assign_public_ip=create_options.assign_public_ip,
subnet_id=subnet_id,
nic_index=create_options.nic_index,
display_name=create_options.vnic_name)
except OCISDKError as e:
raise StandardError('Failed to create VNIC: %s' % e)
public_ip = vnic.get_public_ip()
if public_ip is not None:
__logger.info(
'creating VNIC: %s (public IP %s)' % (vnic.get_private_ip(),
public_ip))
else:
__logger.info('creating VNIC: %s' % vnic.get_private_ip())
def do_add_private_ip(vnic_utils, add_options):
"""
Add a secondary private IP for an existing VNIC.
Parameters
----------
vnic_utils : VNICUtils
The VNICUtils helper instance.
add_options : namespace
The argparse namespace.
Returns
-------
tuple
(private_IP,vnic_ocid) for the new IP on success; errors out with
return value 1 otherwise.
Raises
------
StandardError
On any error.
"""
# needs the OCI SDK installed and configured
sess = get_oci_api_session(opt_name="--add-private-ip")
if sess is None:
raise StandardError("Failed to get API session.")
if add_options.vnic:
if add_options.vnic.startswith('ocid1.vnic.'):
vnic = sess.get_vnic(add_options.vnic)
if vnic is None:
raise StandardError("VNIC not found: %s" % add_options.vnic)
else:
pass
else:
raise StandardError("Invalid VNIC OCID: %s" % add_options.vnic)
else:
vnics = sess.this_instance().all_vnics()
if len(vnics) > 1:
__logger.error("More than one VNIC found."
"Use the --vnic option to select the one to add "
"a secondary IP for:")
for vnic in vnics:
__logger.error(" %s: %s" % (vnic.get_private_ip(),
vnic.get_ocid()))
raise StandardError("Too many VNICs found")
vnic = vnics[0]
try:
priv_ip = vnic.add_private_ip(private_ip=add_options.private_ip)
except OCISDKError as e:
raise StandardError('Failed to provision private IP: %s' % e)
__logger.info(
'provisioning secondary private IP: %s' % priv_ip.get_address())
vnic_utils.add_private_ip(priv_ip.get_address(), vnic.get_ocid())
return priv_ip.get_address(), vnic.get_ocid()
def do_del_private_ip(vnic_utils, delete_options):
"""
Delete a secondary | |
import datetime
from datetime import date
from datetime import datetime, timedelta
import time
from time import strftime
# django settings for script
from django.conf import settings
# from djequis.core.utils import sendmail
from djzbar.utils.informix import do_sql
from djzbar.utils.informix import get_engine
# Imports for additional modules and functions written as part of this project
from djequis.adp.utilities import fn_validate_field, fn_write_log, fn_write_error, fn_needs_update
DEBUG = settings.INFORMIX_DEBUG
# set up command-line options
desc = """
Upload ADP data to CX
"""
# write out the .sql file
scr = open("apdtocx_output.sql", "a")
#############################################
# Begin Processing
#############################################
def fn_process_job(carthid, workercatcode, workercatdescr, businessunitcode,
businessunitdescr, homedeptcode, homedeptdescr, jobtitlecode,
jobtitledescr, positioneffective, terminationdate, payrollcompcode,
jobfunctioncode, jobfuncdtiondescription, jobclass,
jobclassdescr, primaryposition, supervisorid, last, first,
middle,EARL):
engine = get_engine(EARL)
try:
##############################################################
# must validate ID of supervisor
# Split PCN_Aggr (Home Cost Number) into separate components
# first I should determine if this is an insert or update - see if
# pcn_aggr is in the pos_table
# validate function code in the func_table
# Place dept number in func_area field in position table
# Must account for Division, Dept
# use PCN Codes to tie employee to job number
# validate a number of fields as needed
# add GL Func code to func_area in position table
# if there is a secondary job record, do the same..
##############################################################
# There are records with no job number. Nothing to validate against.
# If Jobtitle_code is empty, end the process with an invalid data message
# print("Job Title Code = " + jobtitlecode + ", " + jobtitledescr
# + ", " + str(terminationdate) + "------------------")
if jobtitlecode is None:
# print("Missing Job Title Code for " + last + "," + first
# + " ID = " + carthid)
raise ValueError("Missing Job Title Code for " + last + ","
+ first + " ID = " + carthid)
elif jobtitlecode == '':
# print("Missing Job Title Code for " + last + "," + first
# + " ID = " + carthid)
raise ValueError("Missing Job Title Code for " + last + ","
+ first + " ID = " + carthid)
# There is a supervisor flag in ADP. But it may not be valid to use
# for validation at this point. Just note.
spvrID = fn_validate_supervisor(supervisorid[3:10], EARL)
# Construct the pcn code from existing items?
# func_area = left(homedeptcode,3)
# hrdept/pcn_03 = homedeptcode[:3]
# pcn_04 = job title code
# hrdiv = business unit code
# pcn_aggr = jobfunctioncode-businessunitcode-homedeptcode-jobtitlecode
pcnaggr = payrollcompcode + "-" + businessunitcode[:4] + "-" \
+ homedeptcode[:3] + "-" + jobtitlecode
print("PCN Aggregate = " + pcnaggr)
func_code = fn_validate_field(homedeptcode[:3],"func","func",
"func_table", "char", EARL)
# print("Function Code = " + str(func_code))
if func_code != '':
fn_write_log("Valid func_code")
# print('Validated func_code = ' + homedeptcode[:3] + '\n')
else:
# print('Invalid Function Code ' + str(homedeptcode[:3]) + '\n')
fn_write_log('Invalid Function Code ' + str(homedeptcode[:3]) + '\n')
# fn_write_error("Error in jobrec.py - Invalid Function Code for ' "
# + id + ' Code = ' + str(homedeptcode[:3]) + '\n');
# print("Error in jobrec.py - Invalid Function Code for " +
# str(carthid) + " Code = " + str(homedeptcode[:3]) + "\n");
# print("Supervisor = " + str(spvrID) +'\n')
##############################################################
# validate hrpay, values in this table should not change without
# a project request as they affect a number of things
##############################################################
hrpay_rslt = fn_validate_field(payrollcompcode,"hrpay","hrpay",
"hrpay_table", "char", EARL)
if hrpay_rslt != '':
#print('Validated HRPay Code = ' + str(hrpay_rslt) + '\n')
fn_write_log('Valid HRPay Code ' + str(hrpay_rslt) + '\n');
else:
#print('Invalid Payroll Company Code ' + str(payrollcompcode) + '\n')
fn_write_error('Error in jobrec.py - Invalid Payroll Company Code '+
str(payrollcompcode) +'\n');
fn_write_error("Error in jobrec.py - Invalid Payroll Company Code " +
str(payrollcompcode) + '\n')
##############################################################
# New table in Informix - Worker Category
# Not maintained in CX, so we will have to maintain it with
# inserts and updates
#############################################################
# print("Worker Cat Code")
v_work_cat_update = fn_needs_update(workercatcode, workercatdescr,
"work_cat_code", "work_cat_descr",
"cc_work_cat_table", "char", EARL)
# print("Work Cat Update = " + str(v_work_cat_update))
if v_work_cat_update == None or len(str(v_work_cat_update)) == 0:
q_ins_wc = '''
INSERT INTO cc_work_cat_table (work_cat_code, work_cat_descr,
active_date)
VALUES (?,?,?)'''
q_ins_wc_args = (workercatcode,workercatdescr,
datetime.now().strftime("%m/%d/%Y"))
# print(q_ins_wc)
# print(q_ins_wc_args)
engine.execute(q_ins_wc, q_ins_wc_args)
fn_write_log("Inserted into cc_work_cat_table, code = " + workercatcode)
scr.write(q_ins_wc + '\n' + str(q_ins_wc_args) + '\n')
else:
# Exists but no match
if v_work_cat_update[1] != workercatdescr:
q_upd_wc = '''
UPDATE cc_work_cat_table set work_cat_descr = ?
WHERE work_cat_code = ?'''
q_upd_wc_args = (workercatdescr, workercatcode)
# print(q_upd_wc)
# print(q_upd_wc_args)
engine.execute(q_upd_wc, q_upd_wc_args)
fn_write_log("Updated cc_work_cat_table, code = " + workercatcode)
scr.write(q_upd_wc + '\n' + str(q_upd_wc_args) + '\n')
##############################################################
# Job Class Code, HRClass field in Job Rec
##############################################################
if jobclass.strip() != "" and jobclass is not None:
# print(jobclass)
# print(jobclassdescr)
# Find out if class is in the hrclass table
q_hrclass = '''
SELECT *
FROM hrclass_table
WHERE hrclass = "{0}"
AND inactive_date is null'''.format(jobclass)
jclass = do_sql(q_hrclass, key=DEBUG, earl=EARL)
row = jclass.fetchone()
if row is None:
q_hrclass_ins = '''
INSERT INTO hrclass_table
(hrclass, txt, active_date, inactive_date)
VALUES(?, ?, ?, ?)'''
q_hrclass_ins_args = (jobclass, jobclassdescr,
datetime.now().strftime("%m/%d/%Y"),
None)
engine.execute(q_hrclass_ins, q_hrclass_ins_args)
fn_write_log("Inserted into hrclass_table, code = " + jobclass)
scr.write(q_hrclass_ins + '\n' + str(q_hrclass_ins_args) + '\n')
else:
# print(row[1])
if row[1] != jobclassdescr:
q_hrclass_upd = '''
UPDATE hrclass_table
SET txt = ?
WHERE hrclass = ?'''
q_hrclass_upd_args = (jobclassdescr, jobclass)
engine.execute(q_hrclass_upd, q_hrclass_upd_args)
scr.write(q_hrclass_upd + '\n' + str(q_hrclass_upd_args) + '\n');
fn_write_log("Updated hrclass_table, code = " + jobclass)
else:
#print("No change in HRClass Description")
fn_write_log('There were no changes in HRClass '
'description.\n');
# else:
# print("No Job Class")
##############################################################
# validate the position, division, department
##############################################################
hrdivision = fn_needs_update(str(businessunitcode[:4]), businessunitdescr,
"hrdiv", "descr", "hrdiv_table", "char", EARL)
if hrdivision is None:
q_ins_div = '''
INSERT INTO hrdiv_table(hrdiv, descr, beg_date, end_date)
VALUES(?, ?, ?, null)'''
q_ins_div_args = (businessunitcode[:4], businessunitdescr,
datetime.now().strftime("%m/%d/%Y"))
# print("New HR Division = " + businessunitcode[:4] + '\n')
# print(q_ins_div + str(q_ins_div_args))
fn_write_log(
"Inserted into hrdiv_table, code = " + businessunitcode[:4])
engine.execute(q_ins_div, q_ins_div_args)
scr.write(q_ins_div + '\n' + str(q_ins_div_args) + '\n')
elif hrdivision == "":
q_ins_div = '''
INSERT INTO hrdiv_table(hrdiv, descr, beg_date, end_date)
VALUES(?, ?, ?, null)'''
q_ins_div_args = (businessunitcode[:4], businessunitdescr,
datetime.now().strftime("%m/%d/%Y"))
# print("New HR Division = " + businessunitcode[:4] + '\n')
# print(q_ins_div + str(q_ins_div_args))
fn_write_log("Inserted into hrdiv_table, code = " + businessunitcode[:4])
engine.execute(q_ins_div, q_ins_div_args)
scr.write(q_ins_div + '\n' + str(q_ins_div_args) + '\n')
else:
if hrdivision[1] != businessunitdescr:
# This query works 5/25/18
q_upd_div = '''
UPDATE hrdiv_table SET descr = ?,
beg_date = ?
WHERE hrdiv = ?'''
q_upd_div_args = (businessunitdescr,
datetime.now().strftime("%m/%d/%Y"),
businessunitcode[:4])
# print("Existing HR Division = " + hrdivision[0] + '\n')
# print(q_upd_div + str(q_upd_div_args))
fn_write_log("Updated hrdiv_table, code = " + businessunitcode[:4])
engine.execute(q_upd_div, q_upd_div_args)
scr.write(q_upd_div + '\n' + str(q_upd_div_args) + '\n');
# print("Home Dept Code = " + homedeptcode[:3])
# print("Home Dept descr = " + homedeptdescr)
hrdepartment = fn_needs_update(homedeptcode[:3], homedeptdescr,
"hrdept", "descr", "hrdept_table", "char", EARL)
# print("HR Dept Needs update = " + str(hrdepartment))
if hrdepartment==None or hrdepartment=="" or len(hrdepartment)==0:
# print("Insert Dept")
# This query works 5/25/18
q_ins_dept = '''
INSERT INTO hrdept_table(hrdept, hrdiv, descr,
beg_date, end_date)
VALUES(?, ?, ?, ?, ?)'''
q_ins_dept_args = (homedeptcode[:3], businessunitcode[:4],
homedeptdescr,
datetime.now().strftime("%m/%d/%Y"),None)
# print(q_ins_dept)
# print(q_ins_dept_args)
engine.execute(q_ins_dept, q_ins_dept_args)
fn_write_log("Inserted into hrdept_table, code = " + homedeptcode[:3])
scr.write(q_ins_dept + '\n' + str(q_ins_dept_args) + '\n');
else:
# print("Update Dept")
if hrdepartment[1] != homedeptdescr:
q_upd_dept = '''
UPDATE hrdept_table SET hrdiv = ?, descr = ?,
beg_date = ?
WHERE hrdept = ?'''
q_upd_dept_args = (businessunitcode[:4], homedeptdescr,
datetime.now().strftime("%m/%d/%Y"), func_code)
# print(q_upd_dept)
# print(q_upd_dept_args)
engine.execute(q_upd_dept, q_upd_dept_args)
fn_write_log("Updated hrdept_table, code = " + homedeptcode[:3])
scr.write(q_upd_dept + '\n' + str(q_upd_dept_args) + '\n');
else:
# Need to make sure the department is linked to the division
q_check_dept_div = '''
select hrdiv from hrdept_table where hrdept = {0}
'''.format(homedeptcode[:3])
div_val = do_sql(q_check_dept_div, key=DEBUG, earl=EARL)
if div_val != businessunitcode[:4]:
# print("update dept/div relationship")
q_upd_dept_div = '''
UPDATE hrdept_table SET hrdiv = ?, descr = ?, | |
string.
'''
(retval, funcname, params) = get_func_params_from_prototype(prototype)
print(HDR1)
print("Now generating OpenOSC ASM-Label Redirect Mapping code for: " + funcname)
print(HDR1)
magic_str = '_CASE3'
va_args_code = generate_va_args_redefine_code(funcname, params, magic_str)
if not va_args_code:
return generate_openosc_redirect_map_code(prototype)
func_code = generate_osc_mapping_func_body_with_va_arg_pack(funcname, magic_str)
code = '/* Mapping for ' + funcname + ' */\n\n'
code += '#ifdef __va_arg_pack\n'
code += func_code
code += '#elif !defined __cplusplus\n\n'
code += va_args_code
code += '\n#endif\n\n'
print(code)
openosc_write_filename("openosc_fortify_redirect_map.h", code)
return code
def generate_openosc_redirect_map_code(prototype):
'''
Generate ASM-Label Redirect code in openosc_fortify_redirect_map.h for a function prototype.
:param prototype: a function prototype
:returns the code as a long string.
'''
(retval, funcname, params) = get_func_params_from_prototype(prototype)
(dest_type, dest_param, copylen, src_type, src_param) = analyze_func_params(params)
funcname_up = funcname.upper()
comma_params = get_comma_joined_param_names(params)
if "..." in params:
return "printf_like"
#print(HDR1)
#print("Now generating OpenOSC ASM-Label Redirect Mapping code for: " + funcname)
#print(HDR1)
code = '/* Mapping for ' + funcname + ' */\n\nextern '
code += retval
code += '\n__' + funcname + '_to_buf(size_t dest_len, ' + params + ');\n'
code += 'extern ' + retval + ' __REDIRECT_NTH (__openosc_' + funcname + '_alias,\n'
code += ' (' + params + '), ' + funcname + ');\n'
code += 'extern ' + retval + ' __REDIRECT_NTH (openosc_' + funcname + '_chk_warn,\n'
code += ' (size_t dest_len, ' + params + '), __' + funcname + '_to_buf)\n'
code += ' __warnattr ("' + funcname + ' caller with bigger length than size of destination buffer");\n\n'
code += '__fortify_function ' + retval + '\n'
code += '__NTH (' + funcname + ' (' + params + '))\n{\n'
if not dest_param:
code += ' return (' + funcname_up + '_CASE3 __' + funcname + '_to_buf(OPENOSC_USE_FORTIFY_LEVEL - 1, ' + comma_params + '));\n'
code += '}\n\n'
print(code)
openosc_write_filename("openosc_fortify_redirect_map.h", code)
return code
bosc_check_type = "OSC_OBJECT_SIZE_CHECK_0"
if "char" in dest_type or "wchar_t" in dest_type:
bosc_check_type = "OSC_OBJECT_SIZE_CHECK_1"
byte_sz = "_sz"
if "wchar_t" in dest_type:
byte_sz = "_sz_bytes"
code += ' size_t _sz_bytes = __builtin_object_size(' + dest_param + ', ' + bosc_check_type + ');\n'
code += ' size_t _sz = _sz_bytes / sizeof(wchar_t);\n'
else:
code += ' size_t _sz = __builtin_object_size(' + dest_param + ', ' + bosc_check_type + ');\n'
if copylen:
code += ' int is_len_constant = __builtin_constant_p(' + copylen + ');\n'
code += ' return (((' + byte_sz + ' != (size_t) -1) && (' + byte_sz + ' != 0))\n'
code += ' ? (is_len_constant\n'
code += ' ? ((_sz >= ' + copylen + ')\n'
code += ' ? (' + funcname_up + '_CASE1 __openosc_' + funcname + '_alias(' + comma_params + '))\n'
code += ' : (' + funcname_up + '_CASE2 openosc_' + funcname + '_chk_warn(_sz, ' + comma_params + ')))\n'
code += ' : (' + funcname_up + '_CASE3 __' + funcname + '_to_buf(_sz, ' + comma_params + ')))\n'
code += ' : (' + funcname_up + '_CASE4 __openosc_' + funcname + '_alias(' + comma_params + ')));\n'
else:
code += ' return (((' + byte_sz + ' != (size_t) -1) && (' + byte_sz + ' != 0))\n'
code += ' ? (' + funcname_up + '_CASE3 __' + funcname + '_to_buf(_sz, ' + comma_params + '))\n'
code += ' : (' + funcname_up + '_CASE4 __openosc_' + funcname + '_alias(' + comma_params + ')));\n'
code += '}\n\n'
print(code)
openosc_write_filename("openosc_fortify_redirect_map.h", code)
return code
osc_runtime_check_macro_template = '''
/*
* Define Runtime check macros
*/
#if (OSC_RUNTIME_CHK == OSC_RUNTIME_USE_LIBOSC)
'''
osc_compiletime_check_macro_template = '''
/*
* Define compile-time static asserts to report buffer overflow
* detected by OSC
*
* Since support for compile-time error reporting is compiler dependent,
* there will be multiple definitions of these macros. To select a specific
* implementation, define the appropriate flag within this header file. For
* build environments that use multiple compilers, the flag can be defined
* on the command line using the "-D" switch.
*
* Note: OSC_ASSERT_USE_BUILTIN should only be used for compile-time checking.
* It must NOT be used in release code, or there is a risk that an abort() call
* gets inserted into the production image by the compiler.
*/
#if (OSC_COMPILE_CHK == OSC_ASSERT_USE_ERR_ATTR)
'''
### #pragma message ("No compile time OSC checking, use original libc/safec function")
osc_compiletime_check_macro_end = '''
#else
#pragma message ("No compile time OSC checking method selected, contact jaguars-dev alias for guidance")
#endif
'''
def generate_osc_original_runtime_check_macro(prototype):
'''
Generate runtime check macro to itself mapping.
:param prototype: function prototype
:returns the generated code.
'''
(retval, funcname, params) = get_func_params_from_prototype(prototype)
comma_params = get_comma_joined_param_names(params)
return '#define OSC_RUNTIME_CHECK_' + funcname + '()\t\t' + funcname + '(' + comma_params + ')\n'
def generate_osc_runtime_check_macro(prototype):
'''
Generate runtime check macro to __memcpy_to_buf mapping.
:param prototype: function prototype
:returns the generated code.
'''
(retval, funcname, params) = get_func_params_from_prototype(prototype)
comma_params = get_comma_joined_param_names(params)
return '#define OSC_RUNTIME_CHECK_' + funcname + '()\t\t__' + funcname + '_to_buf(_sz, ' + comma_params + ')\n'
def generate_osc_compiletime_check_macro(prototype, checktype):
'''
Generate compile-time check macro for a prototype.
:param prototype: function prototype
:param checktype: a few different compile time check types that are supported
:returns the generated code.
'''
(retval, funcname, params) = get_func_params_from_prototype(prototype)
code = ''
if "..." in params:
return code
comma_params = get_comma_joined_param_names(params)
if checktype == 'OSC_ASSERT_USE_ERR_ATTR':
code += 'extern int __attribute__((error("Compiler Assertion: ' + funcname + ' len will always overflow dst"))) \\\n'
code += ' osc_compile_check_' + funcname + '(void);\n'
code += '#define OSC_ASSERT_' + funcname + '() ({osc_compile_check_' + funcname + '(), (' + retval + ')0;})\n\n'
elif checktype == 'OSC_ASSERT_USE_BUILTIN':
code += '#define OSC_ASSERT_' + funcname + '()\t\t__' + funcname + '_chk(' + comma_params + ', _sz)\n'
elif checktype == 'OSC_ASSERT_USE_RUNTIME_CHK':
code += '#define OSC_ASSERT_' + funcname + '()\t\t__' + funcname + '_to_buf(_sz, ' + comma_params + ')\n'
elif checktype == 'OSC_ASSERT_USE_NONE':
code += '#define OSC_ASSERT_' + funcname + '()\t\t' + funcname + '(' + comma_params + ')\n'
return code
def generate_osc_redefine_macro_code(funcname, params):
'''
Generate macro redefine code for a function.
:param funcname: function name
:param params: the whole string of function parameters like "void *dest, const void *src, size_t n" for memcpy
:returns the generated code.
'''
code = '#undef ' + funcname + '\n'
if "..." in params:
code += '#define ' + funcname + '\t\t' + 'openosc_' + funcname + '\n'
return code
names = get_comma_joined_param_names(params)
code += '#define ' + funcname + '(' + names + ')\t\t'
code += 'openosc_' + funcname + '(' + names + ')\n'
return code
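# Expected shape of the generated redefine macro (a hedged sketch; it assumes
# get_comma_joined_param_names() extracts "dest, src, n" from the parameter
# list, which is not shown in this excerpt):
def _demo_generate_osc_redefine_macro_code():
    code = generate_osc_redefine_macro_code(
        "memcpy", "void *dest, const void *src, size_t n")
    # Roughly:
    #   #undef memcpy
    #   #define memcpy(dest, src, n)		openosc_memcpy(dest, src, n)
    print(code)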
def get_byte_array_of_magic_word(magic):
'''
Return a byte array for a 64bit integer magic word.
:param magic: a 64bit integer for the magic word like 0x0102030405060708
:returns a comma-joined byte array as a string, which is then used for asm(".byte 0x1,0x2,0x3,0x4,0x5,0x6,0x7,0x8").
'''
byte_list = []
m = magic
for i in range(8):
byte_list.append(hex(m & 0xff))
m = m >> 8
#print(byte_list)
byte_array = ", ".join(byte_list[::-1])
return byte_array
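# Doctest-style check of the byte-array formatting (a minimal sketch):
def _demo_get_byte_array_of_magic_word():
    # The 64-bit magic word is emitted most-significant byte first, ready to
    # be embedded in an asm(".byte ...") directive.
    assert get_byte_array_of_magic_word(0x0102030405060708) == \
        "0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8"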
def get_wrap_magic_word(magic, alignment):
'''
Get next aligned magic word.
for example, let's assume alignment = 8 bit.
if magic = 0x0, new magic = 0x100
if magic = 0x1, new magic = 0x100
if magic = 0xFF, new magic = 0x100
if magic = 0x100, new magic = 0x200
:param magic: 64bit integer magic word
:param alignment: number of bits; if 8 bits, then wrap at 2^8 = 256 bytes.
:returns a new 64bit integer magic word which is aligned at 2^alignment
'''
#return ((magic + (1 << alignment)) >> alignment) << alignment
return ((magic >> alignment) + 1) << alignment
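# Quick check of the alignment behaviour documented above (a minimal sketch):
def _demo_get_wrap_magic_word():
    # With an 8-bit alignment the result is the next multiple of 256.
    assert get_wrap_magic_word(0x0, 8) == 0x100
    assert get_wrap_magic_word(0xFF, 8) == 0x100
    assert get_wrap_magic_word(0x100, 8) == 0x200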
def update_global_magic_word(magic):
'''
Update the global magic variable with the new value.
:param magic: the new 64bit integer value for magic
:returns None
'''
global g_magic_word
g_magic_word = magic
def get_hex_string_of_int_magic(int_magic):
'''
Return a hex string for a 64bit integer magic word.
:param int_magic: a 64bit integer magic word like 0x02030405060708
:returns a string like "0002030405060708" without "0x" prefix.
'''
hex_magic = hex(int_magic)[2:]
hex_magic = '0' * (16 - len(hex_magic)) + hex_magic
return hex_magic
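# Example of the zero-padded hex formatting (a minimal sketch):
def _demo_get_hex_string_of_int_magic():
    # 0x02030405060708 has only 13 hex digits, so it is left-padded to 16.
    assert get_hex_string_of_int_magic(0x02030405060708) == "0002030405060708"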
def generate_curioscan_code(funcname, int_magic):
'''
Generate the magic array for a single function.
:param funcname: the function name
:param int_magic: the 64bit | |
return False
'''
def importNecessaryHeaders(isKernel=False):
''' Import header files from the public Xcode headers and from the kernel IDA file's exported headers '''
phase = "importNecessaryHeaders"
#if checkPhaseDone(phase):
# return
print "[+] Import Necessary Headers"
#importHeaderFile(getFilePathWithRelPath("../Headers/IOUserClient_mine.h"))
if not isKernel:
#loadTilFile()
#importHeaderFile(getFilePathWithRelPath("../Headers/kernel_development.h"))
importHeaderFile(getFilePathWithRelPath("../Headers/kernel.h"))
importHeaderFile(getFilePathWithRelPath("../Headers/XcodePublicHeaders.h"))
for i in range(1, idaapi.get_ordinal_qty(idaapi.cvar.idati)):
try:
typeStr = GetLocalType(i, 0)
if typeStr.startswith("struct "):
typeStructName = typeStr[typeStr.rfind(" ")+1:]
if GetStrucIdByName(typeStructName) == BADADDR:
idaapi.import_type(idaapi.cvar.idati, i, idaapi.idc_get_local_type_name(i))
except Exception as e:
None
markPhaseDone(phase)
def preparePredefinedStructNameToIdMap():
print "[+] Prepare Predefined Structs"
for idx, sid, name in Structs():
predefinedStructNameToIdMap[name] = sid
def hideAllStructs():
for structTuple in Structs():
set_struc_hidden(get_struc(structTuple[1]), 1)
def backResolveInPseudoCodeFunc(funcStartEA, variable, startEA):
cfunc = None
try:
cfunc = decompile(funcStartEA)
cfunc_treeitems = cfunc.treeitems
for item in cfunc_treeitems:
None
except Exception as e:
print 'could not decompile: %s' % (str(e), )
def getStringAtAddr(ea):
#segName = get_segm_name(ea)
#if segName.endswith("__cstring"):
return idc.GetString(ea)
def getTinfoForTypeStr(typeStr):
if None is typeStr:
return None
typeStr = typeStr.replace("const ", "")
tinfo = tinfo_t()
if getVersionNumber() >= 7.0:
parse_decl2(idaapi.cvar.idati, typeStr + ";", tinfo, 1)
else:
parse_decl2(idaapi.cvar.idati, typeStr + ";", typeStr, tinfo, 1)
if tinfo.is_well_defined():
return tinfo
else:
return None
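# Usage sketch (hypothetical type string; assumes the type is already known to
# the loaded IDA type libraries): getTinfoForTypeStr("OSObject *") returns a
# tinfo_t for the pointer type, or None if the declaration cannot be parsed.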
def setTypeForFuncAtEA(funcEA, typeToSet):
    originalName = getName(funcEA)
    ret = 0
    if originalName != "___cxa_pure_virtual":
forceFunction(funcEA)
ret = SetType(funcEA, typeToSet)
xref = get_first_dref_to(funcEA)
vfuncMemId = BADADDR
while xref != None and xref != BADADDR:
if is_member_id(xref):
memberName = ida_struct.get_member_name(xref)
if not memberName is None:
vfuncMemId = xref
funcType = GetType(funcEA)
if funcType != None:
''' I should also do SetType for children's vtable structs '''
funcTypeArgStartLoc = funcType.find("(")
funcPTRType = funcType[:funcTypeArgStartLoc] + "(*)" + funcType[funcTypeArgStartLoc:]
SetType(vfuncMemId, funcPTRType)
else:
xrefSegName = get_segm_name(xref)
if xrefSegName.endswith(":__got"):
keepCon_ItemAndGOTItem(funcEA, xref)
xref = get_next_dref_to(funcEA, xref)
return ret
def getAllChildFuncEAsForClass(className):
childFuncEASetList = []
vtableStartEA = 0
vtableEndEA = BADADDR
vtableStartEA, vtableEndEA = getVTableAddrOfClass(className)
if vtableStartEA != BADADDR and vtableEndEA != BADADDR :
vtableSize = vtableEndEA - vtableStartEA
classNameSet = getDescendantsForClass(className)
for vtOff in range(0, vtableSize, 8):
childFuncEASet = set()
parentFuncEA = Qword(vtableStartEA + vtOff)
for childClassName in classNameSet:
cVTStartEA, cVTEndEA = getVTableAddrOfClass(childClassName)
if cVTStartEA != BADADDR and vtOff != BADADDR and cVTStartEA + vtOff < cVTEndEA:
childFuncEA = Qword(cVTStartEA + vtOff)
if (childFuncEA != parentFuncEA):
childFuncEASet.add(childFuncEA)
childFuncEASetList.append(childFuncEASet)
return childFuncEASetList
def getChildFuncEAsForClassAtVTOff(className, vtOff, shouldResultExcludeParent=True):
childFuncEASet = set()
vtableStartEA = 0
vtableEndEA = BADADDR
if shouldResultExcludeParent:
vtableStartEA, vtableEndEA = getVTableAddrOfClass(className)
if vtableStartEA != BADADDR and vtOff != BADADDR and vtableStartEA + vtOff < vtableEndEA:
parentFuncEA = Qword(vtableStartEA + vtOff)
classNameSet = getDescendantsForClass(className)
for childClassName in classNameSet:
cVTStartEA, cVTEndEA = getVTableAddrOfClass(childClassName)
if cVTStartEA != BADADDR and vtOff != BADADDR and cVTStartEA + vtOff < cVTEndEA:
childFuncEA = Qword(cVTStartEA + vtOff)
if (not shouldResultExcludeParent) or (shouldResultExcludeParent and childFuncEA != parentFuncEA):
childFuncEASet.add(childFuncEA)
return childFuncEASet
def getParentFuncEAsForClassAtVTOff(className, vtOff):
parentFuncEASet = set()
vtableStartEA = 0
vtableEndEA = BADADDR
vtableStartEA, vtableEndEA = getVTableAddrOfClass(className)
if vtableStartEA != BADADDR and vtOff != BADADDR and vtableStartEA + vtOff < vtableEndEA:
parentFuncEA = Qword(vtableStartEA + vtOff)
classNameSet = getDescendantsForClass(className)
for childClassName in classNameSet:
cVTStartEA, cVTEndEA = getVTableAddrOfClass(childClassName)
if cVTStartEA != BADADDR and vtOff != BADADDR and cVTStartEA + vtOff < cVTEndEA:
                funcEAAtOff = Qword(cVTStartEA + vtOff)
                if funcEAAtOff != parentFuncEA:
                    parentFuncEASet.add(funcEAAtOff)
return parentFuncEASet
def getRetTypeOfFuncAtAddr(ea):
funcType = GetType(ea)
return getRetTypeFromFuncType(funcType)
def getRetTypeFromFuncType(funcType):
if not None is funcType:
loc = funcType.find(" ")
returnType = funcType[:loc]
for i in range(loc, len(funcType)):
if funcType[i] == " ":
returnType = returnType + " "
elif funcType[i] == "*":
returnType = returnType + "*"
else:
break
return returnType
return None
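# Parsing sketch (hypothetical declaration string of the kind GetType returns):
#   getRetTypeFromFuncType("OSObject *__fastcall(IOService *this)") -> "OSObject *"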
returnTypeChangedFuncs = {}
def changeRetTypeOfFuncAtAddr(funcAddr, newReturnType):
segName = getSegName(funcAddr)
if not segName.endswith("__text"):
return
oldFuncTinfo = getTinfoOfFuncAtEA(funcAddr)
if None is oldFuncTinfo:
forceFunction(funcAddr)
oldFuncTinfo = getTinfoOfFuncAtEA(funcAddr)
oldReturnType = "uint64_t"
funcType = GetType(funcAddr)
if not None is oldFuncTinfo:
oldReturnTinfo = str(oldFuncTinfo.get_rettype())
if isTinfoInterested(oldReturnTinfo):
return False
else:
oldReturnType = getRetTypeFromFuncType(funcType)
#if (None is oldReturnType) or (oldReturnType != newReturnType and (not isTinfoInterested(getTinfoForTypeStr(oldReturnType)))):
if oldReturnType != newReturnType:
if not None is funcType:
funcTypeArgStartLoc = funcType.find("(")
newFuncType = newReturnType + " " + getName(funcAddr) + funcType[funcTypeArgStartLoc:]
setTypeForFuncAtEA(funcAddr, newFuncType)
#print "changeRetTypeOfFuncAtAddr 0x%016x, newType: %s"%(funcAddr, newFuncType)
return True
return False
def setTypeForMemeber(structId, memberOff, typeString):
#print "setTypeForMemeber", hex(structId), memberOff, typeString
if typeString.startswith("whole_vtable_") or typeString.startswith("vtable_") :
return
if memberOff == 0:
# Do not change vtable member
return
memberId = GetMemberId(structId, memberOff)
oldMemberName = ida_struct.get_member_name(memberId)
if oldMemberName != "vtable":
SetType(memberId, typeString)
if "*" in typeString:
typeStringPrefix = typeString[:typeString.find("*")].strip()
else:
typeStringPrefix = typeString
if None is oldMemberName:
SetOrAddMemberName(structId, memberOff, "member" + str(memberOff/8) + "_" + typeStringPrefix )
elif (not typeStringPrefix.startswith("whole_vtable_")) and \
(not typeStringPrefix.startswith("vtable_")) and \
not oldMemberName == "vtable":
memberName = "member{}_{}".format(memberOff/8, typeStringPrefix)
SetOrAddMemberName(structId, memberOff, memberName)
#elif (not typeStringPrefix.startswith("whole_vtable_")) and \
# (not typeStringPrefix.startswith("vtable_")) and \
# (not (("_" + typeStringPrefix) in oldMemberName)):
# SetOrAddMemberName(structId, memberOff, oldMemberName + "_" + typeStringPrefix )
def solveVariableTypeInAllFuncs():
phase = "solveVariableTypeInAllFuncs"
if checkPhaseDone(phase):
return
print "[+] Solve Variable Type In All Functions"
for funcEA in Functions():
solveVariableTypeInFuncAtEA(funcEA)
markPhaseDone(phase)
def solveVariableTypeInFuncsForKext(kextPrefix):
textSegName = kextPrefix + ":__text"
textSegs = getSegsByName(textSegName)
for textSeg in textSegs:
for funcEA in Functions(textSeg.startEA, textSeg.endEA):
solveVariableTypeInFuncAtEA(funcEA)
def findAssignTargetOfExprResult(cfunc, currentItem):
currentItem = cfunc.body.find_parent_of(currentItem).to_specific_type
if currentItem.op == 48: #cast:
currentItem = cfunc.body.find_parent_of(currentItem).to_specific_type
if currentItem.op == 80: #return
return currentItem.to_specific_type
elif currentItem.op == 2: # asg
assignTargetItem = currentItem.x.to_specific_type
if assignTargetItem.op == 65 or assignTargetItem.op == 60: # var or memptr
return assignTargetItem
elif assignTargetItem.op == 57 and assignTargetItem.x.op == 68 and len(assignTargetItem.a) == 1 and (assignTargetItem.a[0].op == 60 or assignTargetItem.a[0].op == 65): # 68 stands for helper functions, e.g., BYTE(), LDWORD(), HDWORD()...
return assignTargetItem.a[0]
return None
def isAddrInTextSeg(funcStartEA):
funcSegName = get_segm_name(funcStartEA)
if funcSegName is None:
return False
return funcSegName.endswith("__text")
def isAddrInUNDEFSeg(funcEA):
    funcSegName = get_segm_name(funcEA)
if funcSegName is None:
return False
return funcSegName.endswith("UNDEF")
def isFuncVirtual(funcStartEA):
xref = get_first_dref_to(funcStartEA)
while xref != None and xref != BADADDR:
member = get_member_by_id(xref)
if member:
return True
xref = get_next_dref_to(funcStartEA, xref)
return False
#return funcStartEA in virtualFuncEASet
def propagateTypeInBB(bbStartEA, varTypes):
None
def solveVariableTypeInFuncAtEA(funcStartEA):
nullDevFile = open("/dev/null", "w")
cfunc = None
#print "[-] Solve Variable Type In Func At EA 0x%016x"%(funcStartEA)
try:
if isAddrInTextSeg(funcStartEA):
cfunc = idaapi.decompile(funcStartEA)
cfunc.build_c_tree()
nullDevFile.write(str(cfunc))
#print cfunc
solveVariableTypeInFunc(cfunc)
funcEA = cfunc.entry_ea
if funcEA in AllLvarTypesToModify:
lvarTypesToModify = AllLvarTypesToModify[funcEA]
print lvarTypesToModify
for t in lvarTypesToModify:
print t[0].name, t[1].dstr()
saveLvarTinfoListInFuncAtEA(funcEA, lvarTypesToModify)
except Exception as e:
print 'could not solveVariableTypeInFuncAtEA 0x%016x: %s' % (funcStartEA, str(e), )
traceback.print_exc()
nullDevFile.close()
def solveVariableTypeInFunc(cfunc):
""" indeed, this function can only set members' type and function's return type according to their assignments in functions,
since cfunc comes from decompile(funcAddr), which can not affect the original disassemble code
"""
solveVariableTypeByArgumentsInFunc(cfunc)
solveVariableTypeByCallsInFunc(cfunc)
def solveVariableTypeByArgumentsInFunc(cfunc):
arguments = cfunc.arguments
for argLvar in arguments:
propagateTypeInfuncByNameFromIndex(cfunc, argLvar.name, argLvar.tif, 1)
def findLvarNearestAssignWithMetaClass(cfunc, fromExpr, LvarIdx):
    # This is not accurate
treeItems = cfunc.treeitems
fromItemIndex = fromExpr.index
fromItem = treeItems[fromItemIndex]
fromEA = fromItem.ea
for itemIndex in range(fromItemIndex-1, -1, -1):
currentItem = treeItems[itemIndex].to_specific_type
if currentItem.op == 2: # asg
assignTarget = None
assignTargetItem = currentItem.x.to_specific_type
if assignTargetItem.op == 65 or assignTargetItem.op == 60: # var or memptr
assignTarget = assignTargetItem
elif assignTargetItem.op == 57 and assignTargetItem.x.op == 68 and len(assignTargetItem.a) == 1 and (assignTargetItem.a[0].op == 60 or assignTargetItem.a[0].op == 65): # 68 stands for helper functions, e.g., BYTE(), LDWORD(), HDWORD()...
assignTarget = assignTargetItem.a[0]
if not assignTarget is None and assignTarget.op == 65: # var
assignTargetLVarIdx = assignTarget.v.idx
if assignTargetLVarIdx == LvarIdx:
assignSource = currentItem.y.to_specific_type
if assignSource.op == 51: # ptr, *
assignSource = assignSource.x.to_specific_type
if assignSource.op == 48: # cast, (X)
assignSource = assignSource.x.to_specific_type
if assignSource.op == 52: #ref, &
assignSource = assignSource.x.to_specific_type
if assignSource.op == 64: #obj
classString = None
# found cast metaClass obj. for now, only consider const metaClass obj, not lvar
metaClassDemangledName = getDeNameAtEA(assignSource.obj_ea)
if not None is metaClassDemangledName:
if metaClassDemangledName.endswith("::gMetaClass"):
classString = metaClassDemangledName[:-len("::gMetaClass")]
elif metaClassDemangledName.endswith("::metaClass"):
classString = metaClassDemangledName[:-len("::metaClass")]
if classString != None:
return classString
return None
def findLvarNearestAheadAssignSource(cfunc, fromExpr, lvarIdx):
    # This is not accurate
treeItems = cfunc.treeitems
fromItemIndex = fromExpr.index
fromItem = treeItems[fromItemIndex]
fromEA = fromItem.ea
for itemIndex in range(fromItemIndex-1, -1, -1):
currentItem = treeItems[itemIndex].to_specific_type
if currentItem.op == 2: # asg
assignTarget = None
assignTargetItem = currentItem.x.to_specific_type
if assignTargetItem.op == 65 or assignTargetItem.op == 60: # var or memptr
assignTarget = assignTargetItem | |
os.path.splitext(path)
if ext in ['.xml', '.cdml']:
if mode != 'r':
raise ModeNotSupported(mode)
datanode = load(path)
else:
            # If the file doesn't exist, allow it to be created.
            # OK: MPI has issues with the code below; we need to test this
            # with only 1 rank.
if mode == "r" and not os.path.exists(path):
raise FileNotFoundError(path)
elif mode == "w":
try:
os.remove(path)
except BaseException:
pass
return CdmsFile(path, mode, mpiBarrier=CdMpi)
if libcf is not None:
file = CdmsFile(path, mode, hostObj)
if hasattr(file, libcf.CF_FILETYPE):
if getattr(file, libcf.CF_FILETYPE) == libcf.CF_GLATT_FILETYPE_HOST:
file.close()
file = gsHost.open(path, mode)
return file
else:
return CdmsFile(path, mode)
elif scheme in ['http', 'gridftp', 'https']:
if (dods):
if mode != 'r':
raise ModeNotSupported(mode)
# DODS file?
try:
file = CdmsFile(uri, mode)
return file
except Exception:
msg = "Error in DODS open of: " + uri
if os.path.exists(os.path.join(
os.path.expanduser("~"), ".dodsrc")):
msg += "\nYou have a .dodsrc in your HOME directory, try to remove it"
raise CDMSError(msg)
else:
try:
datanode = loadURI(uri)
return datanode
except BaseException:
datanode = loadURI(uri)
raise CDMSError("Error in loadURI of: " + uri)
else:
raise SchemeNotSupported(scheme)
# Determine dpath, the absolute path to data files:
# dpath =
# (1) head + node.directory, if .directory is relative
# (2) node.directory, if absolute
# (3) head, if no directory entry found (assume XML file is
# at top level of data directory)
#
# Note: In general, dset.datapath is relative to the URL of the
# enclosing database, but here the database is null, so the
# datapath should be absolute.
if dpath is None:
direc = datanode.getExternalAttr('directory')
head = os.path.dirname(path)
if direc and (os.path.isabs(direc) or urlparse(direc).scheme != ''):
dpath = direc
elif direc:
dpath = os.path.join(head, direc)
else:
dpath = head
dataset = Dataset(uri, mode, datanode, None, dpath)
return dataset
# Functions for parsing the file map.
def parselist(text, f):
"""Parse a string of the form [A, A, ...].
Parameters
----------
text : Input String.
f : function which parses A and returns (A, nconsumed).
Returns
-------
    result : list of parsed items.
    n : number of characters consumed from `text`.
"""
n = 0
m = _ListStart.match(text)
if m is None:
raise CDMSError("Parsing cdms_filemap near " + text[0:_NPRINT])
result = []
n += m.end()
s, nconsume = f(text[n:])
result.append(s)
n += nconsume
while True:
m = _ListSep.match(text[n:])
if m is None:
break
else:
n += m.end()
s, nconsume = f(text[n:])
result.append(s)
n += nconsume
m = _ListEnd.match(text[n:])
if m is None:
raise CDMSError("Parsing cdms_filemap near " + text[n:n + _NPRINT])
n += m.end()
return result, n
def parseIndexList(text):
"""Parse a string of the form [i,j,k,l,...,path].
Parameters
----------
text : i,j,k,l,... are indices or '-', and path is a filename. Coerce the indices to integers.
Returns
-------
    result : list of the parsed indices followed by the path.
    n : number of characters consumed from `text`.
"""
m = _IndexList4.match(text)
nindices = 4
if m is None:
m = _IndexList5.match(text)
nindices = 5
if m is None:
raise CDMSError("Parsing cdms_filemap near " + text[0:_NPRINT])
result = [None] * (nindices + 1)
for i in range(nindices):
s = m.group(i + 1)
if s != '-':
result[i] = int(s)
result[nindices] = m.group(nindices + 1)
return result, m.end()
def parseName(text):
m = _Name.match(text)
if m is None:
raise CDMSError("Parsing cdms_filemap near " + text[0:_NPRINT])
return m.group(), m.end()
def parseVarMap(text):
"""Parse a string of the form [ namelist, slicelist ]"""
n = 0
m = _ListStart.match(text)
if m is None:
raise CDMSError("Parsing cdms_filemap near " + text[0:_NPRINT])
result = []
n += m.end()
s, nconsume = parselist(text[n:], parseName)
result.append(s)
n += nconsume
m = _ListSep.match(text[n:])
if m is None:
raise CDMSError("Parsing cdms_filemap near " + text[n:n + _NPRINT])
n += m.end()
s, nconsume = parselist(text[n:], parseIndexList)
result.append(s)
n += nconsume
m = _ListEnd.match(text[n:])
if m is None:
raise CDMSError("Parsing cdms_filemap near " + text[n:n + _NPRINT])
n += m.end()
return result, n
def parseFileMap(text):
"""Parse a CDMS filemap.
Parameters
----------
filemap : list [ varmap, varmap, ...]
varmap : list [ namelist, slicelist ]
namelist : list [name, name, ...]
slicelist : list [indexlist, indexlist, ,,,]
indexlist : list [i,j,k,l,path]
Returns
-------
    result : the parsed filemap as a nested list [varmap, varmap, ...].
"""
result, n = parselist(text, parseVarMap)
if n < len(text):
raise CDMSError("Parsing cdms_filemap near " + text[n:n + _NPRINT])
return result
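# Usage sketch (hypothetical filemap text; assumes the _ListStart/_ListSep/
# _ListEnd/_Name/_IndexList4 patterns defined elsewhere in this module accept
# this shape):
#   parseFileMap('[[[tas],[[0,12,-,-,tas_2000.nc],[12,24,-,-,tas_2001.nc]]]]')
#   would yield [[['tas'], [[0, 12, None, None, 'tas_2000.nc'],
#                           [12, 24, None, None, 'tas_2001.nc']]]]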
# A CDMS dataset consists of a CDML/XML file and one or more data files
try:
from .cudsinterface import cuDataset
except BaseException:
pass
class Dataset(CdmsObj, cuDataset):
def __init__(self, uri, mode, datasetNode=None,
parent=None, datapath=None):
if datasetNode is not None and datasetNode.tag != 'dataset':
raise CDMSError('Node is not a dataset node')
CdmsObj.__init__(self, datasetNode)
for v in ['datapath',
'variables',
'axes',
'grids',
'xlinks',
'dictdict',
'default_variable_name',
'parent',
'uri',
'mode']:
if v not in self.__cdms_internals__:
val = self.__cdms_internals__ + [v, ]
self.___cdms_internals__ = val
cuDataset.__init__(self)
self.parent = parent
self.uri = uri
self.mode = mode
# Path of data files relative to parent db.
# Note: .directory is the location of data relative to the location of
# the XML file
self.datapath = datapath
self.variables = {}
self.axes = {}
self.grids = {}
self.xlinks = {}
self._gridmap_ = {}
# Gridmap:(latname,lonname,order,maskname,gridclass) => grid
(scheme, netloc, xmlpath, parameters,
query, fragment) = urlparse(uri)
self._xmlpath_ = xmlpath
# Dictionary of dictionaries, keyed on node tags
self.dictdict = {'variable': self.variables,
'axis': self.axes,
'rectGrid': self.grids,
'curveGrid': self.grids,
'genericGrid': self.grids,
'xlink': self.xlinks
}
# Dataset IDs are external, so may not have been defined yet.
if not hasattr(self, 'id'):
self.id = '<None>'
self._status_ = 'open'
self._convention_ = convention.getDatasetConvention(self)
# Collect named children (having attribute 'id') into dictionaries
if datasetNode is not None:
coordsaux = self._convention_.getDsetnodeAuxAxisIds(datasetNode)
for node in list(datasetNode.getIdDict().values()):
if node.tag == 'variable':
if node.id in coordsaux:
if node.getDomain().getChildCount() == 1:
obj = DatasetAuxAxis1D(self, node.id, node)
else:
obj = DatasetAxis2D(self, node.id, node)
else:
obj = DatasetVariable(self, node.id, node)
self.variables[node.id] = obj
elif node.tag == 'axis':
obj = Axis(self, node)
self.axes[node.id] = obj
elif node.tag == 'rectGrid':
obj = RectGrid(self, node)
self.grids[node.id] = obj
# elif node.tag == 'xlink':
# obj = Xlink(node)
# self.xlinks[node.id] = obj
else:
dict = self.dictdict.get(node.tag)
if dict is not None:
dict[node.id] = node
else:
self.dictdict[node.tag] = {node.id: node}
# Initialize grid domains
for grid in list(self.grids.values()):
grid.initDomain(self.axes, self.variables)
latname = grid.getLatitude().id
lonname = grid.getLongitude().id
mask = grid.getMaskVar()
if mask is None:
maskname = ""
else:
maskname = mask.id
self._gridmap_[
(latname, lonname, grid.getOrder(), maskname)] = grid
# Initialize variable domains.
for var in list(self.variables.values()):
var.initDomain(self.axes, self.grids)
for var in list(self.variables.values()):
# Get grid information for the variable. gridkey has the form
# (latname,lonname,order,maskname,abstract_class).
gridkey, lat, lon = var.generateGridkey(
self._convention_, self.variables)
# If the variable is gridded, lookup the grid. If no such grid exists,
# create a unique gridname, create the grid, and add to the
# gridmap.
if gridkey is None:
grid = None
else:
grid = self._gridmap_.get(gridkey)
if grid is None:
if hasattr(var, 'grid_type'):
gridtype = var.grid_type
else:
gridtype = "generic"
candidateBasename = None
if gridkey[4] == 'rectGrid':
gridshape = (len(lat), len(lon))
elif gridkey[4] == 'curveGrid':
gridshape = lat.shape
elif gridkey[4] == 'genericGrid':
gridshape = lat.shape
candidateBasename = 'grid_%d' % gridshape
else:
gridshape = (len(lat), len(lon))
if candidateBasename is None:
candidateBasename = 'grid_%dx%d' % gridshape
if candidateBasename not in self.grids:
gridname = candidateBasename
else:
foundname = 0
for i in range(97, 123): # Lower-case letters
candidateName = candidateBasename + \
'_' + chr(i)
if candidateName not in self.grids:
gridname = candidateName
foundname = 1
break
if not foundname:
print(
'Warning: cannot generate a grid for variable', var.id)
continue
# Create the grid
if gridkey[4] == 'rectGrid':
node = cdmsNode.RectGridNode(
gridname, lat.id, lon.id, gridtype, gridkey[2])
grid = RectGrid(self, node)
grid.initDomain(self.axes, self.variables)
elif gridkey[4] == 'curveGrid':
grid = DatasetCurveGrid(lat, lon, gridname, self)
else:
grid = DatasetGenericGrid(lat, lon, gridname, self)
self.grids[grid.id] = grid
self._gridmap_[gridkey] = grid
# Set the variable grid
var.setGrid(grid)
# Attach boundary variables
for name in coordsaux:
var = self.variables[name]
bounds = self._convention_.getVariableBounds(self, var)
var.setBounds(bounds)
# Create the internal filemap, if attribute 'cdms_filemap' is present.
# _filemap_ is a dictionary, mapping (varname, timestart, levstart) => path
#
# Also, for each partitioned variable, | |
#!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import re
import sys
from distutils.version import LooseVersion
from refresh_source import Refresher
from spinnaker.run import run_quick
class CommitTag:
"""Provides a model class to capture the output of 'git show-ref --tags'.
We also capture the tag versions using `distutils.version` for easy semantic
version comparison for sorting.
"""
def __init__(self, ref_line):
# ref_line is in the form "$commit_hash refs/tags/$tag"
tokens = ref_line.split(' ')
self.__hash = tokens[0]
tag_parts = tokens[1].split('/')
self.__tag = tag_parts[len(tag_parts) - 1]
self.__version = LooseVersion(self.__tag)
def __repr__(self):
return 'hash: %s, tag: %s, version: %s' % (self.__hash, self.__tag, self.__version)
@property
def hash(self):
return self.__hash
@property
def tag(self):
return self.__tag
@property
def version(self):
return self.__version
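# Example (hypothetical ref line from 'git show-ref --tags'):
#   ct = CommitTag('1f2e3d4 refs/tags/version-1.2.3')
#   ct.hash    -> '1f2e3d4'
#   ct.tag     -> 'version-1.2.3'
#   ct.version -> LooseVersion('version-1.2.3')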
class CommitMessage:
"""Provides a model class to capture the output of 'git log --pretty'.
"""
def __init__(self, hash, msg):
self.__hash = hash
self.__msg = msg
def __repr__(self):
return 'hash: %s, message: %s' % (self.__hash, self.__msg)
@property
def hash(self):
return self.__hash
@property
def msg(self):
return self.__msg
class VersionBump:
"""Provides a model for a semantic version bump.
"""
def __init__(self, version_str, commit_hash, major=False, minor=False, patch=False):
self.__version_str = version_str
self.__commit_hash = commit_hash
self.__major = major
self.__minor = minor
self.__patch = patch
def __repr__(self):
return ('version_str: {}, commit_hash: {}, major: {}, minor: {}, patch: {}'
.format(self.version_str,
self.commit_hash,
self.major,
self.minor,
self.patch))
def __eq__(self, other):
return (self.version_str == other.version_str
and self.commit_hash == other.commit_hash
and self.major == other.major
and self.minor == other.minor
and self.patch == other.patch)
@property
def commit_hash(self):
return self.__commit_hash
@property
def version_str(self):
return self.__version_str
@property
def major(self):
return self.__major
@property
def minor(self):
return self.__minor
@property
def patch(self):
return self.__patch
class GitTagMissingException(Exception):
"""Exception for misconfigured git tags in the operating repository."""
def __init__(self, message):
self.message = message
class Annotator(object):
"""Provides semantic version tagging for Spinnaker repositories.
Each Spinnaker repository has tags that denote releases. These tags follow
semantic versioning. At the present time, there are two sets of tags in use
for the Spinnaker repositories: 'vX.Y.Z' for Netflix releases and 'version-X.Y.Z-$build'
for Spinnaker product releases. This class handles annotations of the
'version-X.Y.Z-$build' pattern.
This class provides support for resolving semantic version tags
based on commit messages and annotating local source trees with the
tagging information. It is assumed that the commit messages follow
conventional-changelog commit message conventions. This class also provides
support for creating release branches and pushing to and pulling from remote
repositories through extending the Refresher class.
"""
# regex for 'version-X.Y.Z' versions
TAG_MATCHER = re.compile('^version-[0-9]+\.[0-9]+\.[0-9]+$')
def __init__(self, options, path=None, next_tag=None):
self.__next_tag = next_tag or options.next_tag
self.__path = path or options.path
self.__branch = options.branch
self.__build_number = options.build_number or os.environ.get('BUILD_NUMBER', '0')
self.__force_rebuild = options.force_rebuild
self.__tags_to_delete = []
self.__filtered_tags = []
self.__current_version = None
@property
def build_number(self):
return self.__build_number
@property
def current_version(self):
return self.__current_version
@property
def branch(self):
return self.__branch
@branch.setter
def branch(self, branch):
self.__branch = branch
@property
def path(self):
return self.__path
@path.setter
def path(self, path):
self.__path = path
def __partition_tags_on_pattern(self):
"""Partitions the tags into two lists based on TAG_MATCHER.
One of the lists of tags will be deleted locally (self.__tags_to_delete) so
gradle will use our tag version as the package version during the
build/publish task.
One of the lists will be used to determine the next semantic version
    for our tag pattern (self.__filtered_tags).
"""
tag_ref_result = run_quick('git -C {path} show-ref --tags'
.format(path=self.path),
echo=False)
ref_lines = tag_ref_result.stdout.strip().split('\n')
hash_tags = [CommitTag(s) for s in ref_lines]
self.__filtered_tags = [ht for ht in hash_tags if self.TAG_MATCHER.match(ht.tag)]
self.__tags_to_delete = [ht for ht in hash_tags if not self.TAG_MATCHER.match(ht.tag)]
def parse_git_tree(self):
self.__partition_tags_on_pattern()
self.__determine_current_version()
def tag_head(self):
"""Tags the current branch's HEAD with the next semver tag.
Returns:
[VersionBump]: The version bump used to tag the git repository, or None
if the tagging fails.
"""
if self.__is_head_current():
# We manually specified a tag and want to override with that one.
if self.__next_tag:
self.__tag_head_with_build(self.__next_tag)
run_quick('git -C {path} tag {next_tag} HEAD'
.format(path=self.path, next_tag=self.__next_tag))
return VersionBump(self.__next_tag, self.get_head_commit())
# We didn't manually specify, but want to force a rebuild of the old tag.
elif self.__force_rebuild:
self.__tag_head_with_build(self.__current_version.tag)
run_quick('git -C {path} tag {next_tag} HEAD'
.format(path=self.path, next_tag=self.__current_version.tag))
return VersionBump(self.__current_version.tag, self.get_head_commit(), patch=True)
# Else fail.
else:
logging.warn("There is already a tag of the form 'version-X.Y.Z' at HEAD. Not forcing rebuild.")
return None
else:
version_bump = self.determine_new_tag()
# This tag is for logical identification for developers. This will be pushed
# to the upstream git repository if we choose to use this version in a
# formal Spinnaker product release.
run_quick('git -C {path} tag {next_tag} HEAD'
.format(path=self.path, next_tag=version_bump.version_str))
self.__tag_head_with_build(version_bump.version_str)
return version_bump
def __tag_head_with_build(self, version_bump_tag):
"""Tags the current branch's HEAD with the next semver gradle build tag.
Args:
version_bump_tag [String]: Semver string to add as a gradle build tag.
"""
next_tag_with_build = '{0}-{1}'.format(version_bump_tag,
self.build_number)
# This tag is for gradle to use as the package version. It incorporates the
# build number for uniqueness when publishing. This tag is of the form
# 'X.Y.Z-$build_number' for gradle to use correctly. This is not pushed
# to the upstream git repository.
first_dash_idx = next_tag_with_build.index('-')
gradle_version = next_tag_with_build[first_dash_idx + 1:]
run_quick('git -C {path} tag {next_tag} HEAD'
.format(path=self.path, next_tag=gradle_version))
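  # Worked example (hypothetical values): with version_bump_tag='version-1.2.3'
  # and build_number='45', next_tag_with_build is 'version-1.2.3-45' and the
  # gradle build tag applied to HEAD is '1.2.3-45'.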
def delete_unwanted_tags(self):
"""Locally deletes tags that don't match TAG_MATCHER.
This is so that gradle will use the latest resolved semantic version from
our tag pattern when it builds the package.
"""
print ('Deleting {0} unwanted git tags locally from {1}'
.format(len(self.__tags_to_delete), self.path))
for bad_hash_tag in self.__tags_to_delete:
run_quick('git -C {path} tag -d {tag}'
.format(path=self.path, tag=bad_hash_tag.tag), echo=False)
def checkout_branch(self):
"""Checks out a branch.
"""
run_quick('git -C {path} checkout {branch}'.format(path=self.path,
branch=self.branch))
def get_head_commit(self):
"""Retrieves the head commit hash.
"""
head_commit_res = run_quick('git -C {path} rev-parse HEAD'
.format(path=self.path),
echo=False)
return head_commit_res.stdout.strip()
def __is_head_current(self):
"""Checks if the current version is at HEAD.
Returns:
[Boolean]: True if the current version tag is on HEAD, else False.
"""
head_commit = self.get_head_commit()
return self.__current_version.hash == head_commit
def __determine_current_version(self):
"""Determines and stores the current (latest) semantic version from
'version-X.Y.Z' tags.
"""
sorted_filtered_tags = sorted(self.__filtered_tags,
key=lambda ht: ht.version, reverse=True)
if len(sorted_filtered_tags) == 0:
raise GitTagMissingException("No version tags of the form 'version-X.Y.Z'.")
self.__current_version = sorted_filtered_tags[0]
def determine_new_tag(self):
"""Determines the next semver tag for the repository at the path.
If the commit at HEAD is already tagged with a tag matching --tag_regex_str,
this function is a no-op. Otherwise it determines the semantic version bump
for the commits since the last tag matching 'version-X.Y.Z' and suggests a new tag
based on the commit messages. This suggestion can be overridden with
--next_tag, which will be used if there are any commits after the last
semver tag matching 'version-X.Y.Z'.
Returns:
[VersionBump]: Next semantic version tag to be used, along with what type
of version bump it was. Version tag is of the form 'version-X.Y.Z'.
"""
if self.__next_tag:
return VersionBump(self.__next_tag, self.get_head_commit())
# 'git log' entries of the form '$hash $commit_title'
log_onelines = run_quick('git -C {path} log --pretty=oneline'.format(path=self.path),
echo=False).stdout.strip().split('\n')
commit_hashes = [line.split(' ')[0].strip() for line in log_onelines]
# Full commit messages, including bodies for finding 'BREAKING CHANGE:'.
msgs = [
run_quick('git -C {path} log -n 1 --pretty=medium {hash}'.format(path=self.path, hash=h),
echo=False).stdout.strip() for h in commit_hashes
]
if len(commit_hashes) != len(msgs):
raise IOError('Git commit hash list and commit message list are unequal sizes.')
return self.bump_semver(self.__current_version, commit_hashes, msgs)
def bump_semver(self, curr_version, commit_hashes, commit_msgs):
"""Determines the semver version bump based on commit messages in 'git log'.
Uses 'conventional-changelog' format to search for features and breaking
changes.
Args:
curr_version [CommitTag]: Latest 'version-X.Y.Z' tag/commit hash pair
        calculated by semver sort.
commit_hashes [String list]: List of ordered commit hashes.
commit_msgs [String list]: List of ordered, full commit messages.
Returns:
[VersionBump]: Next semantic | |
if 27 - 27: OoOoOO00 * OoO0O00 * OOooOOo % I1IiiI * o0oOOo0O0Ooo + I1ii11iIi11i
ooOOOo0o0oo = I11IiI1ii . lookup_source_cache ( self . eid , True )
if ( ooOOOo0o0oo == None ) : return
if 73 - 73: i1IIi
if ( I11IiI1ii . source_cache == None ) : return
if 52 - 52: IiII / i11iIiiIii * O0
I11IiI1ii . source_cache . delete_cache ( self . eid )
if ( I11IiI1ii . source_cache . cache_size ( ) == 0 ) :
lisp_sites_by_eid . delete_cache ( self . group )
def add_source_entry ( self , source_se ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_se . eid , source_se )
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
def is_star_g ( self ) :
if ( self . group . is_null ( ) ) : return ( False )
return ( self . eid . is_exact_match ( self . group ) )
def eid_record_matches ( self , eid_record ) :
if ( self . eid . is_exact_match ( eid_record . eid ) == False ) : return ( False )
if ( eid_record . group . is_null ( ) ) : return ( True )
return ( eid_record . group . is_exact_match ( self . group ) )
def inherit_from_ams_parent ( self ) :
iiiIIIII1iIi = self . parent_for_more_specifics
if ( iiiIIIII1iIi == None ) : return
self . force_proxy_reply = iiiIIIII1iIi . force_proxy_reply
self . force_nat_proxy_reply = iiiIIIII1iIi . force_nat_proxy_reply
self . force_ttl = iiiIIIII1iIi . force_ttl
self . pitr_proxy_reply_drop = iiiIIIII1iIi . pitr_proxy_reply_drop
self . proxy_reply_action = iiiIIIII1iIi . proxy_reply_action
self . echo_nonce_capable = iiiIIIII1iIi . echo_nonce_capable
self . policy = iiiIIIII1iIi . policy
self . require_signature = iiiIIIII1iIi . require_signature
def rtrs_in_rloc_set ( self ) :
for IIiO0Ooo in self . registered_rlocs :
if ( IIiO0Ooo . is_rtr ( ) ) : return ( True )
if 84 - 84: oO0o * iII111i % i11iIiiIii - O0 . iIii1I11I1II1 - OoOoOO00
return ( False )
def is_rtr_in_rloc_set ( self , rtr_rloc ) :
for IIiO0Ooo in self . registered_rlocs :
if ( IIiO0Ooo . rloc . is_exact_match ( rtr_rloc ) == False ) : continue
if ( IIiO0Ooo . is_rtr ( ) ) : return ( True )
if 42 - 42: i11iIiiIii / II111iiii . OOooOOo
return ( False )
def is_rloc_in_rloc_set ( self , rloc ) :
for IIiO0Ooo in self . registered_rlocs :
if ( IIiO0Ooo . rle ) :
for iiiI1i1111II in IIiO0Ooo . rle . rle_nodes :
if ( iiiI1i1111II . address . is_exact_match ( rloc ) ) : return ( True )
if ( IIiO0Ooo . rloc . is_exact_match ( rloc ) ) : return ( True )
if 37 - 37: IiII - oO0o
return ( False )
def do_rloc_sets_match ( self , prev_rloc_set ) :
if ( len ( self . registered_rlocs ) != len ( prev_rloc_set ) ) : return ( False )
if 32 - 32: I1ii11iIi11i % OoOoOO00 + Oo0Ooo
for IIiO0Ooo in prev_rloc_set :
I1I1ii1 = IIiO0Ooo . rloc
if ( self . is_rloc_in_rloc_set ( I1I1ii1 ) == False ) : return ( False )
if 92 - 92: II111iiii . O0 . iIii1I11I1II1 % IiII - i11iIiiIii
return ( True )
class lisp_mr ( ) :
def __init__ ( self , addr_str , dns_name , mr_name ) :
self . mr_name = mr_name if ( mr_name != None ) else "all"
self . dns_name = dns_name
self . map_resolver = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . last_dns_resolve = None
self . a_record_index = 0
if ( addr_str ) :
self . map_resolver . store_address ( addr_str )
self . insert_mr ( )
else :
self . resolve_dns_name ( )
if 100 - 100: Oo0Ooo - o0oOOo0O0Ooo + iIii1I11I1II1 / ooOoO0o % iIii1I11I1II1
self . last_used = 0
self . last_reply = 0
self . last_nonce = 0
self . map_requests_sent = 0
self . neg_map_replies_received = 0
self . total_rtt = 0
def resolve_dns_name ( self ) :
if ( self . dns_name == None ) : return
if ( self . last_dns_resolve and
time . time ( ) - self . last_dns_resolve < 30 ) : return
if 17 - 17: iII111i * I11i / iIii1I11I1II1 - II111iiii
try :
iI1 = socket . gethostbyname_ex ( self . dns_name )
self . last_dns_resolve = lisp_get_timestamp ( )
ooOoooOo00Ooo = iI1 [ 2 ]
except :
return
if ( len ( ooOoooOo00Ooo ) <= self . a_record_index ) :
self . delete_mr ( )
return
o0o0O00 = ooOoooOo00Ooo [ self . a_record_index ]
if ( o0o0O00 != self . map_resolver . print_address_no_iid ( ) ) :
self . delete_mr ( )
self . map_resolver . store_address ( o0o0O00 )
self . insert_mr ( )
if ( lisp_is_decent_dns_suffix ( self . dns_name ) == False ) : return
if ( self . a_record_index != 0 ) : return
if 30 - 30: i1IIi / ooOoO0o . ooOoO0o
for o0o0O00 in ooOoooOo00Ooo [ 1 : : ] :
oOO0oo = lisp_address ( LISP_AFI_NONE , o0o0O00 , 0 , 0 )
IIiIII1IIi = lisp_get_map_resolver ( oOO0oo , None )
| |
import pickle
import six
from .base import Base
from . import _to_native
from . import _DEFAULT_SOCKET_TIMEOUT
from . import _DEFAULT_TIMEOUT
from . import _TCP_KEEP_ALIVE_OPTIONS
class Redis(Base):
"""Uses the Redis key-value store as a cache backend
:param host: address of Redis server
:param port: port number of Redis server
:param unix_socket_path: unix socket file path
:param password: password authentication for the Redis server
:param db: db (zero-based numeric index) on Redis server to connect
:param timeout: default timeout
:param prefix: A prefix added to all keys
    Any additional keyword arguments will be passed to ``redis.Redis``
"""
    def __init__(self, host, port, unix_socket_path=None, password=None, db=0, timeout=None, prefix='', default_scan_count=1000, **kw):
        Base.__init__(self, timeout)
self.prefix = prefix
self._default_scan_count = default_scan_count
try:
import redis
except ImportError:
raise RuntimeError('no redis module found')
kwargs = dict(
host=host,
port=port,
unix_socket_path=unix_socket_path,
password=password,
db=db,
)
if 'socket_timeout' not in kwargs:
kwargs['socket_timeout'] = _DEFAULT_SOCKET_TIMEOUT
if 'socket_connect_timeout' not in kwargs:
kwargs['socket_connect_timeout'] = _DEFAULT_SOCKET_TIMEOUT
if 'socket_keepalive' not in kwargs:
kwargs['socket_keepalive'] = 1
if 'socket_keepalive_options' not in kwargs:
kwargs['socket_keepalive_options'] = _TCP_KEEP_ALIVE_OPTIONS
self._client = redis.Redis(**kwargs)
def _normalize_key(self, key):
key = _to_native(key, 'utf-8')
if self.prefix:
key = self.prefix + key
return key
def dumps(self, value):
"""Dumps an object into a string for redis. By default it serialized
integers as regular string and pickle dumps everyting else.
"""
if type(value) in six.integer_types: # pylint: disable=unidiomatic-typecheck
return str(value).encode('ascii')
return b'!' + pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
def loads(self, value):
"""The reversal of :meth:`dumps`. This might be called with None
"""
if value is None:
return None
if value.startswith(b'!'):
try:
return pickle.loads(value[1:])
except pickle.PickleError:
return None
try:
return int(value)
except ValueError:
return value
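    # Round-trip sketch (illustrative values only):
    #   dumps(42)        -> b'42'
    #   dumps({'a': 1})  -> b'!' + pickle.dumps({'a': 1}, pickle.HIGHEST_PROTOCOL)
    #   loads(b'42')     -> 42
    #   loads(None)      -> None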
def get(self, key):
"""Look up the `key` in cache and return the value of it.
:param key: the `key` to be looked up
:returns: the value if it exists and is readable, else ``None``.
TODO: support __get__
"""
return self.loads(self._client.get(self._normalize_key(key)))
def get_values(self, *keys):
"""Get valeus by keys
foo, bar = cache.get_values('foo', 'bar')
Share same error handling with :meth:`get`
        :param keys: the function accepts multiple keys as positional arguments
"""
keys = [self._normalize_key(key) for key in keys]
return [self.loads(x) for x in self._client.mget(keys)]
def get_key_to_value(self, *keys):
"""Like :meth:`get_values` but return a dict::
if the given key is missing, it will be missing from the response dict.
d = cache.get_key_to_value('foo', 'bar')
foo = d['foo']
bar = d['bar']
:param keys: The function accepts multiple keys as positional arguments
"""
query_keys = [self._normalize_key(key) for key in keys]
values = self._client.mget(query_keys)
res = {}
for i in range(len(keys)):
value = values[i]
if value is not None:
res[keys[i]] = self.loads(value)
return res
def set(self, key, value, timeout=None, noreply=False):
"""Add a new key/value to the cache (overwrite value, if key exists)
:param key: the key to set
:param value: the value of the key
:param timeout: the cache timeout for the key.
            If not specified, use the default timeout.
            If 0 is specified, the key will never expire
:param noreply: instructs the server to not reply
:returns: Whether the key existed and has been set
:rtype: boolean
TODO: __set__
"""
if timeout is None:
timeout = self.default_timeout
value = self.dumps(value)
key = self._normalize_key(key)
if timeout == 0:
return self._client.set(name=key, value=value)
return self._client.setex(name=key, value=value, time=timeout)
def set_not_overwrite(self, key, value, timeout=None, noreply=False):
"""Works like :meth:`set` but does not overwrite the existing value
:param key: the key to set
:param value: the value of the key
:param timeout: the cache timeout for the key.
            If not specified, use the default timeout.
            If 0 is specified, the key will never expire
:param noreply: instructs the server to not reply
:returns: Whether the key existed and has been set
:rtype: boolean
"""
if timeout is None:
timeout = self.default_timeout
if timeout == 0:
timeout = None
key = self._normalize_key(key)
value = self.dumps(value)
# This requires the version of redis server >= 2.6.12, please refer
# https://github.com/andymccurdy/redis-py/issues/387 for more details.
return self._client.set(key, value, nx=True, ex=timeout)
def set_many(self, timeout=None, noreply=False, **kw):
"""Sets multiple key-value pair
:param timeout: the cache timeout for the key.
            If not specified, use the default timeout.
            If 0 is specified, the key will never expire
:param noreply: instructs the server to not reply
:returns: Whether all key-value pairs have been set
:rtype: boolean
"""
if timeout is None:
            timeout = self.default_timeout
pipe = self._client.pipeline()
for key, value in kw.items():
value = self.dumps(value)
key = self._normalize_key(key)
if timeout == 0:
pipe.set(name=key, value=value)
else:
pipe.setex(name=key, value=value, time=timeout)
return pipe.execute()
def delete(self, key, noreply=False):
"""Delete `key` from cache.
:param key: the `key` to delete.
:param noreply: instruct the server to not reply.
        :returns: whether the key has been deleted.
:rtype: boolean
TODO: __del__
"""
self._client.delete(self._normalize_key(key))
return True
def delete_many(self, noreply=False, *keys):
"""Delete multiple keys at once.
:param keys: The function accept multiple keys as positional arguments
:param noreply: instructs the server not reply
:returns: Whether all given keys have been deleted
        :rtype: boolean
"""
if not keys:
return True
keys = [self._normalize_key(key) for key in keys]
self._client.delete(*keys)
return True
def clear(self):
"""Clears the cache. Not all caches support completely clearing the cache
:returns: Whether the cache been cleared.
:rtype: boolean
"""
from redis.exceptions import ResponseError
status = False
client = self._client
if self.prefix:
pattern = self.prefix + '*'
try:
cursor = '0'
while cursor != 0:
cursor, keys = client.scan(cursor=cursor, match=pattern, count=self._default_scan_count)
if keys:
status = client.delete(*keys)
except ResponseError:
keys = client.keys(pattern)
if keys:
status = client.delete(*keys)
else:
status = client.flushdb()
return status
def incr(self, key, delta=1, noreply=False):
"""Increments the value of a key by `delta`. If the key does not yet exists it is initialized with `delta`
For supporting caches this is an atomic operation
:param key: the key to increment
:param delta: the delta to add
:param noreply: instructs the server not reply
:returns: The new value or ``None`` for backend errors.
"""
return self._client.incr(name=self._normalize_key(key), amount=delta)
def decr(self, key, delta=1, noreply=False):
"""Decrements the value of a key by `delta`. If the key does not yet exists it is initialized with `-delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment
:param delta: the delta to subtruct
:param noreply: instructs the server not reply
:returns: The new value or `None` for backend errors.
"""
return self._client.decr(name=self._normalize_key(key), amount=delta)
def block_left_pop(self, key, timeout=0):
"""Blocking pop a value from the head of the list.
TODO: why loads res[1], is it res[1:] ?
:param key: the key of list
:param timeout: blocking timeout, 0 means block indefinitely
:returns: The popped value or None if timeout.
"""
res = self._client.blpop([self._normalize_key(key)], timeout)
if res is not None:
res = self.loads(res[1])
return res
def block_right_pop(self, key, timeout=0):
"""Blocking pop a value from the tail of the list.
TODO: ?
:param key: the key of list
:param timeout: blocking timeout, 0 means block indefinitely
:returns: The popped value or None if timeout.
"""
res = self._client.brpop([self._normalize_key(key)], timeout)
if res is not None:
res = self.loads(res[1])
return res
def lindex(self, key, index):
"""Return the item from list at position `index`
:param key: the key of list
:param index: the position, can be negative
:returns: The value at position `index` or None of index is out of range
"""
return self.loads(self._client.lindex(self._normalize_key(key), index))
def llen(self, key):
"""Return the number of elements in list
:param key: the key of list
:returns: number of elements in list
:rtype: int
"""
return self._client.llen(self._normalize_key(key))
def lpop(self, key):
"""Pop a value from the head of list
:param key: the key of list
:returns: The popped value or None if list is empty
"""
return self.loads(self._client.lpop(self._normalize_key(key)))
def lpush(self, key, value):
"""Push a value to the head of the list
:param key: the key of list
:param value: the value to be pushed
:returns: Whether the value has been added to list
:rtype: boolean
"""
return self._client.lpush(self._normalize_key(key), self.dumps(value))
def lrange(self, key, start=0, end=-1):
"""Return a slice of the list
:param key: the key of list
:param start: the start position, can be negative
:param end: the end position, can be negative
| |
from __future__ import absolute_import
from builtins import zip
from builtins import map
from builtins import str
from builtins import range
from builtins import object
from nose.tools import (assert_equal, assert_not_equal, raises, assert_true,
assert_false)
from nose.plugins.skip import SkipTest
from .test_helpers import (CallIdentity, prepend_exception_message,
make_1d_traj, raises_with_message_like,
CalvinistDynamics)
import openpathsampling as paths
import openpathsampling.engines.openmm as peng
from openpathsampling.ensemble import *
import logging
logging.getLogger('openpathsampling.ensemble').setLevel(logging.DEBUG)
logging.getLogger('openpathsampling.initialization').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.storage').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.netcdfplus').setLevel(logging.CRITICAL)
logger = logging.getLogger('openpathsampling.tests.testensemble')
import re
import random
def wrap_traj(traj, start, length):
"""Wraps the traj such that the original traj starts at frame `start`
and is of length `length` by padding beginning with traj[0] and end with
traj[-1]. Used to test the slice restricted trajectories."""
if (start < 0) or (length < len(traj)+start):
raise ValueError("""wrap_traj: start < 0 or length < len(traj)+start
{0} < 0 or {1} < {2}+{0}""".format(
start, length, len(traj)))
outtraj = traj[:] # shallow copy
# prepend
for i in range(start):
outtraj.insert(0, traj[0])
# append
for i in range(length - (len(traj)+start)):
outtraj.append(traj[-1])
return outtraj
def test_wrap_traj():
"""Testing wrap_traj (oh gods, the meta! a test for a test function!)"""
intraj = [1, 2, 3]
assert_equal(wrap_traj(intraj, 3, 6), [1, 1, 1, 1, 2, 3])
assert_equal(wrap_traj(intraj, 3, 8), [1, 1, 1, 1, 2, 3, 3, 3])
assert_equal(wrap_traj(intraj, 3, 8)[slice(3, 6)], intraj)
def build_trajdict(trajtypes, lower, upper):
upperadddict = {'a' : 'in', 'b' : 'out', 'c' : 'cross', 'o' : 'hit'}
loweradddict = {'a' : 'out', 'b' : 'in', 'c' : 'in', 'o' : 'hit'}
lowersubdict = {'a' : 'in', 'b' : 'out', 'c' : 'cross', 'o' : 'hit'}
uppersubdict = {'a' : 'out', 'b' : 'in', 'c' : 'in', 'o' : 'hit'}
adjustdict = {'a' : (lambda x: -0.05*x), 'b' : (lambda x: 0.05*x),
'c' : (lambda x: 0.05*x + 0.16), 'o' : (lambda x: 0.0)}
mydict = {}
for mystr in trajtypes:
upperaddkey = "upper"
uppersubkey = "upper"
loweraddkey = "lower"
lowersubkey = "lower"
delta = []
for char in mystr:
upperaddkey += "_"+upperadddict[char]
loweraddkey += "_"+loweradddict[char]
uppersubkey += "_"+uppersubdict[char]
lowersubkey += "_"+lowersubdict[char]
delta.append(adjustdict[char](random.randint(1, 3)))
mydict[upperaddkey] = list(map(upper.__add__, delta))
mydict[loweraddkey] = list(map(lower.__add__, delta))
mydict[uppersubkey] = list(map(upper.__sub__, delta))
mydict[lowersubkey] = list(map(lower.__sub__, delta))
return mydict
def tstr(ttraj):
return list(ttraj).__str__()
def results_upper_lower(adict):
res_dict = {}
for test in list(adict.keys()):
res_dict['upper_'+test] = adict[test]
res_dict['lower_'+test] = adict[test]
return res_dict
def setup_module():
''' Setup for tests of classes in ensemble.py. '''
#random.seed
global lower, upper, op, vol1, vol2, vol3, ttraj, length0
length0 = LengthEnsemble(0)
lower = 0.1
upper = 0.5
op = paths.FunctionCV("Id", lambda snap : snap.coordinates[0][0])
vol1 = paths.CVDefinedVolume(op, lower, upper).named('stateA')
vol2 = paths.CVDefinedVolume(op, -0.1, 0.7).named('interface0')
vol3 = paths.CVDefinedVolume(op, 2.0, 2.5).named('stateB')
# we use the following codes to describe trajectories:
# in : in the state
# out : out of the state
# hit : on the state border
#
# deltas of each letter from state edge:
# a < 0 ; 0 < b < 0.2 ; c > 0.2; o = 0
trajtypes = ["a", "o", "aa", "ab", "aob", "bob", "aba", "aaa", "abcba",
"abaa", "abba", "abaab", "ababa", "abbab", "ac", "bc",
"abaaba", "aobab", "abab", "abcbababcba", "aca", "abc",
"acaca", "acac", "caca", "aaca", "baca", "aaba", "aab",
"aabbaa", "abbb", "aaab"
]
ttraj = build_trajdict(trajtypes, lower, upper)
# make the tests from lists into trajectories
for test in list(ttraj.keys()):
ttraj[test] = make_1d_traj(coordinates=ttraj[test],
velocities=[1.0]*len(ttraj[test]))
def in_out_parser(testname):
allowed_parts = ['in', 'out']
parts = re.split("_", testname)
res = []
for part in parts:
to_append = None
if part in allowed_parts:
to_append = part
elif part == 'hit':
if 'upper' in parts:
to_append = 'out'
elif 'lower' in parts:
to_append = 'in'
elif part == 'cross':
to_append = 'out'
if to_append != None:
if res == []:
res.append(to_append)
elif to_append != res[-1]:
res.append(to_append)
return res
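# Example (hypothetical test names): in_out_parser('upper_in_out_in') -> ['in', 'out', 'in'];
# in_out_parser('lower_hit_out') -> ['in', 'out'] (a 'hit' counts as 'in' for lower-boundary names).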
class EnsembleTest(object):
def _single_test(self, ensemble_fcn, traj, res, failmsg):
try:
assert_equal(ensemble_fcn(traj), res)
except AssertionError as e:
prepend_exception_message(e, failmsg)
raise
def _test_everything(self, test_fcn, non_default=[], default=False):
"""
Runs tests using *all* the trajectory test suite. This is the
ultimate in test-running simplicity!!
"""
results = {}
for test in list(ttraj.keys()):
results[test] = default
nondef_dict = {}
for test in non_default:
if test in list(ttraj.keys()):
results[test] = not default
if "lower_"+test in list(ttraj.keys()):
results["lower_"+test] = not default
if "upper_"+test in list(ttraj.keys()):
results["upper_"+test] = not default
for test in list(results.keys()):
logging.getLogger('openpathsampling.ensemble').debug(
"Starting test for " + test + "("+str(ttraj[test])+")"
)
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(test_fcn, ttraj[test], results[test], failmsg)
def _run(self, results):
"""Actually run tests on the trajectory and the wrapped trajectory.
Nearly all of the tests are just this simple. By creating custom error
messages (using prepend_exception_message) we can wrap the many tests
into loops instead of making tons of lines of code.
"""
for test in list(results.keys()):
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.ensemble, ttraj[test], results[test], failmsg)
wrapped = wrap_traj(ttraj[test], self.wrapstart, self.wrapend)
lentt = len(ttraj[test])
failmsg = "Failure in wrapped "+test+"("+str(ttraj[test])+"): "
self._single_test(self.ensemble, wrapped, results[test], failmsg)
failmsg = "Failure in slice_ens "+test+"("+str(ttraj[test])+"): "
self._single_test(self.slice_ens, wrapped, results[test], failmsg)
class TestPartOutXEnsemble(EnsembleTest):
def setup(self):
self.leaveX = PartOutXEnsemble(vol1)
def test_leaveX(self):
"""PartOutXEnsemble passes the trajectory test suite"""
for test in list(ttraj.keys()):
if "out" in in_out_parser(test):
res = True
else:
res = False
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.leaveX, ttraj[test], res, failmsg)
def test_invert(self):
inverted = ~self.leaveX
for test in list(ttraj.keys()):
if "out" in in_out_parser(test):
res = False
else:
res = True
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(inverted, ttraj[test], res, failmsg)
def test_can_append(self):
self._test_everything(self.leaveX.can_append, default=True)
def test_can_prepend(self):
self._test_everything(self.leaveX.can_prepend, default=True)
def test_strict_can_append(self):
self._test_everything(self.leaveX.strict_can_append, default=True)
def test_strict_can_prepend(self):
self._test_everything(self.leaveX.strict_can_prepend, default=True)
def test_leaveX_0(self):
"""PartOutXEnsemble treatment of zero-length trajectory"""
assert_equal(self.leaveX(paths.Trajectory([])), False)
assert_equal(self.leaveX.can_append(paths.Trajectory([])), True)
assert_equal(self.leaveX.can_prepend(paths.Trajectory([])), True)
def test_leaveX_str(self):
volstr = "{x|Id(x) in [0.1, 0.5]}"
assert_equal(self.leaveX.__str__(),
"exists t such that x[t] in (not "+volstr+")")
class TestAllInXEnsemble(EnsembleTest):
def setup(self):
self.inX = AllInXEnsemble(vol1)
def test_inX(self):
"""AllInXEnsemble passes the trajectory test suite"""
for test in list(ttraj.keys()):
if "out" in in_out_parser(test):
res = False
else:
res = True
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.inX, ttraj[test], res, failmsg)
def test_can_append(self):
for test in list(ttraj.keys()):
if "out" in in_out_parser(test):
res = False
else:
res = True
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.inX.can_append, ttraj[test], res, failmsg)
def test_can_prepend(self):
for test in list(ttraj.keys()):
if "out" in in_out_parser(test):
res = False
else:
res = True
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.inX.can_prepend, ttraj[test], res,
failmsg)
def test_strict_can_append(self):
for test in list(ttraj.keys()):
if "out" in in_out_parser(test):
res = False
else:
res = True
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.inX.strict_can_append, ttraj[test], res,
failmsg)
def test_strict_can_prepend(self):
for test in list(ttraj.keys()):
if "out" in in_out_parser(test):
res = False
else:
res = True
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.inX.strict_can_prepend, ttraj[test], res,
failmsg)
def test_inX_0(self):
"""AllInXEnsemble treatment of zero-length trajectory"""
assert_equal(self.inX(paths.Trajectory([])), False)
assert_equal(self.inX.can_append(paths.Trajectory([])), True)
assert_equal(self.inX.can_prepend(paths.Trajectory([])), True)
def test_inX_str(self):
volstr = "{x|Id(x) in [0.1, 0.5]}"
assert_equal(self.inX.__str__(),
"x[t] in "+volstr+" for all t")
class TestAllOutXEnsemble(EnsembleTest):
def setup(self):
self.outX = AllOutXEnsemble(vol1)
def test_outX(self):
"""AllOutXEnsemble passes the trajectory test suite"""
for test in list(ttraj.keys()):
if "in" in in_out_parser(test):
res = False
else:
res = True
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.outX, ttraj[test], res, failmsg)
def test_can_append(self):
for test in list(ttraj.keys()):
if "in" in in_out_parser(test):
res = False
else:
res = True
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.outX.can_append, ttraj[test], res, failmsg)
def test_can_prepend(self):
for test in list(ttraj.keys()):
if "in" in in_out_parser(test):
res = False
else:
res = True
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.outX.can_prepend, ttraj[test], res, failmsg)
def test_strict_can_append(self):
for test in list(ttraj.keys()):
if "in" in in_out_parser(test):
res = False
else:
res = True
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.outX.strict_can_append, ttraj[test], res,
failmsg)
def test_strict_can_prepend(self):
for test in list(ttraj.keys()):
if "in" in in_out_parser(test):
res = False
else:
res = True
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.outX.strict_can_prepend, ttraj[test],
res, failmsg)
def test_outX_0(self):
"""AllOutXEnsemble treatment of zero-length trajectory"""
assert_equal(self.outX(paths.Trajectory([])), False)
assert_equal(self.outX.can_append(paths.Trajectory([])), True)
assert_equal(self.outX.can_prepend(paths.Trajectory([])), True)
def test_outX_str(self):
volstr = "{x|Id(x) in [0.1, 0.5]}"
assert_equal(self.outX.__str__(),
"x[t] in (not "+volstr+") for all t")
class TestPartInXEnsemble(EnsembleTest):
def setup(self):
self.hitX = PartInXEnsemble(vol1)
def test_hitX(self):
"""PartInXEnsemble passes the trajectory test suite"""
for test in list(ttraj.keys()):
if "in" in in_out_parser(test):
res = True
else:
res = False
failmsg = "Failure in "+test+"("+str(ttraj[test])+"): "
self._single_test(self.hitX, ttraj[test], res, failmsg)
def test_can_append(self):
self._test_everything(self.hitX.can_append, default=True)
def test_can_prepend(self):
self._test_everything(self.hitX.can_prepend, default=True)
def test_strict_can_append(self):
| |
# Class for pickups including fruit, extra health and extra life
class Fruit(GravityActor):
APPLE = 0
RASPBERRY = 1
LEMON = 2
EXTRA_HEALTH = 3
EXTRA_LIFE = 4
def __init__(self, pos, trapped_enemy_type=0):
super().__init__(pos)
# Choose which type of fruit we're going to be.
if trapped_enemy_type == Robot.TYPE_NORMAL:
self.type = choice([Fruit.APPLE, Fruit.RASPBERRY, Fruit.LEMON])
else:
# If trapped_enemy_type is 1, it means this fruit came from bursting an orb containing the more dangerous type
# of enemy. In this case there is a chance of getting an extra health or extra life power-up
# We create a list containing the possible types of fruit, in proportions based on the probability we want
# each type of fruit to be chosen
types = 10 * [Fruit.APPLE, Fruit.RASPBERRY, Fruit.LEMON] # Each of these appear in the list 10 times
types += 9 * [Fruit.EXTRA_HEALTH] # This appears 9 times
types += [Fruit.EXTRA_LIFE] # This only appears once
self.type = choice(types) # Randomly choose one from the list
self.time_to_live = 500 # Counts down to zero
def update(self):
super().update()
# Does the player exist, and are they colliding with us?
if game.player and game.player.collidepoint(self.center):
if self.type == Fruit.EXTRA_HEALTH:
game.player.health = min(3, game.player.health + 1)
game.play_sound("bonus")
elif self.type == Fruit.EXTRA_LIFE:
game.player.lives += 1
game.play_sound("bonus")
else:
game.player.score += (self.type + 1) * 100
game.play_sound("score")
self.time_to_live = 0 # Disappear
else:
self.time_to_live -= 1
if self.time_to_live <= 0:
# Create 'pop' animation
game.pops.append(Pop((self.x, self.y - 27), 0))
anim_frame = str([0, 1, 2, 1][(game.timer // 6) % 4])
self.image = "fruit" + str(self.type) + anim_frame
class Player(GravityActor):
def __init__(self):
# Call constructor of parent class. Initial pos is 0,0 but reset is always called straight afterwards which
# will set the actual starting position.
super().__init__((0, 0))
self.lives = 2
self.score = 0
def reset(self):
self.pos = (WIDTH / 2, 100)
self.vel_y = 0
self.direction_x = 1 # -1 = left, 1 = right
self.fire_timer = 0
self.hurt_timer = 100 # Invulnerable for this many frames
self.health = 3
self.blowing_orb = None
def hit_test(self, other):
# Check for collision between player and bolt - called from Bolt.update. Also check hurt_timer - after being hurt,
# there is a period during which the player cannot be hurt again
if self.collidepoint(other.pos) and self.hurt_timer < 0:
# Player loses 1 health, is knocked in the direction the bolt had been moving, and can't be hurt again
# for a while
self.hurt_timer = 200
self.health -= 1
self.vel_y = -12
self.landed = False
self.direction_x = other.direction_x
if self.health > 0:
game.play_sound("ouch", 4)
else:
game.play_sound("die")
return True
else:
return False
def update(self):
# Call GravityActor.update - parameter is whether we want to perform collision detection as we fall. If health
# is zero, we want the player to just fall out of the level
super().update(self.health > 0)
self.fire_timer -= 1
self.hurt_timer -= 1
if self.landed:
# Hurt timer starts at 200, but drops to 100 once the player has landed
self.hurt_timer = min(self.hurt_timer, 100)
if self.hurt_timer > 100:
# We've just been hurt. Either carry out the sideways motion from being knocked by a bolt, or if health is
# zero, we're dropping out of the level, so check for our sprite reaching a certain Y coordinate before
# reducing our lives count and respawning the player. We check for the Y coordinate being the screen height
# plus 50%, rather than simply the screen height, because the former effectively gives us a short delay
# before the player respawns.
if self.health > 0:
self.move(self.direction_x, 0, 4)
else:
if self.top >= HEIGHT*1.5:
self.lives -= 1
self.reset()
else:
# We're not hurt
# Get keyboard input. dx represents the direction the player is facing
dx = 0
if keyboard.left:
dx = -1
elif keyboard.right:
dx = 1
if dx != 0:
self.direction_x = dx
# If we haven't just fired an orb, carry out horizontal movement
if self.fire_timer < 10:
self.move(dx, 0, 4)
# Do we need to create a new orb? Space must have been pressed and released, the minimum time between
# orbs must have passed, and there is a limit of 5 orbs.
if space_pressed() and self.fire_timer <= 0 and len(game.orbs) < 5:
# x position will be 38 pixels in front of the player position, while ensuring it is within the
# bounds of the level
x = min(730, max(70, self.x + self.direction_x * 38))
y = self.y - 35
self.blowing_orb = Orb((x,y), self.direction_x)
game.orbs.append(self.blowing_orb)
game.play_sound("blow", 4)
self.fire_timer = 20
if keyboard.up and self.vel_y == 0 and self.landed:
# Jump
self.vel_y = -16
self.landed = False
game.play_sound("jump")
# Holding down space causes the current orb (if there is one) to be blown further
if keyboard.space:
if self.blowing_orb:
# Increase blown distance up to a maximum of 120
self.blowing_orb.blown_frames += 4
if self.blowing_orb.blown_frames >= 120:
# Can't be blown any further
self.blowing_orb = None
else:
# If we let go of space, we relinquish control over the current orb - it can't be blown any further
self.blowing_orb = None
# Set sprite image. If we're currently hurt, the sprite will flash on and off on alternate frames.
self.image = "blank"
if self.hurt_timer <= 0 or self.hurt_timer % 2 == 1:
dir_index = "1" if self.direction_x > 0 else "0"
if self.hurt_timer > 100:
if self.health > 0:
self.image = "recoil" + dir_index
else:
self.image = "fall" + str((game.timer // 4) % 2)
elif self.fire_timer > 0:
self.image = "blow" + dir_index
elif dx == 0:
self.image = "still"
else:
self.image = "run" + dir_index + str((game.timer // 8) % 4)
class Robot(GravityActor):
TYPE_NORMAL = 0
TYPE_AGGRESSIVE = 1
def __init__(self, pos, type):
super().__init__(pos)
self.type = type
self.speed = randint(1, 3)
self.direction_x = 1
self.alive = True
self.change_dir_timer = 0
self.fire_timer = 100
def update(self):
super().update()
self.change_dir_timer -= 1
self.fire_timer += 1
# Move in current direction - turn around if we hit a wall
if self.move(self.direction_x, 0, self.speed):
self.change_dir_timer = 0
if self.change_dir_timer <= 0:
# Randomly choose a direction to move in
# If there's a player, there's a two thirds chance that we'll move towards them
directions = [-1, 1]
if game.player:
directions.append(sign(game.player.x - self.x))
self.direction_x = choice(directions)
self.change_dir_timer = randint(100, 250)
# The more powerful type of robot can deliberately shoot at orbs - turning to face them if necessary
if self.type == Robot.TYPE_AGGRESSIVE and self.fire_timer >= 24:
# Go through all orbs to see if any can be shot at
for orb in game.orbs:
# The orb must be at our height, and within 200 pixels on the x axis
if orb.y >= self.top and orb.y < self.bottom and abs(orb.x - self.x) < 200:
self.direction_x = sign(orb.x - self.x)
self.fire_timer = 0
break
# Check to see if we can fire at player
if self.fire_timer >= 12:
# Random chance of firing each frame. Likelihood increases 10 times if player is at the same height as us
fire_probability = game.fire_probability()
if game.player and self.top < game.player.bottom and self.bottom > game.player.top:
fire_probability *= 10
if random() < fire_probability:
self.fire_timer = 0
game.play_sound("laser", 4)
elif self.fire_timer == 8:
# Once the fire timer has been set to 0, it will count up - frame 8 of the animation is when the actual bolt is fired
game.bolts.append(Bolt((self.x + self.direction_x * 20, self.y - 38), self.direction_x))
# Am I colliding with an orb? If so, become trapped by it
for orb in game.orbs:
if orb.trapped_enemy_type == None and self.collidepoint(orb.center):
self.alive = False
orb.floating = True
orb.trapped_enemy_type = self.type
game.play_sound("trap", 4)
break
# Choose and set sprite image
direction_idx = "1" if self.direction_x > 0 else "0"
image = "robot" + str(self.type) + direction_idx
if self.fire_timer < 12:
image += str(5 + (self.fire_timer // 4))
else:
image += str(1 + ((game.timer // 4) % 4))
self.image = image
| |
# labels = np.delete(labels, idx[:700]) # labels = labels[:2000]
# graphs = np.delete(graphs, idx[:700], axis=0) # graphs= graphs[:2000]
return graphs, labels
def load_dataset3s_large(ds_name, upper):
graph_dict = dict(zip([7, 8, 9], [1, 1, 1, 1, 1, 1, 1]))
num_rep = [20, 20, 20, 50, 50, 200, 200]
graphs = []
labels = []
for num, (k, v) in zip(num_rep, graph_dict.items()):
G, label = construct_graph(k, v, sub_size=1)
if upper == False:
A = construct_A3(G)
else:
A = construct_upperA3(G)
graphs.append(A)
labels.append(label)
for graph in range(num):
node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random())))
G_new = nx.relabel_nodes(G, node_mapping)
if upper == False:
A_new = construct_A3(G_new)
else:
A_new = construct_upperA3(G_new)
graphs.append(A_new)
labels.append(label)
graphs = np.array(graphs)
labels = np.array(labels)
max_dim = max([graph.shape[0] for graph in graphs]) + 1
for i in range(graphs.shape[0]):
padded = np.zeros((max_dim, max_dim, max_dim))
padded[:graphs[i].shape[0], :graphs[i].shape[1], :graphs[i].shape[2]] = graphs[i]
graphs[i] = padded
le = preprocessing.LabelEncoder() # to find clique
le.fit(labels) # to find clique
labels = le.transform(labels) # to find clique
return graphs, labels
def load_dataset_3s_large_val(ds_name, upper):
graph_dict = dict(zip([7, 8, 9], [1, 1, 1, 1, 1, 1, 1]))
num_rep = [15, 15, 15, 50, 50, 200, 200]
graphs = []
labels = []
for num, (k, v) in zip(num_rep, graph_dict.items()):
G, label = construct_graph(k, v, sub_size=1)
for graph in range(num):
node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random())))
G_new = nx.relabel_nodes(G, node_mapping)
f, t = random.sample(range(G_new.number_of_nodes() + 1), 2)
G_new.add_edge(f, t)
f, t = random.sample(range(G_new.number_of_nodes() + 1), 2)
G_new.add_edge(f, t)
if G_new.number_of_edges() >= G.number_of_edges() + 1:
if upper == False:
A_new = construct_A3(G_new)
else:
A_new = construct_upperA3(G_new)
graphs.append(A_new)
labels.append(label)
graphs = np.array(graphs)
labels = np.array(labels)
max_dim = max([graph.shape[0] for graph in graphs])
for i in range(graphs.shape[0]):
padded = np.zeros((max_dim, max_dim, max_dim))
padded[:graphs[i].shape[0], :graphs[i].shape[1], :graphs[i].shape[2]] = graphs[i]
graphs[i] = padded
graphs = list(graphs)
for i in range(len(graphs)):
# graphs[i] = np.transpose(graphs[i], [2,0,1]) ## ori: use all features
graphs[i] = np.expand_dims(graphs[i], axis=0)
le = preprocessing.LabelEncoder() # to find clique
le.fit(labels) # to find clique
labels = le.transform(labels) # to find clique
return graphs, labels
def construct_graph(k, v, sub_size):
G = nx.erdos_renyi_graph(k, v, directed=False)
sub_k, sub_v = int(k * sub_size), 0.1
G2 = nx.erdos_renyi_graph(sub_k, sub_v, directed=False)
G3 = nx.disjoint_union(G, G2)
G3.add_edge(G.number_of_nodes() - 1, G.number_of_nodes())
label = nx.clique.graph_clique_number(G3)
return G3, label
def get_cliques_by_length(G, length_clique):
""" Return the list of all cliques in an undirected graph G with length
equal to length_clique. """
cliques = []
for c in nx.enumerate_all_cliques(G):
if len(c) <= length_clique:
if len(c) == length_clique:
cliques.append(c)
else:
return cliques
# return empty list if nothing is found
return cliques
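# Note (added): the early return above relies on networkx's enumerate_all_cliques yielding
# cliques in order of non-decreasing size, so iteration can stop as soon as the first clique
# longer than length_clique appears.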
def construct_A3(G, length_clique=3):
tri = get_cliques_by_length(G, 3)
# print(tri)
nn = G.number_of_nodes()
A3 = np.zeros((nn, nn, nn), dtype='float32')
for i in tri:
perm = permutations(i)
for j in list(perm):
A3[j] = 1
return A3
def construct_upperA3(G, length_clique=3):
tri = get_cliques_by_length(G, 3)
# print(tri)
nn = G.number_of_nodes()
A3 = np.zeros((nn, nn, nn), dtype='float32')
for i in tri:
A3[tuple(i)] = 1
return A3
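# Note (added): construct_A3 writes every permutation of each triangle's node triple into the
# tensor (a fully symmetric encoding), while construct_upperA3 sets a single entry per triangle,
# indexed by the clique's node tuple as returned, i.e. a one-entry-per-triangle variant of the
# same information.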
def motif(shape):
target = nx.Graph()
if shape == 'tree':
target.add_edge(1, 2)
target.add_edge(2, 3)
if shape == 'triangle':
target.add_edge(1, 2)
target.add_edge(2, 3)
target.add_edge(1, 3)
if shape == 'tail_triangle':
target.add_edge(1, 2)
target.add_edge(2, 3)
target.add_edge(1, 3)
target.add_edge(1, 4)
if shape == 'star':
target.add_edge(1, 2)
target.add_edge(1, 3)
target.add_edge(1, 4)
if shape == 'chain':
target.add_edge(1, 2)
target.add_edge(2, 3)
target.add_edge(3, 4)
if shape == 'box':
target.add_edge(1, 2)
target.add_edge(2, 3)
target.add_edge(3, 4)
target.add_edge(1, 4)
if shape == 'semi_clique':
target.add_edge(1, 2)
target.add_edge(2, 3)
target.add_edge(3, 4)
target.add_edge(1, 4)
target.add_edge(1, 3)
if shape == '4_clique':
target.add_edge(1, 2)
target.add_edge(2, 3)
target.add_edge(3, 4)
target.add_edge(1, 4)
target.add_edge(1, 3)
target.add_edge(2, 4)
return target
def high_order(g, target):
nn = g.number_of_nodes()
sub_node = []
if target.number_of_nodes() == 3:
A = np.zeros((nn, nn, nn), dtype='float32')
for sub_nodes in combinations(g.nodes(), len(target.nodes())):
subg = g.subgraph(sub_nodes)
if nx.is_connected(subg) and nx.is_isomorphic(subg, target):
A[tuple(subg.nodes())] = 1
sub_node.append(tuple(subg.nodes()))
if target.number_of_nodes() == 4:
A = np.zeros((nn, nn, nn, nn), dtype='float32')
for sub_nodes in combinations(g.nodes(), len(target.nodes())):
subg = g.subgraph(sub_nodes)
if nx.is_connected(subg) and nx.is_isomorphic(subg, target):
A[tuple(subg.nodes())] = 1
sub_node.append(tuple(subg.nodes()))
label = len(sub_node)
return A, label, sub_node
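# Illustrative sketch (added; not part of the original module): high_order marks, in a
# rank-|V(target)| tensor, every connected induced subgraph of g that is isomorphic to
# `target`, and returns the number of such matches as the label. For example, a 4-cycle
# with one chord contains exactly two triangles.
def _demo_high_order_triangles():
    g = nx.cycle_graph(4)
    g.add_edge(0, 2)  # the chord creates two triangles: (0, 1, 2) and (0, 2, 3)
    _, label, sub_node = high_order(g, motif('triangle'))
    return label, sub_node  # label is expected to be 2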
def high_order2(g, target):
nn = g.number_of_nodes()
sub_node = []
if target.number_of_nodes() == 3:
A = np.zeros((nn, nn, nn), dtype='float32')
for sub_nodes in combinations(g.nodes(), len(target.nodes())):
subg = g.subgraph(sub_nodes)
if nx.is_connected(subg) and nx.is_isomorphic(subg, target):
center_node = list(set(list(subg.edges)[0]).intersection(set(list(subg.edges)[1])))
edge_nodes = list(set(tuple(subg.nodes())).difference(set((center_node))))
A[center_node[0], edge_nodes[0], edge_nodes[1]] = 1
A[center_node[0], edge_nodes[1], edge_nodes[0]] = 1
A[edge_nodes[0], center_node[0], edge_nodes[1]] = 1
A[edge_nodes[1], center_node[0], edge_nodes[0]] = 1
sub_node.append(tuple(subg.nodes()))
if target.number_of_nodes() == 4:
A = np.zeros((nn, nn, nn, nn), dtype='float32')
for sub_nodes in combinations(g.nodes(), len(target.nodes())):
subg = g.subgraph(sub_nodes)
if nx.is_connected(subg) and nx.is_isomorphic(subg, target):
A[tuple(subg.nodes())] = 1
sub_node.append(tuple(subg.nodes()))
label = len(sub_node)
return A, label, sub_node
def high_order3(g, target):
nn = g.number_of_nodes()
sub_node = []
if target.number_of_nodes() == 3:
A1, A2 = np.zeros((nn, nn, nn), dtype='float32'), np.zeros((nn, nn, nn), dtype='float32')
for sub_nodes in combinations(g.nodes(), len(target.nodes())):
subg = g.subgraph(sub_nodes)
if nx.is_connected(subg) and nx.is_isomorphic(subg, target):
center_node = list(set(list(subg.edges)[0]).intersection(set(list(subg.edges)[1])))
edge_nodes = list(set(tuple(subg.nodes())).difference(set((center_node))))
A1[center_node[0], edge_nodes[0], edge_nodes[1]] = 1
A1[center_node[0], edge_nodes[1], edge_nodes[0]] = 1
A2[edge_nodes[0], center_node[0], edge_nodes[1]] = 2
A2[edge_nodes[1], center_node[0], edge_nodes[0]] = 2
sub_node.append(tuple(subg.nodes()))
if target.number_of_nodes() == 4:
A = np.zeros((nn, nn, nn, nn), dtype='float32')
for sub_nodes in combinations(g.nodes(), len(target.nodes())):
subg = g.subgraph(sub_nodes)
if nx.is_connected(subg) and nx.is_isomorphic(subg, target):
A[tuple(subg.nodes())] = 1
sub_node.append(tuple(subg.nodes()))
label = len(sub_node)
return A1, A2, label, sub_node
def multihead(ds_name, target_shape):
graphs, graphs3d, labels = [], [], []
if ds_name == 'syn':
target = motif(target_shape)
# graph_dict = dict(zip([5, 6, 9, 12, 15, 16, 25], [0.7, 0.7, 0.6, 0.8, 0.8, 0.8, 0.7]))
# num_rep = [100, 100, 100, 200, 200, 200, 200]
graph_dict = dict(zip([8, 9, 9, 10, 10, 11, 11, 12, 13], [0.3, 0.3, 0.3, 0.3, 0.4, 0.3, 0.4, 0.2, 0.2]))
num_rep = [50, 50, 50, 50, 100, 100, 100, 100, 100, 100]
for num, (k, v) in zip(num_rep, graph_dict.items()):
for s in range(num):
G = nx.erdos_renyi_graph(k, v, seed=s, directed=False)
if nx.is_connected(G):
graph3d, label, _ = high_order(G, target)
# label = nx.clique.graph_clique_number(G)
labels.append(label)
graphs3d.append(graph3d)
adj = nx.linalg.graphmatrix.adjacency_matrix(G).toarray()
graphs.append(adj)
graphs = np.array(graphs)
graphs3d = np.array(graphs3d)
for i in range(graphs.shape[0]):
graphs[i] = np.expand_dims(graphs[i], axis=0)
graphs3d[i] = np.expand_dims(graphs3d[i], axis=0)
# le = preprocessing.LabelEncoder() # to find clique
# le.fit(labels) # to find clique
# labels = le.transform(labels) # to find clique
else:
target = motif(target_shape)
directory = BASE_DIR + "/data/benchmark_graphs/{0}/{0}.txt".format(ds_name)
with open(directory, "r") as data:
num_graphs = int(data.readline().rstrip().split(" ")[0])
for i in range(num_graphs):
graph_meta = data.readline().rstrip().split(" ")
num_vertex = int(graph_meta[0])
curr_graph = np.zeros(shape=(num_vertex, num_vertex, NUM_LABELS[ds_name] + 1), dtype=np.float32)
labels.append(int(graph_meta[1])) # ori
for j in range(num_vertex):
vertex = data.readline().rstrip().split(" ")
if NUM_LABELS[ds_name] != 0:
curr_graph[j, j, int(vertex[0]) + 1] = 1.
for k in range(2, len(vertex)):
curr_graph[j, int(vertex[k]), 0] = 1.
# curr_graph = noramlize_graph(curr_graph)
graphs.append(curr_graph)
graphs = np.array(graphs)
labels = np.array(labels)
# dim = [graph.shape[0] for graph in graphs]
# sort = (sorted([(x, i) for (i, x) in enumerate(dim)], reverse=True)[:100])
# graphs = np.delete(graphs, ([sort[i][1] for i in range(len(sort))]), axis=0)
# labels = np.delete(labels, ([sort[i][1] for i in range(len(sort))]), axis=0)
for i in range(graphs.shape[0]):
graphs[i] = np.transpose(graphs[i], [2, 0, 1]) # use only A
G = nx.from_numpy_array(graphs[i][0])
graph3d, _, _ = high_order(G, target)
graphs3d.append(graph3d)
adj_powers = A_power(graphs[i][0])
graphs[i] = np.concatenate((graphs[i], adj_powers[1:]), axis=0)
graphs3d = np.array(graphs3d)
for i in range(graphs3d.shape[0]):
graphs3d[i] = np.expand_dims(graphs3d[i], axis=0)
return graphs, np.array(labels), graphs3d
def gnn3(ds_name, target_shape):
graphs, graphs3d, labels, adj_powers =[], [], [], []
if ds_name == 'syn':
target = motif(target_shape)
# graph_dict = dict(zip([5, 6, 9, 12, 15, 16, 25], [0.7, 0.7, 0.6, 0.8, 0.8, 0.8, 0.7]))
# num_rep = [100, 100, 100, 200, 200, 200, 200]
graph_dict = dict(zip([8, 9, 9, 10, 10, 11, 11, 12, 13], [0.3, 0.3, 0.3, 0.3, 0.4, 0.3, 0.4, 0.2, 0.2]))
num_rep = [50, 50, 50, 50, 100, 100, 100, 100, 100, 100]
for num, (k, v) in zip(num_rep, graph_dict.items()):
for s in range(num):
G = nx.erdos_renyi_graph(k, v, seed=s, directed=False)
if nx.is_connected(G):
graph3d, label, _ = high_order(G, target)
# label = nx.clique.graph_clique_number(G)
labels.append(label)
graphs3d.append(graph3d)
adj = nx.linalg.graphmatrix.adjacency_matrix(G).toarray()
graphs.append(adj)
graphs = np.array(graphs)
graphs3d = np.array(graphs3d)
for i in range(graphs.shape[0]):
graphs[i] = np.expand_dims(graphs[i], axis=0)
graphs3d[i] = np.expand_dims(graphs3d[i], axis=0)
# le = preprocessing.LabelEncoder() # to find clique
| |
)
)
assert 0 < fNbytes <= fKeylen + fObjlen
assert fCycle > 0
if not is_directory_key:
assert fSeekKey == location, "fSeekKey {0} location {1}".format(
fSeekKey, location
)
fSeekKey = None
classname, position = String.deserialize(
raw_bytes[position - location :], position
)
name, position = String.deserialize(raw_bytes[position - location :], position)
title, position = String.deserialize(raw_bytes[position - location :], position)
assert fKeylen == position - location
return Key(
location,
fObjlen, # uncompressed_bytes
fNbytes - fKeylen, # compressed_bytes
classname,
name,
title,
fCycle, # cycle
fSeekPdir, # parent_location
fSeekKey, # may be location
created_on=uproot._util.code_to_datetime(fDatime),
big=big,
)
_free_format_small = struct.Struct(">HII")
_free_format_big = struct.Struct(">HQQ")
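# Layout note (added): ">HII" is a big-endian uint16 version followed by two uint32 offsets
# (fFirst, fLast); ">HQQ" is the same record with uint64 offsets, used once positions reach
# uproot.const.kStartBigFile.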
class FreeSegmentsData(CascadeLeaf):
"""
A :doc:`uproot.writing._cascade.CascadeLeaf` for the FreeSegments record
"""
class_version = 1
def __init__(self, location, slices, end):
super(FreeSegmentsData, self).__init__(location, None)
self._slices = slices
self._end = end
def __repr__(self):
return "{0}({1}, {2}, {3})".format(
type(self).__name__,
self._location,
self._slices,
self._end,
)
@property
def slices(self):
return self._slices
@slices.setter
def slices(self, value):
if self._slices != value:
self._file_dirty = True
self._slices = value
@property
def end(self):
return self._end
@end.setter
def end(self, value):
if self._end != value:
self._file_dirty = True
self._end = value
@property
def allocation(self):
if self._allocation is None:
self._allocation = self.num_bytes
return self._allocation
@allocation.setter
def allocation(self, value):
if self._allocation != value:
self._file_dirty = True
self._allocation = value
@property
def num_bytes(self):
total = 0
for _, stop in self._slices:
if stop - 1 >= uproot.const.kStartBigFile:
total += _free_format_big.size
else:
total += _free_format_small.size
if self._end is None:
if total + _free_format_small.size >= uproot.const.kStartBigFile:
total += _free_format_big.size
else:
total += _free_format_small.size
elif self._end >= uproot.const.kStartBigFile:
total += _free_format_big.size
else:
total += _free_format_small.size
return total
def serialize(self):
pairs = []
for start, stop in self._slices:
if stop - 1 < uproot.const.kStartBigFile:
pairs.append(
_free_format_small.pack(self.class_version, start, stop - 1)
)
else:
pairs.append(
_free_format_big.pack(self.class_version + 1000, start, stop - 1)
)
if self._end < uproot.const.kStartBigFile:
pairs.append(
_free_format_small.pack(
self.class_version, self._end, uproot.const.kStartBigFile
)
)
else:
infinity = uproot.const.kStartBigFile
while not self._end < infinity:
infinity *= 2
pairs.append(
_free_format_big.pack(self.class_version + 1000, self._end, infinity)
)
return b"".join(pairs)
@classmethod
def deserialize(cls, raw_bytes, location, num_bytes, num_slices, in_path):
slices = []
position = 0
for _ in range(num_slices + 1):
version, fFirst, fLast = _free_format_small.unpack(
raw_bytes[position : position + _free_format_small.size]
)
if version >= 1000:
version, fFirst, fLast = _free_format_big.unpack(
raw_bytes[position : position + _free_format_big.size]
)
version -= 1000
position += _free_format_big.size
else:
position += _free_format_small.size
if version != cls.class_version:
raise ValueError(
"Uproot can't read TFree version {0} for writing, only version {1}{2}".format(
version,
cls.class_version,
in_path,
)
)
slices.append((fFirst, fLast + 1))
end = slices.pop()[0]
assert position == num_bytes
out = FreeSegmentsData(location, tuple(slices), end)
out._allocation = num_bytes
return out
class FreeSegments(CascadeNode):
"""
A :doc:`uproot.writing._cascade.CascadeNode` for writing a ROOT FreeSegments record.
The FreeSegments describes which parts of the ROOT file are unused by valid objects;
they may have been invalidated by moving/rewriting an object in a new space. Every
request for a new allocation or release of an old one goes through this object: the
disk I/O equivalent of malloc and free.
Since allocation and release usually change the size of the FreeSegments record,
forcing it to be rewritten and possibly requiring another allocation, our ``allocate``
method attempts to keep the FreeSegments record at the end of the file, where it can
grow freely without bumping into an object after it (which might itself invoke
another allocation!). The exceptions to this rule are when we are updating an
existing ROOT file, the FreeSegments record is not at the end of that file, and
updating it does not require more space.
This is different from ROOT's allocation algorithm, but compatible with it because
Uproot and ROOT can both accept a FreeSegments record at any position in a file.
See `ROOT TFree specification <https://github.com/root-project/root/blob/master/io/doc/TFile/freesegments.md>`__.
"""
def __init__(self, key, data, fileheader):
super(FreeSegments, self).__init__(key, data, fileheader)
self._key = key
self._data = data
self._fileheader = fileheader
def __repr__(self):
return "{0}({1}, {2}, {3})".format(
type(self).__name__,
self._key,
self._data,
self._fileheader,
)
@property
def key(self):
return self._key
@property
def data(self):
return self._data
@property
def fileheader(self):
return self._fileheader
@property
def at_end(self):
end_of_record = self._key.location + self._key.num_bytes + self._data.allocation
assert end_of_record <= self._data.end
return end_of_record == self._data.end
def allocate(self, num_bytes, dry_run=False):
slices = self._data.slices
for i, (start, stop) in enumerate(slices):
if stop - start == num_bytes:
# This will reduce the num_bytes of the FreeSegments record,
# but the allocation can stay the same size.
if not dry_run:
self._data.slices = tuple(
slices[j] for j in range(len(slices)) if i != j
)
return start
elif stop - start > num_bytes:
# This will not change the num_bytes of the FreeSegments record.
if not dry_run:
self._data.slices = tuple(
slices[j] if i != j else (start + num_bytes, stop)
for j in range(len(slices))
)
return start
if self.at_end:
# The new object can take FreeSegments's spot; FreeSegments will
# move to stay at the end.
out = self._key.location
if not dry_run:
self._key.location = self._key.location + num_bytes
self._data.end = (
self._key.location + self._key.allocation + self._data.allocation
)
return out
else:
# FreeSegments is not changing size and not at the end; it can
# stay where it is.
out = self._data.end
if not dry_run:
self._data.end = self._data.end + num_bytes
return out
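    # Note (added): allocate tries, in order, to reuse a free slice of exactly num_bytes,
    # to carve num_bytes off the front of a larger free slice, and otherwise to place the
    # new object at the end of the file (moving this FreeSegments record further out if it
    # currently sits at the end).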
@staticmethod
def _another_slice(slices, original_start, original_stop):
for start, stop in slices:
if start <= original_start < stop or start < original_stop <= stop:
raise RuntimeError(
"segment of data to release overlaps one already marked as free: "
"releasing [{0}, {1}) but [{2}, {3}) is free".format(
original_start, original_stop, start, stop
)
)
for i in range(len(slices) - 1):
if slices[i][1] == original_start and original_stop == slices[i + 1][0]:
# These two slices need to be merged, including the newly released interval.
return (
slices[:i] + ((slices[i][0], slices[i + 1][1]),) + slices[i + 2 :]
)
for i, (start, stop) in enumerate(slices):
if original_start == stop:
# This slice needs to grow to the right.
return tuple(
slices[j] if i != j else (start, original_stop)
for j in range(len(slices))
)
elif original_stop == start:
# This slice needs to grow to the left.
return tuple(
slices[j] if i != j else (original_start, stop)
for j in range(len(slices))
)
# The FreeSegments record will have to grow.
return tuple(sorted(slices + ((original_start, original_stop),)))
@staticmethod
def _slices_bytes(slices):
total = 0
for _, stop in slices:
if stop - 1 >= uproot.const.kStartBigFile:
total += _free_format_big.size
else:
total += _free_format_small.size
return total
def release(self, start, stop):
new_slices = self._another_slice(self._data.slices, start, stop)
if self.at_end:
self._data.slices = new_slices
self._data.allocation = None
self._key.uncompressed_bytes = self._data.allocation
self._key.compressed_bytes = self._key.uncompressed_bytes
self._data.end = (
self._key.location + self._key.allocation + self._key.uncompressed_bytes
)
elif self._slices_bytes(new_slices) <= self._slices_bytes(self._data.slices):
# Wherever the FreeSegments record is, it's not getting bigger.
# It can stay there.
self._data.slices = new_slices
self._data.allocation = None
self._key.uncompressed_bytes = self._data.allocation
self._key.compressed_bytes = self._key.uncompressed_bytes
else:
# The FreeSegments record needs to move, opening up yet another slice.
# Move it to the end (regardless of whether there's now enough room
# to put it elsewhere; we like keeping it at the end).
self._data.slices = self._another_slice(
new_slices,
self._key.location,
self._key.location + self._key.allocation + self._data.allocation,
)
self._data.allocation = None
self._key.uncompressed_bytes = self._data.allocation
self._key.compressed_bytes = self._key.uncompressed_bytes
self._key.location = self._data.end
self._data.location = self._key.location + self._key.allocation
self._data.end = self._data.location + self._key.uncompressed_bytes
def write(self, sink):
self._key.uncompressed_bytes = self._data.allocation
self._key.compressed_bytes = self._key.uncompressed_bytes
self._data.location = self._key.location + self._key.allocation
self._fileheader.free_location = self._key.location
self._fileheader.free_num_bytes = self._key.allocation + self._data.allocation
self._fileheader.free_num_slices = len(self._data.slices)
self._fileheader.end = self._data.end
super(FreeSegments, self).write(sink)
_tlistheader_format = struct.Struct(">IHHIIBI")
class TListHeader(CascadeLeaf):
"""
A :doc:`uproot.writing._cascade.CascadeLeaf` for writing a ROOT TList header.
A TList contains the TStreamerInfo records, so it must be writable in the cascade.
"""
class_version = 5
def __init__(self, location, data_bytes, num_entries):
super(TListHeader, self).__init__(location, _tlistheader_format.size)
self._data_bytes = data_bytes
self._num_entries = num_entries
def __repr__(self):
return "{0}({1}, {2}, {3})".format(
type(self).__name__, self._location, self._data_bytes, self._num_entries
)
@property
def data_bytes(self):
return self._data_bytes
@data_bytes.setter
def data_bytes(self, value):
if self._data_bytes != value:
self._file_dirty = True
self._data_bytes = value
@property
def num_entries(self):
return self._num_entries
@num_entries.setter
def num_entries(self, value):
if self._num_entries != value:
self._file_dirty = True
self._num_entries = value
@property
def | |
#!/usr/bin/python
"""Bitfinex Rest API V2 implementation"""
# pylint: disable=R0904
from __future__ import absolute_import
import json
from json.decoder import JSONDecodeError
import hmac
import hashlib
import requests
from bitfinex import utils
PROTOCOL = "https"
HOST = "api.bitfinex.com"
VERSION = "v2"
# HTTP request timeout in seconds
TIMEOUT = 5.0
class BitfinexException(Exception):
"""
Exception handler
"""
pass
class Client:
"""Client for the bitfinex.com API REST V2.
Link for official bitfinex documentation :
`Bitfinex rest2 docs <https://bitfinex.readme.io/v2/docs>`_
`Bitfinex rest2 reference <https://bitfinex.readme.io/v2/reference>`_
Parameters
----------
key : str
Bitfinex api key
secret : str
Bitfinex api secret
nonce_multiplier : Optional float
Multiply nonce by this number
Examples
--------
::
bfx_client = Client(key,secret)
bfx_client = Client(key,secret,2.0)
"""
def __init__(self, key=None, secret=None, nonce_multiplier=1.0):
"""
Object initialisation takes two main arguments, key and secret, and an optional
nonce_multiplier
"""
assert isinstance(nonce_multiplier, float), "nonce_multiplier must be decimal"
self.base_url = "%s://%s/" % (PROTOCOL, HOST)
self.key = key
self.secret = secret
self.nonce_multiplier = nonce_multiplier
def _nonce(self):
"""Returns a nonce used in authentication.
Nonce must be an increasing number, if the API key has been used
earlier or other frameworks that have used higher numbers you might
need to increase the nonce_multiplier."""
return str(utils.get_nonce(self.nonce_multiplier))
def _headers(self, path, nonce, body):
"""
create signed headers
"""
signature = "/api/{}{}{}".format(path, nonce, body)
hmc = hmac.new(self.secret.encode('utf8'), signature.encode('utf8'), hashlib.sha384)
signature = hmc.hexdigest()
return {
"bfx-nonce": nonce,
"bfx-apikey": self.key,
"bfx-signature": signature,
"content-type": "application/json"
}
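    # Note (added): the signed payload is the literal string "/api/" + path + nonce + body;
    # its hex HMAC-SHA384 digest, keyed with the API secret, is sent in the "bfx-signature"
    # header alongside the nonce ("bfx-nonce") and the API key ("bfx-apikey").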
def _post(self, path, payload, verify=False):
"""
Send post request to bitfinex
"""
nonce = self._nonce()
headers = self._headers(path, nonce, payload)
response = requests.post(self.base_url + path, headers=headers, data=payload, verify=verify)
if response.status_code == 200:
return response.json()
else:
try:
content = response.json()
except JSONDecodeError:
content = response.text
raise BitfinexException(response.status_code, response.reason, content)
def _get(self, path, **params):
"""
Send get request to bitfinex
"""
url = self.base_url + path
response = requests.get(url, timeout=TIMEOUT, params=params)
if response.status_code == 200:
return response.json()
else:
try:
content = response.json()
except JSONDecodeError:
content = response.text
raise BitfinexException(response.status_code, response.reason, content)
# REST PUBLIC ENDPOINTS
def platform_status(self):
"""
.. _platform_status:
`Bitfinex platform_status reference
<https://bitfinex.readme.io/v2/reference#rest-public-platform-status>`_
Get the current status of the platform. Maintenance periods last for just a few minutes and
might be necessary from time to time during upgrades of core components of the
infrastructure. Even if rare, it is important to have a way to notify users. For real-time
notifications we suggest using websockets and listening to events 20060/20061.
Returns
-------
int
- 1 = operative
- 0 = maintenance
Example
-------
::
bfx_client.platform_status()
"""
path = "v2/platform/status"
response = self._get(path)
return response
def tickers(self, symbol_list):
"""`Bitfinex tickers reference
<https://bitfinex.readme.io/v2/reference#rest-public-tickers>`_
The ticker is a high level overview of the state of the market. It shows you the current
best bid and ask, as well as the last trade price. It also includes information such as daily
volume and how much the price has moved over the last day.
Parameters
----------
symbol_list : list
The symbols you want information about as a comma separated list,
or ALL for every symbol.
Returns
-------
list
::
[
# on trading pairs (ex. tBTCUSD)
[
SYMBOL,
BID,
BID_SIZE,
ASK,
ASK_SIZE,
DAILY_CHANGE,
DAILY_CHANGE_PERC,
LAST_PRICE,
VOLUME,
HIGH,
LOW
],
# on funding currencies (ex. fUSD)
[
SYMBOL,
FRR,
BID,
BID_SIZE,
BID_PERIOD,
ASK,
ASK_SIZE,
ASK_PERIOD,
DAILY_CHANGE,
DAILY_CHANGE_PERC,
LAST_PRICE,
VOLUME,
HIGH,
LOW
],
...
]
Note
----
================= ===== ================================================================
Field Type Description
================= ===== ================================================================
FRR float Flash Return Rate - average of all fixed rate funding over the
last hour
BID float Price of last highest bid
BID_PERIOD int Bid period covered in days
BID_SIZE float Sum of the 25 highest bid sizes
ASK float Price of last lowest ask
ASK_PERIOD int Ask period covered in days
ASK_SIZE float Sum of the 25 lowest ask sizes
DAILY_CHANGE float Amount that the last price has changed since yesterday
DAILY_CHANGE_PERC float Amount that the price has changed expressed in percentage terms
LAST_PRICE float Price of the last trade
VOLUME float Daily volume
HIGH float Daily high
LOW float Daily low
================= ===== ================================================================
Examples
--------
::
bfx_client.tickers(['tIOTUSD', 'fIOT'])
bfx_client.tickers(['tBTCUSD'])
bfx_client.tickers(['ALL'])
"""
assert isinstance(symbol_list, list), "symbol_list must be of type list"
assert symbol_list, "symbol_list must have at least one symbol"
path = "v2/tickers?symbols={}".format(",".join(symbol_list))
response = self._get(path)
return response
def ticker(self, symbol):
"""`Bitfinex ticker reference
<https://bitfinex.readme.io/v2/reference#rest-public-ticker>`_
The ticker is a high level overview of the state of the market. It shows you the current best
bid and ask, as well as the last trade price. It also includes information such as daily
volume and how much the price has moved over the last day.
Parameters
----------
symbol : str
The symbol you want information about.
You can find the list of valid symbols by calling the `symbols <restv1.html#symbols>`_
method
Returns
-------
list
::
# on trading pairs (ex. tBTCUSD)
[
BID,
BID_SIZE,
ASK,
ASK_SIZE,
DAILY_CHANGE,
DAILY_CHANGE_PERC,
LAST_PRICE,
VOLUME,
HIGH,
LOW
]
# on funding currencies (ex. fUSD)
[
FRR,
BID,
BID_SIZE,
BID_PERIOD,
ASK,
ASK_SIZE,
ASK_PERIOD,
DAILY_CHANGE,
DAILY_CHANGE_PERC,
LAST_PRICE,
VOLUME,
HIGH,
LOW
]
Examples
--------
::
bfx_client.ticker('tIOTUSD')
bfx_client.ticker('fIOT')
bfx_client.ticker('tBTCUSD')
"""
path = "v2/ticker/{}".format(symbol)
response = self._get(path)
return response
def trades(self, symbol):
"""`Bitfinex trades reference
<https://bitfinex.readme.io/v2/reference#rest-public-trades>`_
Trades endpoint includes all the pertinent details of the trade, such as price,
size and time.
Parameters
----------
symbol : str
The symbol you want information about.
You can find the list of valid symbols by calling the `symbols <restv1.html#symbols>`_
method
Returns
-------
list
::
# on trading pairs (ex. tBTCUSD)
[
[
ID,
MTS,
AMOUNT,
PRICE
]
]
# on funding currencies (ex. fUSD)
[
[
ID,
MTS,
AMOUNT,
RATE,
PERIOD
]
]
Examples
--------
::
bfx_client.trades('tIOTUSD')
bfx_client.trades('fIOT')
bfx_client.trades('tBTCUSD')
"""
path = "v2/trades/{}/hist".format(symbol)
response = self._get(path)
return response
def books(self, symbol, precision="P0"):
"""`Bitfinex books reference
<https://bitfinex.readme.io/v2/reference#rest-public-books>`_
The Order Books channel allow you to keep track of the state of the Bitfinex order book.
It is provided on a price aggregated basis, with customizable precision.
Parameters
----------
symbol : str
The `symbol <restv1.html#symbols>`_ you want information about.
precision : Optional str
Level of price aggregation (P0, P1, P2, P3, R0).
R0 means "gets the raw orderbook".
Returns
-------
list
::
# on trading pairs (ex. tBTCUSD)
[
[
PRICE,
COUNT,
AMOUNT
]
]
# on funding currencies (ex. fUSD)
[
[
RATE,
PERIOD,
COUNT,
AMOUNT
]
]
Examples
--------
::
bfx_client.books('tIOTUSD')
bfx_client.books('fIOT')
bfx_client.books('tBTCUSD')
"""
path = "v2/book/{}/{}".format(symbol, precision)
response = self._get(path)
return response
def stats(self, **kwargs):
"""`Bitfinex stats reference
<https://bitfinex.readme.io/v2/reference#rest-public-stats>`_
Various statistics about the requested pair.
Parameters
----------
Key : str
Allowed values: "funding.size", "credits.size", "credits.size.sym",
"pos.size"
Size : str
Available values: '1m'
Symbol : str
The symbol you want information about.
Symbol2 : str
The symbol you want information about.
Side : str
Available values: "long", "short"
Section : str
Available values: "last", "hist"
sort : str
if set to 1, results are sorted from oldest to newest (old > new)
Returns
-------
list
::
# response with Section = "last"
[
MTS,
VALUE
]
# response with Section = "hist"
[
[ MTS, VALUE ],
...
]
Examples
--------
::
PARAMS = {
'key': 'funding.size',
'size': '1m',
'symbol': 'fUSD',
'section': 'hist',
'sort': '0'
}
bfx_client.stats(**PARAMS) # statistics
PARAMS = {
'key': 'credits.size',
'size': '1m',
'symbol': 'fUSD',
'section': 'hist',
'sort': '0'
}
bfx_client.stats(**PARAMS) # statistics
PARAMS = {
'key': 'pos.size',
'size': '1m',
'symbol': 'tIOTUSD',
'side': 'short',
'section': 'hist',
'sort': '0'
}
bfx_client.stats(**PARAMS) # statistics
PARAMS = {
'key': 'credits.size.sym',
'size': '1m',
'symbol': 'fUSD',
'symbol2': 'tBTCUSD',
'section': 'hist',
'sort': '0'
}
"""
key_values = ['funding.size', 'credits.size', 'credits.size.sym', 'pos.size']
if kwargs['key'] not in key_values:
key_values = " ".join(key_values)
msg = "Key must have one of the following values : {}".format(key_values)
raise ValueError(msg)
common_stats_url = "v2/stats1/{}:{}:{}".format(
kwargs['key'],
kwargs['size'],
kwargs['symbol']
)
if kwargs['key'] == 'pos.size':
custom_stats_url = ":{}/{}?sort={}".format(
kwargs['side'],
kwargs['section'],
str(kwargs['sort'])
)
if kwargs['key'] in ['funding.size', 'credits.size']:
custom_stats_url = "/{}?sort={}".format(
kwargs['section'],
str(kwargs['sort'])
)
if kwargs['key'] == 'credits.size.sym':
custom_stats_url = ":{}/{}?sort={}".format(
kwargs['symbol2'],
kwargs['section'],
str(kwargs['sort'])
)
path = "".join([common_stats_url, custom_stats_url])
response = self._get(path)
return response
def candles(self, *args, **kwargs):
"""`Bitfinex candles reference
<https://bitfinex.readme.io/v2/reference#rest-public-candles>`_
Provides a way to | |
<reponame>julio-navarro-lara/thesis_scripts<filename>experiment_9/morwilog_v10/phase2_execution.py
#Copyright 2018 <NAME>
#Built at the University of Strasbourg (France). CSTB team @ ICube laboratory
#12/06/2018
import random
import math
from library import *
def roulette_choice(choices):
max = sum(choices.values())
pick = random.uniform(0, max)
current = 0
for key, value in choices.items():
current += value
if current > pick:
return key
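# Illustrative sketch (added; not part of the original script): roulette_choice implements
# fitness-proportionate (roulette-wheel) selection, returning a key with probability
# value / sum(values).
def _demo_roulette_choice():
    # With these weights, "b" should be drawn roughly three times out of four.
    counts = {"a": 0, "b": 0}
    for _ in range(1000):
        counts[roulette_choice({"a": 1.0, "b": 3.0})] += 1
    return counts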
def choose_pheromones_based(list_selected_nodes):
choices = {i: list_selected_nodes[i]["ph"] for i in range(0,len(list_selected_nodes))}
chosen_pos = roulette_choice(choices)
return chosen_pos
def iterate_find_node(e_clas,n_event,e_star,previous_events,initial_time,p_t_max,pos_list):
#Result: event
previous_time = e_clas.iloc[n_event]["time"]
for i in range(n_event+1,len(e_clas)):
event = e_clas.iloc[i]
#During time Tmax find the matches
if float(event["time"])-initial_time >= p_t_max:
return [None,None,i]
if is_a_match(event,e_star,previous_events[0]) and (i not in pos_list):
return [event,i,i]
#We need to return the position in the list together with the event
return [None,None,len(e_clas)-1]
def iterate_find_node_counters(e_clas,n_event,e_star,previous_events,initial_time,p_t_max,counter,pos_list):
#Result: [events]
result = []
found = False
max_pos = 0
while counter > 0:
position = 0
for i in range(n_event+1,len(e_clas)):
position = i
event = e_clas.iloc[i]
#During time Tmax find the matches
if float(event["time"])-initial_time >= p_t_max:
if i > max_pos:
max_pos = i
return [None,i]
if is_a_match(event,e_star,previous_events[0]) and (i not in pos_list):
result.append([event,i])
initial_time = event["time"]
n_event = i
found = True
break
if not found:
if position > max_pos:
max_pos = position
return [None,position]
counter -= 1
found = False
return result
def find_nodes(e_clas,n_event,children_list,previous_events,p_t_max,aasg):
children_found = []
event_list = []
pos_list = []
initial_time = float(e_clas.iloc[n_event]["time"])
final_pos = 0 # This variable serves to return the furthest position in the logs that we have explored
for child in children_list:
child_id = child["id"]
node = get_node(aasg,child_id)
e_star = node["e_star"]
if "counter" in node and node["counter"] > 1:
#print "There are counters!! "+str(node["counter"])
counter = node["counter"]
eventsandpos = iterate_find_node_counters(e_clas,n_event,e_star,previous_events,initial_time,p_t_max,counter,pos_list)
#We do not return all the matched events, just the first one
if eventsandpos[0] is not None:
event_list.append(eventsandpos[0][0])
pos_list.append(eventsandpos[0][1])
children_found.append(child)
if eventsandpos[-1][1] > final_pos:
final_pos = eventsandpos[-1][1]
else:
if eventsandpos[1] > final_pos:
final_pos = eventsandpos[1]
else:
[event,pos,max_pos] = iterate_find_node(e_clas,n_event,e_star,previous_events,initial_time,p_t_max,pos_list)
if max_pos > final_pos:
final_pos = max_pos
if event is not None:
event_list.append(event)
pos_list.append(pos)
children_found.append(child)
return {"children_found":children_found,"event_list":event_list,"pos_list":pos_list,"final_pos":final_pos}
def is_eql_match(event,att,r):
if event[att] != r:
return False
return True
def is_neq_match(event,att,r):
if event[att] == r:
return False
return True
def from_ip_to_binary_string(ip):
output = ''.join([bin(int(x)+256)[3:] for x in ip.split('.')])
return output
def calculate_common_bits(ip1,ip2):
result = 0
len1 = len(ip1)
len2 = len(ip2)
if len1 == len2:
for i in range(0,len1):
if ip1[i] == ip2[i]:
result += 1
else:
break
return result
def is_pfx_match(event,att,r,threshold):
bitip1 = from_ip_to_binary_string(event[att])
bitip2 = from_ip_to_binary_string(r)
l = calculate_common_bits(bitip1,bitip2)
sim = float(l)/float(len(bitip1))
if sim > threshold:
return True
else:
return False
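# Illustrative sketch (added; not part of the original script): the PFX match compares the
# shared binary prefix of two IPv4 addresses. 192.168.1.10 and 192.168.1.200 agree on their
# first 24 bits, giving a similarity of 24/32 = 0.75.
def _demo_pfx_similarity():
    bitip1 = from_ip_to_binary_string("192.168.1.10")
    bitip2 = from_ip_to_binary_string("192.168.1.200")
    return float(calculate_common_bits(bitip1, bitip2)) / float(len(bitip1))  # 0.75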
def is_txt_match(event,att,r,threshold):
a = set(event[att].split())
b = set(r.split())
c = a.intersection(b)
jac_index = float(len(c)) / (len(a) + len(b) - len(c))
if jac_index > threshold:
return True
else:
return False
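# Illustrative sketch (added; not part of the original script): the TXT match scores two
# strings with the Jaccard index of their word sets. "failed login from admin" versus
# "failed login from root" share 3 of 5 distinct words, giving 3/5 = 0.6.
def _demo_txt_similarity():
    a = set("failed login from admin".split())
    b = set("failed login from root".split())
    c = a.intersection(b)
    return float(len(c)) / (len(a) + len(b) - len(c))  # 0.6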
def is_set_match(event,att,R):
if event[att] not in R:
return False
return True
def is_sim_eql_previous(event,attP,attC,previous_event):
if event[attC] != previous_event[attP]:
return False
return True
def is_sim_com_previous(event,attP,attC,previous_event):
return not set(event[attC]).isdisjoint(previous_event[attP])
def is_sim_pfx_previous(event,attP,attC,previous_event,threshold):
bitip1 = from_ip_to_binary_string(event[attC])
bitip2 = from_ip_to_binary_string(previous_event[attP])
l = calculate_common_bits(bitip1,bitip2)
sim = float(l)/float(len(bitip1))
if sim > threshold:
return True
else:
return False
def is_sim_txt_previous(event,attP,attC,previous_event,threshold):
a = set(event[attC].split())
b = set(previous_event[attP].split())
c = a.intersection(b)
jac_index = float(len(c)) / (len(a) + len(b) - len(c))
if jac_index > threshold:
return True
else:
return False
def is_sim_neq_previous(event,attP,attC,previous_event):
if event[attC] == previous_event[attP]:
return False
return True
def is_a_match(event,e_star,previous_event):
#This function determines if there is a match between an event and an abstract event
for compare_element in e_star:
function = compare_element["function"]
if function == "EQL":
if not is_eql_match(event,compare_element["att"],compare_element["r"]):
return False
elif function == "NEQ":
if not is_neq_match(event,compare_element["att"],compare_element["r"]):
return False
elif function == "TXT":
if not is_txt_match(event,compare_element["att"],compare_element["r"],compare_element["threshold"]):
return False
elif function == "PFX":
if not is_pfx_match(event,compare_element["att"],compare_element["r"],compare_element["threshold"]):
return False
elif function == "SET":
if not is_set_match(event,compare_element["att"],compare_element["r"]):
return False
elif function == "SIM_EQL":
if not is_sim_eql_previous(event,compare_element["attP"],compare_element["attC"],previous_event):
return False
elif function == "SIM_COM":
if not is_sim_com_previous(event,compare_element["attP"],compare_element["attC"],previous_event):
return False
elif function == "SIM_PFX":
if not is_sim_pfx_previous(event,compare_element["attP"],compare_element["attC"],previous_event,compare_element["threshold"]):
return False
elif function == "SIM_TXT":
if not is_sim_txt_previous(event,compare_element["attP"],compare_element["attC"],previous_event,compare_element["threshold"]):
return False
elif function == "SIM_NEQ":
if not is_sim_neq_previous(event,compare_element["attP"],compare_element["attC"],previous_event):
return False
else:
print "The function "+function+" does not exist in our database"
return False
return True
#It returns a duple, the first is the identifier of the aasg and the second is the root node
def look_for_aasg(event,aasg_set):
for i in range(0,len(aasg_set)):
aasg = aasg_set[i]
root_node = get_root_node(aasg)
if is_a_match(event,root_node["e_star"],[]):
return [i,root_node]
return [None,None]
def get_root_node(aasg):
for node in aasg["nodes"]:
if node["id"] == 0:
return node
return None
def get_node(aasg,node_id):
for node in aasg["nodes"]:
if node["id"] == node_id:
return node
return None
def get_output_arcs(aasg,node_id):
for arc in aasg["arcs"]:
if arc["start"] == node_id:
return arc.copy()
#If no children are found, we return an empty dict
return {}
def get_position_from_id(dict_list,element_id,id_name):
for i in range(0,len(dict_list)):
if dict_list[i][id_name] == element_id:
return i
return -1
def equal_last_generated_morwi(last_generated_morwi,event,aasg_pos,p_t_max):
if aasg_pos == last_generated_morwi["aasg_pos"]:
if event["time"]-last_generated_morwi["time"]<p_t_max:
if (event["type"]==last_generated_morwi["type"]):
if event["ipsrc"]==last_generated_morwi["ipsrc"]:
if (event["psrc"]==last_generated_morwi["psrc"]) or (math.isnan(event["psrc"]) and math.isnan(last_generated_morwi["psrc"])):
return True
return False
def morwi(e_clas,n_event,aasg_set,p_minimum_ph_attack,p_t_max,last_generated_morwi):
event = e_clas.loc[n_event]
[aasg_pos,node] = look_for_aasg(event,aasg_set)
result_morwi = {"sequence":[event],"aasg_pos":aasg_pos,"branch":[],"isresult":False,"pos_seq":[n_event],"matched_nodes":[],"time_matched":[],"pos_matched":[],"pos_choosing":[]}
#We interrupt the execution of the morwi if there has already been a close match
if last_generated_morwi and equal_last_generated_morwi(last_generated_morwi,event,aasg_pos,p_t_max):
return result_morwi
last_pheromone_value = 0
if aasg_pos is not None:
aasg = aasg_set[aasg_pos]
output_arcs = get_output_arcs(aasg,node["id"])
result_morwi["branch"].append(0)
result_morwi["matched_nodes"].append([0])
result_morwi["time_matched"].append([event["time"]])
result_morwi["pos_matched"].append([n_event])
print_log_line(event)
print ""
print ""
print "There is match"
print_log_line(event)
while (output_arcs) and (output_arcs["children"]):
previous_events = list(reversed(result_morwi["sequence"])) #We reverse the list of previous events to make it easier to search
#Children found are nodes as they are defined for the aasg
result_from_search = find_nodes(e_clas,n_event,output_arcs["children"],previous_events,p_t_max,aasg)
pos_list = result_from_search["pos_list"]
children_found = result_from_search["children_found"]
#print children_found
if result_morwi["pos_choosing"] and result_morwi["pos_choosing"][-1] > result_from_search["final_pos"]:
result_morwi["pos_choosing"].append(result_morwi["pos_choosing"][-1])
else:
result_morwi["pos_choosing"].append(result_from_search["final_pos"])
if children_found:
#Random search pheromone-based
event_list = result_from_search["event_list"]
#Other measures for the graphs
found_ids = []
for child in children_found:
found_ids.append(child["id"])
result_morwi["matched_nodes"].append(found_ids)
time_matched = []
for event_a in event_list:
time_matched.append(event_a["time"])
result_morwi["time_matched"].append(time_matched)
result_morwi["pos_matched"].append(pos_list)
######
chosen_pos = choose_pheromones_based(children_found)
last_pheromone_value = children_found[chosen_pos]["ph"]
node_id = children_found[chosen_pos]["id"]
output_arcs = get_output_arcs(aasg,node_id)
n_event = pos_list[chosen_pos]
result_morwi["branch"].append(node_id)
result_morwi["sequence"].append(event_list[chosen_pos])
result_morwi["pos_seq"].append(pos_list[chosen_pos])
print_log_line(event_list[chosen_pos])
print result_morwi["pos_seq"]
else:
return result_morwi
if last_pheromone_value >= p_minimum_ph_attack:
result_morwi["isresult"] = True
#We have to test minimum value of pheromone attacks
return result_morwi
def pheromone_evaporation_all(aasg_set,aasg_pos,p_evap_rate,p_minimum_ph):
#In this method we evaporate all the pheromones
output_aasg_set = list(aasg_set)
list_arcs = aasg_set[aasg_pos]["arcs"]
for arc_pos in range(0,len(list_arcs)):
if "children" in list_arcs[arc_pos]:
list_children = list_arcs[arc_pos]["children"]
for child_pos in range(0,len(list_children)):
resulting_ph = (1.0-p_evap_rate)*list_children[child_pos]["ph"]
if resulting_ph < p_minimum_ph:
output_aasg_set[aasg_pos]["arcs"][arc_pos]["children"][child_pos]["ph"] = p_minimum_ph
else:
output_aasg_set[aasg_pos]["arcs"][arc_pos]["children"][child_pos]["ph"] = resulting_ph
return output_aasg_set
def expert_evaluation(sequence):
#So far, if just one of the steps belongs to the attack, the whole sequence is considered an attack
for event in sequence:
if event["tag"] != 0:
return True
return False
def increment_decrement_ph(aasg_set,branch,aasg_pos,verdict,p_delta_ph_0,p_omega,p_initial_ph):
output_aasg_set = list(aasg_set)
list_arcs = output_aasg_set[aasg_pos]["arcs"]
#The elements in the branch are node ids, but we need to find the arcs. We need to select them by pairs
for i in range(0,len(branch)-1):
start = branch[i]
end = branch[i+1]
set_arc_pos = get_position_from_id(list_arcs,start,"start")
children_pos = get_position_from_id(list_arcs[set_arc_pos]["children"],end,"id")
child = list_arcs[set_arc_pos]["children"][children_pos]
ph = child["ph"]
delta_ph = p_delta_ph_0*math.exp(-math.pow(ph-p_initial_ph,2)/(2*math.pow(p_omega,2)))
if verdict:
output_aasg_set[aasg_pos]["arcs"][set_arc_pos]["children"][children_pos]["ph"] += delta_ph
else:
pheromones = output_aasg_set[aasg_pos]["arcs"][set_arc_pos]["children"][children_pos]["ph"] - delta_ph
if pheromones < p_minimum_ph:
output_aasg_set[aasg_pos]["arcs"][set_arc_pos]["children"][children_pos]["ph"] = p_minimum_ph
else:
output_aasg_set[aasg_pos]["arcs"][set_arc_pos]["children"][children_pos]["ph"] = pheromones
return output_aasg_set
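# Illustrative sketch (added; not part of the original script): the pheromone increment used
# above is a Gaussian of the current pheromone level centred on p_initial_ph, so arcs whose
# level is far from the initial value are changed less than arcs near it. The parameter
# values below are placeholders, not the experiment's configuration.
def _demo_delta_ph(ph, p_delta_ph_0=1.0, p_omega=0.5, p_initial_ph=1.0):
    return p_delta_ph_0 * math.exp(-math.pow(ph - p_initial_ph, 2) / (2 * math.pow(p_omega, 2)))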
def update_json_results(input_json_results,aasg_set,id_event,time_event,alerts_sent,string_sequence_dict):
json_results = input_json_results.copy()
json_results["list_event_ids"].append(id_event)
json_results["list_times"].append(time_event)
json_results["list_alerts_sent"].append(alerts_sent)
for aasg in aasg_set:
#Every arc will have its results
for arc_set in aasg["arcs"]:
start = arc_set["start"]
total_ph = 0
for child in arc_set["children"]:
total_ph += child["ph"]
for child in arc_set["children"]:
end = child["id"]
combined_name = str(start)+'_'+str(end)
ph = child["ph"]
strength = float(ph)/float(total_ph)
json_results["results"][aasg["id"]]["arcs"][combined_name]["ph"].append(ph)
json_results["results"][aasg["id"]]["arcs"][combined_name]["strength"].append(strength)
#So far we add the new sequences if there is any to the AASG with ID 1
if string_sequence_dict["branch"] != "":
json_results["results"][1]["branches_found"].append(string_sequence_dict["branch"])
json_results["results"][1]["sequences_found"].append(string_sequence_dict["sequence"])
json_results["results"][1]["pos_seq_found"].append(string_sequence_dict["pos_seq"])
return json_results
def get_last_generated_morwi(result_morwi):
last_generated_morwi = {}
last_generated_morwi["aasg_pos"] = result_morwi["aasg_pos"]
first_event = result_morwi["sequence"][0]
last_generated_morwi["type"] = first_event["type"]
last_generated_morwi["time"] = first_event["time"]
last_generated_morwi["ipsrc"] = first_event["ipsrc"]
last_generated_morwi["ipdst"] = first_event["ipdst"]
last_generated_morwi["psrc"] = first_event["psrc"]
last_generated_morwi["action"] = first_event["action"]
return last_generated_morwi
def morwihill(e_clas,aasg_set,dict_var):
#total_strength = 1
#We create a dictionary where the id of the last log of the found sequence is the key
#The value is the list of corresponding result_morwis with this id as last log
#Doing so, we can delay the update of pheromones until the arrival of the last log in the sequence
dict_changes_to_made = {}
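#Illustrative (assumed) shape: {id_of_last_log: [result_morwi, ...], ...},
#where each result_morwi describes one matched sequence ending at that log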
#json_results, to store the results that will later be processed by another script
#json_results = create_json_results(aasg_set)
#So far we do not use the json results and we | |
<reponame>MitchellTesla/datasets<gh_stars>1000+
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CoNLL2012 shared task data based on OntoNotes 5.0"""
import glob
import os
from collections import defaultdict
from typing import DefaultDict, Iterator, List, Optional, Tuple
import datasets
_CITATION = """\
@inproceedings{pradhan-etal-2013-towards,
title = "Towards Robust Linguistic Analysis using {O}nto{N}otes",
author = {<NAME> and
Moschitti, Alessandro and
<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME>},
booktitle = "Proceedings of the Seventeenth Conference on Computational Natural Language Learning",
month = aug,
year = "2013",
address = "Sofia, Bulgaria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W13-3516",
pages = "143--152",
}
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, \
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, \
<NAME>, <NAME>, <NAME>. \
OntoNotes Release 5.0 LDC2013T19. \
Web Download. Philadelphia: Linguistic Data Consortium, 2013.
"""
_DESCRIPTION = """\
OntoNotes v5.0 is the final version of the OntoNotes corpus, and is a large-scale, multi-genre,
multilingual corpus manually annotated with syntactic, semantic and discourse information.
This dataset is the extended version of OntoNotes v5.0 used in the CoNLL-2012 shared task.
It includes v4 train/dev and v9 test data for English/Chinese/Arabic and corrected version v12 train/dev/test data (English only).
The source of the data is the Mendeley Data repo [ontonotes-conll2012](https://data.mendeley.com/datasets/zmycy7t9h9), which appears to be the same as the official data, but users should use this dataset at their own risk.
See also summaries from paperswithcode, [OntoNotes 5.0](https://paperswithcode.com/dataset/ontonotes-5-0) and [CoNLL-2012](https://paperswithcode.com/dataset/conll-2012-1).
For more detailed info about the dataset, such as annotation, tag set, etc., refer to the documents in the Mendeley repo mentioned above.
"""
_URL = "https://data.mendeley.com/public-files/datasets/zmycy7t9h9/files/b078e1c4-f7a4-4427-be7f-9389967831ef/file_downloaded"
class Conll2012Ontonotesv5Config(datasets.BuilderConfig):
"""BuilderConfig for the CoNLL formatted OntoNotes dataset."""
def __init__(self, language=None, conll_version=None, **kwargs):
"""BuilderConfig for the CoNLL formatted OntoNotes dataset.
Args:
language: string, one of the languages {"english", "chinese", "arabic"}.
conll_version: string, "v4" or "v12". Note there is only English v12.
**kwargs: keyword arguments forwarded to super.
"""
assert language in ["english", "chinese", "arabic"]
assert conll_version in ["v4", "v12"]
if conll_version == "v12":
assert language == "english"
super(Conll2012Ontonotesv5Config, self).__init__(
name=f"{language}_{conll_version}",
description=f"{conll_version} of CoNLL formatted OntoNotes dataset for {language}.",
version=datasets.Version("1.0.0"), # hf dataset script version
**kwargs,
)
self.language = language
self.conll_version = conll_version
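# Illustrative usage (hypothetical, not part of the original script): the
# builder below instantiates these configs via BUILDER_CONFIGS, but one can
# also be constructed directly.
# cfg = Conll2012Ontonotesv5Config(language="english", conll_version="v4")
# assert cfg.name == "english_v4" and cfg.language == "english"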
class Conll2012Ontonotesv5(datasets.GeneratorBasedBuilder):
"""The CoNLL formatted OntoNotes dataset."""
BUILDER_CONFIGS = [
Conll2012Ontonotesv5Config(
language=lang,
conll_version="v4",
)
for lang in ["english", "chinese", "arabic"]
] + [
Conll2012Ontonotesv5Config(
language="english",
conll_version="v12",
)
]
def _info(self):
lang = self.config.language
conll_version = self.config.conll_version
if lang == "arabic":
pos_tag_feature = datasets.Value("string")
else:
tag_set = _POS_TAGS[f"{lang}_{conll_version}"]
pos_tag_feature = datasets.ClassLabel(num_classes=len(tag_set), names=tag_set)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"document_id": datasets.Value("string"),
"sentences": [
{
"part_id": datasets.Value("int32"),
"words": datasets.Sequence(datasets.Value("string")),
"pos_tags": datasets.Sequence(pos_tag_feature),
"parse_tree": datasets.Value("string"),
"predicate_lemmas": datasets.Sequence(datasets.Value("string")),
"predicate_framenet_ids": datasets.Sequence(datasets.Value("string")),
"word_senses": datasets.Sequence(datasets.Value("float32")),
"speaker": datasets.Value("string"),
"named_entities": datasets.Sequence(
datasets.ClassLabel(num_classes=37, names=_NAMED_ENTITY_TAGS)
),
"srl_frames": [
{
"verb": datasets.Value("string"),
"frames": datasets.Sequence(datasets.Value("string")),
}
],
"coref_spans": datasets.Sequence(datasets.Sequence(datasets.Value("int32"), length=3)),
}
],
}
),
homepage="https://conll.cemantix.org/2012/introduction.html",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
lang = self.config.language
conll_version = self.config.conll_version
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, f"conll-2012/{conll_version}/data")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"conll_files_directory": os.path.join(data_dir, f"train/data/{lang}")},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"conll_files_directory": os.path.join(data_dir, f"development/data/{lang}")},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"conll_files_directory": os.path.join(data_dir, f"test/data/{lang}")},
),
]
def _generate_examples(self, conll_files_directory):
conll_files = sorted(glob.glob(os.path.join(conll_files_directory, "**/*gold_conll"), recursive=True))
for idx, conll_file in enumerate(conll_files):
sentences = []
for sent in Ontonotes().sentence_iterator(conll_file):
document_id = sent.document_id
sentences.append(
{
"part_id": sent.sentence_id, # should be part id, according to https://conll.cemantix.org/2012/data.html
"words": sent.words,
"pos_tags": sent.pos_tags,
"parse_tree": sent.parse_tree,
"predicate_lemmas": sent.predicate_lemmas,
"predicate_framenet_ids": sent.predicate_framenet_ids,
"word_senses": sent.word_senses,
"speaker": sent.speakers[0],
"named_entities": sent.named_entities,
"srl_frames": [{"verb": f[0], "frames": f[1]} for f in sent.srl_frames],
"coref_spans": [(c[0], *c[1]) for c in sent.coref_spans],
}
)
yield idx, {"document_id": document_id, "sentences": sentences}
# --------------------------------------------------------------------------------------------------------
# Tag set
_NAMED_ENTITY_TAGS = [
"O", # out of named entity
"B-PERSON",
"I-PERSON",
"B-NORP",
"I-NORP",
"B-FAC", # FACILITY
"I-FAC",
"B-ORG", # ORGANIZATION
"I-ORG",
"B-GPE",
"I-GPE",
"B-LOC",
"I-LOC",
"B-PRODUCT",
"I-PRODUCT",
"B-DATE",
"I-DATE",
"B-TIME",
"I-TIME",
"B-PERCENT",
"I-PERCENT",
"B-MONEY",
"I-MONEY",
"B-QUANTITY",
"I-QUANTITY",
"B-ORDINAL",
"I-ORDINAL",
"B-CARDINAL",
"I-CARDINAL",
"B-EVENT",
"I-EVENT",
"B-WORK_OF_ART",
"I-WORK_OF_ART",
"B-LAW",
"I-LAW",
"B-LANGUAGE",
"I-LANGUAGE",
]
_POS_TAGS = {
"english_v4": [
"XX", # missing
"``",
"$",
"''",
",",
"-LRB-", # (
"-RRB-", # )
".",
":",
"ADD",
"AFX",
"CC",
"CD",
"DT",
"EX",
"FW",
"HYPH",
"IN",
"JJ",
"JJR",
"JJS",
"LS",
"MD",
"NFP",
"NN",
"NNP",
"NNPS",
"NNS",
"PDT",
"POS",
"PRP",
"PRP$",
"RB",
"RBR",
"RBS",
"RP",
"SYM",
"TO",
"UH",
"VB",
"VBD",
"VBG",
"VBN",
"VBP",
"VBZ",
"WDT",
"WP",
"WP$",
"WRB",
], # 49
"english_v12": [
"XX", # misssing
"``",
"$",
"''",
"*",
",",
"-LRB-", # (
"-RRB-", # )
".",
":",
"ADD",
"AFX",
"CC",
"CD",
"DT",
"EX",
"FW",
"HYPH",
"IN",
"JJ",
"JJR",
"JJS",
"LS",
"MD",
"NFP",
"NN",
"NNP",
"NNPS",
"NNS",
"PDT",
"POS",
"PRP",
"PRP$",
"RB",
"RBR",
"RBS",
"RP",
"SYM",
"TO",
"UH",
"VB",
"VBD",
"VBG",
"VBN",
"VBP",
"VBZ",
"VERB",
"WDT",
"WP",
"WP$",
"WRB",
], # 51
"chinese_v4": [
"X", # missing
"AD",
"AS",
"BA",
"CC",
"CD",
"CS",
"DEC",
"DEG",
"DER",
"DEV",
"DT",
"ETC",
"FW",
"IJ",
"INF",
"JJ",
"LB",
"LC",
"M",
"MSP",
"NN",
"NR",
"NT",
"OD",
"ON",
"P",
"PN",
"PU",
"SB",
"SP",
"URL",
"VA",
"VC",
"VE",
"VV",
], # 36
}
# --------------------------------------------------------------------------------------------------------
# The CoNLL(2012) file reader
# Modified the original code to get rid of extra package dependency.
# Original code: https://github.com/allenai/allennlp-models/blob/main/allennlp_models/common/ontonotes.py
class OntonotesSentence:
"""
A class representing the annotations available for a single CONLL formatted sentence.
# Parameters
document_id : `str`
This is a variation on the document filename
sentence_id : `int`
The integer ID of the sentence within a document.
words : `List[str]`
These are the tokens as segmented/tokenized in the Treebank.
pos_tags : `List[str]`
These are the Penn-Treebank-style parts of speech. When parse information is missing,
all parts of speech except the one for which there is some sense or proposition
annotation are marked with an XX tag. The verb is marked with just a VERB tag.
parse_tree : `nltk.Tree`
An nltk Tree representing the parse. It includes POS tags as pre-terminal nodes.
When the parse information is missing, the parse will be `None`.
predicate_lemmas : `List[Optional[str]]`
The predicate lemma of the words for which we have semantic role
information or word sense information. All other indices are `None`.
predicate_framenet_ids : `List[Optional[int]]`
The PropBank frameset ID of the lemmas in `predicate_lemmas`, or `None`.
word_senses : `List[Optional[float]]`
The word senses for the words in the sentence, or `None`. These are floats
because the word sense can have values after the decimal, like `1.1`.
speakers : `List[Optional[str]]`
The speaker information for the words in the sentence, if present, or `None`.
This is the speaker or author name where available, mostly in Broadcast Conversation
and Web Log data. When not available, the rows are marked with a "-".
named_entities : `List[str]`
The BIO tags for named entities in the sentence.
srl_frames : `List[Tuple[str, List[str]]]`
A dictionary keyed by the verb in the sentence for the given
Propbank frame labels, in a BIO format.
coref_spans : `Set[TypedSpan]`
The spans for entity mentions involved in coreference resolution within the sentence.
Each element is a tuple composed of (cluster_id, (start_index, end_index)). Indices
are `inclusive`.
"""
def __init__(
self,
document_id: str,
sentence_id: int,
words: List[str],
pos_tags: List[str],
parse_tree: Optional[str],
predicate_lemmas: List[Optional[str]],
predicate_framenet_ids: List[Optional[str]],
word_senses: List[Optional[float]],
speakers: List[Optional[str]],
named_entities: List[str],
srl_frames: List[Tuple[str, List[str]]],
coref_spans,
) -> None:
self.document_id = document_id
self.sentence_id = sentence_id
self.words = words
self.pos_tags = pos_tags
self.parse_tree = parse_tree
self.predicate_lemmas = predicate_lemmas
self.predicate_framenet_ids = predicate_framenet_ids
self.word_senses = word_senses
self.speakers = speakers
self.named_entities = named_entities
self.srl_frames = srl_frames
self.coref_spans = coref_spans
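# Minimal illustrative construction (hypothetical values), assuming a
# two-token sentence with no predicate, sense or coreference annotation:
# sent = OntonotesSentence(
#     document_id="bc/cctv/00/cctv_0000", sentence_id=0,
#     words=["Hello", "world"], pos_tags=["UH", "NN"],
#     parse_tree="(TOP (INTJ (UH Hello) (NN world)))",
#     predicate_lemmas=[None, None], predicate_framenet_ids=[None, None],
#     word_senses=[None, None], speakers=[None, None],
#     named_entities=["O", "O"], srl_frames=[], coref_spans=set())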
class Ontonotes:
"""
This `DatasetReader` is designed to read in the English OntoNotes v5.0 data
in the format used by the CoNLL 2011/2012 shared tasks. In order to use this
Reader, you must follow the instructions provided [here (v12 release):]
(https://cemantix.org/data/ontonotes.html), which will allow you to download
the CoNLL style annotations for the OntoNotes v5.0 release -- | |
<filename>softlearning/environments/gym/locobot/dual_nav_grasp_envs.py
import gym
from gym import spaces
import numpy as np
import os
from collections import OrderedDict, defaultdict
import tensorflow as tf
import tree
from .utils import dprint, is_in_rect, Timer
from .base_envs import RoomEnv
from .perturbations import get_perturbation, get_perturbation_use_rnd
from .grasping import get_grasp_algorithm, GraspingEval
from softlearning.environments.gym.spaces import DiscreteBox, FrameStack
from softlearning.utils.dict import deep_update
IMAGE_SIZE = 100
class LocobotNavigationGraspingDualPerturbationEnv(RoomEnv):
""" Locobot Navigation with Perturbation """
def __init__(self, **params):
defaults = dict(
is_training=False,
do_teleop=False,
trajectory_log_path="./trajectory/",
trajectory_max_length=1000,
perturbation_interval=0, # 0 means no perturbation at intervals
grasp_perturbation="none",
nav_perturbation="none",
grasp_perturbation_params=dict(),
nav_perturbation_params=dict(),
grasp_algorithm="vacuum",
grasp_algorithm_params=dict(),
do_grasp_eval=False,
alive_penalty=0.0,
do_single_grasp=False,
use_dense_reward=False,
use_shared_data=False,
use_auto_grasp=True,
add_uncertainty_bonus=False,
no_respawn_eval_len=200,
)
defaults["random_robot_yaw"] = False
defaults["action_space"] = spaces.Box(-1.0, 1.0, shape=(2,))
defaults["observation_space"] = spaces.Dict({
"pixels": spaces.Box(low=0, high=255, dtype=np.uint8, shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
})
super().__init__(**deep_update(defaults, params))
print("LocobotNavigationDQNGraspingDualPerturbationEnv params:", self.params)
self.image_size = IMAGE_SIZE
self.use_auto_grasp = self.params["use_auto_grasp"]
self.use_shared_data = self.params["use_shared_data"]
self.add_uncertainty_bonus = self.params["add_uncertainty_bonus"]
self.do_single_grasp = self.params["do_single_grasp"]
self.has_first_reset = False
self.is_training = self.params["is_training"]
self.do_teleop = self.params["do_teleop"]
self.perturbation_interval = self.params["perturbation_interval"]
self.grasp_perturbation_name = self.params["grasp_perturbation"]
self.grasp_perturbation_params = self.params["grasp_perturbation_params"]
self.grasp_perturbation_env = get_perturbation(
self.grasp_perturbation_name,
env=self,
is_training=self.is_training,
infos_prefix="grasp_perturbation-",
use_shared_data=self.use_shared_data,
**self.grasp_perturbation_params
)
self.nav_perturbation_name = self.params["nav_perturbation"]
self.nav_perturbation_params = self.params["nav_perturbation_params"]
self.nav_perturbation_env = get_perturbation(
self.nav_perturbation_name,
env=self,
is_training=self.is_training,
infos_prefix="nav_perturbation-",
use_shared_data=self.use_shared_data,
**self.nav_perturbation_params
)
self.grasp_algorithm_name = self.params["grasp_algorithm"]
self.grasp_algorithm_params = self.params["grasp_algorithm_params"]
self.grasp_algorithm = get_grasp_algorithm(
self.grasp_algorithm_name,
env=self,
is_training=self.is_training,
**self.grasp_algorithm_params
)
self.do_grasp_eval = self.params["do_grasp_eval"]
if self.do_grasp_eval:
self.grasp_eval = GraspingEval(self, self.grasp_algorithm)
self.alive_penalty = self.params["alive_penalty"]
self.use_dense_reward = self.params["use_dense_reward"]
self.no_respawn_eval_len = self.params["no_respawn_eval_len"]
self.trajectory_log_path = self.params["trajectory_log_path"]
self.trajectory_max_length = self.params["trajectory_max_length"]
self.trajectories = []
self.curr_nav_trajectory = []
self.trajectory_total_length = 0
self.trajectory_index = 0
if self.trajectory_log_path:
os.makedirs(self.trajectory_log_path, exist_ok=True)
self.timer = Timer()
# trajectory information
self.total_grasp_actions = 0
self.total_grasped = 0
def finish_init(self,
algorithm=None,
replay_pool=None,
grasp_rnd_trainer=None, grasp_perturbation_policy=None, grasp_perturbation_algorithm=None,
nav_rnd_trainer=None, nav_perturbation_policy=None, nav_perturbation_algorithm=None,
**kwargs
):
self.algorithm = algorithm
self.replay_pool = replay_pool
self.grasp_rnd_trainer = grasp_rnd_trainer
self.grasp_perturbation_policy = grasp_perturbation_policy
self.grasp_perturbation_algorithm = grasp_perturbation_algorithm
self.nav_rnd_trainer = nav_rnd_trainer
self.nav_perturbation_policy = nav_perturbation_policy
self.nav_perturbation_algorithm = nav_perturbation_algorithm
self.grasp_perturbation_env.finish_init(
policy=grasp_perturbation_policy,
algorithm=grasp_perturbation_algorithm,
rnd_trainer=grasp_rnd_trainer,
preprocess_rnd_inputs=lambda x: self.preprocess_grasp_rnd_inputs(x),
nav_algorithm=algorithm,
main_replay_pool=self.replay_pool,
grasp_algorithm=self.grasp_algorithm
)
self.nav_perturbation_env.finish_init(
policy=nav_perturbation_policy,
algorithm=nav_perturbation_algorithm,
rnd_trainer=nav_rnd_trainer,
preprocess_rnd_inputs=lambda x: x,
nav_algorithm=algorithm,
main_replay_pool=self.replay_pool,
grasp_algorithm=self.grasp_algorithm
)
self.grasp_algorithm.finish_init(rnd_trainer=grasp_rnd_trainer)
def reset(self):
dprint("reset")
if self.do_single_grasp and self.has_first_reset:
self.num_steps = 0
return
super().reset(no_return=True)
self.total_grasp_actions = 0
self.total_grasped = 0
self.grasp_algorithm.clear_diagnostics()
self.trajectories = []
self.curr_nav_trajectory = []
self.trajectory_total_length = 0
self.interface.set_wheels_velocity(0, 0)
self.interface.do_steps(120)
self.has_first_reset = True
@property
def should_create_grasp_rnd(self):
return get_perturbation_use_rnd(self.grasp_perturbation_name)
@property
def should_create_nav_rnd(self):
return get_perturbation_use_rnd(self.nav_perturbation_name)
@property
def grasp_rnd_input_shapes(self):
return OrderedDict({'pixels': tf.TensorShape((self.grasp_algorithm.image_size, self.grasp_algorithm.image_size, 3))})
@property
def nav_rnd_input_shapes(self):
return OrderedDict({'pixels': tf.TensorShape((self.image_size, self.image_size, 3))})
def preprocess_grasp_rnd_inputs(self, observations):
observations = OrderedDict({
"pixels": self.grasp_algorithm.crop_obs(observations["pixels"]),
# "current_velocity": observations["current_velocity"],
})
return observations
def sample_training_batch(self, batch_size, **kwargs):
if not self.use_shared_data:
return self.replay_pool.random_batch(batch_size, **kwargs)
elif self.grasp_perturbation_env.has_shared_pool and self.nav_perturbation_env.has_shared_pool:
return self.replay_pool.random_batch_from_multiple(
batch_size,
[self.grasp_perturbation_env.buffer, self.nav_perturbation_env.buffer],
[self.grasp_perturbation_env.process_batch_for_main_pool, self.nav_perturbation_env.process_batch_for_main_pool]
)
elif self.grasp_perturbation_env.has_shared_pool:
return self.replay_pool.random_batch_from_both(
batch_size,
self.grasp_perturbation_env.buffer,
self.grasp_perturbation_env.process_batch_for_main_pool
)
elif self.nav_perturbation_env.has_shared_pool:
return self.replay_pool.random_batch_from_both(
batch_size,
self.nav_perturbation_env.buffer,
self.nav_perturbation_env.process_batch_for_main_pool
)
else:
return self.replay_pool.random_batch(batch_size, **kwargs)
def process_batch(self, batch):
diagnostics = OrderedDict()
if self.nav_rnd_trainer is not None:
dprint(" nav process rnd batch")
observations = batch["observations"]
train_diagnostics = self.nav_rnd_trainer.train(observations)
diagnostics.update(train_diagnostics)
if self.add_uncertainty_bonus:
# this is between 0 and 0.5
uncertainty_bonus = self.grasp_algorithm.get_uncertainty_for_nav(batch["next_observations"]["pixels"])
batch["rewards"] = batch["rewards"] + uncertainty_bonus
diagnostics["uncertainty_bonus-mean"] = np.mean(uncertainty_bonus)
diagnostics["uncertainty_bonus-min"] = np.min(uncertainty_bonus)
diagnostics["uncertainty_bonus-max"] = np.max(uncertainty_bonus)
diagnostics["uncertainty_bonus-std"] = np.std(uncertainty_bonus)
return diagnostics
def get_object_positions(self):
objects = np.zeros((self.room.num_objects, 2))
for i in range(self.room.num_objects):
object_pos, _ = self.interface.get_object(self.room.objects_id[i], relative=False)
objects[i, 0] = object_pos[0]
objects[i, 1] = object_pos[1]
return objects
def update_nav_trajectory(self):
self.curr_nav_trajectory.append(self.interface.get_base_pos_and_yaw())
def finalize_nav_trajectory(self):
self.update_nav_trajectory()
nav_traj = self.curr_nav_trajectory
self.curr_nav_trajectory = []
nav_end_object_positions = self.get_object_positions()
self.trajectories.append(("nav", nav_traj))
self.trajectories.append(("obj", nav_end_object_positions))
self.trajectory_total_length += len(nav_traj)
dprint("nav_traj len", len(nav_traj), nav_traj)
return nav_traj
def do_grasp_perturbation(self, infos, object_ind):
grasp_perturbation_traj = self.grasp_perturbation_env.do_perturbation_precedure(infos, object_ind=object_ind)
if grasp_perturbation_traj[0] is not None:
grasp_pert_end_object_positions = self.get_object_positions()
self.trajectories.append(("grasp_pert", grasp_perturbation_traj))
self.trajectories.append(("obj", grasp_pert_end_object_positions))
self.trajectory_total_length += len(grasp_perturbation_traj[0])
dprint("grasp_perturbation_traj len", len(grasp_perturbation_traj[0]), "reward", grasp_perturbation_traj[2], grasp_perturbation_traj[0])
def do_nav_perturbation(self, infos):
nav_perturbation_traj = self.nav_perturbation_env.do_perturbation_precedure(infos)
if nav_perturbation_traj[0] is not None:
nav_pert_end_object_positions = self.get_object_positions()
self.trajectories.append(("nav_pert", nav_perturbation_traj))
self.trajectories.append(("obj", nav_pert_end_object_positions))
self.trajectory_total_length += len(nav_perturbation_traj[0])
dprint("nav_perturbation_traj len", len(nav_perturbation_traj[0]), "reward", nav_perturbation_traj[2], nav_perturbation_traj[0])
def save_trajectory(self):
# store trajectory information (usually for reset free)
if self.trajectory_log_path:
env_type = "train" if self.is_training else "eval"
save_path = os.path.join(self.trajectory_log_path, f"trajectory_{env_type}_{self.trajectory_index}")
dprint("save trajectory at", save_path)
np.save(save_path, self.trajectories)
self.trajectories = []
self.trajectory_total_length = 0
self.trajectory_index += 1
def get_observation(self):
obs = OrderedDict()
obs["pixels"] = self.render(size=self.image_size, save_frame=bool(self.trajectory_log_path))
return obs
def do_move(self, action):
forward_min = 0.0
forward_max = 15.0
forward_mean = (forward_max + forward_min) * 0.5
forward_scale = (forward_max - forward_min) * 0.5
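# Differential-drive mapping (descriptive note): action[0] sets the shared
# forward component and action[1] the turning component, so the left/right
# wheel velocities are the sum/difference of the two, scaled by forward_scale
# and offset by forward_mean.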
left = (action[0] + action[1]) * forward_scale + forward_mean
right = (action[0] - action[1]) * forward_scale + forward_mean
self.interface.p.setJointMotorControl2(self.interface.robot, self.interface.LEFT_WHEEL, self.interface.p.VELOCITY_CONTROL, targetVelocity=left, force=1e4)
self.interface.p.setJointMotorControl2(self.interface.robot, self.interface.RIGHT_WHEEL, self.interface.p.VELOCITY_CONTROL, targetVelocity=right, force=1e4)
for _ in range(55 // 5):
self.interface.do_steps(5)
self.unstuck_objects(detect_dist=0.203, factor=0.205)
self.interface.p.setJointMotorControl2(self.interface.robot, self.interface.LEFT_WHEEL, self.interface.p.VELOCITY_CONTROL, targetVelocity=0, force=1e4)
self.interface.p.setJointMotorControl2(self.interface.robot, self.interface.RIGHT_WHEEL, self.interface.p.VELOCITY_CONTROL, targetVelocity=0, force=1e4)
for _ in range(65 // 5):
self.interface.do_steps(5)
self.unstuck_objects(detect_dist=0.203, factor=0.205)
self.unstuck_robot()
self.previous_action = np.array(action)
def do_grasp(self, action, infos, return_grasped_object=False):
if self.use_auto_grasp:
should_grasp = self.grasp_algorithm.are_blocks_graspable()
else:
should_grasp = self.grasp_algorithm.should_grasp_block_learned()
infos["attempted_grasp"] = 1 if should_grasp else 0
if should_grasp:
return self.grasp_algorithm.do_grasp_action(return_grasped_object=return_grasped_object)
else:
if return_grasped_object:
return 0, None
else:
return 0
def unstuck_objects(self, detect_dist=0.215, factor=0.23):
for i in range(self.room.num_objects):
object_pos, _ = self.interface.get_object(self.room.objects_id[i], relative=True)
sq_dist = object_pos[0] ** 2 + object_pos[1] ** 2
if sq_dist <= detect_dist ** 2:
scale_factor = factor / np.sqrt(sq_dist)
new_object_pos = np.array(object_pos) * np.array([scale_factor, scale_factor, 1])
new_object_pos[2] = 0.015
self.interface.move_object(self.room.objects_id[i], new_object_pos, relative=True)
if sq_dist >= 0.3 ** 2:
self.room.force_object_in_bound_if_not(i)
self.room.force_object_out_obstacle_if_not(i)
def unstuck_robot(self):
turn_dir = self.room.get_turn_direction_if_should_turn()
if turn_dir is None:
return
self.interface.set_wheels_velocity(0, 0)
self.interface.do_steps(30)
self.unstuck_objects()
turn_dir, amount = turn_dir
if turn_dir == "right":
self.interface.set_wheels_velocity(15, -15)
else:
self.interface.set_wheels_velocity(-15, 15)
num_repeat = int(abs(amount) * 5) + 3
for _ in range(num_repeat):
self.interface.do_steps(10)
self.unstuck_objects()
self.interface.set_wheels_velocity(0, 0)
self.interface.do_steps(30)
self.unstuck_objects()
def get_objects_pos_dist(self, filter_fn=None):
objects_pos_dist = []
for i in range(self.room.num_objects):
if filter_fn is None or (filter_fn and filter_fn(i)):
object_pos, _ = self.interface.get_object(self.room.objects_id[i])
object_pos_relative, _ = self.interface.get_object(self.room.objects_id[i], relative=True)
object_dist = (object_pos_relative[0] - 0.4) ** 2 + object_pos_relative[1] ** 2
objects_pos_dist.append((object_pos[:2], object_dist))
return objects_pos_dist
def get_objects_pos_dist_in_view(self, filter_fn=None, relative=False):
objects_pos_dist_in_view = []
for i in range(self.room.num_objects):
if filter_fn is None or (filter_fn and filter_fn(i)):
object_pos, _ = self.interface.get_object(self.room.objects_id[i], relative=relative)
object_pos_relative, _ = self.interface.get_object(self.room.objects_id[i], relative=True)
object_dist = (object_pos_relative[0] - 0.4) ** 2 + object_pos_relative[1] ** 2
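# Field-of-view check (descriptive note): the lateral limit grows with the
# forward distance (roughly a 25 degree half-angle plus a small 0.08 offset),
# and the object must also lie between 0.275 and 2.50 units ahead of the robot.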
y_lim = object_pos_relative[0] * np.tan((25 / 180) * np.pi) + 0.08
in_view = (-y_lim <= object_pos_relative[1] <= y_lim) and (0.275 <= object_pos_relative[0] <= 2.50)
objects_pos_dist_in_view.append((object_pos[:2], object_dist, in_view))
return objects_pos_dist_in_view
def step(self, action):
dprint("step:", self.num_steps, "action:", action)
if self.do_teleop:
for _ in range(5000):
try:
cmd = input().strip().split()
if cmd[0] == "g":
self.grasp_algorithm.do_grasp_action()
elif cmd[0] == "q":
raise KeyboardInterrupt
elif cmd[0] == "repeat":
action = [float(cmd[2]), float(cmd[3])]
for _ in range(int(cmd[1])):
self.do_move(action)
else:
action = [float(cmd[0]), float(cmd[1])]
break
except KeyboardInterrupt as e:
raise e
except Exception as e:
pass
self.timer.start()
# init return values
reward = 0.0
infos = {}
# update initial block pos
if len(self.trajectories) == 0:
self.trajectories.append(("obj", self.get_object_positions()))
# update nav trajectory
self.update_nav_trajectory()
# do move
self.do_move(action)
# do grasping
num_grasped, object_ind = self.do_grasp(action, infos, return_grasped_object=True)
reward += num_grasped
self.total_grasped += num_grasped
# steps update
self.num_steps += 1
# get next obs before perturbation
next_obs = self.get_observation()
done = self.num_steps >= self.max_ep_len
if done and not self.do_single_grasp:
self.finalize_nav_trajectory()
elif reward > 0.5:
self.finalize_nav_trajectory()
self.do_grasp_perturbation(infos, object_ind)
self.do_nav_perturbation(infos)
if self.do_single_grasp:
done = True
elif self.perturbation_interval > 0 and self.num_steps % self.perturbation_interval == 0:
self.finalize_nav_trajectory()
self.do_grasp_perturbation(infos, None)
# save trajectory
if (done and not self.do_single_grasp) or self.trajectory_total_length >= self.trajectory_max_length:
self.save_trajectory()
video_name = "video_train" if self.is_training else "video_eval"
self.interface.save_frames(self.trajectory_log_path, video_name)
# apply the alive penalty and dense reward
if self.use_dense_reward:
if reward > 0.5:
reward = 100.0 - self.alive_penalty
else:
objects_pos_dist = self.get_objects_pos_dist()
_, closest_dist = min(objects_pos_dist, key=lambda pos_dist: pos_dist[1])
reward = -np.sqrt(closest_dist) - self.alive_penalty
else:
reward -= self.alive_penalty
self.timer.end()
# infos logging
infos["shared"] = (infos["attempted_grasp"] == 1)
infos["success"] = num_grasped
base_pos = self.interface.get_base_pos()
infos["base_x"] = base_pos[0]
infos["base_y"] = base_pos[1]
infos["in_room_1"] = int(base_pos[1] < 2.0)
infos["in_room_2"] = int(base_pos[1] >= 2.0)
return next_obs, reward, done, infos
def get_path_infos(self, paths, *args, **kwargs):
infos = {}
infos.update(self.grasp_algorithm.finalize_diagnostics())
infos.update(self.grasp_perturbation_env.finalize_diagnostics())
infos.update(self.nav_perturbation_env.finalize_diagnostics())
infos["total_grasped"] = self.total_grasped
infos["total_grasp_actions"] = self.total_grasp_actions
infos["total_sim_steps"] = self.interface.total_sim_steps
infos["total_elapsed_non_train_time"] = self.timer.total_elapsed_time
total_successes = 0
num_steps = 0
for path in paths:
success_values = np.array(path["infos"]["success"])
total_successes += np.sum(success_values)
num_steps += len(success_values)
infos["success_per_step"] = total_successes / num_steps
infos["success_per_500_steps"] = (total_successes / num_steps) * 500
if self.do_grasp_eval:
self.grasp_eval.do_eval(infos)
self.grasp_algorithm.clear_diagnostics()
| |
"""General purpose (hybrid) model class, and associated hybrid trajectory
and variable classes.
<NAME>, March 2005.
A Model object's hybrid trajectory can be treated as a curve, or as
a mapping. Call the model object with the trajectory name, time(s), and
set the 'asmap' argument to be True to use an integer time to select the
trajectory segment. These are numbered from zero.
A trajectory value in a Model object's 'trajectories' dictionary
attribute is a HybridTrajectory object, having the following
attributes (among others):
timeInterval is the entire time interval for the trajectory.
timePartitions is a sequence of time_intervals (for each trajectory
segment in trajSeq), and
trajSeq is a list of epoch or regime trajectory segments [traj_0, traj_1,
..., traj_(R-1)],
where traj_i is a callable Trajectory or HybridTrajectory object.
eventStruct is the event structure used to determine that trajectory.
events is a dictionary of event names -> list of times at which that
event took place.
modelNames is a list of the generators used for the trajectory (one per
partition).
variables is a dictionary that mimics the variables of the trajectory.
"""
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
## PyDSTool imports
from . import Generator, Events, ModelContext
from .utils import *
from .common import *
from .errors import *
from .Interval import *
from .Trajectory import *
from .Variable import *
from .Points import *
from .ModelSpec import *
from .Symbolic import isMultiRef
from .parseUtils import isHierarchicalName, NAMESEP, mapNames, symbolMapClass
## Other imports
import math, sys
import six
from numpy import Inf, NaN, isfinite, sign, abs, array, arange, \
zeros, concatenate, transpose, shape
from numpy import sometrue, alltrue, any, all
import copy
from time import clock
import pprint
__all__ = ['Model', 'HybridModel', 'NonHybridModel',
'boundary_containment', 'boundary_containment_by_postproc',
'boundary_containment_by_event',
'domain_test']
# ----------------------------------------------------------------------------
class boundary_containment(ModelContext.qt_feature_leaf):
# not implemented using metrics because the metrics are trivial
# and cause a lot of overhead for this often-evaluated feature
def __init__(self, name, description='', pars=None):
ModelContext.qt_feature_leaf.__init__(self, name, description, pars)
try:
pars.thresh
except AttributeError:
raise ValueError("Missing threshold specification")
try:
# tolerance for small rounding errors
pars.abseps
except AttributeError:
self.pars.abseps = 0
try:
assert pars.interior_dirn in [-1, 0, 1]
except AttributeError:
raise ValueError("Missing interior direction specification")
except AssertionError:
raise ValueError("Invalid interior direction specification value "
" use -1 for 'below', 1 for 'above', or 0 for 'discrete domain'")
try:
self.pars.coordname
except AttributeError:
# test all coords at once
self.pars.coordname = None
else:
assert isinstance(self.pars.coordname, six.string_types), \
"Coordinate name must be a string"
def evaluate(self, target):
raise NotImplementedError("Only call this method on a concrete "
"sub-class")
class boundary_containment_by_event(boundary_containment):
def __init__(self, name, description='', pars=None):
boundary_containment.__init__(self, name, description, pars)
try:
self.pars.bd_eventname
except AttributeError:
raise ValueError("Missing boundary event name")
# assume that the supplied model will correspond to the source of
# trajectories in evaluate method
try:
self.pars.model
except AttributeError:
raise ValueError("Missing model associated with event")
# have not verified that event is present in model
def evaluate(self, traj):
# verify whether event exists and was flagged in associated model
try:
evpts = traj.getEvents(self.pars.bd_eventname)
except ValueError as errinfo:
print(errinfo)
raise RuntimeError("Could not find flagged events for this trajectory")
try:
evpt = evpts[self.pars.coordname]
except KeyError:
raise ValueError("No such coordinate %s in the defined event"%self.pars.coordname)
except TypeError:
# no events of this kind were found, so passed feature eval test
# dereferencing None (unsubscriptable object)
if self.pars.abseps > 0:
# would like to re-evaluate event at its threshold+abseps, but
# leave for now
print("Warning -- Boundary containment feature %s:"%self.name)
print(" Check for uncertain case using events not implemented")
self.results.output = None
self.results.uncertain = False
else:
self.results.output = None
self.results.uncertain = False
return True
else:
# event found
if self.pars.abseps > 0:
# would like to re-evaluate event at its threshold+abseps, but
# leave for now
print("Warning -- Boundary containment feature %s:"%self.name)
print(" Check for uncertain case using events not implemented")
self.results.output = evpt[0] # only use first event (in case not Terminal)
self.results.uncertain = False
else:
self.results.output = evpt[0] # only use first event (in case not Terminal)
self.results.uncertain = False
return False
def _find_idx(self):
"""Helper function for finding index in trajectory meshpoints
at which containment first failed."""
if self.results.satisfied:
# Trajectory satisfied constraint!
return None
return len(self.results.output)
class boundary_containment_by_postproc(boundary_containment):
def evaluate(self, traj):
diffs = [p - self.pars.thresh for p in \
traj.sample(coords=self.pars.coordname)]
if self.pars.verbose_level > 1:
print("%s diffs in coord %s ="%(self.name,self.pars.coordname) + ", %s" % diffs)
res_strict = array([sign(d) \
== self.pars.interior_dirn for d in diffs])
satisfied_strict = alltrue(res_strict)
if self.pars.abseps > 0:
if self.pars.interior_dirn == 0:
# especially for discrete domains
res_loose = array([abs(d) < self.pars.abseps for d in diffs])
else:
res_loose = array([sign(d + self.pars.interior_dirn*self.pars.abseps) \
== self.pars.interior_dirn for d in diffs])
satisfied_loose = alltrue(res_loose)
self.results.output = res_loose
# if p values are *outside* thresh by up to abseps amount
# then flag this as 'uncertain' for use by domain_test class's
# transversality testing
self.results.uncertain = satisfied_loose and not satisfied_strict
return satisfied_loose
else:
self.results.output = res_strict
self.results.uncertain = sometrue(array(diffs)==0)
return alltrue(res_strict)
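# Worked example (hypothetical numbers): with thresh=1.0, interior_dirn=-1 and
# abseps=1e-3, a sampled value of 1.0005 fails the strict sign test but passes
# the loose one, so evaluate returns True with results.uncertain set; the
# domain_test class then falls back to the dx transversality check.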
def _find_idx(self):
"""Helper function for finding index in trajectory meshpoints
at which containment first failed"""
if self.results.satisfied:
# Trajectory satisfied constraint!
return None
res = self.results.output
if res[0] == -1:
adjusted_res = list((res + 1) != 0)
elif res[0] == 1:
adjusted_res = list((res - 1) != 0)
else:
# starts with 0 already
adjusted_res = list(res != 0)
# find first index at which value is non-zero
# should never raise ValueError because this method is
# only run if there was a sign change found
return adjusted_res.index(True)
class domain_test(ModelContext.qt_feature_node):
def __init__(self, name, description='', pars=None):
ModelContext.qt_feature_node.__init__(self, name, description, pars)
try:
self.pars.interval
except AttributeError:
raise ValueError("Missing domain interval specification")
try:
# tolerance for small rounding errors
self.pars.abseps
except AttributeError:
if isinstance(self.pars.interval, Interval):
# Interval type passed, inherit its abseps
self.pars.abseps = self.pars.interval._abseps
else:
self.pars.abseps = 0
if not isinstance(self.pars.interval, (tuple, list)):
# assume a singleton numeric type passed
self.pars.interval = [self.pars.interval, self.pars.interval]
elif len(self.pars.interval)==1:
# singleton passed, so copy the value for both
# "endpoints" so that xlo_bc etc. below will work
self.pars.interval = [self.pars.interval[0],
self.pars.interval[0]]
self.isdiscrete = self.pars.interval[0] == self.pars.interval[1]
try:
self.pars.coordname
except AttributeError:
raise ValueError("Missing coordinate name")
try:
self.pars.derivname
except AttributeError:
raise ValueError("Missing coordinate derivative name")
# multiply interior directions by the integer value of not self.isdiscrete
# in order to set them to zero when the "interval" is actually a singleton
# value for a discrete domain. That fixes the boundary containment evaluation
# code which compares the sign of coord differences with that interior
# direction value.
xlo_bc = boundary_containment_by_postproc('x_test_lo',
description='Test x lower bound',
pars=args(thresh=self.pars.interval[0],
interior_dirn=1*int(not self.isdiscrete),
abseps=self.pars.abseps,
coordname=self.pars.coordname))
xhi_bc = boundary_containment_by_postproc('x_test_hi',
description='Test x upper bound',
pars=args(thresh=self.pars.interval[1],
interior_dirn=-1*int(not self.isdiscrete),
abseps=self.pars.abseps,
coordname=self.pars.coordname))
dxlo_bc = boundary_containment_by_postproc('dx_test_lo',
description='Test dx at lower x bound',
pars=args(thresh=0,
interior_dirn=1*int(not self.isdiscrete),
abseps=0,
coordname=self.pars.derivname))
dxhi_bc = boundary_containment_by_postproc('dx_test_hi',
description='Test dx at upper x bound',
pars=args(thresh=0,
interior_dirn=-1*int(not self.isdiscrete),
abseps=0,
coordname=self.pars.derivname))
self.subfeatures = {'x_test_lo': xlo_bc,
'dx_test_lo': dxlo_bc,
'x_test_hi': xhi_bc,
'dx_test_hi': dxhi_bc}
def evaluate(self, traj):
# Arg is a traj!
for sf in self.subfeatures.values():
self.propagate_verbosity(sf)
sf.super_pars = self.pars
sf.super_results = self.results
xlo_bc = self.subfeatures['x_test_lo']
xlo_test = xlo_bc(traj)
if xlo_bc.results.uncertain:
if self.pars.verbose_level > 0:
print("Lo bd uncertain")
if self.isdiscrete:
# accept uncertain case for discrete domain
xlo_test = True
else:
# check transversality at critical (boundary) value of domain
xlo_test = self.subfeatures['dx_test_lo'](traj)
xhi_bc = self.subfeatures['x_test_hi']
xhi_test = xhi_bc(traj)
if xhi_bc.results.uncertain:
if self.pars.verbose_level > 0:
print("Hi bd uncertain")
if self.isdiscrete:
# accept uncertain case for discrete domain
xhi_test = True
else:
# check transversality at critical (boundary) value of domain
xhi_test = self.subfeatures['dx_test_hi'](traj)
for sf in self.subfeatures.values():
self.results[sf.name] = sf.results
return xlo_test and xhi_test
def _find_idx(self):
"""Helper function for finding lowest index in trajectory meshpoints
at which domain test first failed"""
if self.results.satisfied:
# Trajectory satisfied domain conditions!
return None
lowest_idx = Inf
for sfname, sf in self.subfeatures.items():
if self.pars.verbose_level > 0:
print("\n %s %r" % (sfname, sf.results))
try:
res = list(self.results[sfname].output)
except AttributeError:
# dxdt transversality test was not run so ignore
continue
if sf.results.satisfied:
continue
if self.pars.verbose_level > 0:
print(res)
# Find first index at which value is non-zero.
# Will not raise ValueError because test satisfaction
# | |
is_valid(self, lines):
try:
g = smof_base._stream_entries(smof_base.read_fasta_str(lines))
out = [s for s in g]
return True
except BaseException:
return False
def test_good(self):
self.assertTrue(self.cmp_seqs(self.good, (self.seq1, self.seq2)))
def test_good_empty_lines(self):
self.assertTrue(self.cmp_seqs(self.good_empty_lines, (self.seq1, self.seq2)))
def test_weird_empty_lines(self):
self.assertTrue(self.cmp_seqs(self.weird_empty_lines, (self.seq1, self.seq2)))
def test_spaced(self):
self.assertTrue(
self.cmp_seqs(self.spaced, (self.seq1_spaced, self.seq2_spaced))
)
def test_well_commented(self):
self.assertTrue(self.cmp_seqs(self.well_commented, (self.seq1, self.seq2)))
def test_interspersed_comments(self):
self.assertTrue(
self.cmp_seqs(self.interspersed_comments, (self.seq1, self.seq2))
)
def test_funky_header(self):
self.assertTrue(self.cmp_seqs(self.funky_header, [self.seq1_funky]))
def test_internal_gt(self):
self.assertTrue(self.cmp_seqs(self.internal_gt, [self.seq1_weird]))
def test_bad_first(self):
self.assertFalse(self.is_valid(self.bad_first))
def test_empty_seq(self):
# empty sequences are now supported
self.assertTrue(self.is_valid(self.empty_seq))
def test_empty_last_seq(self):
# empty sequences are now supported
self.assertTrue(self.is_valid(self.empty_last_seq))
def test_no_sequence(self):
self.assertTrue(self.is_valid(self.no_sequence))
class TestMd5sum(unittest.TestCase):
def setUp(self):
self.seqs = [">asdf", "ASDF", ">qwer", "TYUI"]
def test_default(self):
self.assertEqual(
get_output(self.seqs, ["md5sum"]), ["28fd532b933aaa89d2188b98241a8b46"]
)
def test_eachseq(self):
self.assertEqual(
get_output(self.seqs, ["md5sum", "-q"]),
[
"asdf\t6d87a19f011653459575ceb722db3b69",
"qwer\t6e9758614cca89162b2d19922de103bb",
],
)
def test_replaceseq(self):
self.assertEqual(
get_output(self.seqs, ["md5sum", "-r"]),
[
">6d87a19f011653459575ceb722db3b69",
"ASDF",
">6e9758614cca89162b2d19922de103bb",
"TYUI",
],
)
def test_headers(self):
self.assertEqual(
get_output(self.seqs, ["md5sum", "-d"]),
["c69874b898abb180ac71bd99bc16f8fb"],
)
def test_seqs(self):
self.assertEqual(
get_output(self.seqs, ["md5sum", "-s"]),
["ed9b124094bc93e7f611da252d06f628"],
)
class TestClean(unittest.TestCase):
def setUp(self):
self.seq = [">a", " gAtA cA-NY "]
self.aaseq = [">p", " gAtA cA-NB "]
self.longseq = [">l", "A" * 91]
self.header = [">l a", "A", ">m|a", "A"]
self.gapamb = [">a", "yy--_.ATT"]
self.nonstandard_dna = [">a", ".-_XxCAT"]
self.nonstandard_pro = [">a", ".-_GANDALF"]
def test_default(self):
self.assertEqual(get_output(self.seq, ["clean"])[1], "gAtAcA-NY")
def test_case(self):
self.assertEqual(get_output(self.seq, ["clean", "-u"])[1], "GATACA-NY")
self.assertEqual(get_output(self.seq, ["clean", "-l"])[1], "gataca-ny")
def test_masking(self):
self.assertEqual(
get_output(self.seq, ["clean", "-t", "nucl", "-m"])[1], "NANANA-NY"
)
self.assertEqual(
get_output(self.seq, ["clean", "-t", "nucl", "-mr"])[1], "NANANA-NN"
)
def test_type(self):
for d in ["n", "nu", "nuc", "nucl", "dna"]:
self.assertEqual(
get_output(self.seq, ["clean", "-t", d, "-r"])[1], "gAtAcA-NN"
)
for d in ["p", "pro", "prot", "protein", "aa", "pep"]:
self.assertEqual(
get_output(self.aaseq, ["clean", "-t", d, "-r"])[1], "gAtAcA-NX"
)
def test_toseq(self):
self.assertEqual(get_output([">a", "ASD!@(#*& D"], ["clean", "-x"])[1], "ASDD")
def test_irregulars(self):
self.assertEqual(
get_output([">p", "YbJuZ"], ["clean", "-t", "p", "-r"])[1], "YXXXX"
)
self.assertEqual(
get_output([">n", "ATRySWkMDbHVG"], ["clean", "-t", "n", "-r"])[1],
"ATNNNNNNNNNNG",
)
# Unambiguously illegal characters are not masked
self.assertEqual(
get_output([">p", "YOU]"], ["clean", "-t", "p", "-r"])[1], "YOX]"
)
self.assertEqual(
get_output([">n", "ATryjG*"], ["clean", "-t", "n", "-r"])[1], "ATNNjG*"
)
def test_nonstandard_dna(self):
self.assertEqual(
get_output(self.nonstandard_dna, ["clean", "-d", "-t", "n"])[1], "---NnCAT"
)
def test_nonstandard_pro(self):
self.assertEqual(
get_output(self.nonstandard_pro, ["clean", "-d", "-t", "p"])[1],
"---GANDALF",
)
def test_wrap(self):
self.assertEqual(get_output(self.longseq, ["clean", "-w", "30"])[1], "A" * 30)
self.assertEqual(get_output(self.longseq, ["clean", "-w", "30"])[4], "A")
# test no-wrap
self.assertEqual(get_output(self.longseq, ["clean", "-w", "0"])[1], "A" * 91)
def test_reduce_header(self):
self.assertEqual(get_output(self.header, ["clean", "-s"])[0], ">l")
self.assertEqual(get_output(self.header, ["clean", "-s"])[2], ">m")
self.assertEqual(get_output(self.header, ["clean", "-s", '--delimiter="[ |]"'])[0], ">l a")
### why the tarnation doesn't this work? it works from the command line, I swear it does
# self.assertEqual(get_output(self.header, ["clean", "-s", '--delimiter="[ |]"'])[2], ">m")
def test_reduce_and_mask(self):
self.assertEqual(get_output(self.gapamb, ["clean", "-uxrt", "n"])[1], "NNATT")
class TestFilter(unittest.TestCase):
def setUp(self):
self.seq = [">a", "ASDFX", ">b", "ASDF", ">c", "ASD", ">d", ""]
def test_shorter_than(self):
self.assertEqual(
get_output(self.seq, ["filter", "-s", 5])[0::2], [">a", ">b", ">c", ">d"]
)
self.assertEqual(
get_output(self.seq, ["filter", "-s", 4])[0::2], [">b", ">c", ">d"]
)
self.assertEqual(get_output(self.seq, ["filter", "-s", 3])[0::2], [">c", ">d"])
self.assertEqual(get_output(self.seq, ["filter", "-s", 0])[0::2], [">d"])
def test_longer_than(self):
self.assertEqual(
get_output(self.seq, ["filter", "-l", 0])[0::2], [">a", ">b", ">c", ">d"]
)
self.assertEqual(
get_output(self.seq, ["filter", "-l", 3])[0::2], [">a", ">b", ">c"]
)
self.assertEqual(get_output(self.seq, ["filter", "-l", 4])[0::2], [">a", ">b"])
self.assertEqual(get_output(self.seq, ["filter", "-l", 5])[0::2], [">a"])
def test_composition(self):
comp = [">a", "AAAAG.....", ">b", "AG........", ">c", "AAX......."]
self.assertEqual(
get_output(comp, ["filter", "-c", "X == 0"])[0::2], [">a", ">b"]
)
self.assertEqual(
get_output(comp, ["filter", "-c", "X = 0"])[0::2], [">a", ">b"]
)
self.assertEqual(get_output(comp, ["filter", "-c", "X != 0"])[0::2], [">c"])
self.assertEqual(get_output(comp, ["filter", "-c", "AG > .3"])[0::2], [">a"])
self.assertEqual(
get_output(comp, ["filter", "-c", "AG < .3"])[0::2], [">b", ">c"]
)
self.assertEqual(get_output(comp, ["filter", "-c", "AG >= .5"])[0::2], [">a"])
self.assertEqual(
get_output(comp, ["filter", "-c", "AG <= .5"])[0::2], [">a", ">b", ">c"]
)
class TestHeaderGrep(unittest.TestCase):
def setUp(self):
self.headers = [
">gg sco 12",
"A",
">gg bob 48a",
"A",
">gl har 61",
"A",
">aL har 61",
"A",
]
def test_default(self):
self.assertEqual(
get_output(self.headers, ["grep", "-y", "bob"]), [">gg bob 48a", "A"]
)
self.assertEqual(
get_output(self.headers, ["grep", "-y", "gg"]),
[">gg sco 12", "A", ">gg bob 48a", "A"],
)
def test_perl(self):
self.assertEqual(
get_output(self.headers, ["grep", "-yP", ".g"]),
[">gg sco 12", "A", ">gg bob 48a", "A"],
)
self.assertEqual(
get_output(self.headers, ["grep", "-yP", "bob|sco"]),
[">gg sco 12", "A", ">gg bob 48a", "A"],
)
self.assertEqual(
get_output(self.headers, ["grep", "-yP", "\d+[a-z]"]), [">gg bob 48a", "A"]
)
def test_invert(self):
self.assertEqual(
get_output(self.headers, ["grep", "-yvP", "^g"]), [">aL har 61", "A"]
)
def test_case_sensitive(self):
self.assertEqual(
get_output(self.headers, ["grep", "-yI", "aL"]), [">aL har 61", "A"]
)
self.assertEqual(get_output(self.headers, ["grep", "-yI", "al"]), [""])
self.assertEqual(
get_output(self.headers, ["grep", "-y", "al"]), [">aL har 61", "A"]
)
self.assertEqual(
get_output(self.headers, ["grep", "-y", "aL"]), [">aL har 61", "A"]
)
def test_count(self):
self.assertEqual(get_output(self.headers, ["grep", "-cP", "gg"]), ["2"])
def test_only_matching(self):
self.assertEqual(
get_output([">a;glob.", "GACFADE"], ["grep", "-oP", "g..b\."]), ["glob."]
)
def test_only_matching_wrap(self):
self.assertEqual(
get_output(
[">a;glob.", "GACFADE"], ["grep", "-w", "a;([^.]+)", "-o", "glob"]
),
["glob"],
)
def test_line_regexp(self):
self.assertEqual(get_output(self.headers, ["grep", "-x", "gg"]), [""])
self.assertEqual(
get_output(self.headers, ["grep", "-x", "gg sco 12"]), [">gg sco 12", "A"]
)
def test_exact(self):
self.assertEqual(get_output(self.headers, ["grep", "-X", "gg"]), [""])
self.assertEqual(
get_output(self.headers, ["grep", "-X", "gg sco 12"]), [">gg sco 12", "A"]
)
class TestSequenceGrep(unittest.TestCase):
def setUp(self):
self.seqs = [
">a",
"AAGATACA",
">b",
"GAACATAACAT",
">c",
"aaaaa",
">d",
"aaaaaa",
">e",
"A",
]
self.revseqs = [">a", "AAG", ">b", "CTT"]
def test_default(self):
self.assertEqual(
get_output(self.seqs, ["grep", "-qy", "gataca"]), [">a", "AAGATACA"]
)
def test_ambiguous_nucl_encodings(self):
for h, q in [
("M", "AC"),
("R", "AG"),
("W", "AT"),
("S", "CG"),
("Y", "CT"),
("K", "GT"),
("V", "ACG"),
("H", "ACT"),
("D", "AGT"),
("B", "CGT"),
("N", "ACGT"),
]:
self.assertNotEqual(
get_output([">{}".format(h), q], ["grep", "-qyG", "^{}+$".format(h)]),
[""],
)
compl = "".join(set("ACGT") - set(q))
if compl:
self.assertEqual(
get_output([">{}".format(h), compl], ["grep", "-qyG", h]), [""]
)
def test_ambiguous_nucl_regex(self):
self.assertEqual(
get_output(self.seqs, ["grep", "-qyG", "R{4}Y"]), [">a", "AAGATACA"]
)
self.assertEqual(
get_output(self.seqs, ["grep", "-qyG", "[^Y]{4}Y"]), [">a", "AAGATACA"]
)
def test_count(self):
self.assertEqual(get_output(self.seqs, ["grep", "-cq", "aa"]), ["4"])
def test_matches(self):
self.assertEqual(get_output(self.seqs, ["grep", "-qm", "aa"]), ["8"])
def test_count_matches(self):
self.assertEqual(get_output(self.seqs, ["grep", "-qcm", "aa"]), ["4\t8"])
def test_both_strands(self):
self.assertEqual(
get_output(self.revseqs, ["grep", "-qy", "AA"]), self.revseqs[0:2]
)
self.assertEqual(get_output(self.revseqs, ["grep", "-qby", "AA"]), self.revseqs)
self.assertEqual(
get_output(self.revseqs, ["grep", "-qy", "AG"]), self.revseqs[0:2]
)
self.assertEqual(get_output(self.revseqs, ["grep", "-qby", "AG"]), self.revseqs)
def test_gff(self):
self.assertEqual(
get_output(self.seqs, ["grep", "--gff", "CAT"]),
[
"b\tsmof-{}\tregex_match\t4\t6\t.\t.\t.\t.".format(smof.__version__),
"b\tsmof-{}\tregex_match\t9\t11\t.\t.\t.\t.".format(smof.__version__),
],
)
def test_gff_context(self):
self.assertEqual(
get_output(self.seqs, ["grep", "--gff", "-A 1", "CAT"]),
[
"b\tsmof-{}\tregex_match\t4\t7\t.\t.\t.\t.".format(smof.__version__),
"b\tsmof-{}\tregex_match\t9\t11\t.\t.\t.\t.".format(smof.__version__),
],
)
self.assertEqual(
get_output(self.seqs, ["grep", "--gff", "-B 1", "CAT"]),
[
"b\tsmof-{}\tregex_match\t3\t6\t.\t.\t.\t.".format(smof.__version__),
"b\tsmof-{}\tregex_match\t8\t11\t.\t.\t.\t.".format(smof.__version__),
],
)
def test_gff_seqid(self):
self.assertEqual(
get_output([">a|b.1 desc", "AC"], ["grep", "--gff", "A"]),
["a|b.1\tsmof-{}\tregex_match\t1\t1\t.\t.\t.\t.".format(smof.__version__)],
)
def test_only_matching(self):
self.assertEqual(
get_output([">a", "GACFADE"], ["grep", "-qoP", "A."])[1::2], ["AC", "AD"]
)
self.assertEqual(
get_output([">a", "GAACFADE"], ["grep", "-qoP", "A.*?D"])[1::2], ["AACFAD"]
)
def test_only_matching_context(self):
self.assertEqual(
get_output([">a", "GACFADE"], ["grep", "-qoP", "-A 1", "A."])[1::2],
["ACF", "ADE"],
)
self.assertEqual(
get_output([">a", "GACFADE"], ["grep", "-qoP", "-A 2", "A."])[1::2],
["ACFA", "ADE"],
)
self.assertEqual(
get_output([">a", "GACFADE"], ["grep", "-qoP", "-B 1", "A."])[1::2],
["GAC", "FAD"],
)
self.assertEqual(
get_output([">a", "GACFADE"], ["grep", "-qoP", "-B 2", "A."])[1::2],
["GAC", "CFAD"],
)
def test_only_matching_context_both(self):
self.assertEqual(
get_output([">a", "GAAGGGTTA"], ["grep", "-qoPb", "-A 1", "AA"])[1::2],
["AAG", "GTT"],
)
def test_only_matching_context_reverse(self):
self.assertEqual(
get_output([">a", "GAAGGGTTA"], ["grep", "-qoPr", "-A 1", "AA"])[1], "GTT"
)
def test_only_matching_wrap(self):
self.assertEqual(
get_output([">a", "GACFADE"], ["grep", "-qw", "CF(..)", "-o", "AD"])[1],
"AD",
)
def test_only_matching_wrap_reverse(self):
self.assertEqual(
get_output([">a", "GACFADE"], ["grep", "-qw", "CF(..)", "-o", "AD"])[1],
"AD",
)
self.assertEqual(
get_output([">a", "GAAGGGTTA"], ["grep", "-qbw", "AAC(..)", "-o", "CC"])[1],
"GG",
)
def test_gapped_search(self):
self.assertEqual(
get_output([">a", "GA-CFADE"], ["grep", "-qgy", "AC"])[1], "GA-CFADE"
)
def test_gapped_search_only(self):
self.assertEqual(
get_output([">a", "GA-CFADE"], ["grep", "-qgyo", "AC"])[1], "A-C"
)
self.assertEqual(
get_output([">a", "--GA--C-F-ADE"], ["grep", "-qgyo", "ACF"])[1], "A--C-F"
)
self.assertEqual(
get_output([">a", "G--ACF-ADE"], ["grep", "-qgyo", "ACF"])[1], "ACF"
)
def test_gapped_search_only_revcom(self):
self.assertEqual(
get_output([">a", "GATA-CA"], ["grep", "-qyorg", "GTA"])[1], "TA-C"
)
self.assertEqual(
get_output([">a", "GATA-CA"], ["grep", "-qyobg", "GTA"])[1], "TA-C"
)
def test_line_regexp(self):
self.assertEqual(get_output(self.seqs, ["grep", "-qx", "GAA"]), [""])
self.assertEqual(
get_output(self.seqs, ["grep", "-qx", "GAACATAACAT"]), [">b", "GAACATAACAT"]
)
self.assertEqual(
get_output(self.seqs, ["grep", "-Pqx", "GAA.*"]), [">b", "GAACATAACAT"]
)
def test_exact(self):
# Partial exact matches return nothing
self.assertEqual(get_output(self.seqs, ["grep", "-qX", "GAA"]), [""])
# Full exact matches return everything
self.assertEqual(
get_output(self.seqs, ["grep", "-qX", "GAACATAACAT"]), [">b", "GAACATAACAT"]
)
def test_fastain(self):
f = tempfile.NamedTemporaryFile(delete=False)
f.write(b">a\nGAT")
filename = f.name
f.close()
self.assertEqual(
get_output(self.seqs, ["grep", "-y", "--fastain", filename]),
[">a", "AAGATACA"],
)
self.assertEqual(
get_output(self.seqs, ["grep", "-yo", "--fastain", filename])[1], "GAT"
)
self.assertEqual(
get_output(self.seqs, ["grep", "--gff", "--fastain", filename])[0].split(
"\t"
)[3:5],
["3", "5"],
)
os.unlink(filename)
class TestGrepBadCombinations(unittest.TestCase):
def setUp(self):
self.seq = [">a", "A"]
def test_wrap_incompatible_options(self):
self.assertRaises(
SystemExit, get_output, self.seq, ["grep", "-Pw", "a(b)", "a"]
)
self.assertRaises(
SystemExit, get_output, self.seq, ["grep", "-Gw", "a(b)", "a"]
)
def test_mv(self):
self.assertRaises(SystemExit, get_output, self.seq, ["grep", "-mv", "a"])
def test_seq_only_matches_against_header(self):
self.assertRaises(SystemExit, get_output, self.seq, ["grep", "-b", "a"])
self.assertRaises(SystemExit, get_output, self.seq, ["grep", "-r", "a"])
self.assertRaises(SystemExit, get_output, self.seq, ["grep", "-G", "a"])
def test_contradictory_options(self):
self.assertRaises(SystemExit, get_output, self.seq, ["grep", | |
12130
},
{
"id_ref": 536,
"del_ref": "COYOACAN",
"cve_col": "03-141",
"nombre_ref": "PEDREGAL DE STO DOMINGO III",
"pob_2010": 10067
},
{
"id_ref": 537,
"del_ref": "COYOACAN",
"cve_col": "03-142",
"nombre_ref": "PEDREGAL DE STO DOMINGO IV",
"pob_2010": 10700
},
{
"id_ref": 538,
"del_ref": "COYOACAN",
"cve_col": "03-147",
"nombre_ref": "PEDREGAL DE STO DOMINGO IX",
"pob_2010": 7931
},
{
"id_ref": 539,
"del_ref": "COYOACAN",
"cve_col": "03-143",
"nombre_ref": "PEDREGAL DE STO DOMINGO V",
"pob_2010": 11188
},
{
"id_ref": 540,
"del_ref": "COYOACAN",
"cve_col": "03-144",
"nombre_ref": "PEDREGAL DE STO DOMINGO VI",
"pob_2010": 13928
},
{
"id_ref": 541,
"del_ref": "COYOACAN",
"cve_col": "03-145",
"nombre_ref": "PEDREGAL DE STO DOMINGO VII",
"pob_2010": 10999
},
{
"id_ref": 542,
"del_ref": "COYOACAN",
"cve_col": "03-146",
"nombre_ref": "PEDREGAL DE STO DOMINGO VIII",
"pob_2010": 7050
},
{
"id_ref": 543,
"del_ref": "COYOACAN",
"cve_col": "03-091",
"nombre_ref": "PEDREGAL DEL MAUREL",
"pob_2010": 1998
},
{
"id_ref": 544,
"del_ref": "COYOACAN",
"cve_col": "03-092",
"nombre_ref": "PETROLERA TAXQUEÑA",
"pob_2010": 1251
},
{
"id_ref": 545,
"del_ref": "COYOACAN",
"cve_col": "03-093",
"nombre_ref": "PILOTO CULHUACAN (U HAB)",
"pob_2010": 3144
},
{
"id_ref": 546,
"del_ref": "COYOACAN",
"cve_col": "03-095",
"nombre_ref": "PRADOS DE COYOACAN",
"pob_2010": 2099
},
{
"id_ref": 547,
"del_ref": "COYOACAN",
"cve_col": "03-096",
"nombre_ref": "PRESIDENTES EJIDALES PRIMERA SECCION",
"pob_2010": 3465
},
{
"id_ref": 548,
"del_ref": "COYOACAN",
"cve_col": "03-152",
"nombre_ref": "PRESIDENTES EJIDALES SEGUNDA SECCION",
"pob_2010": 3101
},
{
"id_ref": 549,
"del_ref": "COYOACAN",
"cve_col": "03-097",
"nombre_ref": "RANCHO EL ROSARIO",
"pob_2010": 521
},
{
"id_ref": 550,
"del_ref": "COYOACAN",
"cve_col": "03-098",
"nombre_ref": "ROMERO DE TERREROS",
"pob_2010": 2875
},
{
"id_ref": 551,
"del_ref": "COYOACAN",
"cve_col": "03-159",
"nombre_ref": "ROMERO DE TERREROS (COND)",
"pob_2010": 438
},
{
"id_ref": 552,
"del_ref": "COYOACAN",
"cve_col": "03-099",
"nombre_ref": "ROMERO DE TERREROS (FRACC)",
"pob_2010": 2276
},
{
"id_ref": 553,
"del_ref": "COYOACAN",
"cve_col": "03-100",
"nombre_ref": "SAN DIEGO CHURUBUSCO",
"pob_2010": 1584
},
{
"id_ref": 554,
"del_ref": "COYOACAN",
"cve_col": "03-101",
"nombre_ref": "SAN FRANCISCO CULHUACAN (PBLO)",
"pob_2010": 14638
},
{
"id_ref": 555,
"del_ref": "COYOACAN",
"cve_col": "03-103",
"nombre_ref": "SAN LUCAS (BARR)",
"pob_2010": 4382
},
{
"id_ref": 556,
"del_ref": "COYOACAN",
"cve_col": "03-104",
"nombre_ref": "SAN MATEO (BARR)",
"pob_2010": 2945
},
{
"id_ref": 557,
"del_ref": "COYOACAN",
"cve_col": "03-105",
"nombre_ref": "SAN PABLO TEPETLAPA (PBLO)",
"pob_2010": 5739
},
{
"id_ref": 558,
"del_ref": "COYOACAN",
"cve_col": "03-106",
"nombre_ref": "SANTA CATARINA (BARR)",
"pob_2010": 3266
},
{
"id_ref": 559,
"del_ref": "COYOACAN",
"cve_col": "03-107",
"nombre_ref": "SANTA CECILIA",
"pob_2010": 3581
},
{
"id_ref": 560,
"del_ref": "COYOACAN",
"cve_col": "03-108",
"nombre_ref": "SANTA MARTHA DEL SUR",
"pob_2010": 1730
},
{
"id_ref": 561,
"del_ref": "COYOACAN",
"cve_col": "03-109",
"nombre_ref": "SANTA URSULA COAPA (PBLO)",
"pob_2010": 10478
},
{
"id_ref": 562,
"del_ref": "COYOACAN",
"cve_col": "03-110",
"nombre_ref": "SANTA URS<NAME>YOACAN",
"pob_2010": 607
},
{
"id_ref": 563,
"del_ref": "COYOACAN",
"cve_col": "03-112",
"nombre_ref": "ST<NAME>ULHUACAN (U HAB)",
"pob_2010": 3136
},
{
"id_ref": 564,
"del_ref": "COYOACAN",
"cve_col": "03-153",
"nombre_ref": "TAXQUEÑA",
"pob_2010": 1208
},
{
"id_ref": 565,
"del_ref": "COYOACAN",
"cve_col": "03-113",
"nombre_ref": "VIEJO EJIDO SANTA URSULA COAPA",
"pob_2010": 7475
},
{
"id_ref": 566,
"del_ref": "COYOACAN",
"cve_col": "03-114",
"nombre_ref": "VILLA COYOACAN",
"pob_2010": 2981
},
{
"id_ref": 567,
"del_ref": "COYOACAN",
"cve_col": "03-115",
"nombre_ref": "VILLA PANAMERICANA 1ERA. SECCIÓN (U HAB)",
"pob_2010": 2208
},
{
"id_ref": 568,
"del_ref": "COYOACAN",
"cve_col": "03-116",
"nombre_ref": "VILLA PANAMERICANA 2DA. SECCIÓN (U HAB)",
"pob_2010": 1706
},
{
"id_ref": 569,
"del_ref": "COYOACAN",
"cve_col": "03-117",
"nombre_ref": "VILLA PANAMERICANA 3ERA. SECCIÓN (U HAB)",
"pob_2010": 2322
},
{
"id_ref": 570,
"del_ref": "COYOACAN",
"cve_col": "03-118",
"nombre_ref": "VILLA PANAMERICANA 4TA. SECCIÓN (U HAB)",
"pob_2010": 1903
},
{
"id_ref": 571,
"del_ref": "COYOACAN",
"cve_col": "03-119",
"nombre_ref": "VILLA PANAMERICANA 5TA. SECCIÓN (U HAB)",
"pob_2010": 2538
},
{
"id_ref": 572,
"del_ref": "COYOACAN",
"cve_col": "03-120",
"nombre_ref": "VILLA PANAMERICANA 6TA. SECCIÓN (U HAB)",
"pob_2010": 295
},
{
"id_ref": 573,
"del_ref": "COYOACAN",
"cve_col": "03-121",
"nombre_ref": "VILLA PANAMERICANA 7MA. SECCIÓN (U HAB)",
"pob_2010": 1901
},
{
"id_ref": 574,
"del_ref": "COYOACAN",
"cve_col": "03-122",
"nombre_ref": "VILLA QUIETUD (FRACC)",
"pob_2010": 3743
},
{
"id_ref": 575,
"del_ref": "COYOACAN",
"cve_col": "03-123",
"nombre_ref": "VILLAS DEL PEDREGAL (U HAB)",
"pob_2010": 1055
},
{
"id_ref": 576,
"del_ref": "COYOACAN",
"cve_col": "03-124",
"nombre_ref": "VISTAS DEL MAUREL (U HAB)",
"pob_2010": 1391
},
{
"id_ref": 577,
"del_ref": "COYOACAN",
"cve_col": "03-125",
"nombre_ref": "XOTEPINGO",
"pob_2010": 1243
},
{
"id_ref": 578,
"del_ref": "COYOACAN",
"cve_col": "03-094",
"nombre_ref": "",
"pob_2010": None
},
{
"id_ref": 579,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-054",
"nombre_ref": "1o DE MAYO",
"pob_2010": 491
},
{
"id_ref": 580,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-001",
"nombre_ref": "<NAME>",
"pob_2010": 1770
},
{
"id_ref": 581,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-002",
"nombre_ref": "<NAME>",
"pob_2010": 1513
},
{
"id_ref": 582,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-003",
"nombre_ref": "<NAME>",
"pob_2010": 3336
},
{
"id_ref": 583,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-004",
"nombre_ref": "AHUATENCO",
"pob_2010": 3841
},
{
"id_ref": 584,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-005",
"nombre_ref": "<NAME>",
"pob_2010": 641
},
{
"id_ref": 585,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-006",
"nombre_ref": "BOSQUES DE LAS LOMAS",
"pob_2010": 13279
},
{
"id_ref": 586,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-007",
"nombre_ref": "CACALOTE",
"pob_2010": 145
},
{
"id_ref": 587,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-008",
"nombre_ref": "COLA DE PATO",
"pob_2010": 1680
},
{
"id_ref": 588,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-012",
"nombre_ref": "CORREDOR SANTA FE",
"pob_2010": 5018
},
{
"id_ref": 589,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-011",
"nombre_ref": "CRUZ BLANCA",
"pob_2010": 1631
},
{
"id_ref": 590,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-014",
"nombre_ref": "EBANO (U HAB)",
"pob_2010": 2091
},
{
"id_ref": 591,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-015",
"nombre_ref": "EL CONTADERO",
"pob_2010": 5116
},
{
"id_ref": 592,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-016",
"nombre_ref": "EL MOLINITO",
"pob_2010": 452
},
{
"id_ref": 593,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-017",
"nombre_ref": "EL MOLINO",
"pob_2010": 1274
},
{
"id_ref": 594,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-018",
"nombre_ref": "EL TIANGUILLO",
"pob_2010": 1898
},
{
"id_ref": 595,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-019",
"nombre_ref": "EL YAQUI",
"pob_2010": 1125
},
{
"id_ref": 596,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-021",
"nombre_ref": "JARDINES DE LA PALMA (HUIZACHITO)",
"pob_2010": 2426
},
{
"id_ref": 597,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-022",
"nombre_ref": "JESUS DEL MONTE",
"pob_2010": 2175
},
{
"id_ref": 598,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-023",
"nombre_ref": "LA PILA",
"pob_2010": 4127
},
{
"id_ref": 599,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-042",
"nombre_ref": "LA RETAMA",
"pob_2010": 978
},
{
"id_ref": 600,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-025",
"nombre_ref": "LA VENTA",
"pob_2010": 531
},
{
"id_ref": 601,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-026",
"nombre_ref": "LAS LAJAS",
"pob_2010": 2421
},
{
"id_ref": 602,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-028",
"nombre_ref": "LAS TINAJAS",
"pob_2010": 2199
},
{
"id_ref": 603,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-030",
"nombre_ref": "LOMA DEL PADRE",
"pob_2010": 4362
},
{
"id_ref": 604,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-031",
"nombre_ref": "LOMAS DE MEMETLA",
"pob_2010": 4684
},
{
"id_ref": 605,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-032",
"nombre_ref": "LOMAS DE VISTA HERMOSA",
"pob_2010": 7922
},
{
"id_ref": 606,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-033",
"nombre_ref": "LOMAS DEL CHAMIZAL",
"pob_2010": 4802
},
{
"id_ref": 607,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-034",
"nombre_ref": "MANZANASTITLA",
"pob_2010": 2099
},
{
"id_ref": 608,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-035",
"nombre_ref": "MEMETLA",
"pob_2010": 712
},
{
"id_ref": 609,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-036",
"nombre_ref": "NAVIDAD (GRANJAS DE NAVIDAD)",
"pob_2010": 10534
},
{
"id_ref": 610,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-038",
"nombre_ref": "PALO ALTO (GRANJAS)",
"pob_2010": 3582
},
{
"id_ref": 611,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-055",
"nombre_ref": "PORTAL DEL SOL",
"pob_2010": 1896
},
{
"id_ref": 612,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-058",
"nombre_ref": "SAN JOSE DE L<NAME> I",
"pob_2010": 7135
},
{
"id_ref": 613,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-059",
"nombre_ref": "SAN JOSE DE LOS CEDROS II",
"pob_2010": 7679
},
{
"id_ref": 614,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-045",
"nombre_ref": "SAN LORENZO ACOPILCO (PBLO)",
"pob_2010": 8609
},
{
"id_ref": 615,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-046",
"nombre_ref": "SAN MATEO TLALTENANGO (PBLO)",
"pob_2010": 17567
},
{
"id_ref": 616,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-047",
"nombre_ref": "SAN PABLO CHIMALPA (PBLO)",
"pob_2010": 9565
},
{
"id_ref": 617,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-056",
"nombre_ref": "SAN PEDRO CUAJIMALPA (PBLO)",
"pob_2010": 19789
},
{
"id_ref": 618,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-050",
"nombre_ref": "TEPETONGO",
"pob_2010": 3908
},
{
"id_ref": 619,
"del_ref": "CUAJIMALPA DE MORELOS",
"cve_col": "04-027",
"nombre_ref": "TEXCALCO",
"c) Ja, ik koop dit product af en toe (minder dan maandelijks)."
# "d) Ja, ik heb dit product ooit al gekocht."
# "e) Neen, ik heb dit product nog nooit gekocht."
# Door middel van het gebruik van de bijhorende toetsen (als je iets wekelijks koopt moet je de 'a' toets indrukken) kan je aanduiden welk statement het beste past
# Op deze manier willen we nagaan in welke mate mensen het product kennen
# Als de verkeerde toets wordt ingedrukt, dan krijgen de participanten de kans om opnieuw te drukken
# Als de participant van antwoord wil veranderen, dan drukken ze gewoon opnieuw op de toets, en dan op enter
# Het antwoord dat na 'Antwoord : ' staat zal pas weggeschreven worden als er op 'enter' wordt geduwd
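# Hypothetical illustration (not part of the original script): the key-to-statement mapping handled
# further below with an if-chain could equivalently be expressed as a dict; Statement0..Statement4
# are defined later in this file.
#
#   key_to_statement = {'a': Statement0, 'b': Statement1, 'c': Statement2,
#                       'd': Statement3, 'e': Statement4}
#   statement.append(key_to_statement[FirstKey[0]])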
#######################################################################################################################################################################################
#######################################################################################################################################################################################
# Definitions
# Phase 2
# Familiarity
# Declare text input
instrFam1 = 'In de volgende fase kan je aanduiden in hoe verre je vertrouwd bent met de producten die je daarnet zag.'
instrFam2 = 'Telkens zal je onder het product 5 statements zien. \n \nJe kan een statement selecteren door te drukken op de toets op het keyboard dat overeenkomt met het statement. \n \n'
instrFam3 = 'Bijvoorbeeld: als je een product wekelijks koopt, dan kan je dit aanduiden door op de "a"-toets te drukken op het keyboard. \n\nDruk daarna op de "enter"-toets om jouw antwoord te bevestigen.'
instrFam4 = 'Als je een fout maakte, dan kan je gewoon herdrukken, het antwoord wordt pas gevalideerd als onderaan "antwoord = " verschijnt, en je dan op "enter" duwt.'
instrFam5 = 'Hier hoef je je niet te haasten, dus denk goed na voor je jouw antwoord valideert.'
instrFam6 = 'Als alles duidelijk is mag je op een toets naar keuze drukken, en het volgende gedeelte zal starten na 5 seconden. \n \nAls er nog vragen zijn kan je deze nu stellen aan de proefleider.'
time1 = '1'
time2 = '2'
time3 = '3'
time4 = '4'
time5 = '5'
Statement0 = "a) Ja, ik koop dit product heel vaak (wekelijks)."
Statement1 = "b) Ja, ik koop dit product vaak (maandelijks)."
Statement2 = "c) Ja, ik koop dit product af en toe (minder dan maandelijks)."
Statement3 = "d) Ja, ik heb dit product ooit al gekocht."
Statement4 = "e) Neen, ik heb dit product nog nooit gekocht."
EndInstr2 = 'Dit deel is afgelopen. \n \nDruk op een willekeurige toets om verder te gaan met het volgende gedeelte.'
Fatal = 'Oeps! Er is iets fout gegaan... \n \nRoep de experimentleider.'
name0 = ["LIPTON ICE TEA (2L)"]
name1 = ["CO<NAME> (1.5L)"]
name2 = ["TIC TAC MINT (100 stuks)"]
name3 = ["PHILADELPHIA VERSE KAAS LIGHT (300 gram)"]
name4 = ["MINUTE MAID ORANGE (1L)"]
name5 = ["LAYS PAPRIKA CHIPS (250 gram)"]
name6 = ["<NAME> (200 gram)"]
name7 = ["<NAME> (500 gram)"]
refName= name0 + name1 + name2 + name3 + name4 + name5 + name6 + name7
Name = np.ravel(refName)
refName = list(Name)
# Declare PsychoPy text stimuli properties
instructionFam1 = visual.TextStim(win, text=instrFam1,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
instructionFam2 = visual.TextStim(win, text=instrFam2,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
instructionFam3 = visual.TextStim(win, text=instrFam3,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
instructionFam4 = visual.TextStim(win, text=instrFam4,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
instructionFam5 = visual.TextStim(win, text=instrFam5,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
instructionFam6 = visual.TextStim(win, text=instrFam6,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
timing1 = visual.TextStim(win, text=time1,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
timing2 = visual.TextStim(win, text=time2,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
timing3 = visual.TextStim(win, text=time3,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
timing4 = visual.TextStim(win, text=time4,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
timing5 = visual.TextStim(win, text=time5,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
Statement0_d = visual.TextStim(win, text= Statement0, units = 'norm', color='Black',pos=(0,-.15), height = .05, alignHoriz = 'center', flipHoriz=False)
Statement1_d = visual.TextStim(win, text= Statement1, units = 'norm', color='Black',pos=(0,-.30), height = .05, alignHoriz = 'center', flipHoriz=False)
Statement2_d = visual.TextStim(win, text= Statement2, units = 'norm', color='Black',pos=(0,-.45), height = .05, alignHoriz = 'center', flipHoriz=False)
Statement3_d = visual.TextStim(win, text= Statement3, units = 'norm', color='Black',pos=(0,-.60), height = .05, alignHoriz = 'center', flipHoriz=False)
Statement4_d = visual.TextStim(win, text= Statement4, units = 'norm', color='Black',pos=(0,-.75), height = .05, alignHoriz = 'center', flipHoriz=False)
EndInstruction2 = visual.TextStim(win, text=EndInstr2,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
FatalMessage = visual.TextStim(win, text=Fatal,units='norm',height=0.12, color='Black',pos=[0,0], alignHoriz='center',flipHoriz=False)
#######################################################################################################################################################################################
# Show the instructions to the participants
core.wait(0.5)
while True:
instructionFam1.draw()
win.flip()
event.waitKeys()
instructionFam2.draw()
win.flip()
event.waitKeys()
instructionFam3.draw()
win.flip()
event.waitKeys()
instructionFam4.draw()
win.flip()
event.waitKeys()
instructionFam5.draw()
win.flip()
event.waitKeys()
instructionFam6.draw()
win.flip()
event.waitKeys()
break
#######################################################################################################################################################################################
# Countdown
while True:
timing5.draw()
win.flip()
time.sleep(1)
timing4.draw()
win.flip()
time.sleep(1)
timing3.draw()
win.flip()
time.sleep(1)
timing2.draw()
win.flip()
time.sleep(1)
timing1.draw()
win.flip()
time.sleep(1)
break
#######################################################################################################################################################################################
# Prepare for execution
# Phase 2
# Familiarity
productList = []
for i in range(7):
word = 'product%i' %i
print word
productList.append(word)
print productList
experiment_data = []
#######################################################################################################################################################################################
# Run the experiment
# Phase 2
# Familiarity
trialnumber = 0
os.chdir('C:/Users/Pieter/Dropbox/Academiejaar 2016-2017/Masterproef I/Code/RandomisatieFiles/Antwoord files')
with open("2_Phase_Familiarity_PP_%02d_FailSave.txt" %proefpersoonNR, 'w') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerow([datetime.datetime.now()])
writer.writerow(['Product','Key','Statement','RT (in ms)'])
try:
while trialnumber < len(productList):
Pathway = "C:/Users/Pieter/Dropbox/Academiejaar 2016-2017/Masterproef I/PieterHuycke_paradigma/PieterHuycke/implicit/"
Product = productList[trialnumber]
New = Pathway+Product
Newer = New + "/regular/"
StimProduct = Newer + Product + ".png"
product = visual.ImageStim(win, image= StimProduct, pos = [0,.5])
name = refName[trialnumber]
name_drawing = visual.TextStim(win, text= name, units = 'norm', color='Black',pos=(0,0), height = .05, alignHoriz = 'center', flipHoriz=False)
product.draw()
name_drawing.draw()
Statement0_d.draw()
Statement1_d.draw()
Statement2_d.draw()
Statement3_d.draw()
Statement4_d.draw()
win.flip()
event.clearEvents()
FirstKey = event.waitKeys(keyList = ['Escape','escape', 'esc','a','b','c','d','e'])
t1 = int(round(time.time() * 1000))
if FirstKey[0] in ['Escape','escape', 'esc']:
break
if FirstKey[0] in ['a','b','c','d','e']:
answer = FirstKey[0]
Answered = visual.TextStim(win, text= 'Antwoord: %s' %answer, units = 'norm', color='Black',pos=(0,-.90), height = .05, alignHoriz = 'center', flipHoriz=False)
product.draw()
name_drawing.draw()
Statement0_d.draw()
Statement1_d.draw()
Statement2_d.draw()
Statement3_d.draw()
Statement4_d.draw()
Answered.draw()
win.flip()
statement = []
if FirstKey[0] == 'a':
statement.append(Statement0)
if FirstKey[0] == 'b':
statement.append(Statement1)
if FirstKey[0] == 'c':
statement.append(Statement2)
if FirstKey[0] == 'd':
statement.append(Statement3)
if FirstKey[0] == 'e':
statement.append(Statement4)
SecondKey = event.waitKeys()
t2 = int(round(time.time() * 1000))
reactiontime = int(t2-t1)
if SecondKey[0] in ['return']:
writer.writerow([Product,FirstKey[0],statement[0],reactiontime])
experiment_data.append([Product,FirstKey[0],statement[0],reactiontime])
else:
continue
if trialnumber+1 == len(productList):
writer.writerow([datetime.datetime.now()])
trialnumber += 1
except:
win.flip(clearBuffer=True)
FatalMessage.draw()
win.flip()
event.waitKeys()
with open("2_Phase_Familiarity_PP_%02d_LoggingFile.txt" %proefpersoonNR, 'w') as f:
e1 = sys.exc_info()[0]
e2 = sys.exc_info()[1]
writer = csv.writer(f,delimiter=' ')
writer.writerow([i,e1, e2])
exp_Familiarity_Data = pd.DataFrame(experiment_data, columns = ['Product','Key','Statement','RT (in ms)'])
print len(exp_Familiarity_Data)
print exp_Familiarity_Data
exp_Familiarity_Data.to_csv("2_Phase_Familiarity_PP_%02d.txt" %proefpersoonNR, sep = '\t')
#######################################################################################################################################################################################
# Show the 'end message' of this phase
while not event.getKeys():
EndInstruction2.draw()
win.flip()
#######################################################################################################################################################################################
# Explanation block
# Phase 2
# PERVAL
# In this block the participants see each product once more, but now with its price shown alongside
# The participants then indicate how much they agree with a given statement
# Every statement they see belongs to the PERVAL scale, more specifically the 'price' dimension
# The price shown with the product is the price you would normally see in the store
# In our case this is price '3', because prices 0, 1 & 2 are lower and prices 4, 5 & 6 are higher than this price
# By clicking on the scale with the mouse, the participants indicate how much they agree with a statement (a rating-scale sketch follows below)
# 1 means they 'completely disagree'
# 7 means they 'completely agree'
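# Hypothetical sketch (not the original implementation) of how such a 1-7 agreement scale could be
# collected with PsychoPy's RatingScale; the Dutch anchor labels mirror the comments above:
#
#   perval_scale = visual.RatingScale(win, low=1, high=7,
#                                     labels=['helemaal oneens', 'helemaal eens'],
#                                     showAccept=True, acceptText='OK', mouseOnly=True)
#   while perval_scale.noResponse:
#       perval_scale.draw()
#       win.flip()
#   rating = perval_scale.getRating()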
#######################################################################################################################################################################################
# Working directory
# Phase 2
# PERVAL
os.chdir('C:/Users/Pieter/Dropbox/Academiejaar 2016-2017/Masterproef I/Code/RandomisatieFiles/Shuffled PERVAL')
#######################################################################################################################################################################################
# Read in and process the file
# Shuffled PERVAL
# Phase 2
# PERVAL
dataShuffle = []
with open("PERVAL_PP_%02d.txt" %proefpersoonNR, 'r') as f:
reader = csv.reader(f, dialect = 'excel', delimiter = '\t')
for row in reader:
print row
dataShuffle.append(row)
del(dataShuffle[0])
clean = []
for i in range(len(dataShuffle)):
nodig = dataShuffle[i]
del(nodig[0])
clean.append(nodig)
trialShuffle = clean
print '@@@'
print len(trialShuffle), trialShuffle
print '@@@'
#######################################################################################################################################################################################
# Definitions
# Phase 2
# PERVAL
# Declare text input
core.wait(0.5)
instr15 = 'Welkom bij het laatste deel van dit experiment! \n \nOpnieuw kan je op een willekeurige toets drukken om verder te gaan.'
instr16 = 'In dit deel hoef je aan te duiden hoe eens je het bent met een bepaald statement. \n \nDe statements die je ziet zullen steeds over het product gaan dat je net voor het statement zag. '
instr17 = 'Eerst verschijnt een van de producten die je hebt gezien in deel 1. \n \nHieronder staat de naam van het product, de winkelprijs van het product, een statement, en een schaal.'
instr18 = 'Duid vervolgens met je muis aan hoe eens je het bent met het statement door te drukken op de schaal. \n \n"1" wil zeggen dat je het HELEMAAL ONEENS bent, "7" wil zeggen dat je het HELEMAAL EENS bent.'
instr19 = 'Als je zeker bent van je selectie mag je op de "OK" toets drukken. \n \nJe mag werken op je eigen tempo,
"""
Devices controlled by the ISY are represented as "nodes" on the ISY device and with Node Objects in the API
There are three types of Node Object:
* IsyNode - Node Object
Represents lights, switches, motion sensors
* IsyScene - Scene Object
Represents a Scene and contains the Nodes that comprise that Scene
* IsyNodeFolder - Can hold Scenes or Nodes
an organizational obj for Scenes and Nodes
Only IsyNode Objects maintain "state"
Which states are maintained depends on the physical node device itself
but they can include
- on, off or dim level
- temperature
- wattage
Nodes can have "members" or subnodes
IsyScene Objects can take commands but do not maintain a queryable state
A Scene is a predefined state for one or more nodes
Scenes can only be comprised of nodes, which are called "members"
only nodes can be members of a scene
IsyNodeFolders are just for organizing
Nodes, Scenes and Folders can be members of a Folder
"""
from __future__ import print_function
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = "Copyright (C) 2017 <NAME>"
__license__ = "BSD"
import hashlib
from .IsyUtilClass import IsySubClass, val2bool
#from .IsyExceptionClass import *
import ISY.IsyExceptionClass as IsyE
# from IsyClass import *
# from IsyNodeClass import *
# from IsyProgramClass import *
# from IsyVarClass import *
__all__ = ['IsyNode', 'IsyNodeFolder', 'IsyScene']
# library_using_super
class _IsyNodeBase(IsySubClass):
def __init__(self, isy, ndict):
super(_IsyNodeBase, self).__init__(isy, ndict)
self._dimable = self._is_dimable()
#_objtype = (0, "unknown")
_objtype = "unknown"
def on(self, val=255):
""" Send On command to a node
args:
optional value for on level
"""
self._on(val, "DON")
def faston(self, val=255):
""" Send Fast On command to a node
args:
optional value for on level
"""
self._on(val, "DFON")
def _on(self, val, cmd):
if not str(val).isdigit():
raise IsyE.IsyTypeError("On Command : Bad Value : node=%s val=%s" %
(self._mydict["address"], str(val)))
if "property" in self._mydict:
if "ST" in self._mydict["property"]:
self._mydict["property"]["ST"]["value"] = str(val)
if self._dimable:
self._mydict["property"]["ST"]["formatted"] = "{:.0%}".format(val/255)
else:
self._mydict["property"]["ST"]["formatted"] = "On"
self.isy._node_send(self._mydict["address"], "cmd", cmd, val)
def off(self):
""" Send Off command to a node
args: None
"""
self._off("DOF")
def fastoff(self):
""" Send Fast Off command to a node
args: None
"""
self._off("DFOF")
def _off(self, cmd="DOF"):
self.isy._node_send(self._mydict["address"], "cmd", cmd)
if "property" in self._mydict:
# self._mydict["property"]["time"] = 0
if "ST" in self._mydict["property"]:
self._mydict["property"]["ST"]["value"] = str(0)
self._mydict["property"]["ST"]["formatted"] = "Off"
def beep(self):
self.isy._node_send(self._mydict["address"], "cmd", "BEEP")
def get_spoken(self):
""" get notes property 'spoken' """
return self._get_prop("spoken")
spoken = property(get_spoken)
def get_path(self):
return self.isy._node_get_path(self._mydict['address'], self._objtype)
path = property(get_path)
def members_list(self):
pass
def member_iter(self, flag=0):
return self.members_list()
def member_list(self):
if 'members' in self._mydict:
# print("mydict['members'] : ", type(self._mydict['members']) )
if isinstance(self._mydict['members'], dict):
return list(self._mydict['members'].keys())
# if isinstance(self._mydict['members'], list):
return self._mydict['members'][:]
return [ ]
def _is_dimable(self):
if 'type' in self._mydict:
a = self._mydict["type"].split('.')
if a[0] == "1":
return True
return False
def is_dimable(self):
return(self._dimable)
dimable = property(is_dimable)
def get_callback(self):
return self.isy.callback_get(self._mydict["address"])
def set_callback(self, func, *args):
if func is None:
return self.isy.callback_del(self._mydict["address"])
else:
return self.isy.callback_set(self._mydict["address"], func, args)
callback = property(get_callback, set_callback)
def is_member(self, obj):
if "members" in self._mydict:
if isinstance(obj, str):
return obj in self._mydict["members"]
elif isinstance(obj, _IsyNodeBase):
return obj._get_prop("address") in self._mydict["members"]
return False
def member_add(self, node, flag=0):
r = self.isy.soapcomm("SetParent",
node=node._get_prop("address"), nodeType=node.nodeType(),
parent=self._mydict["address"], parentType=self.nodeType())
def _rename(self, cmd, newname):
if self.debug & 0x01:
print("rename : ", self.__class__.__name__, " : ", newname)
#if not isinstance(newname, str) or len(newname) == 0:
# print("newname : ", newname)
# raise IsyE.IsyTypeError("rename : name value not str")
r = self.isy.soapcomm(cmd,
id=self._mydict["address"], name=newname )
return r
# check if scene _contains_ node
def __contains__(self, other):
return self.is_member(other)
# check if obj _contains_ attib
# def __contains__(self, other):
# if isinstance(other, str):
# return other in self._getlist
# else:
# return False
# class MemberDicte(dict):
#
# def __getitem__(self, key):
# val = dict.__getitem__(self, key)
# print('GET', key)
# return val
#
# def __setitem__(self, key, val):
# print('SET', key, val)
# dict.__setitem__(self, key, val)
#
# def __delitem__(self, key):
# print('DEL', key)
# dict.__delitem__(self, key)
#
# def __repr__(self):
# dictrepr = dict.__repr__(self)
# return '%s(%s)' % (type(self).__name__, dictrepr)
#
# def get(self, key, default_val):
# print('GET', key, default_val)
# dict.get(self, key, default_val)
#
# def update(self, *args, **kwargs):
# print('update', args, kwargs)
# for k, v in dict(*args, **kwargs).iteritems():
# self[k] = v
#
# convers a node Id to a int
# eg: "9 4A 5F 2" => 00001001010010100101111100000010 => 155868930
#
def node_id_to_int(h):
a = h.split(' ')
return ( int(a[0], 16) << 24 ) | ( int(a[1], 16) << 16 ) | \
( int(a[2], 16) << 8 ) | int(a[3], 16)
# def rate
# def onlevel
class IsyNode(_IsyNodeBase):
""" Node Class for ISY
Attributes:
status / ST
ramprate / RR
onlevel / OL
Readonly Attributes:
address
formatted
enabled
pnode
type
name
ELK_ID
flag
functions:
get_rr:
set_rr:
Bugs: Results are undefined for Node class objects that
represent a deleted node
"""
_getlist = ['address', 'enabled', 'formatted', 'family',
'ELK_ID',
'parent', 'parent-type',
'name', 'pnode', 'flag', 'wattage',
'isLoad', 'location', 'description', 'spoken',
'dimable',
'OL', 'RR', 'ST', 'type']
_setlist = ['RR', 'OL', 'status', 'ramprate', 'onlevel', 'enable', 'wattage']
_propalias = {'status': 'ST', 'value': 'ST', 'val': 'ST',
'id': 'address', 'addr': 'address',
'ramprate': 'RR', 'onlevel': 'OL',
"node-flag": "flag"}
#_boollist = [ "enabled" ]
def __init__(self, isy, ndict):
# self._objtype = (1, "node")
self._objtype = "node"
self._nodeprops = None
super(self.__class__, self).__init__(isy, ndict)
# self._dimable = self._is_dimable()
# if not self.isy.eventupdates:
# #update only nodes
# if "node-flag" in self._mydict:
# self.update()
# print("addr", self._mydict["address"], type(self._mydict["address"]))
self._hash = hashlib.sha256(self._mydict["address"].encode('utf-8'))
if self.debug & 0x01:
print("Init Node : \"" + self._mydict["address"] + \
"\" : \"" + self._mydict["name"] + "\"")
# self.isy._printdict(self.__dict__)
# Special case from BaseClass due to ST/RR/OL props
def _get_prop(self, prop):
# print("IN get_prop ", prop)
if prop == "formatted":
prop = "ST"
value = "formatted"
else:
value = "value"
if prop in self._propalias:
prop = self._propalias[prop]
if not prop in self._getlist:
# if prop in ['parent', 'parent-type']:
# return None
raise IsyE.IsyPropertyError("no property Attribute {!s}".format(prop))
# check if we have a property
if prop in ['isLoad', 'location', 'description', 'spoken']:
if self._nodeprops is None:
self._nodenotes = self.isy.node_get_notes(self._mydict["address"])
if self._nodenotes is None:
return None
if prop in self._nodenotes:
return self._nodenotes[prop]
else:
# return None
return ""
if prop in ['ST', 'OL', 'RR']:
# Scene's do not have property values
if "property" in self._mydict and prop in self._mydict["property"]:
# print(self._mydict["property"])
# print("prop value", prop, value)
return self._mydict["property"][prop][value]
else:
return None
# if self._mydict["property"]["time"] == 0:
# self.update()
# elif self.isy.cachetime:
# if time.gmtime() < (self.cachetime + self._mydict["property"]["time"]):
# self.update()
else:
# if prop in self._mydict:
# if prop in self._boollist:
# return(val2bool(self._mydict[prop]))
# else:
# return self._mydict[prop]
# else:
# return None
return super(self.__class__, self)._get_prop(prop)
def _set_prop(self, prop, new_value):
""" generic property set """
# print("IN set_prop ", prop, new_value)
if self.debug & 0x04:
print("_set_prop ", prop, " : ", new_value)
if prop in self._propalias:
prop = self._propalias[prop]
if not prop in self._setlist:
if prop == "ST":
self.on(new_value)
return
else:
raise IsyE.IsyPropertyError("_set_prop : " \
"Invalid property Attribute " + prop)
if prop == 'enable':
self._mydict[prop] = bool(new_value)
self.isy.node_enable(self._mydict["address"], bool(new_value))
elif prop in ['OL', 'RR']:
if not str(new_value).isdigit():
raise IsyE.IsyTypeError("Set Property : Bad Value : node=%s prop=%s val=%s" %
(self._mydict["address"], prop, str(new_value)))
self.isy._node_send(self._mydict["address"], "set", prop, str(new_value))
# self._mydict["property"]["time"] = 0
if prop in self._mydict["property"]:
# if isinstance(new_value, (int, float)) : # already checked with isdigit
self._mydict["property"][prop]["value"] = new_value
# we need to tie this to some action
elif prop in self._mydict:
# self._mydict[prop] = new_value
pass
else:
#print("_set_prop AttributeError")
raise AttributeError("no Attribute " + prop)
def _gettype(self):
""" Type of Node (readonly) """
return "node"
# enable node
def get_enable(self):
""" get enable/disable status a node """
return self._get_prop("enable")
def set_enable(self, new_bool):
""" Set enable status a node
args:
enable bool
"""
return self._set_prop("enable", new_bool)
enable = property(get_enable, set_enable, None, "enable/disable a node")
def get_wattage(self):
""" get wattage """
return self._get_prop("wattage")
def set_wattage(self, watts):
""" set wattage property """
return self.isy.node_set_powerinfo(self._mydict["address"], wattage=watts)
wattage = property(get_wattage, set_wattage)
# ramprate property
# obj method for getting/setting a Node's value
# sets how fast a light fades on.
def get_rr(self):
""" Get/Set RampRate property of Node """
return self._get_prop("RR")
def set_rr(self, new_value):
""" Get/Set RampRate property of Node """
return self._set_prop("RR", new_value)
ramprate = property(get_rr, set_rr)
# On Level property
# obj method for getting/setting a Node's value
# where in most cases light is how bright the
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# For tensorflow >= 2.0, keras is integrated directly into tensorflow
# ref: https://keras.io/about/
from tensorflow.keras.layers import Layer, Input, LSTM, TimeDistributed, Bidirectional, Dense, Lambda, Embedding, Dropout, \
Concatenate, RepeatVector
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model
import tensorflow.keras as keras
from tensorflow.keras.models import Model
from lib.evaluate_xrh import *
from lib.tf_data_tokenize_xrh import *
import time
import configparser
import json
class MachineTranslation:
"""
A seq2seq-based neural machine translation model (v2-integrated)
1. Decoding uses an integrated model: every decoding step is carried out inside the computation graph (the time-step loop control lives in the graph)
2. The model is built with eager execution and a hand-written training loop; because the graph is built as it executes, its structure can change.
In this model the decoding sequence length (identical within a batch, but different across batches) determines the graph structure,
so the @tf.function wrapper (conversion to a static graph) cannot be used to speed up computation
3. Implements a tf.data preprocessing pipeline, using TextVectorization and StringLookup for sentence vectorization and de-vectorization
4. Hyperparameters are maintained in the configuration file
Author: xrh
Date: 2021-11-20
ref:
1. Sequence to Sequence Learning with Neural Networks
2. Learning Phrase Representations using RNN Encoder–Decoder for Statistical Machine Translation
3.
"""
def __init__(self,
current_config,
vocab_source, vocab_target,
tokenizer_source=None, tokenizer_target=None,
reverse_source=True,
use_pretrain=False,
):
"""
Model initialization
:param vocab_source: vocabulary object for the source language
:param vocab_target: vocabulary object for the target language
:param tokenizer_source:
:param tokenizer_target:
:param reverse_source: whether to reverse the source sequence
:param use_pretrain: use a previously trained model
"""
self.current_config = current_config
self.n_h = int(current_config['n_h'])
self.n_embedding = int(current_config['n_embedding'])
self.target_length = int(current_config['max_seq_length'])
self.dropout_rates = json.loads(current_config['dropout_rates'])
self.reverse_source = reverse_source
_null_str = current_config['_null_str']
_start_str = current_config['_start_str']
_end_str = current_config['_end_str']
_unk_str = current_config['_unk_str']
self.vocab_source = vocab_source
self.vocab_target = vocab_target
# vocabulary size of the source language
self.n_vocab_source = self.vocab_source.n_vocab
# vocabulary size of the target language
self.n_vocab_target = self.vocab_target.n_vocab
print('model architecture param:')
print('n_h:{}, n_embedding:{}, n_vocab_source:{}, n_vocab_target:{}'.format(self.n_h, self.n_embedding, self.n_vocab_source,
self.n_vocab_target))
print('-------------------------')
self._null = int(self.vocab_source.map_word_to_id(_null_str)) # padding
self._start = int(self.vocab_source.map_word_to_id(_start_str)) # start of sentence
self._end = int(self.vocab_source.map_word_to_id(_end_str)) # end of sentence
self._unk = int(self.vocab_source.map_word_to_id(_unk_str)) # out-of-vocabulary word
# vocab_source and vocab_target use different ids
self._null_target = int(self.vocab_target.map_word_to_id(_null_str)) # padding
self._start_target = int(self.vocab_target.map_word_to_id(_start_str)) # start of sentence
self._end_target = int(self.vocab_target.map_word_to_id(_end_str)) # end of sentence
self._unk_target = int(self.vocab_target.map_word_to_id(_unk_str)) # out-of-vocabulary word
self.model_path = current_config['model_path']
# build the model
self.model = Seq2seqModel(n_embedding=self.n_embedding, n_h=self.n_h, target_length=self.target_length,
n_vocab_source=self.n_vocab_source, n_vocab_target=self.n_vocab_target,
tokenizer_source=tokenizer_source, tokenizer_target=tokenizer_target,
_start_target=self._start_target, _null_target=self._null_target,
reverse_source=self.reverse_source,
dropout_rates=self.dropout_rates)
if use_pretrain: # load a previously trained model
# self.model = tf.saved_model.load(self.model_path)
self.model.load_weights(self.model_path)
def _shuffle_dataset(self, dataset, buffer_size, batch_size):
"""
Shuffle, batch and prefetch the dataset
:param dataset:
:return:
"""
# unbatch so the dataset granularity becomes individual rows
dataset = dataset.unbatch()
# tf.data shuffling, batching and prefetching;
# no map operation may follow the shuffle, because it would change how rows are combined into batches
dataset = dataset.shuffle(buffer_size).batch(batch_size)
final_dataset = dataset.prefetch(buffer_size=tf.data.AUTOTUNE)
return final_dataset
def fit_tf_data(self, train_dataset, valid_dataset, valid_source_target_dict, epoch_num,
batch_size, buffer_size,
):
"""
Train the model with the built-in training loop
:param train_dataset: training data generator
:param valid_dataset: validation data generator
:param valid_source_target_dict: dict of validation data, used to compute BLEU
:param epoch_num: number of training epochs; one epoch means the model has seen every training sample once
:param batch_size: number of samples fed to the model per step of mini-batch gradient descent
:param buffer_size: shuffle window size
:return:
"""
train_dataset_prefetch = self._shuffle_dataset(train_dataset, buffer_size, batch_size)
valid_dataset_prefetch = self._shuffle_dataset(valid_dataset, buffer_size, batch_size)
checkpoint_models_path = self.current_config['checkpoint_models_path']
# Callbacks
# run 'tensorboard --logdir ./logs' from the project root
tensor_board = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True,
write_images=True)
# early stopping: pause training if the validation loss has not decreased after 'patience' iterations
early_stop = EarlyStopping('val_loss', patience=5)
model_checkpoint_with_eval = CheckoutCallback(current_config=self.current_config,
model=self.model,
vocab_obj=self.vocab_target, valid_source_target_dict=valid_source_target_dict,
checkpoint_models_path=checkpoint_models_path)
# Final callbacks
# callbacks = [model_checkpoint_with_eval, early_stop, tensor_board]
history = self.model.fit(
epoch_num=epoch_num, train_dataset=train_dataset_prefetch, valid_dataset=valid_dataset_prefetch,
callback_obj=model_checkpoint_with_eval
)
# save the trained model to file
# self.model.save(self.model_path)
def inference(self, batch_source_dataset, target_length):
"""
Run inference with the trained model
:param batch_source_dataset:
:return:
"""
# batch_source_dataset shape (N_batch, encoder_length)
preds = self.model.predict(batch_source_dataset, target_length)
decode_result = self.vocab_target.map_id_to_word(preds)
decode_result = tf.strings.reduce_join(decode_result, axis=1,
separator=' ')
candidates = [sentence.numpy().decode('utf-8').strip() for sentence in decode_result]
return candidates
class CheckoutCallback:
"""
Callback that checkpoints the trained model after every epoch
and computes the BLEU score on the validation set
"""
def __init__(self, current_config,
model, vocab_obj, valid_source_target_dict,
checkpoint_models_path):
self.model = model
self.vocab_obj = vocab_obj
self.batch_source_dataset, self.references = self.prepare_valid_data(batch_size=int(current_config['batch_size']),
valid_source_target_dict=valid_source_target_dict)
self.target_length = int(current_config['max_seq_length'])
self.evaluate_obj = Evaluate(
with_unk=True,
_null_str=current_config['_null_str'],
_start_str=current_config['_start_str'],
_end_str=current_config['_end_str'],
_unk_str=current_config['_unk_str'])
self.checkpoint_models_path = checkpoint_models_path
def prepare_valid_data(self, batch_size, valid_source_target_dict):
"""
Return the batched source sentences and the reference translations (used for BLEU)
:param batch_size:
:param valid_source_target_dict:
:return:
"""
source_list = list(valid_source_target_dict.keys())
print('valid source seq num :{}'.format(len(source_list)))
references = [valid_source_target_dict[source] for source in source_list]
source_dataset = tf.data.Dataset.from_tensor_slices(source_list)
batch_source_dataset = source_dataset.batch(batch_size)
return batch_source_dataset, references
def inference_bleu(self):
"""
Run inference on the validation dataset and compute BLEU
:return:
"""
# batch_source_dataset shape (N_batch, encoder_length)
preds = self.model.predict(self.batch_source_dataset, self.target_length)
decode_result = self.vocab_obj.map_id_to_word(preds)
decode_result = tf.strings.reduce_join(decode_result, axis=1,
separator=' ')
candidates = [sentence.numpy().decode('utf-8').strip() for sentence in decode_result]
bleu_score, _ = self.evaluate_obj.evaluate_bleu(self.references, candidates)
print()
print('bleu_score:{}'.format(bleu_score))
def on_epoch_end(self, epoch, logs=None):
# checkpoint the model
fmt = os.path.join(self.checkpoint_models_path, 'model.%02d-%.4f')
self.model.save_weights(fmt % (epoch, logs['val_loss'])) # save the model parameters
# tf.saved_model.save(self.model, fmt % (epoch, logs['val_loss'])) # save the whole model
# compute the BLEU score
self.inference_bleu()
class Seq2seqModel(Model):
"""
seq2seq model
1. Decoding uses the integrated-model approach: an inference graph is built and every decoding step is executed inside the computation graph
2. The model is built with eager execution and a hand-written training loop
Author: xrh
Date: 2021-11-20
"""
def __init__(self, n_embedding, n_h, target_length,
n_vocab_source, n_vocab_target,
_start_target, _null_target,
tokenizer_source=None, tokenizer_target=None,
reverse_source=True,
dropout_rates=(0.2, 0.2, 0.2)):
super().__init__()
self.reverse_source = reverse_source
# id of the start token in the target vocabulary
self._start_target = _start_target
# id of the null (padding) token in the target vocabulary
self._null_target = _null_target
self.tokenizer_source = tokenizer_source
self.tokenizer_target = tokenizer_target
# build the encoder and the decoders
self.encoder = Encoder(n_embedding=n_embedding, n_h=n_h, n_vocab=n_vocab_source, dropout_rates=dropout_rates)
self.train_decoder = TrianDecoder(n_embedding=n_embedding, n_h=n_h, n_vocab=n_vocab_target, target_length=target_length, dropout_rates=dropout_rates)
self.infer_decoder = InferDecoder(train_decoder_obj=self.train_decoder, _start=self._start_target, target_length=target_length)
# loss object
self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(reduction='none')
# optimizer
self.optimizer = keras.optimizers.RMSprop()
# evaluation metrics
self.train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
self.val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
def _mask_loss_function(self, real, pred):
"""
Custom masked loss function
:param real: label values
:param pred: predicted values
:return:
"""
mask = tf.math.logical_not(tf.math.equal(real, self._null_target)) # padding positions in the output sequence are excluded from the loss
loss_ = self.loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return mask, tf.reduce_mean(loss_)
def _preprocess(self, batch_data):
"""
Preprocess one batch of data from the dataset
:param batch_data:
:return:
"""
batch_source, batch_target = batch_data
batch_source_vector = self.tokenizer_source(batch_source).to_tensor()
if self.reverse_source:
batch_source_vector = batch_source_vector[:, ::-1]
batch_target_vector = self.tokenizer_target(batch_target).to_tensor()
batch_target_in = batch_target_vector[:, :-1]
batch_target_out = batch_target_vector[:, 1:]
return (batch_source_vector, batch_target_in), batch_target_out
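# Illustrative note: the one-position shift above is teacher forcing. For a tokenized target
# '<start> w1 w2 <end>', the decoder input (batch_target_in) is '<start> w1 w2' and the labels
# the loss is computed against (batch_target_out) are 'w1 w2 <end>'.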
# @tf.function
def _train_step(self, inputs):
training = True
(batch_source, batch_target_in), batch_target_out = inputs
# batch_source shape (N_batch, source_length)
# batch_target_in shape (N_batch, target_length)
# batch_target_out shape (N_batch, target_length)
target_length = tf.shape(batch_target_in)[1]
with tf.GradientTape() as tape:
layer_state_list = self.encoder(batch_source=batch_source, training=training)
# TODO: replace layer_state_list with a tensor
probs = self.train_decoder(batch_target_in=batch_target_in,
layer_state_list=layer_state_list, training=training)
# probs shape (N_batch, target_length, n_vocab)
target_mask, average_loss = self._mask_loss_function(batch_target_out, probs)
trainable_variables = self.encoder.trainable_variables + self.train_decoder.trainable_variables
gradients = tape.gradient(average_loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
return average_loss, probs
# @tf.function
def _test_step(self, batch_source, target_length):
# batch_source shape (N_batch, source_length)
training = False
layer_state_list = self.encoder(batch_source=batch_source, training=training)
probs, preds = self.infer_decoder(layer_state_list=layer_state_list,
target_length=target_length, training=training)
return probs, preds
def fit(self, epoch_num, train_dataset, valid_dataset, callback_obj=None):
logs_list = [] # record a log entry for every epoch
for epoch in range(epoch_num):
print('----------------------------------')
print("Epoch %d/%d" % (epoch, epoch_num))
start_time = time.time()
logs = {} # log for the current epoch
# ------------ train dataset -------------#
epoch_train_loss = 0
iteration = 0
# iterate over all batches of the training dataset
for batch_data in tqdm(train_dataset):
features, labels = self._preprocess(batch_data)
average_loss, probs = self._train_step((features, labels))
# print('iteration loss: %.2f' % (batch_loss,))
epoch_train_loss += average_loss
# Update training metric.
self.train_acc_metric.update_state(labels, probs)
iteration += 1
# training for this epoch is finished
# print("\nEpoch: %d end" % (epoch,))
train_acc = self.train_acc_metric.result()
# reset the training metric
self.train_acc_metric.reset_states()
epoch_train_loss = epoch_train_loss / iteration
print("train loss: %.4f , train acc: %.4f" % (epoch_train_loss, float(train_acc)))
# ------------ valid dataset -------------#
epoch_valid_loss = 0
iteration = 0
# iterate over the validation dataset
for batch_data in tqdm(valid_dataset):
features, labels = self._preprocess(batch_data)
(batch_source, batch_target_in) = features
target_length = tf.shape(batch_target_in)[1]
probs, _ = self._test_step(batch_source, target_length)
_, batch_loss = self._mask_loss_function(labels, probs)
epoch_valid_loss += batch_loss
# Update val metrics
self.val_acc_metric.update_state(labels, probs)
iteration += 1
val_acc = self.val_acc_metric.result()
# reset the validation metric
self.val_acc_metric.reset_states()
epoch_valid_loss = epoch_valid_loss / iteration
print("valid loss: %.4f , valid acc: %.4f" % (epoch_valid_loss, float(val_acc)))
cost_time = time.time() - start_time
print("Time taken: %.2fs" % cost_time)
# record the log entry
logs['epoch'] = epoch
logs['train_loss'] = epoch_train_loss
logs['train_acc'] = train_acc
logs['val_loss'] = epoch_valid_loss
logs['val_acc'] = val_acc
logs['time'] = cost_time
logs_list.append(logs)
# -------------- callback --------------#
if callback_obj is not None:
callback_obj.on_epoch_end(epoch=epoch, logs=logs)
return logs_list
def predict(self, source_dataset, target_length):
preds_list = []
# iterate over the dataset
for batch_data in tqdm(source_dataset):
batch_source = batch_data
batch_source = self.tokenizer_source(batch_source).to_tensor()
_, preds = self._test_step(batch_source, target_length)
for pred in preds:
preds_list.append(pred)
return preds_list
class Encoder(Layer):
"""
LSTM-based encoder layer
"""
def __init__(self, n_embedding, n_h, n_vocab, dropout_rates=(0.2, 0.2, 0.2)):
super(Encoder, self).__init__()
self.embedding_layer = Embedding(n_vocab, n_embedding)
self.lstm_layer0 = LSTM(n_h, return_sequences=True, return_state=True, name='lstm_layer1')
# self.dropout_layer1 = Dropout(dropout_rates[0], name='dropout1') # a neuron is dropped with probability 0.1
# self.lstm_layer2 = LSTM(n_h, return_sequences=True, return_state=True, name='lstm_layer1')
# self.dropout_layer2 = Dropout(dropout_rates[0], name='dropout1') # a neuron is dropped with probability 0.1
# def get_config(self):
# config = super().get_config().copy()
# config.update({
# 'embedding_layer': self.embedding_layer,
# 'lstm_layer0': self.lstm_layer0
#
# })
# return config
def call(self, batch_source, training=True):
# batch_source shape (N_batch, source_length)
source_embedding = self.embedding_layer(inputs=batch_source) # shape (N_batch, encoder_length, n_embedding)
layer_state_list = []
out_lstm0, state_h0, state_c0 = self.lstm_layer0(
inputs=source_embedding) # out_lstm0 shape : (N_batch, source_length, n_h)
layer_state_list.append((state_h0, state_c0))
return layer_state_list
class TrianDecoder(Layer):
"""
LSTM-based decoder layer used in training mode
"""
def __init__(self, n_embedding, n_h, n_vocab, target_length, dropout_rates=(0.2, 0.2, 0.2)):
super(TrianDecoder, self).__init__()
self.target_length = target_length
self.embedding_layer = Embedding(n_vocab, n_embedding)
self.lstm_layer0 = LSTM(n_h, return_sequences=True,
Turing state which issues the correct operation starting from the first PC bit."""
return State()
@memo
def nextstate(self):
"""A Turing state which increments PC by 1, with the tape head on the last PC bit."""
return self.dispatch_order(0, 1)
@memo
def nextstate_2(self):
"""A Turing state which increments PC by 2, with the tape head on the last PC bit."""
return State(move=-1, next=self.dispatch_order(1, 1), name='nextstate_2')
@memo
def dispatch_order(self, order, carry_bit):
"""Constructs Turing states which move from the work area back to the PC head.
On entry, the head should be order bits left of the rightmost bit of the program
counter; if carry_bit is set, the bit the head is on will be incremented."""
if order == self.pc_bits:
return State(move=+1, next=self.dispatchroot(), name='!ENTRY')
assert order < self.pc_bits
if carry_bit:
return State(write0='1', next0=self.dispatch_order(order + 1, 0),
write1='0', next1=self.dispatch_order(order + 1, 1),
move=-1, name='dispatch.{}.carry'.format(order))
else:
return State(next=self.dispatch_order(order + 1, 0), move=-1,
name='dispatch.{}'.format(order))
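# Worked example: with pc_bits == 3 and PC == 011, nextstate enters dispatch_order(0, 1) on the
# least-significant PC bit: 1 -> 0 with carry, 1 -> 0 with carry, 0 -> 1 without carry; order then
# equals pc_bits, so the head steps right and control re-enters the dispatcher with PC == 100.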
@memo
def noop(self, order):
"""A subprogram of given size which does nothing.
Used automatically to maintain alignment."""
reverse = State(move=-1, next=self.dispatch_order(order, 1), name='noop.{}'.format(order))
return Subroutine(reverse, order, reverse.name)
@memo
def halt(self):
"""A subprogram which halts the Turing machine when your work is done."""
return Subroutine(Halt(), 0, 'halt')
@memo
def jump(self, order, rel_pc, sub_name):
"""A subprogram which replaces a suffix of the PC, for relative jumps.
Used automatically by the Goto operator."""
assert rel_pc < (1 << (order + 1))
steps = [State() for i in range(order + 2)]
steps[order+1] = self.dispatch_order(order, rel_pc >> order)
steps[0].be(move=-1, next=steps[1], \
name='{}.jump({},{},{})'.format(sub_name, rel_pc, order, 0))
for i in range(order):
bit = str((rel_pc >> i) & 1)
steps[i+1].be(move=-1, next=steps[i+2], write=bit, \
name='{}.jump({},{},{})'.format(sub_name, rel_pc, order, i+1))
return Subroutine(steps[0], 0, '{}.jump({},{})'.format(sub_name, rel_pc, order))
@memo
def rjump(self, rel_pc):
"""A subprogram which adds a constant to the PC, for relative jumps."""
steps = [(State(), State()) for i in range(self.pc_bits + 1)]
steps.append(2 * (self.dispatch_order(self.pc_bits, 0),))
steps[0][0].be(move=-1, next=steps[1][0], name='rjump({})({})'.format(rel_pc, 0))
for i in range(self.pc_bits):
bit = (rel_pc >> i) & 1
steps[i+1][0].be(move=-1, next0=steps[i+2][0], write0=str(bit), \
next1=steps[i+2][bit], write1=str(1-bit), \
name='rjump({})({})'.format(rel_pc, i+1))
steps[i+1][1].be(move=-1, next0=steps[i+2][bit], write0=str(1-bit), \
next1=steps[i+2][1], write1=str(bit), \
name='rjump({})({}+)'.format(rel_pc, i+1))
return Subroutine(steps[0][0], 0, 'rjump({})'.format(rel_pc))
# TODO: subprogram compilation needs to be substantially lazier in order to do
# effective inlining and register allocation
def makesub(self, *parts, name):
"""Assigns PC values within a subprogram and creates the dispatcher."""
# first find out where everything is and how big I am
label_offsets = {}
label_map = {}
goto_map = {}
real_parts = []
offset = 0
if not self.control_args.no_cfg_optimize:
parts = cfg_optimizer(parts)
if name == 'main()':
# inject code to initialize registers (a bit of a hack)
regcount = self._nextreg
while regcount & (regcount - 1):
regcount += 1
parts = regcount * (self.reg_init(), ) + parts
for part in parts:
if isinstance(part, Label):
# labels take up no space
label_offsets[part.name] = offset
label_map.setdefault(offset, []).append(part.name)
continue # not a real_part
if isinstance(part, Goto):
goto_map[offset] = part.name
# parts must be aligned
while offset % part.size:
noop_order = (offset & -offset).bit_length() - 1
offset += 1 << noop_order
real_parts.append(self.noop(noop_order))
real_parts.append(part)
offset += part.size
assert offset > 0
order = 0
while offset > (1 << order):
order += 1
while offset < (1 << order):
noop_order = (offset & -offset).bit_length() - 1
offset += 1 << noop_order
real_parts.append(self.noop(noop_order))
offset = 0
child_map = {}
jumps_required = set()
for part in real_parts:
if isinstance(part, Goto):
jump_order = 0
target = label_offsets[part.name]
while True:
base = (offset >> jump_order) << jump_order
rel = target - base
if rel >= 0 and rel < (1 << (jump_order + 1)):
jumps_required.add((jump_order, rel))
break
jump_order += 1
offset += part.size
offset = 0
for part in real_parts:
if isinstance(part, Goto):
assert part.name in label_offsets
target = label_offsets[part.name]
if self.control_args.relative_jumps:
part = self.rjump(target - offset)
else:
part = None
for jump_order in range(order + 1):
base = (offset >> jump_order) << jump_order
rel = target - base
if (jump_order, rel) in jumps_required:
part = self.jump(jump_order, rel, name)
# don't break, we want to take the largest reqd jump
# except for very short jumps, those have low enough
# entropy to be worthwhile
if jump_order < 3:
break
assert part
offset_bits = make_bits(offset >> part.order, order - part.order)
goto_line = goto_map.get(offset)
label_line = label_map.get(offset)
child_map[offset_bits] = InsnInfo(part, label_line, goto_line)
offset += 1 << part.order
return Subroutine(make_dispatcher(child_map, name, order), order, name, child_map=child_map)
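# Alignment note: parts are padded with noop subprograms so that each part starts at an offset
# divisible by its size. For example, a part of size 4 arriving at offset 6 first gets a noop of
# order 1 (6 & -6 == 2, so noop_order == 1), moving the offset to 8, which is 4-aligned.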
# Utilities...
@memo
def register(self, name):
"""Assigns a name to a register, and creates the primitive inc/dec routines."""
index = self._nextreg
self._nextreg += 1
pad = 0
inc = Subroutine(self.reg_incr(index), 0, 'reg_incr('+name+')')
dec = Subroutine(self.reg_decr(index), 0, 'reg_decr('+name+')', is_decrement=True)
return Register(name, index, inc, dec)
def regfile(self, *regs):
"""Assigns names to one or more registers, and creates the primitive inc/dec routines."""
return [self.register(name) for name in regs]
@memo
def transfer(self, source, *to):
"""Subprogram which moves values between registers.
The source register will be cleared, and its value will be added to each to register."""
name = 'transfer(' + ','.join([source.name] + [x.name for x in sorted(to)]) + ')'
return self.makesub(
Label('again'),
source.dec,
Goto('zero'),
*([tox.inc for tox in sorted(to)] + [
Goto('again'),
Label('zero'),
]),
name=name
)
class Machine:
"""Manipulates and debugs the generated Turing machine for a MachineBuilder."""
def __init__(self, builder):
self.builder = builder
self.main = builder.main()
if self.main.order != builder.pc_bits:
print('pc_bits does not match calculated main order:', self.main.order, builder.pc_bits)
assert False
self.builder.dispatchroot().clone(self.main.entry)
self.entry = self.builder.dispatch_order(self.builder.pc_bits, 0)
self.state = self.entry
self.left_tape = []
self.current_tape = '0'
self.right_tape = []
self.longest_label = max(len(state.name) for state in self.reachable())
def harness(self, args):
"""Processes command line arguments and runs the test harness for a machine."""
if not args.dont_compress:
self.compress()
if args.print_subs:
self.print_subs()
if args.print_tm:
self.print_machine()
if args.run_tm:
while isinstance(self.state, State):
self.tm_step()
def compress(self):
"""Combine pairs of equivalent states in the turing machine."""
while True:
did_work = False
unique_map = {}
replacement_map = {}
for state in self.reachable():
tup = (state.next0, state.next1, state.write0, state.write1,
state.move0, state.move1)
if tup in unique_map:
replacement_map[state] = unique_map[tup]
else:
unique_map[tup] = state
for state in self.reachable():
if state.next0 in replacement_map:
did_work = True
state.next0 = replacement_map[state.next0]
if state.next1 in replacement_map:
did_work = True
state.next1 = replacement_map[state.next1]
if self.entry in replacement_map:
did_work = True
self.entry = replacement_map[self.entry]
if not did_work:
break
def print_subs(self):
"""Dump the subroutines used by this machine."""
stack = [self.main]
seen = set()
while stack:
subp = stack.pop()
if subp in seen:
continue
seen.add(subp)
print()
print('NAME:', subp.name, 'ORDER:', subp.order)
for offset, entry in sorted(subp.child_map.items()):
while len(offset) < subp.order:
offset = offset + ' '
display = ' {offset} -> {child}'.format(offset=offset, child=entry.sub.name)
if entry.goto:
display += ' -> ' + entry.goto
for label in entry.labels or ():
display += ' #' + label
print(display)
stack.append(entry.sub)
def reachable(self):
"""Enumerates reachable states for the generated Turing machine."""
queue = [self.entry]
seen = []
seen_set = set()
while queue:
state = queue.pop()
if isinstance(state, Halt) or state in seen_set:
continue
if not state.set:
continue
seen_set.add(state)
seen.append(state)
queue.append(state.next1)
queue.append(state.next0)
return seen
def print_machine(self):
"""Prints the state-transition table for the generated Turing machine."""
reachable = sorted(self.reachable(), key=lambda x: x.name)
count = {}
for state in reachable:
count[state.name] = count.get(state.name, 0) + 1
index = {}
renumber = {}
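# Duplicate state names get a "(#k)" suffix below so the printed transition table stays unambiguous.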
for state in reachable:
if count[state.name] == 1:
continue
index[state.name] = index.get(state.name, 0) + 1
renumber[state] = state.name + '(#' + str(index[state.name]) + ')'
dirmap = {1: 'R', -1: 'L'}
for state in sorted(self.reachable(), key=lambda x: x.name):
print(renumber.get(state, state.name), '=',
state.write0, dirmap[state.move0], renumber.get(state.next0, state.next0.name),
state.write1, dirmap[state.move1], renumber.get(state.next1, state.next1.name))
def tm_print(self):
"""Prints the current state of the Turing machine execution."""
tape = ''.join(' ' + x for x in self.left_tape) + \
'[' + self.current_tape + ']' + ' '.join(reversed(self.right_tape))
print('{state:{len}} {tape}'.format(len=self.longest_label, \
state=self.state.name, tape=tape))
def tm_step(self):
"""Executes the Turing machine for a single step."""
self.tm_print()
state = self.state
if self.current_tape == '0':
write, move, nextstate = state.write0, state.move0, state.next0
else:
write, move, nextstate = state.write1, state.move1, state.next1
self.current_tape = write
self.state = nextstate
if
sys.exit(-1)
def sel(self, opt, choice):
for i in range(len(opt)):
option = opt[i].find_element_by_class_name(
'ui-corner-all').get_attribute("innerHTML")
if option == choice:
btn = opt[i].find_element_by_class_name('ui-corner-all')
time.sleep(1)
btn.click()
time.sleep(1)
return
continue
def policy_delete_in_webui(self, fixture):
result = True
network = []
if not self.webui_common.click_configure_policies():
result = result and False
rows = self.webui_common.get_rows()
for pol in range(len(rows)):
tdArry = rows[pol].find_elements_by_class_name('slick-cell')
if(len(tdArry) > 2):
if (tdArry[2].text == fixture.policy_name):
tdArry[0].find_element_by_tag_name('i').click()
self.webui_common.wait_till_ajax_done(self.browser)
rows = self.webui_common.get_rows()
ass_net = rows[
pol + 1].find_elements_by_class_name('row-fluid')[1].find_element_by_xpath("//div[@class='span11']").text.split()
if(ass_net[0] != '-'):
for net in range(len(ass_net)):
network.append(ass_net[net])
else:
print("No networks associated")
tdArry[5].find_element_by_tag_name('i').click()
self.browser.find_element_by_id(
'gridPolicy-action-menu-' + str(pol)).find_elements_by_tag_name('li')[1].find_element_by_tag_name('a').click()
self.browser.find_element_by_id("btnRemovePopupOK").click()
self.webui_common.wait_till_ajax_done(self.browser)
if not self.webui_common.check_error_msg("Delete policy"):
raise Exception("Policy deletion failed")
self.logger.info("%s is deleted successfully using webui" %
(fixture.policy_name))
break
# end policy_delete_in_webui
def verify_analytics_nodes_ops_basic_data(self):
self.logger.info("Verifying analytics_node basic ops-data in Webui...")
self.logger.debug(self.dash)
result = True
if not self.webui_common.click_monitor_analytics_nodes():
result = result and False
rows = self.webui_common.get_rows()
analytics_nodes_list_ops = self.webui_common.get_collectors_list_ops()
for n in range(len(analytics_nodes_list_ops)):
ops_analytics_node_name = analytics_nodes_list_ops[n]['name']
self.logger.info("Vn host name %s exists in op server..checking if exists in webui as well" % (
ops_analytics_node_name))
if not self.webui_common.click_monitor_analytics_nodes():
result = result and False
rows = self.webui_common.get_rows()
for i in range(len(rows)):
match_flag = 0
if rows[i].find_elements_by_class_name('slick-cell')[0].text == ops_analytics_node_name:
self.logger.info("Analytics_node name %s found in webui..going to match basic details.." % (
ops_analytics_node_name))
self.logger.debug(self.dash)
match_index = i
match_flag = 1
break
if not match_flag:
self.logger.error("Analytics_node name %s did not match in webui...not found in webui" % (
ops_analytics_node_name))
self.logger.debug(self.dash)
else:
self.logger.info("Click and retrieve analytics_node basic view details in webui for \
analytics_node-name %s " % (ops_analytics_node_name))
self.webui_common.click_monitor_analytics_nodes_basic(
match_index)
dom_basic_view = self.webui_common.get_basic_view_infra()
# special handling for overall node status value
node_status = self.browser.find_element_by_id('allItems').find_element_by_tag_name(
'p').get_attribute('innerHTML').replace('\n', '').strip()
for i, item in enumerate(dom_basic_view):
if item.get('key') == 'Overall Node Status':
dom_basic_view[i]['value'] = node_status
# filter analytics_node basic view details from opserver data
analytics_nodes_ops_data = self.webui_common.get_details(
analytics_nodes_list_ops[n]['href'])
ops_basic_data = []
host_name = analytics_nodes_list_ops[n]['name']
ip_address = analytics_nodes_ops_data.get(
'CollectorState').get('self_ip_list')
ip_address = ', '.join(ip_address)
generators_count = str(
len(analytics_nodes_ops_data.get('CollectorState').get('generator_infos')))
version = json.loads(analytics_nodes_ops_data.get('CollectorState').get('build_info')).get(
'build-info')[0].get('build-id')
version = self.webui_common.get_version_string(version)
module_cpu_info_len = len(
analytics_nodes_ops_data.get('ModuleCpuState').get('module_cpu_info'))
for i in range(module_cpu_info_len):
if analytics_nodes_ops_data.get('ModuleCpuState').get('module_cpu_info')[i][
'module_id'] == 'Collector':
cpu_mem_info_dict = analytics_nodes_ops_data.get(
'ModuleCpuState').get('module_cpu_info')[i]
break
cpu = self.webui_common.get_cpu_string(cpu_mem_info_dict)
memory = self.webui_common.get_memory_string(cpu_mem_info_dict)
modified_ops_data = []
process_state_list = analytics_nodes_ops_data.get(
'ModuleCpuState').get('process_state_list')
process_down_stop_time_dict = {}
process_up_start_time_dict = {}
exclude_process_list = [
'contrail-config-nodemgr', 'contrail-analytics-nodemgr', 'contrail-control-nodemgr', 'contrail-vrouter-nodemgr',
'openstack-nova-compute', 'contrail-svc-monitor', 'contrail-discovery:0', 'contrail-zookeeper', 'contrail-schema']
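# Note: process_down_stop_time_dict / process_up_start_time_dict (above) are filled by
# get_process_status_string() in the loop below; processes in exclude_process_list are
# then ignored, and whatever remains drives the "Overall Node Status" value
# ("Up since <time>" when nothing relevant is down, otherwise "<N> Process down").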
for i, item in enumerate(process_state_list):
if item['process_name'] == 'redis-query':
redis_query_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'contrail-qe':
contrail_qe_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'contrail-analytics-nodemgr':
contrail_analytics_nodemgr_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'redis-uve':
redis_uve_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'contrail-opserver':
contrail_opserver_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'contrail-collector':
contrail_collector_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
reduced_process_keys_dict = {}
for k, v in process_down_stop_time_dict.items():
if k not in exclude_process_list:
reduced_process_keys_dict[k] = v
if not reduced_process_keys_dict:
for process in exclude_process_list:
process_up_start_time_dict.pop(process, None)
recent_time = min(process_up_start_time_dict.values())
overall_node_status_time = self.webui_common.get_node_status_string(
str(recent_time))
overall_node_status_string = [
'Up since ' + status for status in overall_node_status_time]
else:
overall_node_status_down_time = self.webui_common.get_node_status_string(
str(max(reduced_process_keys_dict.values())))
process_down_count = len(reduced_process_keys_dict)
overall_node_status_string = str(
process_down_count) + ' Process down'
modified_ops_data.extend(
[{'key': 'Hostname', 'value': host_name}, {'key': 'Generators', 'value': generators_count}, {'key': 'IP Address', 'value': ip_address}, {'key': 'CPU', 'value': cpu}, {'key': 'Memory', 'value': memory}, {'key': 'Version', 'value': version}, {'key': 'Collector', 'value': contrail_collector_string},
{'key': 'Query Engine', 'value': contrail_qe_string}, {'key': 'OpServer', 'value': contrail_opserver_string}, {'key': 'Redis Query', 'value': redis_query_string}, {'key': 'Redis UVE', 'value': redis_uve_string}, {'key': 'Overall Node Status', 'value': overall_node_status_string}])
if self.webui_common.match_ops_with_webui(modified_ops_data, dom_basic_view):
self.logger.info(
"Ops %s uves analytics_nodes basic view details data matched in webui" %
(ops_analytics_node_name))
else:
self.logger.error(
"Ops %s uves analytics_nodes basic view details data match failed in webui" %
(ops_analytics_node_name))
result = result and False
return result
# end verify_analytics_nodes_ops_basic_data_in_webui
def verify_config_nodes_ops_basic_data(self):
self.logger.info(
"Verifying config_node basic ops-data in Webui monitor->infra->Config Nodes->details(basic view)...")
self.logger.debug(self.dash)
result = True
if not self.webui_common.click_monitor_config_nodes():
result = result and False
rows = self.webui_common.get_rows()
config_nodes_list_ops = self.webui_common.get_config_nodes_list_ops()
for n in range(len(config_nodes_list_ops)):
ops_config_node_name = config_nodes_list_ops[n]['name']
self.logger.info("Vn host name %s exists in op server..checking if exists in webui as well" % (
ops_config_node_name))
if not self.webui_common.click_monitor_config_nodes():
result = result and False
rows = self.webui_common.get_rows()
for i in range(len(rows)):
match_flag = 0
if rows[i].find_elements_by_class_name('slick-cell')[0].text == ops_config_node_name:
self.logger.info("Config_node name %s found in webui..going to match basic details..." % (
ops_config_node_name))
self.logger.debug(self.dash)
match_index = i
match_flag = 1
break
if not match_flag:
self.logger.error("Config_node name %s did not match in webui...not found in webui" % (
ops_config_node_name))
self.logger.debug(self.dash)
else:
self.logger.info("Click and retrieve config_node basic view details in webui for \
config_node-name %s " % (ops_config_node_name))
# filter config_node basic view details from opserver data
config_nodes_ops_data = self.webui_common.get_details(
config_nodes_list_ops[n]['href'])
self.webui_common.click_monitor_config_nodes_basic(match_index)
dom_basic_view = self.webui_common.get_basic_view_infra()
ops_basic_data = []
host_name = config_nodes_list_ops[n]['name']
ip_address = config_nodes_ops_data.get(
'ModuleCpuState').get('config_node_ip')
if not ip_address:
ip_address = '--'
else:
ip_address = ', '.join(ip_address)
process_state_list = config_nodes_ops_data.get(
'ModuleCpuState').get('process_state_list')
process_down_stop_time_dict = {}
process_up_start_time_dict = {}
exclude_process_list = [
'contrail-config-nodemgr', 'contrail-analytics-nodemgr', 'contrail-control-nodemgr', 'contrail-vrouter-nodemgr',
'openstack-nova-compute', 'contrail-svc-monitor', 'contrail-discovery:0', 'contrail-zookeeper', 'contrail-schema']
for i, item in enumerate(process_state_list):
if item['process_name'] == 'contrail-api:0':
api_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'ifmap':
ifmap_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'contrail-discovery:0':
discovery_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'contrail-schema':
schema_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'contrail-svc-monitor':
monitor_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
reduced_process_keys_dict = {}
for k, v in process_down_stop_time_dict.items():
if k not in exclude_process_list:
reduced_process_keys_dict[k] = v
if not reduced_process_keys_dict:
for process in exclude_process_list:
process_up_start_time_dict.pop(process, None)
recent_time = max(process_up_start_time_dict.values())
overall_node_status_time = self.webui_common.get_node_status_string(
str(recent_time))
overall_node_status_string = [
'Up since ' + status for status in overall_node_status_time]
else:
overall_node_status_down_time = self.webui_common.get_node_status_string(
str(max(reduced_process_keys_dict.values())))
process_down_count = len(reduced_process_keys_dict)
overall_node_status_string = str(
process_down_count) + ' Process down'
# special handling for overall node status value
node_status = self.browser.find_element_by_id('allItems').find_element_by_tag_name(
'p').get_attribute('innerHTML').replace('\n', '').strip()
for i, item in enumerate(dom_basic_view):
if item.get('key') == 'Overall Node Status':
dom_basic_view[i]['value'] = node_status
version = config_nodes_ops_data.get(
'ModuleCpuState').get('build_info')
if not version:
version = '--'
else:
version = json.loads(config_nodes_ops_data.get('ModuleCpuState').get('build_info')).get(
'build-info')[0].get('build-id')
version = self.webui_common.get_version_string(version)
module_cpu_info_len = len(
config_nodes_ops_data.get('ModuleCpuState').get('module_cpu_info'))
cpu_mem_info_dict = {}
for i in range(module_cpu_info_len):
if config_nodes_ops_data.get('ModuleCpuState').get('module_cpu_info')[i][
'module_id'] == 'ApiServer':
cpu_mem_info_dict = config_nodes_ops_data.get(
'ModuleCpuState').get('module_cpu_info')[i]
break
if not cpu_mem_info_dict:
cpu = '--'
memory = '--'
else:
cpu = self.webui_common.get_cpu_string(cpu_mem_info_dict)
memory = self.webui_common.get_memory_string(
cpu_mem_info_dict)
modified_ops_data = []
generator_list = self.webui_common.get_generators_list_ops()
for element in generator_list:
if element['name'] == ops_config_node_name + ':Config:Contrail-Config-Nodemgr:0':
analytics_data = element['href']
generators_vrouters_data = self.webui_common.get_details(
element['href'])
analytics_data = generators_vrouters_data.get(
'ModuleClientState').get('client_info')
if analytics_data['status'] == 'Established':
analytics_primary_ip = analytics_data[
'primary'].split(':')[0] + ' (Up)'
modified_ops_data.extend(
[{'key': 'Hostname', 'value': host_name}, {'key': 'IP Address', 'value': ip_address}, {'key': 'CPU', 'value': cpu}, {'key': 'Memory', 'value': memory}, {'key': 'Version', 'value': version}, {'key': 'API Server', 'value': api_string},
{'key': 'Discovery', 'value': discovery_string}, {'key': 'Service Monitor', 'value': monitor_string}, {'key': 'Ifmap', 'value': ifmap_string}, {'key': 'Schema Transformer', 'value': schema_string}, {'key': 'Overall Node Status', 'value': overall_node_status_string}])
if self.webui_common.match_ops_with_webui(modified_ops_data, dom_basic_view):
self.logger.info(
"Ops %s uves config_nodes basic view details data matched in webui" %
(ops_config_node_name))
else:
self.logger.error(
"Ops %s uves config_nodes basic view details data match failed in webui" % (ops_config_node_name))
result = result and False
return result
# end verify_config_nodes_ops_basic_data_in_webui
def verify_vrouter_ops_basic_data(self):
result = True
self.logger.info(
"Verifying vrouter basic ops-data in Webui monitor->infra->Virtual routers->details(basic view)...")
self.logger.debug(self.dash)
if not self.webui_common.click_monitor_vrouters():
result = result and False
rows = self.webui_common.get_rows()
vrouters_list_ops = self.webui_common.get_vrouters_list_ops()
for n in range(len(vrouters_list_ops)):
ops_vrouter_name = vrouters_list_ops[n]['name']
self.logger.info(
"Vrouter host name %s exists in ops server..checking if it exists in webui as well" %
(ops_vrouter_name))
if not self.webui_common.click_monitor_vrouters():
result = result and False
rows = self.webui_common.get_rows()
for i in range(len(rows)):
match_flag = 0
if rows[i].find_elements_by_class_name('slick-cell')[0].text == ops_vrouter_name:
self.logger.info(
"Vrouter name %s found in webui..going to match basic details..." % (ops_vrouter_name))
self.logger.debug(self.dash)
match_index = i
match_flag = 1
break
if not match_flag:
self.logger.error(
"Vrouter name %s did not match in webui...not found in webui" % (ops_vrouter_name))
self.logger.debug(self.dash)
else:
self.logger.info(
"Click and retrieve vrouter basic view details in webui for vrouter-name %s " % (ops_vrouter_name))
self.webui_common.click_monitor_vrouters_basic(match_index)
dom_basic_view = self.webui_common.get_basic_view_infra()
# special handling for overall node status value
node_status = self.browser.find_element_by_id('allItems').find_element_by_tag_name(
'p').get_attribute('innerHTML').replace('\n', '').strip()
for i, item in enumerate(dom_basic_view):
if item.get('key') == 'Overall Node Status':
dom_basic_view[i]['value'] = node_status
# special handling for control nodes
control_nodes = self.browser.find_element_by_class_name(
'table-cell').text
for i, item in enumerate(dom_basic_view):
if item.get('key') == 'Control Nodes':
dom_basic_view[i]['value'] = control_nodes
# filter vrouter basic view details from opserver data
vrouters_ops_data = self.webui_common.get_details(
<gh_stars>1-10
#!/usr/bin/sudo python
# El Toro LITE
#
# Coded by Jesse
# twitch.tv/oh_bother
#
# Licenses are for nerds. Er, I mean, do whatever with this. MIT or something.
# this "lite" version removes a lot of animations and similar cool stuff. :(
# also the client modified this code heavily. YMMV
#
# ElToro main code, this is going to work with V1 PCB.
# Assets are to be built into the eltoro/ass/ directory
#
# RPi Pinout:
#
# 3.3v 01|02 5v
# GPIO02 03|04 5v
# GPIO03 05|06 Gnd
# RGB_OE - GPIO04 07|08 GPIO14 - BTN_Breakbeam
# Gnd 09|10 GPIO15 - BTN_Coin
# RGB_CLK - GPIO17 11|12 GPIO18 - BTN_Start
# RGB_C - GPIO27 13|14 Gnd
# RGB_A - GPIO22 15|16 GPIO23 - RGB_B2
# 3.3v 17|18 GPIO24 - BTN_P1Up
# LED_DI - GPIO10 19|20 Gnd
# - GPIO09 21|22 GPIO25 - BTN_P2Dn
# LED_CI - GPIO11 23|24 GPIO08 - BTN_P2Up
# Gnd 25|26 GPIO07 - BTN_P1Dn
# ID_SD 27|28 ID_SC
# RGB_R1 - GPIO05 29|30 Gnd
# RGB_B1 - GPIO06 31|32 GPIO12 - RGB_R2
# RGB_G1 - GPIO13 33|34 Gnd
# - GPIO19 35|36 GPIO16 - RGB_G2
# RGB_B - GPIO26 37|38 GPIO20 - RGB_D
# GND 39|40 GPIO21 - RGB_LAT
#
# RGB display pixels are zero indexed.
# png image row 64 is buttons and show lights
# 0, 1, 2, 3, 4, 5, 6...
# start, p1up, p2up, p2dn, p1dn, 3w pixels...
#
# Adafruit RGB Hat compatible, 32x64 pixels
# Adafruit Coin Slot
# Ws2803 LED Drivers
# Adafruit RGB LED Buttons with ws2801 LED strip
# Adafruit Breakbeam
# ALC4040 USB sound adapter
#
# <NAME> 11/22/2017
# twitch.tv/oh_bother
#===================================================
#CREDITS SET TO 2 INITIAL STATE IS GAME FOR TESTING
#WIN SCORE IS SET TO 2
#===================================================
import os
import time
import random
import sys
from gpiozero import Button
from rgbmatrix import RGBMatrix, RGBMatrixOptions
from PIL import Image
from PIL import ImageFont, ImageDraw
import pygame
from threading import Thread
import Adafruit_WS2801
import Adafruit_GPIO.SPI as SPI
#Pins
pin_coin = 15
pin_breakbeam = 14
pin_start = 18
pin_p1up = 24
pin_p1dn = 7
pin_p2up = 8
pin_p2dn = 25
pin_dout = 10
pin_cout = 11
#settings
frame_rate = 0.1
fade_rate = 0.03
down_limit = 4
win_score = 21
round_cost = 2
coin_each = 1
button_inc = 1
min_score = 0
winWait = 2
idle_time = 40
guilt_time = 50
p1_upCol = (000, 000, 055)
p1_dnCol = (000, 000, 055)
p2_upCol = (000, 055, 000)
p2_dnCol = (000, 055, 000)
pixel_count = 32
#variables (globals)
credits = 0
p1_score = 0
p2_score = 0
idleInc = 0
#guiltInc = 0
startInc = 0
coinInc = 0
win1Inc = 0
win2Inc = 0
tieInc = 0
idleMax = 0
#guiltMax = 0
startMax = 0
coinMax = 0
win1Max = 0
win2Max = 0
tieMax = 0
baddnSfx = 0
badupSfx = 0
badhighSfx = 0
badlowSfx = 0
gooddnSfx = 0
goodupSfx = 0
idleFx = Image.new('RGB', (0, 0))
coinFx = Image.new('RGB', (0, 0))
win1Fx = Image.new('RGB', (0, 0))
win2Fx = Image.new('RGB', (0, 0))
tieFx = Image.new('RGB', (0, 0))
base = Image.new('RGBA', (64, 32))
#enable flags
startRun = 0
idleRun = 0
coinRun = 0
guiltRun = 0
gameWatch = 0
startWatch = 0
beamWatch = 0
beamBroke = 0
win1Run = 0
win2Run = 0
tieRun = 0
startWatch = 0
startPress = 0
p1hitWin = 0
p2hitWin = 0
tick = 0
tock = 0
#inputs
btn_coin = Button(pin_coin, hold_repeat=False)
btn_breakbeam = Button(pin_breakbeam, hold_repeat=False)
btn_start = Button(pin_start, hold_repeat=False, pull_up=False)
btn_p1up = Button(pin_p1up, hold_repeat=False, pull_up=False)
btn_p1dn = Button(pin_p1dn, hold_repeat=False, pull_up=False)
btn_p2up = Button(pin_p2up, hold_repeat=False, pull_up=False)
btn_p2dn = Button(pin_p2dn, hold_repeat=False, pull_up=False)
#initialize WS2801 Strip
pixels = Adafruit_WS2801.WS2801Pixels(pixel_count, clk=pin_cout, do=pin_dout)
pixels.clear()
pixels.show()
# Initialize mixer
pygame.mixer.pre_init(44100, -16, 2, 2048) # setup mixer to avoid sound lag
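# pre_init arguments: 44100 Hz sample rate, signed 16-bit samples, 2 channels (stereo),
# 2048-sample buffer; the small buffer is what keeps sound-effect latency low.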
pygame.init()
# Configuration for the matrix
# THIS HAS TO BE AFTER THE BUTTON AND AUDIO STUFF
options = RGBMatrixOptions()
options.rows = 32
options.chain_length = 2
options.parallel = 1
options.hardware_mapping = 'adafruit-hat' # If you have an Adafruit HAT: 'adafruit-hat'
matrix = RGBMatrix(options = options)
#==============STARTUP STATES==============
def state_boot():
#global constants
global idleMax, startMax, coinMax, win1Max, win2Max, tieMax
#global guiltMax
#global sound fx
global guiltSfx, coinSfx, baddnSfx, badupSfx, badhighSfx, badlowSfx, gooddnSfx, goodupSfx
global win1Sfx, win2Sfx, tieSfx
#global LED arrays
global idleFx, coinFx, win1Fx, win2Fx, tieFx
#global timers
global t, t2
print "boot message"
#boot screen
for thing in os.listdir("ass/boot"):
if ".wav" in thing:
#sound file name/location loaded up
soundLoc = "ass/boot/" + thing
effect = pygame.mixer.Sound(soundLoc)
effect.play()
file = "ass/boot/" + str(random.randint(1,2)) + ".png"
if os.path.isfile(file):
rgbimg = Image.open(file).convert('RGB')
matrix.SetImage(rgbimg)
for i in range(32):
r, g, b = rgbimg.getpixel((64, i))
for p in range(pixel_count):
pixels.set_pixel_rgb(p, r, g, b)
time.sleep(0.2) #flash through pixels
pixels.show()
else:
print "no boot image"
#load non-random sound files into global variables
guiltSfx = pygame.mixer.Sound("ass/guilt/guilt.wav")
coinSfx = pygame.mixer.Sound("ass/soundFx/coin.wav")
#win sounds
win1Sfx = pygame.mixer.Sound("ass/win/1.wav")
win2Sfx = pygame.mixer.Sound("ass/win/2.wav")
tieSfx = pygame.mixer.Sound("ass/win/t.wav")
#button sfx
baddnSfx = pygame.mixer.Sound("ass/soundFx/bbaddn.wav")
badupSfx = pygame.mixer.Sound("ass/soundFx/bbadup.wav")
badhighSfx = pygame.mixer.Sound("ass/soundFx/bbadhigh.wav")
badlowSfx = pygame.mixer.Sound("ass/soundFx/bbadlow.wav")
goodupSfx = pygame.mixer.Sound("ass/soundFx/bgoodup.wav")
gooddnSfx = pygame.mixer.Sound("ass/soundFx/bgooddn.wav")
#load sfx images into global variables
idleFx = Image.open("ass/ledFx/idle.png").convert('RGB')
#guiltFx = Image.open("ass/ledFx/idle.png").convert('RGB')
coinFx = Image.open("ass/ledFx/coin.png").convert('RGB')
win1Fx = Image.open("ass/ledFx/win1.png").convert('RGB')
win2Fx = Image.open("ass/ledFx/win2.png").convert('RGB')
tieFx = Image.open("ass/ledFx/tie.png").convert('RGB')
#load max global values for fades
idleMax, height = idleFx.size
#guiltMax, height =
coinMax, height = coinFx.size
win1Max, height = win1Fx.size
win2Max, height = win2Fx.size
tieMax, height = tieFx.size
#correct for 0 index
idleMax -= 1
coinMax -= 1
win1Max -= 1
win2Max -= 1
tieMax -= 1
startMax = 255
#print str(idleMax) + " " + str(coinMax) + " " + str(win1Max) + " " + str(win2Max) + " " + str(tieMax)
#start fade thread
led_thread = Thread(target=loop_LED)
led_thread.daemon = True
led_thread.start()
#wait for sound to stop
while pygame.mixer.get_busy():
pass
time.sleep(1)
allClear()
#make sure idle runs initially
t = time.time() - idle_time
t2 = time.time() - guilt_time
return state_idle
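# Convention note (assumption -- the driver loop is not shown in this excerpt): each
# state_* function returns the next state callable, so playback is presumably driven by
# something like:
#   state = state_boot
#   while callable(state):
#       state = state()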
#==============IDLING STATES==============
def state_idle():
#global timing flags
global tick, t, t2
#global button/etc flags
global idleRun
global startPress, startRun, startWatch
global beamWatch
#break beam
if (credits < 1) and (time.time() - t2 > guilt_time):
beamWatch = 1
else:
beamWatch = 0
# #fade start if there's enough credits
if credits >= round_cost:
#enable start button
startWatch = 1
#fade start
startRun = 1
# #players hit start to game
if startPress:
startPress = 0
startWatch = 0
return state_game
#timer for idle screen
#this just runs and ignores flags until it exits
#so needs to be fast, 8 frames-ish
#revision: the client reported idle was running every 30 seconds
#if (time.time() - t) > idle_time:
if time.time() - t > idle_time:
creditScrn(1)
t = time.time()
elif coinRun:
creditScrn(0)
if tick:
pixels.show()
tick = 0
return state_idle
#==============Game STATES==============
def state_game():
#global startWatch
global tick
global startPress
global gameWatch
global idleRun
idleRun = 0
#enable player buttons
gameWatch = 1
#score to display
displayScore()
#flags/interaction
#runs second so it shows bad number and rips it away from player
scoreCheck()
#starpress only active from scorecheck
if startPress:
gameWatch = 0
startPress = 0
if p1_score == p2_score:
return state_lannister
elif p1_score >= win_score:
return state_team1Win
else:
return state_team2Win
#display them
if tick:
pixels.show()
tick = 0
return state_game
#================WIN states!===============
def state_team1Win():
global tick
global win1Run
print "team1 wins"
win1 = Image.open("ass/win/1.png").convert('RGB')
matrix.SetImage(win1)
pygame.mixer.stop()
win1Sfx.play()
#led fx
win1Run = 1
#wait for sound to stop
while pygame.mixer.get_busy():
if tick:
pixels.show()
tick = 0
time.sleep(winWait)
return state_reset
def state_team2Win():
global win2Run
global tick
print "team2 wins"
win2 = Image.open("ass/win/2.png").convert('RGB')
matrix.SetImage(win2)
pygame.mixer.stop()
win2Sfx.play()
#led fx
win2Run = 1
while pygame.mixer.get_busy():
if tick:
pixels.show()
tick = 0
time.sleep(winWait)
return state_reset
def state_lannister():
global tieRun
global tick
print "tie win"
tie = Image.open("ass/win/t.png").convert('RGB')
matrix.SetImage(tie)
pygame.mixer.stop()
tieSfx.play()
#led fx
tieRun = 1
while pygame.mixer.get_busy():
if tick:
pixels.show()
tick = 0
time.sleep(winWait)
return state_reset
#===============reset game=================
def state_reset():
global credits
global p1_score
global p2_score
global p1hitWin
global p2hitWin
global t
#reset all variables/flags
#scores
p1_score = 0
p2_score = 0
#max score hits
p1hitWin = 0
p2hitWin = 0
#take yo money
credits -= round_cost
#make sure idle runs initially
t = time.time() - idle_time
print "game reset"
return state_idle
#============COIN/BUTTON callbacks================
#buttons DO NOT REPEAT, btn_delay is just an
#interaction delay
def coin_insert():
global coinRun
global credits
#anime LEDs keeyaaah!
coinSfx.play()
coinRun = 1
print "coin inserted"
credits += coin_each
def beam_break():
global beamWatch
global t2
print "beam broke"
if beamWatch:
guiltSfx.play()
beamWatch = 0
t2 = time.time()
def start_button():
global
<gh_stars>10-100
#!/usr/local/bin/python
"""
Author: <NAME>
Contact: <EMAIL>
Testing:
import dash_client
mpd_file = <MPD_FILE>
dash_client.playback_duration(mpd_file, 'http://192.168.127.12:8005/')
From commandline:
python dash_client.py -m "http://192.168.127.12:8006/media/mpd/x4ukwHdACDw.mpd" -p "all"
python dash_client.py -m "http://127.0.0.1:8000/media/mpd/x4ukwHdACDw.mpd" -p "basic"
"""
from __future__ import division
from datetime import datetime
from argparse import ArgumentParser
from collections import defaultdict
import errno
import httplib
from multiprocessing import Process, Queue
import os
import random
import signal
from string import ascii_letters, digits
import sys
import time
import timeit
import urllib2
import urlparse
import string
import urllib2
import fcntl
import psutil
from subprocess import *
from adaptation import basic_dash, basic_dash2, weighted_dash, netflix_dash
from adaptation.adaptation import WeightedMean
import config_dash
from configure_log_file import configure_log_file, write_json
import dash_buffer
import read_mpd
from oauthlib.uri_validate import segment
from twisted.python.util import println
from cherrypy import quickstart
import subprocess
from symbol import except_clause
''' try:
WindowsError
except NameError:
from shutil import WindowsError
'''
# Constants
DEFAULT_PLAYBACK = 'BASIC'
DOWNLOAD_CHUNK = 1024
# Globals for arg parser with the default values
# Not sure if this is the correct way ....
MPD = None
HOST = None
LIST = False
QUIC = False
CURL = False
PLAYBACK = DEFAULT_PLAYBACK
DOWNLOAD = False
SEGMENT_LIMIT = None
CONNECTION_TYPE_STR = ""
JUMP = False
JUMP_SCENARIO = ""
CMD = ""
JUMP_BUFFER_COUNTER = 0
class DashPlayback:
"""
Audio[bandwidth] : {duration, url_list}
Video[bandwidth] : {duration, url_list}
"""
def __init__(self):
self.min_buffer_time = None
self.playback_duration = None
self.audio = dict()
self.video = dict()
def get_mpd(url):
""" Module to download the MPD from the URL and save it to file"""
try:
connection = urllib2.urlopen(url, timeout=9999)
except urllib2.HTTPError, error:
config_dash.LOG.error("Unable to download MPD file HTTP Error: %s" % error.code)
return None
except urllib2.URLError:
error_message = "URLError. Unable to reach Server.Check if Server active"
config_dash.LOG.error(error_message)
print error_message
return None
except (IOError, httplib.HTTPException):
message = "Unable to download MPD file. HTTP Error."
config_dash.LOG.error(message)
return None
mpd_data = connection.read()
connection.close()
mpd_file = url.split('/')[-1]
mpd_file_handle = open(mpd_file, 'w')
mpd_file_handle.write(mpd_data)
mpd_file_handle.close()
config_dash.LOG.info("Downloaded the MPD file {}".format(mpd_file))
return mpd_file
def get_bandwidth(data, duration):
""" Module to determine the bandwidth for a segment
download"""
return data * 8 / duration
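# Worked example: 1,000,000 bytes downloaded in 4 s -> 1,000,000 * 8 / 4 = 2,000,000 bits/s.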
def get_domain_name(url):
""" Module to obtain the domain name from the URL
From : http://stackoverflow.com/questions/9626535/get-domain-name-from-url
"""
parsed_uri = urlparse.urlparse(url)
domain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
return domain
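# e.g. get_domain_name("http://192.168.127.12:8006/media/mpd/x4ukwHdACDw.mpd")
# returns "http://192.168.127.12:8006/".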
def id_generator(id_size=6):
""" Module to create a random string with uppercase
and digits.
"""
TEMP_STR = "TEMP_"
return TEMP_STR + ''.join(random.choice(ascii_letters + digits) for _ in range(id_size))
def download_segment(segment_url, dash_folder, sb=None):
# URLLIB
if (not CURL and not QUIC): # URLLIB
""" HTTP Module to download the segment """
try:
# print segment_url
connection = urllib2.urlopen(segment_url, timeout=None)
except urllib2.HTTPError, error:
config_dash.LOG.error("Unable to download DASH Segment {} HTTP Error:{} ".format(segment_url, str(error.code)))
return None
parsed_uri = urlparse.urlparse(segment_url)
segment_path = '{uri.path}'.format(uri=parsed_uri)
while segment_path.startswith('/'):
segment_path = segment_path[1:]
segment_filename = os.path.join(dash_folder, os.path.basename(segment_path))
make_sure_path_exists(os.path.dirname(segment_filename))
segment_file_handle = open(segment_filename, 'wb')
segment_size = 0
while True:
segment_data = connection.read(DOWNLOAD_CHUNK)
segment_size += len(segment_data)
segment_file_handle.write(segment_data)
if len(segment_data) < DOWNLOAD_CHUNK:
break
connection.close()
segment_file_handle.close()
return segment_size, segment_filename
if (CURL or QUIC): # CURL or QUIC client
""" CURL or QUIC client Module to download the segment """
parsed_uri = urlparse.urlparse(segment_url)
segment_path = '{uri.path}'.format(uri=parsed_uri)
while segment_path.startswith('/'):
segment_path = segment_path[1:]
segment_filename = os.path.join(dash_folder, os.path.basename(segment_path))
requested_url = segment_url
if QUIC:
requested_url = string.replace(segment_url, 'https://' + HOST, config_dash.QUIC_FILES_HEADER_XORIGINAL_URL_DOMAIN)
print "Write requested_url to subprocess stdin: ", requested_url
print sb.stdin.write(requested_url + '\n')
while True:
out = non_block_read(sb.stdout) # will return '' instead of hanging for ever
if "FATAL" in out or "Failed to connect" in out or "ERROR" in out:
segment_size = "-1"
print "calculated segment size:", int(segment_size)
int_segment_size = int(segment_size)
check_kill_process("quic_client")
break
if "file_size_start:" in out:
start_index = out.find("file_size_start:") + len("file_size_start:")
end_index = out.find(":file_size_end")
segment_size = out[start_index:end_index]
print "calculated segment size:", int(segment_size)
int_segment_size = int(segment_size)
if int_segment_size == -1:
check_kill_process("LibCurlCppConsole")
break
return int_segment_size, segment_filename
def get_media_all(domain, media_info, file_identifier, done_queue):
""" Download the media from the list of URL's in media
"""
bandwidth, media_dict = media_info
media = media_dict[bandwidth]
media_start_time = timeit.default_timer()
for segment in [media.initialization] + media.url_list:
start_time = timeit.default_timer()
segment_url = urlparse.urljoin(domain, segment)
_, segment_file = download_segment(segment_url, file_identifier)
elapsed = timeit.default_timer() - start_time
if segment_file:
done_queue.put((bandwidth, segment_url, elapsed))
media_download_time = timeit.default_timer() - media_start_time
done_queue.put((bandwidth, 'STOP', media_download_time))
return None
def make_sure_path_exists(path):
""" Module to make sure the path exists if not create it
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def print_representations(dp_object):
""" Module to print the representations"""
print "The DASH media has the following video representations/bitrates"
for bandwidth in dp_object.video:
print bandwidth
def start_playback_smart(dp_object, domain, playback_type=None, download=False, video_segment_duration=None, connection_type="", JUMP_SCENARIO=""):
""" Module that downloads the MPD-FIle and download
all the representations of the Module to download
the MPEG-DASH media.
Example: start_playback_smart(dp_object, domain, "SMART", DOWNLOAD, video_segment_duration)
:param dp_object: The DASH-playback object
:param domain: The domain name of the server (The segment URLS are domain + relative_address)
:param playback_type: The type of playback
1. 'BASIC' - The basic adaptation scheme
2. 'SARA' - Segment Aware Rate Adaptation
3. 'NETFLIX' - Buffer based adaptation used by Netflix
:param download: Set to True if the segments are to be stored locally (Boolean). Default False
:param video_segment_duration: Playback duration of each segment
:return:
"""
# Initialize the DASH buffer
dash_player = dash_buffer.DashPlayer(dp_object.playback_duration, video_segment_duration, connection_type)
dash_player.start()
# A folder to save the segments in
file_identifier = 'URLLIB_' #id_generator()
config_dash.LOG.info("The segments are stored in %s" % file_identifier)
dp_list = defaultdict(defaultdict)
# Creating a Dictionary of all that has the URLs for each segment and different bitrates
for bitrate in dp_object.video:
# Getting the URL list for each bitrate
dp_object.video[bitrate] = read_mpd.get_url_list(dp_object.video[bitrate], video_segment_duration,
dp_object.playback_duration, bitrate)
if "$Bandwidth$" in dp_object.video[bitrate].initialization:
dp_object.video[bitrate].initialization = dp_object.video[bitrate].initialization.replace(
"$Bandwidth$", str(bitrate))
media_urls = [dp_object.video[bitrate].initialization] + dp_object.video[bitrate].url_list
for segment_count, segment_url in enumerate(media_urls, dp_object.video[bitrate].start):
# segment_duration = dp_object.video[bitrate].segment_duration
dp_list[segment_count][bitrate] = segment_url
# print segment_count,bitrate,segment_url
bitrates = dp_object.video.keys()
bitrates.sort()
average_dwn_time = 0
segment_files = []
# For basic adaptation
previous_segment_times = []
recent_download_sizes = []
weighted_mean_object = None
current_bitrate = bitrates[0]
previous_bitrate = None
total_downloaded = 0
# Delay in terms of the number of segments
delay = 0
segment_duration = 0
segment_size = segment_download_time = None
# Netflix Variables
average_segment_sizes = netflix_rate_map = None
netflix_state = "INITIAL"
sb = None
global JUMP_BUFFER_COUNTER
JUMP_BUFFER_COUNTER=0
# Start playback of all the segments
"""
for segment1 in dp_list.keys():
for bitrate1 in dp_list[segment1]:
print segment1, bitrate1, dp_list[segment1][bitrate1]
"""
if (CURL or QUIC): # CURL or QUIC client
""" CURL or QUIC client Module to download the segment """
if CURL:
CMD = config_dash.CURL_CLIENT_CMD
print CMD
if QUIC:
CMD = config_dash.QUIC_CLIENT_CMD
print CMD
sb = Popen(CMD, shell=True, stdout=PIPE, stdin=PIPE, stderr=STDOUT)
while True:
out = non_block_read(sb.stdout) # will return '' instead of hanging for ever
if "started" in out:
print out
break
max_jump_count = 0
current_jump_index = 0
if JUMP:
JUMP_SCENARIO_ARR = JUMP_SCENARIO.split(',')
max_jump_count = len(JUMP_SCENARIO_ARR)
total_segment_count = len(dp_list)
segment_number = 1
while segment_number <= total_segment_count:
config_dash.LOG.info("*************** segment_number:" + str(segment_number) + "*********************")
config_dash.LOG.info(" {}: Processing the segment {}".format(playback_type.upper(), segment_number))
write_json()
if not previous_bitrate:
previous_bitrate = current_bitrate
if SEGMENT_LIMIT:
if not dash_player.segment_limit:
dash_player.segment_limit = int(SEGMENT_LIMIT)
if segment_number > int(SEGMENT_LIMIT):
config_dash.LOG.info("Segment limit reached")
break
if segment_number == dp_object.video[bitrate].start:
current_bitrate = bitrates[0]
else:
if playback_type.upper() == "BASIC":
current_bitrate, average_dwn_time = basic_dash2.basic_dash2(segment_number, bitrates, average_dwn_time,
recent_download_sizes,
previous_segment_times, current_bitrate)
if dash_player.buffer.qsize() > config_dash.BASIC_THRESHOLD:
delay = dash_player.buffer.qsize() - config_dash.BASIC_THRESHOLD
config_dash.LOG.info("Basic-DASH: Selected {} for the segment {}".format(current_bitrate,
segment_number + 1))
elif playback_type.upper() == "SMART":
if not weighted_mean_object:
weighted_mean_object = WeightedMean(config_dash.SARA_SAMPLE_COUNT)
config_dash.LOG.debug("Initializing the weighted Mean object")
# Checking the segment number is in acceptable range
if segment_number < len(dp_list) - 1 + dp_object.video[bitrate].start:
try:
config_dash.LOG.info("JUMP_BUFFER_COUNTER: %s",str(JUMP_BUFFER_COUNTER))
current_bitrate, delay,JUMP_BUFFER_COUNTER = weighted_dash.weighted_dash(bitrates, dash_player,
weighted_mean_object.weighted_mean_rate,
current_bitrate,
get_segment_sizes(dp_object,
segment_number + 1),JUMP_BUFFER_COUNTER)
except IndexError, e:
config_dash.LOG.error(e)
elif playback_type.upper() == "NETFLIX":
config_dash.LOG.info("Playback is NETFLIX")
# Calculate the average segment sizes for each bitrate
if not average_segment_sizes:
average_segment_sizes = get_average_segment_sizes(dp_object)
if segment_number < len(dp_list) - 1 + dp_object.video[bitrate].start:
try:
if segment_size and segment_download_time:
segment_download_rate = segment_size / segment_download_time
else:
segment_download_rate = 0
config_dash.LOG.info("JUMP_BUFFER_COUNTER: %s",str(JUMP_BUFFER_COUNTER))
current_bitrate, netflix_rate_map, netflix_state,JUMP_BUFFER_COUNTER = netflix_dash.netflix_dash(
bitrates, dash_player, segment_download_rate, current_bitrate, average_segment_sizes,
netflix_rate_map, netflix_state,JUMP_BUFFER_COUNTER)
config_dash.LOG.info("NETFLIX: Next bitrate = {}".format(current_bitrate))
except IndexError, e:
config_dash.LOG.error(e)
else:
config_dash.LOG.critical("Completed segment playback for Netflix")
break
# If the buffer is full wait till it gets empty
if dash_player.buffer.qsize() >= config_dash.NETFLIX_BUFFER_SIZE:
delay = (dash_player.buffer.qsize() - config_dash.NETFLIX_BUFFER_SIZE + 1)
if len(label_str) == 0:
return "", "en"
if "@" in label_str:
res = label_str.split("@")
text_string = "@".join(res[:-1]).replace('"', "").replace("'", "")
lang = res[-1].replace('"', '').replace("'", "")
else:
text_string = label_str.replace('"', "").replace("'", "")
lang = "en"
return text_string, lang
@staticmethod
def create_all_text(labels, aliases, descriptions):
text = ''
if 'en' in labels and labels['en']:
text = text + '\n'.join(labels['en']) + '\n'
if 'en' in aliases and aliases['en']:
text = text + '\n'.join(aliases['en']) + '\n'
if 'en' in descriptions and descriptions['en']:
text = text + '\n'.join(descriptions['en']) + '\n'
return text
@staticmethod
def to_float(input_str):
try:
return float(input_str)
except (TypeError, ValueError):
return None
@staticmethod
def create_mapping_es(es_version: float, str_fields_need_index: typing.List[str],
float_fields: typing.List[str] = None,
str_fields_no_index: typing.List[str] = None, copy_to_fields: typing.List[str] = None,
all_langs=None, int_fields: typing.List[str] = None):
if all_langs is None or len(all_langs) == 0:
all_langs = ['en']
properties_dict = {}
# add property part
for str_field in str_fields_need_index:
if str_field == 'id':
properties_dict[str_field] = {}
properties_dict[str_field]["type"] = "text"
properties_dict[str_field]['fields'] = {
"keyword": {
"type": "keyword",
"ignore_above": 256
},
"keyword_lower": {
"type": "keyword",
"normalizer": "lowercase_normalizer"
}
}
else:
properties_dict[str_field] = {"properties": {}}
for lang in all_langs:
if lang not in properties_dict[str_field]["properties"]:
properties_dict[str_field]["properties"][lang] = {}
properties_dict[str_field]["properties"][lang]['type'] = "text"
if str_field == "aliases" or str_field == 'labels':
properties_dict[str_field]["properties"][lang]['fields'] = {
"keyword": {
"type": "keyword",
"ignore_above": 256
},
"keyword_lower": {
"type": "keyword",
"normalizer": "lowercase_normalizer"
}
}
else:
properties_dict[str_field]["properties"][lang]['fields'] = {
"keyword_lower": {
"type": "keyword",
"normalizer": "lowercase_normalizer"
}
}
if copy_to_fields:
# one copy to field for different languages
# one copy to field for all languages
if str_field in copy_to_fields:
properties_dict[str_field]["properties"][lang]["copy_to"] = [
f"all_labels.{lang}",
"all_labels_aliases"
]
if "all_labels" not in properties_dict:
properties_dict["all_labels"] = {"properties": {}}
properties_dict["all_labels"]["properties"][lang] = {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
},
"keyword_lower": {
"type": "keyword",
"normalizer": "lowercase_normalizer"
},
"ngram": {
"type": "text",
"analyzer": "edge_ngram_analyzer",
"search_analyzer": "edge_ngram_search_analyzer"
}
}
}
if "all_labels_aliases" not in properties_dict:
properties_dict["all_labels_aliases"] = {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
},
"keyword_lower": {
"type": "keyword",
"normalizer": "lowercase_normalizer"
}
}
}
if float_fields:
for float_field in float_fields:
properties_dict[float_field] = {
"type": "float"
}
if int_fields:
for int_field in int_fields:
properties_dict[int_field] = {
"type": "integer"
}
if str_fields_no_index:
for str_field in str_fields_no_index:
if es_version >= 6:
properties_dict[str_field] = {
"type": "text",
"index": "false"
}
else:
properties_dict[str_field] = {
"type": "text",
"index": "no"
}
settings = {
"index": {
"analysis": {
"normalizer": {
"lowercase_normalizer": {
"filter": [
"lowercase"
],
"type": "custom"
}
},
"analyzer": {
"edge_ngram_analyzer": {
"filter": [
"lowercase"
],
"tokenizer": "edge_ngram_tokenizer"
},
"edge_ngram_search_analyzer": {
"tokenizer": "lowercase"
}
},
"tokenizer": {
"edge_ngram_tokenizer": {
"token_chars": [
"letter"
],
"min_gram": "2",
"type": "edge_ngram",
"max_gram": "20"
}
}
}
}
}
# finish mapping dict
if es_version >= 6:
mapping_dict = {
"mappings": {
"properties": properties_dict
},
"settings": settings
}
else:
mapping_dict = {
"mappings": {
"doc": {
"properties": properties_dict
}
},
"settings": settings
}
return mapping_dict
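# Hedged usage sketch (argument values are illustrative only):
#   mapping = Utility.create_mapping_es(7.0, ['id', 'labels', 'aliases', 'descriptions'],
#                                       float_fields=['pagerank'],
#                                       copy_to_fields=['labels', 'aliases'],
#                                       all_langs=['en', 'es'])
# The returned dict can then be written to a JSON file and passed as mapping_file_path
# to create_index()/load_elasticsearch_index() below.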
@staticmethod
def load_elasticsearch_index(kgtk_jl_path, es_url, es_index, es_version, mapping_file_path=None, es_user=None,
es_pass=None,
batch_size=10000):
"""
loads a jsonlines file to Elasticsearch index.
Args:
kgtk_jl_path: input json lines file, could be output of build_elasticsearch_index
es_url: Elasticsearch server url
es_index: Elasticsearch index to be created/loaded
mapping_file_path: mapping file for the index
es_user: Elasticsearch user
es_pass: Elasticsearch password
batch_size: batch size to be loaded at once
Returns: Nothing
"""
# first create the index
create_response = Utility.create_index(es_url, es_index, mapping_file_path, es_user, es_pass)
print('create response: {}'.format(create_response.status_code))
f = open(kgtk_jl_path)
load_batch = []
counter = 0
# i = 0
for line in f:
# i += 1
counter += 1
# if i > 1918500:
each_res = line.replace('\n', '')
if not each_res:
continue
json_x = json.loads(each_res)
load_batch.append(json.dumps({"index": {"_id": json_x['id']}}))
load_batch.append(line.replace('\n', ''))
if len(load_batch) % batch_size == 0:
print('done {} rows'.format(counter))
response = None
try:
response = Utility.load_index(es_version, es_url, es_index, '{}\n\n'.format('\n'.join(load_batch)),
mapping_file_path,
es_user=es_user, es_pass=es_pass)
if response.status_code >= 400:
print(response.text)
except:
print('Exception while loading a batch to es')
if response is not None:
print(response.text)
print(response.status_code)
load_batch = []
if len(load_batch) > 0:
response = Utility.load_index(es_version, es_url, es_index, '{}\n\n'.format('\n'.join(load_batch)),
mapping_file_path,
es_user=es_user, es_pass=es_pass)
if response.status_code >= 400:
print(response.text)
print('Finished loading the elasticsearch index')
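# Hedged usage sketch (URL, index and file names are placeholders, not from this repo):
#   Utility.load_elasticsearch_index('wikidata_docs.jl', 'http://localhost:9200',
#                                    'wikidata_index', 7.0,
#                                    mapping_file_path='mapping.json', batch_size=5000)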
@staticmethod
def load_index(es_version, es_url, es_index, payload, mapping_file_path, es_user=None, es_pass=None):
if es_version >= 6:
es_url_bulk = '{}/{}/_doc/_bulk'.format(es_url, es_index)
else:
es_url_bulk = '{}/{}/doc/_bulk'.format(es_url, es_index)
headers = {
'Content-Type': 'application/x-ndjson',
}
if es_user and es_pass:
return requests.post(es_url_bulk, headers=headers, data=payload, auth=HTTPBasicAuth(es_user, es_pass))
else:
return requests.post(es_url_bulk, headers=headers, data=payload)
@staticmethod
def create_index(es_url, es_index, mapping_file_path, es_user=None, es_pass=None):
es_url_index = '{}/{}'.format(es_url, es_index)
# first check if index exists
if es_user and es_pass:
response = requests.get(es_url_index, auth=HTTPBasicAuth(es_user, es_pass))
else:
response = requests.get(es_url_index)
if response.status_code == 200:
print('Index: {} already exists...'.format(es_index))
elif response.status_code // 100 == 4:
if mapping_file_path is not None:
# no need to create index if mapping file is not specified, it'll be created at load time
mapping = json.load(open(mapping_file_path))
if es_user and es_pass:
response = requests.put(es_url_index, auth=HTTPBasicAuth(es_user, es_pass), json=mapping)
else:
response = requests.put(es_url_index, json=mapping)
if response.text and "error" in json.loads(response.text):
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(json.loads(response.text))
raise UploadError("Creating new index failed! Please check the error response above!")
else:
print('An exception has occurred: ')
print(response.text)
return response
@staticmethod
def format_error_details(module_name, error_details, error_code=-1):
error = {
"module_name": module_name,
"error_details": error_details,
"error_code": error_code
}
return error
@staticmethod
def str2bool(v: str):
"""
a simple wrap function that can wrap any kind of input to bool type, used for argparsers
"""
import argparse
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
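# Typical argparse wiring (sketch): parser.add_argument('--flag', type=Utility.str2bool, default=False)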
@staticmethod
def execute_shell_code(shell_command: str, debug=False):
from subprocess import Popen, PIPE
if debug:
Utility.eprint("Executing...")
Utility.eprint(shell_command)
Utility.eprint("-" * 100)
out = Popen(shell_command, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
# out.wait()
"""
Popen.wait():
Wait for child process to terminate. Set and return returncode attribute.
Warning: This will deadlock when using stdout=PIPE and/or stderr=PIPE and the child process generates enough output to
a pipe such that it blocks waiting for the OS pipe buffer to accept more data. Use communicate() to avoid that. """
stdout, stderr = out.communicate()
if stderr:
Utility.eprint("Error!!")
Utility.eprint(stderr)
Utility.eprint("-" * 50)
if debug:
Utility.eprint("Running fished!!!!!!")
return stdout
@staticmethod
def eprint(*args, **kwargs):
"""
print the things to stderr instead of stdout to prevent get included of bash `>`
"""
import sys
print(*args, file=sys.stderr, **kwargs)
@staticmethod
def add_acronym(labels: typing.Union[str, typing.List[str]]):
"""
base on the given list of labels, add the acronym of each label
For example: ["Barack Obama"] -> ["Barack Obama", "B. Obama"]
:param labels: a list of str or a str
:return: a list of str with acronym format data
"""
if isinstance(labels, str):
labels = [labels]
useless_words = [
'Mr', 'Ms', 'Miss', 'Mrs', 'Mx', 'Master', 'Sir', 'Madam', 'Dame', 'Lord', 'Lady',
'Dr', 'Prof', 'Br', 'Sr', 'Fr', 'Rev', 'Pr', 'Elder'
]
# ensure we can search both on capitalized case and normal case
temp = []
for each in useless_words:
temp.append(each.lower())
useless_words.extend(temp)
useless_words_parser = re.compile(r"({})\s".format("|".join(useless_words)))
all_candidates = set(labels)
# check comma
new_add_labels = set()
for each_label in labels:
if "," in each_label:
comma_pos = each_label.find(",")
# if have comma, it means last name maybe at first
all_candidates.add(each_label[comma_pos + 1:].lstrip() + " " + each_label[:comma_pos])
# check useless words and remove them (like honorifics)
labels = list(all_candidates)
for each_label in labels:
# remove those until nothing remained, add the processed label after each removal
while useless_words_parser.search(each_label):
temp_search_res = useless_words_parser.search(each_label)
each_label = each_label[:temp_search_res.start()] + " " + each_label[temp_search_res.end():]
all_candidates.add(each_label)
# generate acronyms
labels = list(all_candidates)
for each_label in labels:
# ensure only 1 space between words
label_preprocessed = " ".join(each_label.split())
f_name1, f_name2 = "", ""
names = label_preprocessed.split(' ')
for n in names[:-1]:
f_name1 = '{}{}. '.format(f_name1, n[0])
f_name2 = '{}{} '.format(f_name2, n[0])
f_name1 += names[-1]
f_name2 += names[-1]
all_candidates.add(f_name1)
all_candidates.add(f_name2)
return list(all_candidates)
@staticmethod
def jaccard_similarity(list1: typing.List[str], list2: typing.List[str]):
s1 = set(list1)
s2 = set(list2)
return len(s1.intersection(s2)) / len(s1.union(s2))
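# e.g. jaccard_similarity(['a', 'b'], ['b', 'c']) == 1/3 (one shared element out of three distinct).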
@staticmethod
def sort_by_col_and_row(input_df: pd.DataFrame) -> pd.DataFrame:
out_df = input_df.copy()
# astype float first to prevent error of "invalid literal for int()
Ax, unsigned int const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
long long [] Ax, long long const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned long long [] Ax, unsigned long long const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
float [] Ax, float const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
double [] Ax, double const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
long double [] Ax, long double const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_cfloat_wrapper [] Ax, npy_cfloat_wrapper const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_cdouble_wrapper [] Ax, npy_cdouble_wrapper const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_clongdouble_wrapper [] Ax, npy_clongdouble_wrapper const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_bool_wrapper [] Ax, npy_bool_wrapper const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
signed char [] Ax, signed char const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
unsigned char [] Ax, unsigned char const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
short [] Ax, short const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
unsigned short [] Ax, unsigned short const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
int [] Ax, int const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
unsigned int [] Ax, unsigned int const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
long long [] Ax, long long const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
unsigned long long [] Ax, unsigned long long const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
float [] Ax, float const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
double [] Ax, double const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
long double [] Ax, long double const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_cfloat_wrapper [] Ax, npy_cfloat_wrapper const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_cdouble_wrapper [] Ax, npy_cdouble_wrapper const [] Xx)
csr_scale_rows(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_clongdouble_wrapper [] Ax, npy_clongdouble_wrapper const [] Xx)
"""
return _csr.csr_scale_rows(*args)
def csr_scale_columns(*args):
"""
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_bool_wrapper [] Ax, npy_bool_wrapper const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
signed char [] Ax, signed char const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned char [] Ax, unsigned char const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
short [] Ax, short const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned short [] Ax, unsigned short const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
int [] Ax, int const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned int [] Ax, unsigned int const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
long long [] Ax, long long const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned long long [] Ax, unsigned long long const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
float [] Ax, float const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
double [] Ax, double const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
long double [] Ax, long double const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_cfloat_wrapper [] Ax, npy_cfloat_wrapper const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_cdouble_wrapper [] Ax, npy_cdouble_wrapper const [] Xx)
csr_scale_columns(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_clongdouble_wrapper [] Ax, npy_clongdouble_wrapper const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_bool_wrapper [] Ax, npy_bool_wrapper const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
signed char [] Ax, signed char const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
unsigned char [] Ax, unsigned char const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
short [] Ax, short const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
unsigned short [] Ax, unsigned short const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
int [] Ax, int const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
unsigned int [] Ax, unsigned int const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
long long [] Ax, long long const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
unsigned long long [] Ax, unsigned long long const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
float [] Ax, float const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
double [] Ax, double const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
long double [] Ax, long double const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_cfloat_wrapper [] Ax, npy_cfloat_wrapper const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_cdouble_wrapper [] Ax, npy_cdouble_wrapper const [] Xx)
csr_scale_columns(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_clongdouble_wrapper [] Ax, npy_clongdouble_wrapper const [] Xx)
"""
return _csr.csr_scale_columns(*args)
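# Illustrative usage sketch (not part of the generated wrapper): csr_scale_rows
# and csr_scale_columns operate in place on the raw CSR arrays (indptr, indices,
# data); the overload that runs is selected from the dtypes of those arrays.
# The scipy.sparse matrix below is an example value only.
def _example_csr_scale_rows():
    import numpy as np
    from scipy.sparse import csr_matrix
    A = csr_matrix(np.arange(1, 7, dtype=np.float64).reshape(2, 3))
    x = np.array([2.0, 10.0])
    # Scales the nonzeros of row 0 by 2.0 and of row 1 by 10.0, in place.
    csr_scale_rows(A.shape[0], A.shape[1], A.indptr, A.indices, A.data, x)
    return A.toarray()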
def csr_tocsc(*args):
"""
csr_tocsc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_bool_wrapper const [] Ax, npy_int32 [] Bp, npy_int32 [] Bi,
npy_bool_wrapper [] Bx)
csr_tocsc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
signed char const [] Ax, npy_int32 [] Bp, npy_int32 [] Bi,
signed char [] Bx)
"""
return _csr.csr_tocsc(*args)
from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from functools import wraps
from glob import glob
from math import ceil
from operator import getitem
import os
from threading import Lock
import uuid
from warnings import warn
import pandas as pd
import numpy as np
from toolz import merge
from ..base import tokenize
from ..compatibility import unicode, apply
from .. import array as da
from ..async import get_sync
from ..delayed import Delayed, delayed
from .core import _Frame, DataFrame, Series
from .shuffle import set_partition
from ..utils import build_name_function
lock = Lock()
def _dummy_from_array(x, columns=None):
""" Create empty pd.DataFrame or pd.Series which has correct dtype """
if x.ndim > 2:
raise ValueError('from_array does not accept more than a 2D array, got'
' array with shape %r' % (x.shape,))
if getattr(x.dtype, 'names', None) is not None:
# record array has named columns
cols = tuple(x.dtype.names)
dtypes = [x.dtype.fields[n][0] for n in x.dtype.names]
elif x.ndim == 1 and (np.isscalar(columns) or columns is None):
# Series
return pd.Series([], name=columns, dtype=x.dtype)
else:
cols = list(range(x.shape[1])) if x.ndim == 2 else [0]
dtypes = [x.dtype] * len(cols)
data = {}
for c, dt in zip(cols, dtypes):
data[c] = np.array([], dtype=dt)
data = pd.DataFrame(data, columns=cols)
if columns is not None:
# if invalid, raise error from pandas
data.columns = columns
return data
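# Small illustration (not part of the original module) of the two shapes
# _dummy_from_array handles: a 1d array with a scalar column name yields an
# empty named Series, while a 2d array yields an empty DataFrame with the
# correct dtypes. The names 'x', 'a', 'b' are arbitrary example values.
def _example_dummy_from_array():
    s = _dummy_from_array(np.empty(0, dtype='f8'), columns='x')
    df = _dummy_from_array(np.empty((0, 2), dtype='i8'), columns=['a', 'b'])
    return s, df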
def from_array(x, chunksize=50000, columns=None):
""" Read dask Dataframe from any slicable array
Uses getitem syntax to pull slices out of the array. The array need not be
a NumPy array but must support slicing syntax
x[50000:100000]
and have 2 dimensions:
x.ndim == 2
or have a record dtype:
x.dtype == [('name', 'O'), ('balance', 'i8')]
"""
if isinstance(x, da.Array):
return from_dask_array(x, columns=columns)
dummy = _dummy_from_array(x, columns)
divisions = tuple(range(0, len(x), chunksize))
divisions = divisions + (len(x) - 1,)
token = tokenize(x, chunksize, columns)
name = 'from_array-' + token
dsk = {}
for i in range(0, int(ceil(len(x) / chunksize))):
data = (getitem, x, slice(i * chunksize, (i + 1) * chunksize))
if isinstance(dummy, pd.Series):
dsk[name, i] = (pd.Series, data, None, dummy.dtype, dummy.name)
else:
dsk[name, i] = (pd.DataFrame, data, None, dummy.columns)
return _Frame(dsk, name, dummy, divisions)
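# Hedged usage sketch (not part of the original module): from_array on a plain
# NumPy array. The shape, chunksize and column names are example values only.
def _example_from_array():
    x = np.zeros((100000, 2))
    # Two partitions: divisions are (0, 50000, 99999).
    ddf = from_array(x, chunksize=50000, columns=['a', 'b'])
    return ddf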
def from_pandas(data, npartitions=None, chunksize=None, sort=True, name=None):
"""Construct a dask object from a pandas object.
If given a ``pandas.Series`` a ``dask.Series`` will be returned. If given a
``pandas.DataFrame`` a ``dask.DataFrame`` will be returned. All other
pandas objects will raise a ``TypeError``.
Parameters
----------
df : pandas.DataFrame or pandas.Series
The DataFrame/Series with which to construct a dask DataFrame/Series
npartitions : int, optional
The number of partitions of the index to create.
chunksize : int, optional
The size of the partitions of the index.
Returns
-------
dask.DataFrame or dask.Series
A dask DataFrame/Series partitioned along the index
Examples
--------
>>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),
... index=pd.date_range(start='20100101', periods=6))
>>> ddf = from_pandas(df, npartitions=3)
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', offset='D'),
Timestamp('2010-01-03 00:00:00', offset='D'),
Timestamp('2010-01-05 00:00:00', offset='D'),
Timestamp('2010-01-06 00:00:00', offset='D'))
>>> ddf = from_pandas(df.a, npartitions=3) # Works with Series too!
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', offset='D'),
Timestamp('2010-01-03 00:00:00', offset='D'),
Timestamp('2010-01-05 00:00:00', offset='D'),
Timestamp('2010-01-06 00:00:00', offset='D'))
Raises
------
TypeError
If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is
passed in.
See Also
--------
from_array : Construct a dask.DataFrame from an array that has record dtype
from_bcolz : Construct a dask.DataFrame from a bcolz ctable
read_csv : Construct a dask.DataFrame from a CSV file
"""
if isinstance(getattr(data, 'index', None), pd.MultiIndex):
raise NotImplementedError("Dask does not support MultiIndex Dataframes.")
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("Input must be a pandas DataFrame or Series")
if ((npartitions is None) == (chunksize is None)):
raise ValueError('Exactly one of npartitions and chunksize must be specified.')
nrows = len(data)
if chunksize is None:
chunksize = int(ceil(nrows / npartitions))
else:
npartitions = int(ceil(nrows / chunksize))
name = name or ('from_pandas-' + tokenize(data, chunksize))
if not nrows:
return _Frame({(name, 0): data}, name, data, [None, None])
if sort and not data.index.is_monotonic_increasing:
data = data.sort_index(ascending=True)
if sort:
divisions, locations = sorted_division_locations(data.index,
chunksize=chunksize)
else:
locations = list(range(0, nrows, chunksize)) + [len(data)]
divisions = [None] * len(locations)
dsk = dict(((name, i), data.iloc[start: stop])
for i, (start, stop) in enumerate(zip(locations[:-1],
locations[1:])))
return _Frame(dsk, name, data, divisions)
def from_bcolz(x, chunksize=None, categorize=True, index=None, lock=lock,
**kwargs):
""" Read dask Dataframe from bcolz.ctable
Parameters
----------
x : bcolz.ctable
Input data
chunksize : int, optional
The size of blocks to pull out from ctable. Ideally as large as can
comfortably fit in memory
categorize : bool, defaults to True
Automatically categorize all string dtypes
index : string, optional
Column to make the index
lock: bool or Lock
Lock to use when reading or False for no lock (not-thread-safe)
See Also
--------
from_array: more generic function not optimized for bcolz
"""
if lock is True:
lock = Lock()
import dask.array as da
import bcolz
if isinstance(x, (str, unicode)):
x = bcolz.ctable(rootdir=x)
bc_chunklen = max(x[name].chunklen for name in x.names)
if chunksize is None and bc_chunklen > 10000:
chunksize = bc_chunklen
categories = dict()
if categorize:
for name in x.names:
if (np.issubdtype(x.dtype[name], np.string_) or
np.issubdtype(x.dtype[name], np.unicode_) or
np.issubdtype(x.dtype[name], np.object_)):
a = da.from_array(x[name], chunks=(chunksize * len(x.names),))
categories[name] = da.unique(a)
columns = tuple(x.dtype.names)
divisions = tuple(range(0, len(x), chunksize))
divisions = divisions + (len(x) - 1,)
if x.rootdir:
token = tokenize((x.rootdir, os.path.getmtime(x.rootdir)), chunksize,
categorize, index, kwargs)
else:
token = tokenize((id(x), x.shape, x.dtype), chunksize, categorize,
index, kwargs)
new_name = 'from_bcolz-' + token
dsk = dict(((new_name, i),
(dataframe_from_ctable,
x,
(slice(i * chunksize, (i + 1) * chunksize),),
columns, categories, lock))
for i in range(0, int(ceil(len(x) / chunksize))))
meta = dataframe_from_ctable(x, slice(0, 0), columns, categories, lock)
result = DataFrame(dsk, new_name, meta, divisions)
if index:
assert index in x.names
a = da.from_array(x[index], chunks=(chunksize * len(x.names),))
q = np.linspace(0, 100, len(x) // chunksize + 2)
divisions = da.percentile(a, q).compute()
return set_partition(result, index, divisions, **kwargs)
else:
return result
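# Hedged usage sketch (not part of the original module): from_bcolz on a small
# in-memory ctable. Requires the bcolz package; the column data are examples.
def _example_from_bcolz():
    import bcolz
    x = bcolz.ctable([[1, 2, 3, 4], [10., 20., 30., 40.]], names=['a', 'b'])
    # Two partitions of two rows each; no string columns, so no categorization.
    return from_bcolz(x, chunksize=2)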
def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):
""" Get DataFrame from bcolz.ctable
Parameters
----------
x: bcolz.ctable
slc: slice
columns: list of column names or None
>>> import bcolz
>>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])
>>> dataframe_from_ctable(x, slice(1, 3))
a b
1 2 20
2 3 30
>>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])
b
1 20
2 30
>>> dataframe_from_ctable(x, slice(1, 3), columns='b')
1 20
2 30
Name: b, dtype: int...
"""
import bcolz
if columns is None:
columns = x.dtype.names
if isinstance(columns, tuple):
columns = list(columns)
x = x[columns]
if type(slc) is slice:
start = slc.start
stop = slc.stop if slc.stop < len(x) else len(x)
else:
start = slc[0].start
stop = slc[0].stop if slc[0].stop < len(x) else len(x)
idx = pd.Index(range(start, stop))
if lock:
lock.acquire()
try:
if isinstance(x, bcolz.ctable):
chunks = [x[name][slc] for name in columns]
if categories is not None:
chunks = [pd.Categorical.from_codes(
np.searchsorted(categories[name], chunk),
categories[name], True)
if name in categories else chunk
for name, chunk in zip(columns, chunks)]
result = pd.DataFrame(dict(zip(columns, chunks)), columns=columns,
index=idx)
elif isinstance(x, bcolz.carray):
chunk = x[slc]
if categories is not None and columns and columns in categories:
chunk = pd.Categorical.from_codes(
np.searchsorted(categories[columns], chunk),
categories[columns], True)
result = pd.Series(chunk, name=columns, index=idx)
finally:
if lock:
lock.release()
return result
def from_dask_array(x, columns=None):
""" Convert dask Array to dask DataFrame
Converts a 2d array into a DataFrame and a 1d array into a Series.
Parameters
----------
x: da.Array
columns: list or string
list of column names if DataFrame, single string if Series
Examples
--------
>>> import dask.array as da
>>> import dask.dataframe as dd
>>> x = da.ones((4, 2), chunks=(2, 2))
>>> df = dd.io.from_dask_array(x, columns=['a', 'b'])
>>> df.compute()
a b
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
3 1.0 1.0
"""
dummy = _dummy_from_array(x, columns)
name = 'from-dask-array' + tokenize(x, columns)
divisions = [0]
for c in x.chunks[0]:
divisions.append(divisions[-1] + c)
index = [(np.arange, a, b, 1, 'i8') for a, b in
zip(divisions[:-1], divisions[1:])]
divisions[-1] -= 1
if x.ndim == 2:
if len(x.chunks[1]) > 1:
x = x.rechunk({1: x.shape[1]})
dsk = {}
for i, (chunk, ind) in enumerate(zip(x._keys(), index)):
if x.ndim == 2:
chunk = chunk[0]
if isinstance(dummy, pd.Series):
dsk[name, i] = (pd.Series, chunk, ind, x.dtype, dummy.name)
else:
dsk[name, i] = (pd.DataFrame, chunk, ind, dummy.columns)
return _Frame(dsk, name, dummy, divisions)
import os
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import Input
from tensorflow.keras.layers import Conv2D, MaxPooling2D, ReLU, BatchNormalization, Add
from tensorflow.keras.layers import UpSampling2D,Conv2DTranspose
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, LambdaCallback
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.initializers import RandomNormal
import restore
from util import DataLoader, plot_test_images
from losses import psnr3 as psnr
from losses import euclidean, cosine, charbonnier
class CISRDCNN():
def __init__(self,
height_lr=16, width_lr=16, channels=3,
upscaling_factor=4, lr = 1e-4,
stage=None,
colorspace = 'RGB',
fulltrain = False
):
# Low-resolution image dimensions
self.height_lr = height_lr
self.width_lr = width_lr
# High-resolution image dimensions
if upscaling_factor not in [1, 2, 4, 8]:
raise ValueError(
'Upscaling factor must be 1, 2, 4, or 8. You chose {}'.format(upscaling_factor))
self.upscaling_factor = upscaling_factor
self.height_hr = int(self.height_lr * self.upscaling_factor)
self.width_hr = int(self.width_lr * self.upscaling_factor)
# Low-resolution and high-resolution shapes
self.channels = channels
self.colorspace = colorspace
self.stage = stage
self.shape_lr = (self.height_lr, self.width_lr, self.channels)
self.shape_hr = (self.height_hr, self.width_hr, self.channels)
self.loss = "mse"
self.lr = lr
if (stage=='dbcnn'):
print("Compiling DBCNN")
self.dbcnn = self.build_dbcnn()
self.compile_model(self.dbcnn)
if (stage=='uscnn'):
print("Compiling USCNN")
self.dbcnn = self.build_dbcnn()
self.dbcnn.trainable = False
self.compile_model(self.dbcnn)
self.uscnn = self.build_uscnn()
self.compile_model(self.uscnn)
if (stage=='qecnn'):
print("Compiling QECNN")
self.dbcnn = self.build_dbcnn()
self.dbcnn.trainable = False
self.compile_model(self.dbcnn)
self.uscnn = self.build_uscnn()
self.uscnn.trainable = False
self.compile_model(self.uscnn)
self.qecnn = self.build_qecnn()
self.compile_model(self.qecnn)
if (stage=='cisrdcnn'):
print("Compiling CISRDCNN")
self.dbcnn = self.build_dbcnn()
self.dbcnn.trainable = True
self.compile_model(self.dbcnn)
self.uscnn = self.build_uscnn()
self.uscnn.trainable = True
self.compile_model(self.uscnn)
self.qecnn = self.build_qecnn()
self.qecnn.trainable = True
self.compile_model(self.qecnn)
self.cisrdcnn = self.build_cisrdcnn()
self.cisrdcnn.trainable = True
self.compile_model(self.cisrdcnn)
def compile_model(self, model):
"""Compile the DBCNN with appropriate optimizer"""
model.compile(
loss=self.loss,
optimizer= SGD(lr=self.lr, momentum=0.9, decay=1e-6, nesterov=True),# Adam(lr=self.lr,beta_1=0.9, beta_2=0.999),
metrics=[psnr]
)
def build_dbcnn(self,k1=20):
def DBCNN(input):
x=input
for i in range(k1-1):
x = Conv2D(filters= 64, kernel_size = (3,3), strides=1,padding='same')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(filters= self.channels, kernel_size = (3,3), strides=1, padding='same', name='K1')(x)
x = ReLU()(x)
x = Add()([x, input])
return x
inputs = Input(shape=(None, None, self.channels))
x = DBCNN(inputs)
model = Model(inputs=inputs, outputs=x,name="DBCNN")
#logging.debug(model.summary())
return model
def build_uscnn(self,k2=10):
def USCNN(input):
x = input
for i in range(k2-1):
x = Conv2D(filters= 64, kernel_size = (3,3), strides=1,padding='same')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = UpSampling2D(size=(self.upscaling_factor, self.upscaling_factor),interpolation="nearest")(x)
x = Conv2D(filters= self.channels, kernel_size = (9,9), strides=1,padding='same')(x)
x = ReLU()(x)
return x
inputs = Input(shape=(None, None, self.channels))
x = self.dbcnn(inputs)
x = USCNN(x)
model = Model(inputs=inputs, outputs=x, name="USCNN")
#logging.debug(model.summary())
return model
def build_qecnn(self,k3=20):
def QECNN(input):
x=input
for i in range(k3-1):
x = Conv2D(filters= 64, kernel_size = (3,3), strides=1,padding='same')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(filters= self.channels, kernel_size = (3,3), strides=1, padding='same', name='K3')(x)
x = ReLU()(x)
x = Add()([x, input])
return x
z = Input(shape=(None, None, self.channels))
x = self.uscnn(z)
hr = QECNN(x)
model = Model(inputs=z, outputs=hr,name="QECNN")
#logging.debug(model.summary())
return model
def build_cisrdcnn(self):
z = Input(shape=(None, None, self.channels))
hr = self.qecnn(z)
model = Model(inputs=z, outputs=hr,name="CISRDCNN")
#logging.debug(model.summary())
return model
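# Hedged usage sketch (not part of the original file): how the staged builders
# above are typically driven. Constructor arguments and the call below are
# example values only.
#
#   model = CISRDCNN(height_lr=16, width_lr=16, channels=3,
#                    upscaling_factor=4, lr=1e-4, stage='dbcnn')
#   model.train_dbcnn(epochs=10, batch_size=8)
#
# Later stages ('uscnn', 'qecnn', 'cisrdcnn') rebuild the earlier sub-networks
# and freeze or unfreeze them before compiling, as set up in __init__ above.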
def train_dbcnn(self,
epochs=50,
batch_size=8,
steps_per_epoch=5,
steps_per_validation=5,
crops_per_image=4,
print_frequency=5,
log_tensorboard_update_freq=10,
workers=1,
max_queue_size=5,
model_name='DBCNN',
media_type='i',
datapath_train='../../../videos_harmonic/MYANMAR_2160p/train/',
datapath_validation='../../../videos_harmonic/MYANMAR_2160p/validation/',
datapath_test='../../../videos_harmonic/MYANMAR_2160p/test/',
log_weight_path='../model/',
log_tensorboard_path='../logs/',
log_test_path='../test/',
qf=30
):
# Create data loaders
train_loader = DataLoader(
datapath_train, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
validation_loader = None
if datapath_validation is not None:
validation_loader = DataLoader(
datapath_validation, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
test_loader = None
if datapath_test is not None:
test_loader = DataLoader(
datapath_test, 1,
self.height_hr, self.width_hr,
self.upscaling_factor,
1,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
# Callback: tensorboard
callbacks = []
if log_tensorboard_path:
tensorboard = TensorBoard(
log_dir=os.path.join(log_tensorboard_path, model_name),
histogram_freq=0,
write_graph=True,
update_freq=log_tensorboard_update_freq
)
callbacks.append(tensorboard)
else:
print(">> Not logging to tensorboard since no log_tensorboard_path is set")
# Callback: Stop training when a monitored quantity has stopped improving
earlystopping = EarlyStopping(
monitor='val_loss',
patience=30, verbose=1,
restore_best_weights=True )
callbacks.append(earlystopping)
# Callback: Reduce lr when a monitored quantity has stopped improving
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
patience=10, min_lr=1e-6,verbose=1)
callbacks.append(reduce_lr)
# Callback: save weights after each epoch
modelcheckpoint = ModelCheckpoint(
os.path.join(log_weight_path, model_name + '_{}X.tf'.format(self.upscaling_factor)),
monitor='val_loss',
save_best_only=True,
save_weights_only=True)
callbacks.append(modelcheckpoint)
# Callback: test images plotting
if datapath_test is not None:
testplotting = LambdaCallback(
on_epoch_end=lambda epoch, logs: None if ((epoch+1) % print_frequency != 0 ) else plot_test_images(
self.dbcnn,
test_loader,
datapath_test,
log_test_path,
epoch+1,
name=model_name,
channels=self.channels,
colorspace=self.colorspace))
callbacks.append(testplotting)
#callbacks.append(TQDMCallback())
self.dbcnn.fit(
train_loader,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=validation_loader,
validation_steps=steps_per_validation,
callbacks=callbacks,
shuffle=True,
use_multiprocessing=False,
workers=workers
)
def train_uscnn(self,
epochs=50,
batch_size=8,
steps_per_epoch=5,
steps_per_validation=5,
crops_per_image=4,
print_frequency=5,
log_tensorboard_update_freq=10,
workers=1,
max_queue_size=5,
model_name='CISRDCNN',
media_type='i',
datapath_train='../../../videos_harmonic/MYANMAR_2160p/train/',
datapath_validation='../../../videos_harmonic/MYANMAR_2160p/validation/',
datapath_test='../../../videos_harmonic/MYANMAR_2160p/test/',
log_weight_path='../model/',
log_tensorboard_path='../logs/',
log_test_path='../test/',
qf=30
):
# Create data loaders
train_loader = DataLoader(
datapath_train, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
validation_loader = None
if datapath_validation is not None:
validation_loader = DataLoader(
datapath_validation, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
test_loader = None
if datapath_test is not None:
test_loader = DataLoader(
datapath_test, 1,
self.height_hr, self.width_hr,
self.upscaling_factor,
1,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
# Callback: tensorboard
callbacks = []
if log_tensorboard_path:
tensorboard = TensorBoard(
log_dir=os.path.join(log_tensorboard_path, model_name),
histogram_freq=0,
write_graph=True,
update_freq=log_tensorboard_update_freq
)
callbacks.append(tensorboard)
else:
print(">> Not logging to tensorboard since no log_tensorboard_path is set")
# Callback: Stop training when a monitored quantity has stopped improving
earlystopping = EarlyStopping(
monitor='val_loss',
patience=30, verbose=1,
restore_best_weights=True )
callbacks.append(earlystopping)
# Callback: Reduce lr when a monitored quantity has stopped improving
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
patience=10, min_lr=1e-6,verbose=1)
callbacks.append(reduce_lr)
# Callback: save weights after each epoch
modelcheckpoint = ModelCheckpoint(
os.path.join(log_weight_path, model_name + '_{}X.tf'.format(self.upscaling_factor)),
monitor='val_loss',
save_best_only=True,
save_weights_only=True)
callbacks.append(modelcheckpoint)
# Callback: test images plotting
if datapath_test is not None:
testplotting = LambdaCallback(
on_epoch_end=lambda epoch, logs: None if ((epoch+1) % print_frequency != 0 ) else plot_test_images(
self.uscnn,
test_loader,
datapath_test,
log_test_path,
epoch+1,
name=model_name,
channels=self.channels,
colorspace=self.colorspace))
callbacks.append(testplotting)
#callbacks.append(TQDMCallback())
self.uscnn.fit(
train_loader,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=validation_loader,
validation_steps=steps_per_validation,
callbacks=callbacks,
shuffle=True,
use_multiprocessing=False,
workers=workers
)
def train_qecnn(self,
epochs=50,
batch_size=8,
steps_per_epoch=5,
steps_per_validation=5,
crops_per_image=4,
print_frequency=5,
log_tensorboard_update_freq=10,
workers=1,
max_queue_size=5,
model_name='CISRDCNN',
media_type='i',
datapath_train='../../../videos_harmonic/MYANMAR_2160p/train/',
datapath_validation='../../../videos_harmonic/MYANMAR_2160p/validation/',
datapath_test='../../../videos_harmonic/MYANMAR_2160p/test/',
log_weight_path='../model/',
log_tensorboard_path='../logs/',
log_test_path='../test/',
qf=30
):
# Create data loaders
train_loader = DataLoader(
datapath_train, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
validation_loader = None
if datapath_validation is not None:
validation_loader = DataLoader(
datapath_validation, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
test_loader = None
if datapath_test is not None:
test_loader = DataLoader(
datapath_test, 1,
self.height_hr, self.width_hr,
self.upscaling_factor,
1,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
# Callback: tensorboard
callbacks = []
if log_tensorboard_path:
tensorboard = TensorBoard(
log_dir=os.path.join(log_tensorboard_path, model_name),
histogram_freq=0,
write_graph=True,
update_freq=log_tensorboard_update_freq
)
callbacks.append(tensorboard)
else:
print(">> Not logging to tensorboard since no log_tensorboard_path is set")
# Callback: Stop training when a monitored quantity has stopped improving
earlystopping = EarlyStopping(
monitor='val_loss',
patience=30, verbose=1,
restore_best_weights=True )
callbacks.append(earlystopping)
# Callback: Reduce lr when a monitored quantity has stopped improving
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
patience=10, min_lr=1e-6,verbose=1)
callbacks.append(reduce_lr)
# Callback: save weights after each epoch
modelcheckpoint = ModelCheckpoint(
os.path.join(log_weight_path, model_name + '_{}X.tf'.format(self.upscaling_factor)),
monitor='val_loss',
save_best_only=True,
save_weights_only=True)
callbacks.append(modelcheckpoint)
# Callback: test images plotting
if datapath_test is not None:
testplotting = LambdaCallback(
on_epoch_end=lambda epoch, logs: None if ((epoch+1) % print_frequency != 0 ) else plot_test_images(
self.qecnn,
test_loader,
datapath_test,
log_test_path,
epoch+1,
name=model_name,
channels=self.channels,
colorspace=self.colorspace))
callbacks.append(testplotting)
#callbacks.append(TQDMCallback())
self.qecnn.fit(
train_loader,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=validation_loader,
validation_steps=steps_per_validation,
callbacks=callbacks,
shuffle=True,
use_multiprocessing=False,
workers=workers
)
def train_cisrdcnn(self,
epochs=50,
batch_size=8,
steps_per_epoch=5,
steps_per_validation=5,
crops_per_image=4,
print_frequency=5,
log_tensorboard_update_freq=10,
workers=1,
max_queue_size=5,
model_name='CISRDCNN',
media_type='i',
datapath_train='../../../videos_harmonic/MYANMAR_2160p/train/',
datapath_validation='../../../videos_harmonic/MYANMAR_2160p/validation/',
datapath_test='../../../videos_harmonic/MYANMAR_2160p/test/',
log_weight_path='../model/',
log_tensorboard_path='../logs/',
log_test_path='../test/',
qf=30
):
# Create data loaders
train_loader = DataLoader(
datapath_train, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
validation_loader = None
if datapath_validation is not None:
validation_loader = DataLoader(
datapath_validation, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
test_loader = None
if datapath_test is not None:
test_loader = DataLoader(
datapath_test, 1,
self.height_hr, self.width_hr,
self.upscaling_factor,
1,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
# Callback: tensorboard
callbacks = []
if log_tensorboard_path:
tensorboard = TensorBoard(
log_dir=os.path.join(log_tensorboard_path, model_name),
histogram_freq=0,
write_graph=True,
update_freq=log_tensorboard_update_freq
)
callbacks.append(tensorboard)
else:
print(">> Not logging to tensorboard since no log_tensorboard_path is set")
# Callback: Stop training when a monitored quantity has stopped improving
#!/usr/bin/env python
#
# mergetrees.py: routines that create merge scenarios
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import shutil, sys, re, os
import time
# Our testing module
import main, wc, verify, actions, testcase
from prop_tests import binary_mime_type_on_text_file_warning
# (abbreviation)
Item = wc.StateItem
Skip = testcase.Skip_deco
SkipUnless = testcase.SkipUnless_deco
XFail = testcase.XFail_deco
Issues = testcase.Issues_deco
Issue = testcase.Issue_deco
Wimp = testcase.Wimp_deco
exp_noop_up_out = actions.expected_noop_update_output
from svntest.main import SVN_PROP_MERGEINFO
def expected_merge_output(rev_ranges, additional_lines=[], foreign=False,
elides=False, two_url=False, target=None,
text_conflicts=0, prop_conflicts=0, tree_conflicts=0,
text_resolved=0, prop_resolved=0, tree_resolved=0,
skipped_paths=0):
"""Generate an (inefficient) regex representing the expected merge
output and mergeinfo notifications from REV_RANGES and ADDITIONAL_LINES.
REV_RANGES is a list of revision ranges for which mergeinfo is being
recorded. Each range is of the form [start, end] (where both START and
END are inclusive, unlike in '-rX:Y') or the form [single_rev] (which is
like '-c SINGLE_REV'). If REV_RANGES is None then only the standard
notification for a 3-way merge is expected.
ADDITIONAL_LINES is a list of strings to match the other lines of output;
these are basically regular expressions except that backslashes will be
escaped herein. If ADDITIONAL_LINES is a single string, it is interpreted
the same as a list containing that string.
If ELIDES is true, add to the regex an expression representing elision
notification. If TWO_URL is true, tweak the regex to expect the
appropriate mergeinfo notification for a 3-way merge.
TARGET is the local path to the target, as it should appear in
notifications; if None, it is not checked.
TEXT_CONFLICTS, PROP_CONFLICTS, TREE_CONFLICTS and SKIPPED_PATHS specify
the number of each kind of conflict to expect.
"""
if rev_ranges is None:
lines = [main.merge_notify_line(None, None, False, foreign)]
else:
lines = []
for rng in rev_ranges:
start_rev = rng[0]
if len(rng) > 1:
end_rev = rng[1]
else:
end_rev = None
lines += [main.merge_notify_line(start_rev, end_rev,
True, foreign, target)]
lines += [main.mergeinfo_notify_line(start_rev, end_rev, target)]
if (elides):
lines += ["--- Eliding mergeinfo from .*\n"]
if (two_url):
lines += ["--- Recording mergeinfo for merge between repository URLs .*\n"]
# Address "The Backslash Plague"
#
# If ADDITIONAL_LINES are present there are possibly paths in it with
# multiple components and on Windows these components are separated with
# '\'. These need to be escaped properly in the regexp for the match to
# work correctly. See http://aspn.activestate.com/ASPN/docs/ActivePython
# /2.2/howto/regex/regex.html#SECTION000420000000000000000.
if isinstance(additional_lines, str):
additional_lines = [additional_lines]
if sys.platform == 'win32':
additional_lines = [line.replace("\\", "\\\\") for line in additional_lines]
lines += additional_lines
lines += main.summary_of_conflicts(
text_conflicts, prop_conflicts, tree_conflicts,
text_resolved, prop_resolved, tree_resolved,
skipped_paths,
as_regex=True)
return "|".join(lines)
def check_mergeinfo_recursively(root_path, subpaths_mergeinfo):
"""Check that the mergeinfo properties on and under ROOT_PATH are those in
SUBPATHS_MERGEINFO, a {path: mergeinfo-prop-val} dictionary."""
expected = verify.UnorderedOutput(
[path + ' - ' + subpaths_mergeinfo[path] + '\n'
for path in subpaths_mergeinfo])
actions.run_and_verify_svn(expected, [],
'propget', '-R', SVN_PROP_MERGEINFO,
root_path)
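# Hedged usage sketch (not part of the original module): asserting that only the
# branch root carries mergeinfo after a merge; the path and property value are
# example data.
def _example_check_mergeinfo(sbox):
    check_mergeinfo_recursively(sbox.ospath('A_COPY'),
                                {sbox.ospath('A_COPY'): '/A:2-3'})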
######################################################################
#----------------------------------------------------------------------
def set_up_dir_replace(sbox):
"""Set up the working copy for directory replace tests, creating
directory 'A/B/F/foo' with files 'new file' and 'new file2' within
it (r2), and merging 'foo' onto 'C' (r3), then deleting 'A/B/F/foo'
(r4)."""
sbox.build()
wc_dir = sbox.wc_dir
C_path = sbox.ospath('A/C')
F_path = sbox.ospath('A/B/F')
F_url = sbox.repo_url + '/A/B/F'
foo_path = os.path.join(F_path, 'foo')
new_file = os.path.join(foo_path, "new file")
new_file2 = os.path.join(foo_path, "new file 2")
# Make directory foo in F, and add some files within it.
actions.run_and_verify_svn(None, [], 'mkdir', foo_path)
main.file_append(new_file, "Initial text in new file.\n")
main.file_append(new_file2, "Initial text in new file 2.\n")
main.run_svn(None, "add", new_file)
main.run_svn(None, "add", new_file2)
# Commit all the new content, creating r2.
expected_output = wc.State(wc_dir, {
'A/B/F/foo' : Item(verb='Adding'),
'A/B/F/foo/new file' : Item(verb='Adding'),
'A/B/F/foo/new file 2' : Item(verb='Adding'),
})
expected_status = actions.get_virginal_state(wc_dir, 1)
expected_status.add({
'A/B/F/foo' : Item(status=' ', wc_rev=2),
'A/B/F/foo/new file' : Item(status=' ', wc_rev=2),
'A/B/F/foo/new file 2' : Item(status=' ', wc_rev=2),
})
actions.run_and_verify_commit(wc_dir, expected_output, expected_status)
# Merge foo onto C
expected_output = wc.State(C_path, {
'foo' : Item(status='A '),
'foo/new file' : Item(status='A '),
'foo/new file 2' : Item(status='A '),
})
expected_mergeinfo_output = wc.State(C_path, {
'' : Item(status=' U'),
})
expected_elision_output = wc.State(C_path, {
})
expected_disk = wc.State('', {
'' : Item(props={SVN_PROP_MERGEINFO : '/A/B/F:2'}),
'foo' : Item(),
'foo/new file' : Item("Initial text in new file.\n"),
'foo/new file 2' : Item("Initial text in new file 2.\n"),
})
expected_status = wc.State(C_path, {
'' : Item(status=' M', wc_rev=1),
'foo' : Item(status='A ', wc_rev='-', copied='+'),
'foo/new file' : Item(status=' ', wc_rev='-', copied='+'),
'foo/new file 2' : Item(status=' ', wc_rev='-', copied='+'),
})
expected_skip = wc.State(C_path, { })
actions.run_and_verify_merge(C_path, '1', '2', F_url, None,
expected_output,
expected_mergeinfo_output,
expected_elision_output,
expected_disk,
expected_status,
expected_skip,
check_props=True)
# Commit merge of foo onto C, creating r3.
expected_output = wc.State(wc_dir, {
'A/C' : Item(verb='Sending'),
'A/C/foo' : Item(verb='Adding'),
})
expected_status = actions.get_virginal_state(wc_dir, 1)
expected_status.add({
'A/B/F/foo' : Item(status=' ', wc_rev=2),
'A/C' : Item(status=' ', wc_rev=3),
'A/B/F/foo/new file' : Item(status=' ', wc_rev=2),
'A/B/F/foo/new file 2' : Item(status=' ', wc_rev=2),
'A/C/foo' : Item(status=' ', wc_rev=3),
'A/C/foo/new file' : Item(status=' ', wc_rev=3),
'A/C/foo/new file 2' : Item(status=' ', wc_rev=3),
})
actions.run_and_verify_commit(wc_dir, expected_output, expected_status)
# Delete foo on F, creating r4.
actions.run_and_verify_svn(None, [], 'rm', foo_path)
expected_output = wc.State(wc_dir, {
'A/B/F/foo' : Item(verb='Deleting'),
})
expected_status = actions.get_virginal_state(wc_dir, 1)
expected_status.add({
'A/C' : Item(status=' ', wc_rev=3),
'A/C/foo' : Item(status=' ', wc_rev=3),
'A/C/foo/new file' : Item(status=' ', wc_rev=3),
'A/C/foo/new file 2' : Item(status=' ', wc_rev=3),
})
actions.run_and_verify_commit(wc_dir, expected_output, expected_status)
#----------------------------------------------------------------------
def set_up_branch(sbox, branch_only = False, nbr_of_branches = 1):
'''Starting with standard greek tree, copy 'A' NBR_OF_BRANCHES times
to A_COPY, A_COPY_2, A_COPY_3, and so on. Then, unless BRANCH_ONLY is
true, make four modifications (setting file contents to "New content")
under A:
r(2 + NBR_OF_BRANCHES) - A/D/H/psi
r(3 + NBR_OF_BRANCHES) - A/D/G/rho
r(4 + NBR_OF_BRANCHES) - A/B/E/beta
r(5 + NBR_OF_BRANCHES) - A/D/H/omega
Return (expected_disk, expected_status).'''
# With the default parameters, the branching looks like this:
#
# A -1-----3-4-5-6--
# \
# A_COPY 2-----------
wc_dir = sbox.wc_dir
expected_status = actions.get_virginal_state(wc_dir, 1)
expected_disk = main.greek_state.copy()
def copy_A(dest_name, rev):
expected = verify.UnorderedOutput(
["A " + os.path.join(wc_dir, dest_name, "B") + "\n",
"A " + os.path.join(wc_dir, dest_name, "B", "lambda") + "\n",
"A " + os.path.join(wc_dir, dest_name, "B", "E") + "\n",
"A " + os.path.join(wc_dir, dest_name, "B", "E", "alpha") + "\n",
"A " + os.path.join(wc_dir, dest_name, "B", "E", "beta") + "\n",
"A " + os.path.join(wc_dir, dest_name, "B", "F") + "\n",
"A " + os.path.join(wc_dir, dest_name, "mu") + "\n",
"A " + os.path.join(wc_dir, dest_name, "C") + "\n",
"A " + os.path.join(wc_dir, dest_name, "D") + "\n",
"A " + os.path.join(wc_dir, dest_name, "D", "gamma") + "\n",
"A " + os.path.join(wc_dir, dest_name, "D", "G") + "\n",
"A " + os.path.join(wc_dir, dest_name, "D", "G", "pi") + "\n",
"A " + os.path.join(wc_dir, dest_name, "D", "G", "rho") + "\n",
"A " + os.path.join(wc_dir, dest_name, "D", "G", "tau") + "\n",
"A " + os.path.join(wc_dir, dest_name, "D", "H") + "\n",
"A " + os.path.join(wc_dir, dest_name, "D", "H", "chi") + "\n",
"A " + os.path.join(wc_dir, dest_name, "D", "H", "omega") + "\n",
"A " + os.path.join(wc_dir, dest_name, "D", "H", "psi") + "\n",
"Checked out revision " + str(rev - 1) + ".\n",
"A " + os.path.join(wc_dir, dest_name) + "\n"])
expected_status.add({
dest_name + "/B" : Item(status=' ', wc_rev=rev),
dest_name + "/B/lambda" : Item(status=' ', wc_rev=rev),
dest_name + "/B/E" : Item(status=' ', wc_rev=rev),
dest_name + "/B/E/alpha" : Item(status=' ', wc_rev=rev),
dest_name + "/B/E/beta" : Item(status=' ', wc_rev=rev),
dest_name + "/B/F" : Item(status=' ', wc_rev=rev),
dest_name + "/mu" : Item(status=' ', wc_rev=rev),
dest_name + "/C" : Item(status=' ', |