input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
(\d+)", osinfo)
if match and match.group(1):
return (match.group(1).split(" ")[0],
match.group(2).split(".")[0])
f_path = self._root_dir + "/etc/lsb-release"
if os.path.exists(f_path):
distribution = ""
version = ""
osinfo = FileUtil(f_path).getdata()
match = re.search(r"DISTRIB_ID=(.+)(\n|$)",
osinfo, re.MULTILINE)
if match:
distribution = match.group(1).split(" ")[0]
match = re.search(r"DISTRIB_RELEASE=(.+)(\n|$)",
osinfo, re.MULTILINE)
if match:
version = match.group(1).split(".")[0]
if distribution and version:
return (distribution, version)
f_path = self._root_dir + "/etc/os-release"
if os.path.exists(f_path):
distribution = ""
version = ""
osinfo = FileUtil(f_path).getdata()
match = re.search(r"NAME=\"?(.+)\"?(\n|$)",
osinfo, re.MULTILINE)
if match:
distribution = match.group(1).split(" ")[0]
match = re.search(r"VERSION_ID=\"?(.+)\"?(\n|$)",
osinfo, re.MULTILINE)
if match:
version = match.group(1).split(".")[0]
if distribution and version:
return (distribution, version)
return ("", "")
def osversion(self):
"""Get guest operating system"""
if self.osdistribution()[0]:
return "linux"
return ""
class KeyStore(object):
    """Basic storage for authentication tokens to be used
    with dockerhub and private repositories
    """

    def __init__(self, keystore_file):
        """Bind the keystore to its backing JSON file.

        :param keystore_file: path of the file holding the credentials
        """
        self.keystore_file = keystore_file
        self.credential = dict()

    def _verify_keystore(self):
        """Verify ownership and permissions of the keystore.

        Raises IOError when the keystore file or its directory is not
        owned by the current user, or when the file is readable by
        group/others.
        """
        keystore_uid = FileUtil(self.keystore_file).uid()
        # uid -1 means the keystore file does not exist yet, which is fine
        if keystore_uid != -1 and keystore_uid != Config.uid:
            raise IOError("not owner of keystore: %s" %
                          (self.keystore_file))
        keystore_dir = os.path.dirname(self.keystore_file)
        if FileUtil(keystore_dir).uid() != Config.uid:
            raise IOError("keystore dir not found or not owner: %s" %
                          (keystore_dir))
        if (keystore_uid != -1 and
                (os.stat(self.keystore_file).st_mode & 0o077)):
            raise IOError("keystore is accessible to group or others: %s" %
                          (self.keystore_file))

    def _read_all(self):
        """Read all credentials from the keystore file.

        :return: dict of credentials, empty dict on any error
        """
        try:
            with open(self.keystore_file, "r") as filep:
                return json.load(filep)
        except (IOError, OSError, ValueError):
            return dict()

    def _shred(self):
        """Overwrite the keystore content with blanks.

        :return: True on success, False otherwise
        """
        self._verify_keystore()
        try:
            size = os.stat(self.keystore_file).st_size
            with open(self.keystore_file, "rb+") as filep:
                # the file is open in binary mode, so the filler must be
                # bytes; writing a str here raises TypeError on Python 3
                filep.write(b" " * size)
        except (IOError, OSError):
            return False
        return True

    def _write_all(self, auths):
        """Write all credentials to the keystore file as JSON.

        The file is created with mode 0600 via a temporary umask.

        :param auths: dict of credentials to persist
        :return: True on success, False otherwise
        """
        self._verify_keystore()
        oldmask = None
        try:
            oldmask = os.umask(0o77)
            with open(self.keystore_file, "w") as filep:
                json.dump(auths, filep)
        except (IOError, OSError):
            return False
        finally:
            # always restore the process umask, on success or failure
            if oldmask is not None:
                os.umask(oldmask)
        return True

    def get(self, url):
        """Get the credential stored for a given registry url.

        :return: the "auth" token, or "" when not found
        """
        auths = self._read_all()
        try:
            self.credential = auths[url]
            return self.credential["auth"]
        except KeyError:
            pass
        return ""

    def put(self, url, credential, email):
        """Store a credential for a given registry url.

        :return: True on success, False otherwise
        """
        if not credential:
            return False
        auths = self._read_all()
        auths[url] = {"auth": credential, "email": email, }
        self._shred()
        return self._write_all(auths)

    def delete(self, url):
        """Delete the credential for a given registry url.

        :return: True on success, False when the url is unknown
        """
        self._verify_keystore()
        auths = self._read_all()
        try:
            del auths[url]
        except KeyError:
            return False
        self._shred()
        return self._write_all(auths)

    def erase(self):
        """Shred and delete the whole keystore file.

        :return: True on success, False otherwise
        """
        self._verify_keystore()
        try:
            self._shred()
            os.unlink(self.keystore_file)
        except (IOError, OSError):
            return False
        return True
class Msg(object):
    """Filtered message output to stdout and stderr.  A verbosity
    level selects which messages are displayed, and file objects are
    kept so that output of child processes can optionally be
    discarded by redirecting it to /dev/null.
    """

    NIL = -1
    ERR = 0
    MSG = 1
    WAR = 2
    INF = 3
    VER = 4
    DBG = 5
    DEF = INF
    level = DEF
    previous = DEF
    nullfp = None
    chlderr = sys.stderr
    chldout = sys.stdout
    chldnul = sys.stderr

    def __init__(self, new_level=None):
        """Set the verbosity level and open /dev/null so child
        process output and errors can be obfuscated in subprocess
        calls.
        """
        if new_level is not None:
            Msg.level = new_level
        try:
            if Msg.nullfp is None:
                Msg.nullfp = open("/dev/null", "w")
        except (IOError, OSError):
            # /dev/null unavailable: fall back to the real streams
            Msg.chlderr = sys.stderr
            Msg.chldout = sys.stdout
            Msg.chldnul = sys.stderr
        else:
            Msg.chlderr = Msg.nullfp
            Msg.chldout = Msg.nullfp
            Msg.chldnul = Msg.nullfp

    def setlevel(self, new_level=None):
        """Change the verbosity level; with no argument restore the
        previously saved level.  Returns the level saved before the
        current change.
        """
        if new_level is None:
            Msg.level = Msg.previous
        else:
            Msg.previous, Msg.level = Msg.level, new_level
        debug_on = Msg.level >= Msg.DBG
        Msg.chlderr = sys.stderr if debug_on else Msg.nullfp
        Msg.chldout = sys.stdout if debug_on else Msg.nullfp
        return Msg.previous

    def out(self, *args, **kwargs):
        """Write the arguments to stdout when the message level
        (keyword "l", default MSG) is within the current verbosity.
        """
        if kwargs.get("l", Msg.MSG) <= Msg.level:
            sys.stdout.write(" ".join(str(arg) for arg in args) + '\n')

    def err(self, *args, **kwargs):
        """Write the arguments to stderr when the message level
        (keyword "l", default ERR) is within the current verbosity.
        """
        if kwargs.get("l", Msg.ERR) <= Msg.level:
            sys.stderr.write(" ".join(str(arg) for arg in args) + '\n')
class Unique(object):
    """Produce unique identifiers for container names, temporary
    file names and other purposes.  When the uuid module is not
    usable it falls back to the random generator.
    """

    def __init__(self):
        self.string_set = "abcdef"
        self.def_name = "udocker"

    def _rnd(self, size):
        """Return a random hex-looking string with *size* characters"""
        population = self.string_set * 64 + string.digits * 64
        return "".join(random.sample(population, size))

    def uuid(self, name):
        """Return a universally unique identifier string"""
        if not name:
            name = self.def_name
        try:
            return str(uuid.uuid3(uuid.uuid4(), str(name) + str(time.time())))
        except (NameError, AttributeError):
            # uuid module missing: emulate the 8-4-4-4-12 layout
            parts = (self._rnd(8), self._rnd(4), self._rnd(4),
                     self._rnd(4), self._rnd(12))
            return "%s-%s-%s-%s-%s" % parts

    def imagename(self):
        """Return a random container image name"""
        return self._rnd(16)

    def layer_v1(self):
        """Return a random container layer name"""
        return self._rnd(64)

    def filename(self, filename):
        """Return a unique temporary file name derived from *filename*"""
        prefix = "%s-%s-" % (self.def_name, str(os.getpid()))
        try:
            return (prefix +
                    str(uuid.uuid3(uuid.uuid4(), str(time.time()))) +
                    "-" + str(filename))
        except (NameError, AttributeError):
            return prefix + self.uuid(filename) + "-" + str(filename)
class ChkSUM(object):
    """File checksumming, preferring hashlib and falling back to the
    openssl command line tool when hashlib is unavailable.
    """

    def __init__(self):
        self._sha256_call = self._hashlib_sha256
        try:
            hashlib.sha256()
        except NameError:
            # hashlib not importable: use the openssl CLI instead
            self._sha256_call = self._openssl_sha256

    def _hashlib_sha256(self, filename):
        """sha256 via hashlib, reading in 4 KiB chunks.
        Returns "" when the file cannot be read.
        """
        digest = hashlib.sha256()
        try:
            with open(filename, "rb") as filep:
                chunk = filep.read(4096)
                while chunk:
                    digest.update(chunk)
                    chunk = filep.read(4096)
        except (IOError, OSError):
            return ""
        return digest.hexdigest()

    def _openssl_sha256(self, filename):
        """sha256 via the openssl CLI.  Returns "" on failure."""
        output = Uprocess().get_output("openssl dgst -hex -r -sha256 %s"
                                       % (filename))
        if output is None:
            return ""
        match = re.match(r"^(\S+) ", output)
        return match.group(1) if match else ""

    def sha256(self, filename):
        """Return the sha256 hexdigest of *filename* using the
        implementation selected in __init__.
        """
        return self._sha256_call(filename)
class FileUtil(object):
"""Some utilities to manipulate files"""
tmptrash = dict()
safe_prefixes = []
orig_umask = None
def __init__(self, filename=None):
self._tmpdir = Config.tmpdir
if filename == "-":
self.filename = "-"
self.basename = "-"
return
try:
self.filename = os.path.abspath(filename)
self.basename = os.path.basename(self.filename)
except (AttributeError, TypeError):
self.filename = filename
self.basename = filename
self._register_prefix(self._tmpdir)
def _register_prefix(self, prefix):
"""Register directory prefixes where remove() is allowed"""
if prefix not in FileUtil.safe_prefixes:
filename = prefix
if os.path.isdir(filename) and not filename.endswith("/"):
FileUtil.safe_prefixes.append(filename + "/")
FileUtil.safe_prefixes.append(os.path.realpath(filename) + "/")
else:
FileUtil.safe_prefixes.append(filename)
FileUtil.safe_prefixes.append(os.path.realpath(filename))
def register_prefix(self):
"""Register self.filename as prefix where remove() is allowed"""
self._register_prefix(self.filename)
def umask(self, new_umask=None):
"""Set umask"""
if new_umask is not None:
try:
old_umask = os.umask(new_umask)
except (TypeError, ValueError):
return False
if FileUtil.orig_umask is None:
FileUtil.orig_umask = old_umask
else:
try:
os.umask(FileUtil.orig_umask)
except (TypeError, ValueError):
return False
return True
def mktmp(self):
"""Generate a temporary filename"""
while True:
tmp_file = self._tmpdir + "/" + \
Unique().filename(self.basename)
if not os.path.exists(tmp_file):
FileUtil.tmptrash[tmp_file] = True
self.filename = tmp_file
return tmp_file
def mkdir(self):
"""Create directory"""
try:
os.makedirs(self.filename)
except (OSError, IOError, AttributeError):
return False
return True
def mktmpdir(self):
"""Create temporary directory"""
dirname = self.mktmp()
if FileUtil(dirname).mkdir():
return dirname
return None
def uid(self):
"""Get the file owner user id"""
try:
return os.stat(self.filename).st_uid
except (IOError, OSError):
return -1
def _is_safe_prefix(self, filename):
"""Check if file prefix falls under valid prefixes"""
for safe_prefix in FileUtil.safe_prefixes:
if filename.startswith(safe_prefix):
return True
return False
    def remove(self, force=False):
        """Delete the file or directory tree at self.filename.
        Deletion is refused when the path is suspiciously short, not
        owned by the current user, or (unless force is True) outside
        the registered safe prefixes.  Returns True on success.
        """
        if not os.path.exists(self.filename):
            # nothing to delete; still drop the tmptrash entry below
            pass
        elif self.filename.count("/") < 2:
            # guard against deleting "/", "/tmp" and similar short paths
            Msg().err("Error: delete pathname too short: ", self.filename)
            return False
        elif self.uid() != Config.uid:
            Msg().err("Error: delete not owner: ", self.filename)
            return False
        elif (not force) and (not self._is_safe_prefix(self.filename)):
            Msg().err("Error: delete outside of directory tree: ",
                      self.filename)
            return False
        elif os.path.isfile(self.filename) or os.path.islink(self.filename):
            try:
                os.remove(self.filename)
            except (IOError, OSError):
                Msg().err("Error: deleting file: ", self.filename)
                return False
        elif os.path.isdir(self.filename):
            # remove the tree; if that fails, force it writable and retry
            cmd = "/bin/rm -Rf %s || /bin/chmod -R u+w %s && /bin/rm -Rf %s" % \
                (self.filename, self.filename, self.filename)
            if subprocess.call(cmd, stderr=Msg.chlderr, shell=True,
                               close_fds=True, env=None):
                Msg().err("Error: deleting directory: ", self.filename)
                return False
        if self.filename in dict(FileUtil.tmptrash):
            # forget the entry if this file was a registered temporary
            del FileUtil.tmptrash[self.filename]
        return True
def verify_tar(self):
"""Verify a tar file"""
if not os.path.isfile(self.filename):
return False
else:
cmd = "tar t"
if Msg.level >= Msg.VER:
cmd += "v"
cmd += "f " + self.filename
if subprocess.call(cmd, shell=True, stderr=Msg.chlderr,
stdout=Msg.chldnul, close_fds=True):
return False
return True
def cleanup(self):
"""Delete all temporary files"""
tmptrash_copy = dict(FileUtil.tmptrash)
for filename in tmptrash_copy:
FileUtil(filename).remove()
def isdir(self):
"""Is filename a directory"""
try:
if os.path.isdir(self.filename):
return True
except (IOError, OSError, TypeError):
pass
return False
def size(self):
"""File size in bytes"""
try:
fstat = os.stat(self.filename)
return fstat.st_size
| |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.1'
# jupytext_version: 0.8.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.6.7
# varInspector:
# cols:
# lenName: 16
# lenType: 16
# lenVar: 40
# kernels_config:
# python:
# delete_cmd_postfix: ''
# delete_cmd_prefix: 'del '
# library: var_list.py
# varRefreshCmd: print(var_dic_list())
# r:
# delete_cmd_postfix: ') '
# delete_cmd_prefix: rm(
# library: var_list.r
# varRefreshCmd: 'cat(var_dic_list()) '
# types_to_exclude:
# - module
# - function
# - builtin_function_or_method
# - instance
# - _Feature
# window_display: false
# ---
# %% [markdown]
# # [Krusell and Smith (1998)](https://www.journals.uchicago.edu/doi/pdf/10.1086/250034)
#
# - Original version by <NAME>
# - Comments and extensions by <NAME>
# - Further edits by <NAME>
# %% [markdown]
# [](https://mybinder.org/v2/gh/econ-ark/DemARK/master?filepath=notebooks%2FKrusellSmith.ipynb)
#
# %% [markdown]
# ### Overview
#
# The benchmark Krusell-Smith model has the following broad features:
# * The aggregate state switches between "good" and "bad" with known probabilities
# * All consumers experience the same aggregate state for the economy (good or bad)
# * _ex ante_ there is only one type of consumer, which is infinitely lived
# * _ex post_ heterogeneity arises from uninsurable idiosyncratic income shocks
# * Specifically, individuals are at risk of spells of unemployment
# * In a spell of unemployment, their income is zero
#
# Thus, each agent faces two types of uncertainty: About their employment state, and about the income they will earn when employed. And the values of income and unemployment risk depend on the aggregate state.
#
# %% [markdown]
# ### Details
#
# #### Idiosyncratic
# Each agent _attempts_ to supply an amount of productive labor $\ell$ in each period. (Here and below we mostly follow the notation of Krusell and Smith (1998)).
#
# However, whether they _succeed_ in supplying that labor (and earning a corresponding wage) is governed by the realization of the stochastic variable $\epsilon$. If the agent is unlucky, $\epsilon$ is zero and the agent is unemployed. The amount of labor they succeed in supplying is thus $\epsilon\ell$.
#
# #### Aggregate
# Aggregate output ($\bar{y}$) is produced using a Cobb-Douglas production function using capital and labor. (Bars over variables indicate the aggregate value of a variable that has different values across different idiosyncratic consumers).
#
# $z$ denotes the aggregate shock to productivity. $z$ can take two values, either $z_g$ -- the "good" state, or $z_b < z_g$ -- the "bad" state. Consumers gain income from providing labor, and from the rental return on any capital they own. Labor and capital markets are perfectly efficient so both factors are both paid their marginal products.
#
# The agent can choose to save by buying capital $k$ which is bounded below at the borrowing constraint of 0.
#
#
# Putting all of this together, aggregate output is given by:
# \begin{eqnarray}
# \bar{y} & = & z\bar{k}^\alpha \bar{\ell}^{1-\alpha}
# \end{eqnarray}
#
# %% [markdown]
# The aggregate shocks $z$ follow first-order Markov chains with the transition probability of moving from state $s$ to state $s'$ denoted by $\pi_{ss'}$. The aggregate shocks and individual shocks are correlated: The probability of being unemployed is higher in bad times, when aggregate productivity is low, than in good times, when aggregate productivity is high.
#
# #### Idiosyncratic and Aggregate Together
#
# The individual shocks satisfy the law of large numbers, and the model is constructed so that the number of agents who are unemployed in the good state always equals $u_g$, and is always $u_b$ in the bad state. Given the aggregate state, individual shocks are independent from each other.
#
# For the individual, the probability of moving between a good state and employment to a bad state and unemployment is denoted $\pi_{gb10}$ with similar notation for the other transition probabilities.
#
# (Krusell and Smith allow for serially correlated unemployment at the idiosyncratic level. Here we will simplify this and have unemployment be serially uncorrelated.)
# %% [markdown]
# Finally, $\Gamma$ denotes the current distribution of consumers over capital and employment status, and $H$ denotes the law of motion of this distribution.
# %% [markdown]
# #### The Idiosyncratic Individual's Problem Given the Aggregate State
#
# The individual's problem is:
# \begin{eqnarray*}
# V(k, \epsilon; \Gamma, z) &=& \max_{k'}\{U(c) + \beta \mathbb{E}[V(k' ,\epsilon'; \Gamma', z')|z, \epsilon]\} \\
# c + k' &=& r(\bar{k}, \bar{\ell}, z)k + w(\bar{k}, \bar{\ell}, z)\ell\epsilon + (1-\delta)k \\
# \Gamma' &=& H(\Gamma, z, z') \\
# k' &\geq& 0 \\
# \end{eqnarray*}
# %% [markdown]
# Krusell and Smith define an equilibrium as a law of motion $H$, a value function $V$, a rule for updating capital $f$ and pricing functions $r$ and $w$, such that $V$ and $f$ solve the consumers problem, $r$ and $w$ denote the marginal products of capital and labour, and $H$ is consistent with $f$ (i.e. if we add up all of the individual agents capital choices we get the correct distribution of capital).
# %% [markdown]
# ##### Discussion of the KS Algorithm
#
# In principle, $\Gamma$ is a high-dimensional object because it includes the whole distribution of individuals' wealth in the economy. Because the optimal amount to save is a nonlinear function of the level of idiosyncratic $k$, next period's aggregate capital stock $\bar{k}'$ depends on the distribution of the holdings of idiosyncratic $k$ across the population of consumers. Therefore the law of motion $H$ is not a trivial function of the $\Gamma$.
#
# KS simplified this problem by noting the following.
#
# 1. The agent cares about the future aggregate state only insofar as that state affects their own personal value of $c$
# 1. Future values of $c$ depend on the aggregate state only through the budget constraint
# 1. The channels by which the budget constraint depends on the aggregate state are:
# * The probability distributions of $\epsilon$ and $z$ are affected by the aggregate state
# * Interest rates and wages depend on the future values of $\bar{k}$ and $\bar{\ell}$
# 1. The probability distributions for the future values of $\{\epsilon, z\}$ are known
# * They are fully determined by the Markov transition matrices
# 1. But the values of $r$ and $w$ are both determined by the future value of $\bar{k}$ (in combination with the exogenous value of $\bar{\ell}$)
# * So the only _endogenous_ object that the agent needs to form expectations about, in order to have a complete rational expectation about everything affecting them, is $\bar{k}'$
#
# The key result in Krusell and Smith is the discovery that a very simple linear rule does an extraordinarily good job (though not quite perfect) in forecasting $\bar{k'}$
#
# They then argue that, since rationality is surely bounded to some degree, the solution that an agent obtains using a good forecasting rule for $\bar{k}'$ is "good enough" to compute an "approximate" solution to the consumer's optimization problem.
#
# They define a generic algorithm to find a forecasting rule for $\bar{k}$ as follows
#
# 1. Choose the number of moments $n$ of the distribution of $k$ to be included in the set of variables to forecast $\bar{k}'$. In the simplest case, $n=1$, the only forecasting variable for next period's $\bar{k}'$ is the mean (the first moment, $n=1$) of current capital, $\bar{k}$.
# 2. Each individual adopts the same belief about the law of motion of these moments, $H_I$, and finds the optimal decision policy, $f_I$, contingent on that guess.
# 3. Use the optimal policy to simulate a history of aggregate capital with a large number of agents.
# 4. Characterize the realized law of motion using the same number of moments $n$
# 5. Compare it with $H_I$, which is taken as given by individuals.
# 6. Iterate until the two converge.
#
# In the end, the solution to the original problem is well approximated by the following simplified problem:
#
# \begin{eqnarray*}
# V(k, \epsilon; \bar k, z) &=& \max_{c, k'}\{U(c) + \beta \mathbb{E}[V(k' ,\epsilon'; \bar k', z')|z, \epsilon]\} \\
# c + k' &=& r(\bar{k}, \bar{\ell}, z)k + w(\bar{k}, \bar{\ell}, z)\ell\epsilon + (1-\delta)k \\
# \text{When }~ z=z_g, \quad \mathbb{E}[\log\bar{k}'] & = & a_0 + a_1 \log\bar k \\
# \text{When }~ z=z_b, | |
(0x5319, 0), # East Asian ideograph
0x6F576F: (0xC904, 0), # Korean hangul
0x6F532C: (0xC12D, 0), # Korean hangul
0x23356F: (0x8C74, 0), # East Asian ideograph
0x6F4B7B: (0xB11E, 0), # Korean hangul
0x215B2A: (0x8E91, 0), # East Asian ideograph
0x6F4F6F: (0xB9DB, 0), # Korean hangul
0x21344D: (0x5321, 0), # East Asian ideograph
0x22344E: (0x64A2, 0), # East Asian ideograph
0x23344F: (0x8B3F, 0), # East Asian ideograph
0x2E7450: (0x7F82, 0), # East Asian ideograph
0x213451: (0x532F, 0), # East Asian ideograph
0x335230: (
0x7F6E,
0,
), # East Asian ideograph (variant of 215230 which maps to 7F6E)
0x4B3668: (0x5358, 0), # East Asian ideograph
0x234553: (0x9356, 0), # East Asian ideograph
0x21725D: (0x5620, 0), # East Asian ideograph
0x27554F: (0x53F6, 0), # East Asian ideograph
0x213453: (0x5339, 0), # East Asian ideograph
0x223454: (0x6490, 0), # East Asian ideograph
0x213455: (0x5340, 0), # East Asian ideograph
0x215F6F: (0x9748, 0), # East Asian ideograph
0x215B2C: (0x8EAA, 0), # East Asian ideograph
0x234554: (0x9371, 0), # East Asian ideograph
0x6F5028: (0xBA4D, 0), # Korean hangul
0x283457: (0x63B8, 0), # East Asian ideograph
0x274B2D: (0x736D, 0), # East Asian ideograph
0x2D3458: (0x4EDF, 0), # East Asian ideograph
0x6F4C65: (0xB2D0, 0), # Korean hangul
0x284934: (0x6D43, 0), # East Asian ideograph
0x233459: (0x8B59, 0), # East Asian ideograph
0x6F5772: (0xC90D, 0), # Korean hangul
0x233A21: (0x8E30, 0), # East Asian ideograph
0x213A22: (0x5A49, 0), # East Asian ideograph
0x21345B: (0x5347, 0), # East Asian ideograph
0x233A24: (0x8E47, 0), # East Asian ideograph
0x213A25: (0x5A4A, 0), # East Asian ideograph
0x233A26: (0x8E46, 0), # East Asian ideograph
0x273A27: (0x5987, 0), # East Asian ideograph
0x273A28: (0x5A04, 0), # East Asian ideograph
0x213A29: (0x5A3C, 0), # East Asian ideograph
0x213A2A: (0x5A62, 0), # East Asian ideograph
0x213A2B: (0x5A5A, 0), # East Asian ideograph
0x213A2C: (0x5A77, 0), # East Asian ideograph
0x213A2D: (0x5A9A, 0), # East Asian ideograph
0x233A2E: (0x8E4C, 0), # East Asian ideograph
0x213A2F: (0x5A7F, 0), # East Asian ideograph
0x223A30: (0x670F, 0), # East Asian ideograph
0x224767: (0x6CAC, 0), # East Asian ideograph
0x233A32: (0x8E4F, 0), # East Asian ideograph
0x223A33: (0x6712, 0), # East Asian ideograph
0x223A34: (0x6713, 0), # East Asian ideograph
0x233A35: (0x8E62, 0), # East Asian ideograph
0x233A36: (0x8E60, 0), # East Asian ideograph
0x213A37: (0x5AB2, 0), # East Asian ideograph
0x223A38: (0x6719, 0), # East Asian ideograph
0x223A39: (0x6718, 0), # East Asian ideograph
0x233A3A: (0x8E54, 0), # East Asian ideograph
0x273A3B: (0x59AA, 0), # East Asian ideograph
0x213A3C: (0x5AD6, 0), # East Asian ideograph
0x213A3D: (0x5AE3, 0), # East Asian ideograph
0x233A3E: (0x8E5A, 0), # East Asian ideograph
0x233A3F: (0x8E5E, 0), # East Asian ideograph
0x233A40: (0x8E55, 0), # East Asian ideograph
0x273A41: (0x5A34, 0), # East Asian ideograph
0x213A42: (0x5B09, 0), # East Asian ideograph
0x273A43: (0x5A75, 0), # East Asian ideograph
0x273A44: (0x5A07, 0), # East Asian ideograph
0x273A45: (0x59A9, 0), # East Asian ideograph
0x233A46: (0x8E95, 0), # East Asian ideograph
0x223A47: (0x6723, 0), # East Asian ideograph
0x233A48: (0x8E6D, 0), # East Asian ideograph
0x213A49: (0x5B24, 0), # East Asian ideograph
0x273A4A: (0x5A74, 0), # East Asian ideograph
0x273A4B: (0x5A76, 0), # East Asian ideograph
0x223A4C: (0x673E, 0), # East Asian ideograph
0x213462: (0x5351, 0), # East Asian ideograph
0x223A4E: (0x673F, 0), # East Asian ideograph
0x213A4F: (0x5B53, 0), # East Asian ideograph
0x213A50: (0x5B54, 0), # East Asian ideograph
0x213A51: (0x5B55, 0), # East Asian ideograph
0x213A52: (0x5B57, 0), # East Asian ideograph
0x213A53: (0x5B58, 0), # East Asian ideograph
0x213A54: (0x5B5D, 0), # East Asian ideograph
0x213A55: (0x5B5C, 0), # East Asian ideograph
0x233A57: (0x8E8B, 0), # East Asian ideograph
0x223A58: (0x6757, 0), # East Asian ideograph
0x213A59: (0x5B64, 0), # East Asian ideograph
0x213A5A: (0x5B69, 0), # East Asian ideograph
0x273A5B: (0x5B59, 0), # East Asian ideograph
0x223A5C: (0x6747, 0), # East Asian ideograph
0x213A5D: (0x5B73, 0), # East Asian ideograph
0x233A5E: (0x8E9A, 0), # East Asian ideograph
0x273A5F: (0x5B5A, 0), # East Asian ideograph
0x273A60: (0x5B66, 0), # East Asian ideograph
0x223A61: (0x6755, 0), # East Asian ideograph
0x213A62: (0x5B7D, 0), # East Asian ideograph
0x233A63: (0x8E98, 0), # East Asian ideograph
0x233A64: (0x8E9E, 0), # East Asian ideograph
0x223466: (0x64B3, 0), # East Asian ideograph
0x223A66: (0x674C, 0), # East Asian ideograph
0x223A67: (0x6759, 0), # East Asian ideograph
0x223A68: (0x6748, 0), # East Asian ideograph
0x213A69: (0x5B8C, 0), # East Asian ideograph
0x275553: (0x8364, 0), # East Asian ideograph
0x233A6B: (0x8EA5, 0), # East Asian ideograph
0x213A6C: (0x5B97, 0), # East Asian ideograph
0x213A6D: (0x5B9A, 0), # East Asian ideograph
0x213A6E: (0x5B9C, 0), # East Asian ideograph
0x233A6F: (0x8EA7, 0), # East Asian ideograph
0x213A70: (0x5B99, 0), # East Asian ideograph
0x223A71: (0x674A, 0), # East Asian ideograph
0x233A72: (0x8E99, 0), # East Asian ideograph
0x213A73: (0x5BA3, 0), # East Asian ideograph
0x213A74: (0x5BA6, 0), # East Asian ideograph
0x213A75: (0x5BA4, 0), # East Asian ideograph
0x213A76: (0x5BA2, 0), # East Asian ideograph
0x213A77: (0x5BB0, 0), # East Asian ideograph
0x213A78: (0x5BB8, 0), # East Asian ideograph
0x233A7A: (0x8EBC, 0), # East Asian ideograph
0x213A7B: (0x5BB4, 0), # East Asian ideograph
0x223A7C: (0x6785, 0), # East Asian ideograph
0x213A7D: (0x5BB9, 0), # East Asian ideograph
0x213A7E: (0x5BB3, 0), # East Asian ideograph
0x23233F: (0x844A, 0), # East Asian ideograph
0x4B763D: (
0x57F4,
0,
), # East Asian ideograph (variant of 21763D which maps to 57F4)
0x22746B: (0x7F7E, 0), # East Asian ideograph
0x283B7D: (0x53F0, 0), # East Asian ideograph (duplicate simplified)
0x22346C: (0x64D3, 0), # East Asian ideograph
0x6F5927: (0xCC39, 0), # Korean hangul
0x393B39: (0x5BF3, 0), # East Asian ideograph
0x213F26: (0x614C, 0), # East Asian ideograph
0x235222: (
0x9957,
0,
), # East Asian ideograph (variant of 475222 which maps to 9957)
0x2D346E: (0x5373, 0), # East Asian ideograph
0x276232: (0x9E23, 0), # East Asian ideograph
0x6F5333: (0xC13C, 0), # Korean hangul
0x213D5B: (0x5F79, 0), # East Asian ideograph
0x213471: (0x5378, 0), # East Asian ideograph
0x287472: (0x7F74, 0), # East Asian ideograph
0x23344D: (0x8B56, 0), # East Asian ideograph
0x335223: (0x7E8E, 0), # East Asian ideograph
0x233473: (0x8B45, 0), # East Asian ideograph
0x273F3F: (0x51ED, 0), # East Asian ideograph
0x213474: (0x537F, 0), # East Asian ideograph
0x213475: (0x5384, 0), # East Asian ideograph
0x21325D: (0x50F9, 0), # East Asian ideograph
0x225F21: (0x75F9, 0), # East Asian ideograph
0x217477: (0x576D, 0), # East Asian ideograph
0x225F22: (0x75FC, 0), # East Asian ideograph
0x23456F: (0x9364, 0), # East Asian ideograph
0x275F23: (0x9648, 0), # East Asian ideograph
0x213479: (0x53A5, 0), # East Asian ideograph
0x275F24: (0x9646, 0), # East Asian ideograph
0x22747A: (0x7F91, 0), # East Asian ideograph
0x21347B: (0x53B2, 0), # East Asian ideograph
0x21392F: (0x5954, 0), # East Asian ideograph
0x225269: (0x7150, 0), # East Asian ideograph
0x4B4835: (0x6DA3, 0), # East Asian ideograph
0x21347D: (0x53C3, 0), # East Asian ideograph
0x2D5F28: (0x9665, 0), # East Asian ideograph
0x6F5336: (0xC149, 0), # Korean hangul
0x6F5329: (0xC127, 0), # Korean hangul
0x225F29: (0x7616, 0), # East Asian ideograph
0x275F2A: (0x9634, 0), # East Asian ideograph
0x275F2B: (0x961F, 0), # East Asian ideograph
0x216C41: (0x52D1, 0), # East Asian ideograph
0x225F2C: (0x7608, 0), # East Asian ideograph
0x6F577A: (0xC958, 0), # Korean hangul
0x225F2D: (0x7615, 0), # East Asian ideograph
0x295731: (0x9C8B, 0), # East Asian ideograph
0x276222: (0x9CC5, 0), # East Asian ideograph
0x225F2E: (0x760C, 0), # East Asian ideograph
0x23455D: (0x9349, 0), # East Asian ideograph
0x6F5567: (0xC5FE, 0), # Korean hangul
0x215F2F: (0x9685, 0), # East Asian ideograph
0x273340: (0x51BB, 0), # East Asian ideograph
0x223B21: (0x677B, 0), # East Asian ideograph
0x223B22: (0x6792, 0), # East Asian ideograph
0x223B23: (0x6776, 0), # East Asian ideograph
0x213B24: (0x5BC4, 0), # East Asian ideograph
0x223B25: (0x6791, 0), # East Asian ideograph
0x223B26: (0x6799, 0), # East | |
<reponame>LB-KatarzynaDylska/o3de
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# Shape components associated with specific light types.
# Keys are the light-type identifiers used by this test; values are the
# editor Shape component names that must accompany that light type.
LIGHT_SHAPES = {
    'sphere': 'Sphere Shape',
    'spot_disk': 'Disk Shape',
    'capsule': 'Capsule Shape',
    'quad': 'Quad Shape',
    'polygon': 'Polygon Prism Shape',
}
class Tests:
    """Report message pairs for each test step.

    Each attribute is a (pass message, fail message) tuple; the fail
    message carries the test priority (P0/P1) prefix.  The strings are
    plain constants, so the needless f-string prefixes were removed
    (they contained no placeholders).
    """
    light_creation = (
        "Light Entity successfully created",
        "P0: Light Entity failed to be created")
    light_component_removal = (
        "Light component successfully removed",
        "P1: Light component failed to be removed")
    light_component = (
        "Entity has a Light component",
        "P0: Entity failed to find Light component")
    removal_undo = (
        "UNDO Light component removal success",
        "P0: UNDO Light component removal failed")
    edit_light_color = (
        "Light color updated",
        "P1: Light color failed to update")
    edit_intensity_value = (
        "Intensity updated",
        "P1: Intensity failed to update")
    edit_attenuation_radius = (
        "Attenuation radius Radius updated",
        "P1: Attenuation radius Radius failed to update")
    enable_shadows = (
        "Shadows enabled",
        "P1: Shadows failed to be enabled")
    disable_shadows = (
        "Shadows disabled",
        "P1: Shadows failed to be disabled")
    edit_shadow_bias = (
        "Shadow Bias updated",
        "P1: Shadow Bias failed to be updated")
    edit_normal_bias = (
        "Normal shadow bias updated",
        "P1: Normal shadow bias failed to update")
    edit_filtering_sample_count = (
        "Filtering sample count updated",
        "P1: Filtering sample count failed to update")
    edit_esm_exponent = (
        "ESM exponent updated",
        "P1: ESM exponent failed to update")
    edit_inner_angle = (
        "Shutters Inner angle updated",
        "P1: Inner angle failed to update")
    edit_outer_angle = (
        "Shutters Outer angle updated",
        "P1: Outer angle failed to update")
    disable_shutters = (
        "Shutters disabled",
        "P1: Shutters failed to be disabled")
    enable_shutters = (
        "Shutters enabled",
        "P1: Shutters failed to be enabled")
    enable_both_directions = (
        "Both directions enabled",
        "P1: Both directions failed to be enabled")
    disable_both_directions = (
        "Both directions disabled",
        "P1: Both directions failed to be disabled")
    enable_fast_approximation = (
        "Fast approximation enabled",
        "P1: Fast approximation failed to be enabled")
    disable_fast_approximation = (
        "Fast approximation disabled",
        "P1: Fast approximation failed to be disabled")
    enter_game_mode = (
        "Entered game mode",
        "P0: Failed to enter game mode")
    exit_game_mode = (
        "Exited game mode",
        "P0: Couldn't exit game mode")
    is_hidden = (
        "Entity is hidden",
        "P0: Entity was not hidden")
    is_visible = (
        "Entity is visible",
        "P0: Entity was not visible")
    entity_deleted = (
        "Entity deleted",
        "P0: Entity was not deleted")
    deletion_undo = (
        "UNDO deletion success",
        "P0: UNDO deletion failed")
    deletion_redo = (
        "REDO deletion success",
        "P0: REDO deletion failed")
def AtomEditorComponents_Light_AddedToEntity():
"""
Summary:
Tests the Light component can be added to an entity and has the expected functionality.
Test setup:
- Wait for Editor idle loop.
- Open the "Base" level.
Expected Behavior:
The component can be added, used in game mode, hidden/shown, deleted, all components can be manipulated,
and has accurate required components.
Creation and deletion undo/redo should also work.
Test Steps:
1) Create a Light entity with no components.
2) Add Light component to the Light entity.
3) Remove existing Light component on the entity.
4) UNDO the light component removal.
5) Set the light type.
6) Check for Shape component.
7) Edit the Color parameter.
8) Set the Intensity mode parameter.
9) Edit the Intensity parameter.
10) Set the Attenuation radius Mode.
11) Edit the Attenuation radius Radius parameter (explicit only).
12) Enable shadows (if applicable).
13) Edit the Shadows Bias parameter.
14) Edit the Normal shadow bias parameter.
15) Set the Shadowmap size.
16) Set the Shadow filter method.
17) Edit the Filtering sample count parameter.
18) Edit the ESM Exponent parameter.
19) Disable Shadows (re-enabled after test for game mode verification).
20) Edit the Inner angle parameter.
21) Edit the Outer angle parameter.
22) Disable Shutters.
23) Enable Shutters.
24) Enable Both directions.
25) Disable Both directions (re-enabled after test for game mode verification).
26) Enable Fast approximation.
27) Disable Fast approximation.
28) Enter/Exit game mode.
29) Test IsHidden.
30) Test IsVisible.
REPEAT tests 2-30 for all applicable light types.
31) Delete Light entity.
32) UNDO deletion.
33) REDO deletion.
34) Look for errors.
:return: None
"""
import azlmbr.legacy.general as general
import azlmbr.math as math
from editor_python_test_tools.editor_entity_utils import EditorEntity
from editor_python_test_tools.utils import Report, Tracer, TestHelper
from Atom.atom_utils.atom_constants import (AtomComponentProperties, LIGHT_TYPES, INTENSITY_MODE,
ATTENUATION_RADIUS_MODE, SHADOWMAP_SIZE, SHADOW_FILTER_METHOD)
with Tracer() as error_tracer:
# Test setup begins.
# Setup: Wait for Editor idle loop before executing Python hydra scripts then open "Base" level.
TestHelper.init_idle()
TestHelper.open_level("Graphics", "base_empty")
# Test steps begin.
# 1. Create a Light entity with no components.
light_entity = EditorEntity.create_editor_entity(AtomComponentProperties.light())
Report.critical_result(Tests.light_creation, light_entity.exists())
# 2. Add a Light component to the Light entity.
light_component = light_entity.add_component(AtomComponentProperties.light())
Report.critical_result(Tests.light_component,
light_entity.has_component(AtomComponentProperties.light()))
# 3. Remove the light component.
light_component.remove()
general.idle_wait_frames(1)
Report.critical_result(Tests.light_component_removal,
not light_entity.has_component(AtomComponentProperties.light()))
# 4. Undo the Light component removal.
general.undo()
general.idle_wait_frames(1)
Report.result(Tests.removal_undo,
light_entity.has_component(AtomComponentProperties.light()))
# Copy LIGHT_TYPES to pop the 'unknown' key to prevent it from being run in this test.
light_types_copy = LIGHT_TYPES.copy()
light_types_copy.pop('unknown')
# Cycle through light types to test component properties.
for light_type in light_types_copy.keys():
# Remove the Light component to begin loop with a clean component.
light_component.remove()
general.idle_wait_frames(1)
Report.critical_result(Tests.light_component_removal,
not light_entity.has_component(AtomComponentProperties.light()))
# Add a new Light component to begin parameter tests.
light_component = light_entity.add_component(AtomComponentProperties.light())
# 5. Set light type.
light_component.set_component_property_value(
AtomComponentProperties.light('Light type'), LIGHT_TYPES[light_type])
general.idle_wait_frames(1)
test_light_type = (
f"Set light type: {light_type.upper()}",
f"P0: Light component failed to set {light_type.upper()} type")
Report.result(test_light_type, light_component.get_component_property_value(
AtomComponentProperties.light('Light type')) == LIGHT_TYPES[light_type])
# 6. Check for Shape component.
if LIGHT_TYPES[light_type] in (LIGHT_TYPES['sphere'], LIGHT_TYPES['spot_disk'], LIGHT_TYPES['capsule'],
LIGHT_TYPES['quad'], LIGHT_TYPES['polygon']):
light_shape = LIGHT_SHAPES[light_type]
test_light_shape = (
f"{light_shape} present",
f"P1: {light_shape} was not found")
Report.result(test_light_shape, light_entity.has_component(light_shape))
# 7. Edit Color parameter.
color_value = math.Color(1.0, 0.0, 0.0, 1.0)
light_component.set_component_property_value(AtomComponentProperties.light('Color'), color_value)
general.idle_wait_frames(1)
light_color = light_component.get_component_property_value(
AtomComponentProperties.light('Color'))
Report.result(Tests.edit_light_color, light_color.IsClose(color_value))
# 8. Set Intensity mode.
# if LIGHT_TYPES[light_type] not in (LIGHT_TYPES['simple_point'], LIGHT_TYPES['simple_spot']):
# for intensity_mode in INTENSITY_MODE.keys():
# light_component.set_component_property_value(
# AtomComponentProperties.light('Intensity mode'), INTENSITY_MODE[intensity_mode])
# general.idle_wait_frames(1)
# test_intensity_mode = (
# f"Intensity mode set to {intensity_mode}",
# f"P1: Intensity mode failed to be set to {intensity_mode}")
# Report.result(test_intensity_mode, light_component.get_component_property_value(
# AtomComponentProperties.light('Intensity mode')) == INTENSITY_MODE[intensity_mode])
# 9. Edit the Intensity parameter.
light_component.set_component_property_value(AtomComponentProperties.light('Intensity'), 1000)
general.idle_wait_frames(1)
Report.result(Tests.edit_intensity_value,
light_component.get_component_property_value(
AtomComponentProperties.light('Intensity')) == 1000)
# 10. Set the Attenuation radius Mode:
for radius_mode in ATTENUATION_RADIUS_MODE.keys():
light_component.set_component_property_value(
AtomComponentProperties.light('Attenuation radius Mode'), ATTENUATION_RADIUS_MODE[radius_mode])
general.idle_wait_frames(1)
test_attenuation_mode = (
f"Attenuation radius Mode set to {radius_mode}",
f"P1: Attenuation radius Mode failed to be set to {radius_mode}")
Report.result(test_attenuation_mode, light_component.get_component_property_value(
AtomComponentProperties.light('Attenuation radius Mode')) == ATTENUATION_RADIUS_MODE[radius_mode])
# 11. Edit the Attenuation radius Radius parameter (explicit only).
if ATTENUATION_RADIUS_MODE[radius_mode] == ATTENUATION_RADIUS_MODE['explicit']:
light_component.set_component_property_value(
AtomComponentProperties.light('Attenuation radius Radius'), 1000)
general.idle_wait_frames(1)
Report.result(Tests.edit_attenuation_radius,
light_component.get_component_property_value(
AtomComponentProperties.light('Attenuation radius Radius')) == 1000)
# Shadow tests for applicable light types:
if LIGHT_TYPES[light_type] in (LIGHT_TYPES['sphere'], LIGHT_TYPES['spot_disk']):
# 12. Enable Shadows:
light_component.set_component_property_value(
AtomComponentProperties.light('Enable shadow'), True)
general.idle_wait_frames(1)
Report.result(
Tests.enable_shadows,
light_component.get_component_property_value(
AtomComponentProperties.light('Enable shadow')) is True)
# 13. Edit the Shadows Bias parameter.
light_component.set_component_property_value(
AtomComponentProperties.light('Shadows Bias'), 100)
general.idle_wait_frames(1)
Report.result(
Tests.edit_shadow_bias,
light_component.get_component_property_value(
AtomComponentProperties.light('Shadows Bias')) == 100)
# 14. Edit the Normal shadow bias parameter.
light_component.set_component_property_value(
AtomComponentProperties.light('Normal shadow bias'), 10)
general.idle_wait_frames(1)
Report.result(
Tests.edit_normal_bias,
light_component.get_component_property_value(
AtomComponentProperties.light('Normal shadow bias')) == 10)
# 15. Set the Shadowmap size.
# for shadowmap_size in SHADOWMAP_SIZE.keys():
# light_component.set_component_property_value(
# AtomComponentProperties.light('Shadowmap size'), SHADOWMAP_SIZE[shadowmap_size])
# general.idle_wait_frames(1)
# test_shadowmap_size = (
# f"Shadowmap size set to {shadowmap_size}.",
# f"P1: Shadowmap size failed to be set to {shadowmap_size}.")
# Report.result(test_shadowmap_size, light_component.get_component_property_value(
# AtomComponentProperties.light('Shadowmap size')) == SHADOWMAP_SIZE[shadowmap_size])
# Shadow filter method tests.
# 16. Set the Shadow filter method.
for filter_method in SHADOW_FILTER_METHOD.keys():
light_component.set_component_property_value(
AtomComponentProperties.light('Shadow filter method'), SHADOW_FILTER_METHOD[filter_method])
general.idle_wait_frames(1)
test_shadow_filter_method = (
f"Shadow filter method set to {filter_method}",
f"P1: Shadow filter method set to {filter_method}")
Report.result(test_shadow_filter_method, light_component.get_component_property_value(
AtomComponentProperties.light('Shadow filter method')) == SHADOW_FILTER_METHOD[filter_method])
# 17. Edit the Filtering sample count parameter.
if SHADOW_FILTER_METHOD[filter_method] in (SHADOW_FILTER_METHOD['PCF'],
SHADOW_FILTER_METHOD['PCF+ESM']):
light_component.set_component_property_value(
AtomComponentProperties.light('Filtering sample count'), 64)
general.idle_wait_frames(1)
Report.result(Tests.edit_filtering_sample_count,
light_component.get_component_property_value(
AtomComponentProperties.light('Filtering sample count')) == 64)
# 18. Edit the ESM Exponent parameter.
if SHADOW_FILTER_METHOD[filter_method] in (SHADOW_FILTER_METHOD['ESM'],
SHADOW_FILTER_METHOD['PCF+ESM']):
light_component.set_component_property_value(
AtomComponentProperties.light('ESM exponent'), 5000)
general.idle_wait_frames(1)
Report.result(Tests.edit_esm_exponent,
light_component.get_component_property_value(
AtomComponentProperties.light('ESM exponent')) == 5000)
# 19. Disable Shadows (re-enabled after test for game mode verification):
light_component.set_component_property_value(
AtomComponentProperties.light('Enable shadow'), False)
general.idle_wait_frames(1)
Report.result(
Tests.disable_shadows,
light_component.get_component_property_value(
AtomComponentProperties.light('Enable shadow')) is | |
<gh_stars>0
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
    """No-op module: returns its input unchanged.

    Used as a stand-in wherever an optional layer (e.g. normalization) is
    disabled, so the surrounding code can always call *some* module.
    """

    def forward(self, x):
        # Pure pass-through, no parameters and no side effects.
        return x
def get_norm_layer(norm_type='instance'):
    """Return a normalization layer factory.

    Parameters:
        norm_type (str) -- the name of the normalization layer: batch | instance | none

    For BatchNorm we use learnable affine parameters and track running
    statistics (mean/stddev).  For InstanceNorm we use neither.  For 'none'
    the returned callable ignores its argument and yields an Identity module.
    """
    if norm_type == 'batch':
        return functools.partial(
            nn.BatchNorm2d, affine=True, track_running_stats=True)
    if norm_type == 'instance':
        return functools.partial(
            nn.InstanceNorm2d, affine=False, track_running_stats=False)
    if norm_type == 'none':
        # The argument (channel count) is irrelevant for a no-op layer.
        return lambda num_features: Identity()
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler.

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a
                              subclass of BaseOptions.  opt.lr_policy is the
                              name of the policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.n_epochs>
    epochs and linearly decay the rate to zero over the next
    <opt.n_epochs_decay> epochs.  For other schedulers (step, plateau, and
    cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.

    Raises:
        NotImplementedError -- if opt.lr_policy is not a recognized policy.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            # Flat LR for n_epochs, then linear decay to 0 over n_epochs_decay.
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
    else:
        # BUG FIX: the original *returned* the NotImplementedError instance
        # (with a never-formatted logging-style message) instead of raising it,
        # silently handing callers an exception object as the "scheduler".
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize network weights in place.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    'normal' is what the original pix2pix and CycleGAN paper used; xavier and
    kaiming may work better for some applications.
    """
    def init_func(m):
        # Applied to every submodule by net.apply() below.
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in classname or 'Linear' in classname):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in classname:
            # BatchNorm's weight is a vector, not a matrix; only a normal
            # distribution applies here.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)  # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=None):
    """Initialize a network: 1. register CPU/GPU device (with multi-GPU
    support); 2. initialize the network weights.

    Parameters:
        net (network)     -- the network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list or None) -- which GPUs the network runs on, e.g. [0, 1, 2];
                                      None or [] keeps the network on CPU.

    Return an initialized network.
    """
    # BUG FIX: the default used to be a mutable list literal (shared across
    # calls); None + normalization is the safe, backward-compatible idiom.
    if gpu_ids is None:
        gpu_ids = []
    if len(gpu_ids) > 0:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs
    init_weights(net, init_type, init_gain=init_gain)
    return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Create and initialize a generator.

    Parameters:
        input_nc (int)    -- the number of channels in input images
        output_nc (int)   -- the number of channels in output images
        ngf (int)         -- the number of filters in the last conv layer
        netG (str)        -- architecture name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
        norm (str)        -- normalization layer type: batch | instance | none
        use_dropout (bool) -- whether to use dropout layers
        init_type (str)   -- name of the weight-initialization method
        init_gain (float) -- scaling factor for normal, xavier and orthogonal
        gpu_ids (int list) -- which GPUs the network runs on, e.g. [0, 1, 2]

    Returns a generator already initialized by <init_net>.  Two families are
    supported: ResNet-based generators (6 or 9 residual blocks between the
    down/upsampling stages) and U-Net generators (for 128x128 or 256x256
    inputs, https://arxiv.org/abs/1505.04597).
    """
    norm_layer = get_norm_layer(norm_type=norm)
    if netG in ('resnet_9blocks', 'resnet_6blocks'):
        n_blocks = 9 if netG == 'resnet_9blocks' else 6
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer,
                              use_dropout=use_dropout, n_blocks=n_blocks)
    elif netG in ('unet_128', 'unet_256'):
        # num_downs: 7 downsamplings handle 128px inputs, 8 handle 256px.
        num_downs = 7 if netG == 'unet_128' else 8
        net = UnetGenerator(input_nc, output_nc, num_downs, ngf,
                            norm_layer=norm_layer, use_dropout=use_dropout)
    else:
        raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
    return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Create and initialize a discriminator.

    Parameters:
        input_nc (int)    -- the number of channels in input images
        ndf (int)         -- the number of filters in the first conv layer
        netD (str)        -- architecture name: basic | n_layers | pixel
        n_layers_D (int)  -- number of conv layers; effective only when netD == 'n_layers'
        norm (str)        -- type of normalization layers used in the network
        init_type (str)   -- name of the weight-initialization method
        init_gain (float) -- scaling factor for normal, xavier and orthogonal
        gpu_ids (int list) -- which GPUs the network runs on, e.g. [0, 1, 2]

    Returns a discriminator already initialized by <init_net>:
        [basic]    -- the 70x70 PatchGAN classifier from the pix2pix paper;
                      patch-level, fully convolutional, few parameters.
        [n_layers] -- PatchGAN with a configurable depth <n_layers_D>
                      (n_layers_D=3 reproduces [basic]).
        [pixel]    -- 1x1 PixelGAN: classifies each pixel independently.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    if netD in ('basic', 'n_layers'):
        depth = 3 if netD == 'basic' else n_layers_D
        net = NLayerDiscriminator(input_nc, ndf, n_layers=depth, norm_layer=norm_layer)
    elif netD == 'pixel':
        # classify whether each individual pixel is real or fake
        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
    return init_net(net, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
# self.loss = nn.MSELoss(reduction = "none") ################################################
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - tpyically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
# print(prediction.shape)
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
| |
or 0),
'cig_url': contact_group.get_absolute_url()
+ 'members/'
+ str(contact_with_extra_fields.id),
'title': _('{contact} in group {group}').format(
contact=contact_with_extra_fields,
group=contact_group),
'msg_count': msg_count,
'msg_count_unread': msg_count_unread,
})
def membership_extended_widget_factory(request, contact_group):
    """Bind *request* and *contact_group* into a one-argument column renderer.

    The returned callable matches the changelist column protocol: it takes an
    annotated contact and renders its membership widget for *contact_group*.
    """
    def render(contact_with_extra_fields):
        return membership_extended_widget(
            request, contact_with_extra_fields, contact_group)
    return render
def field_widget(contact_field, contact_with_extra_fields):
    """Render one custom-field value of a contact as HTML.

    Reads the value the queryset annotated onto the contact under the
    DISP_FIELD_PREFIX attribute.  Falls back to the field's
    default_value_html() hook when the value is empty; fields without that
    hook render as an empty string.
    """
    raw_value = getattr(contact_with_extra_fields,
                        DISP_FIELD_PREFIX + str(contact_field.id))
    if raw_value:
        return mark_safe(contact_field.format_value_html(raw_value))
    try:
        # Optional hook: not every field type defines default_value_html.
        return mark_safe(contact_field.default_value_html())
    except AttributeError:
        return ''
def field_widget_factory(contact_field):
    """Bind *contact_field* into a one-argument column renderer."""
    def render(contact_with_extra_fields):
        return field_widget(contact_field, contact_with_extra_fields)
    return render
class CustomColumnsFilter(filters.ListFilter):
    '''
    This is not really a filter. This actually adds columns to the query.

    It plugs into the admin changelist filter machinery only to render the
    column-selection widget; the queryset itself passes through untouched.
    '''
    title = ugettext_lazy('Change columns')
    template = 'choose_columns.html'

    def __init__(self, request, params, model, view):
        # Consume our own query parameters so the changelist does not try to
        # interpret them as field lookups.
        super().__init__(request, params, model, view)
        params.pop('fields', None)
        params.pop('savecolumns', None)

    def has_output(self):
        return True  # This is required so that queryset is called

    def choices(self, cl):
        # This is an ugly hack to recover all the non-fields django-filters, to
        # build the select column base return url
        # We do it here because we need the cl.
        # NOTE: the trailing comma makes this a 1-tuple containing the URL.
        return cl.get_query_string({}, ['fields', 'savecolumns']),

    def queryset(self, request, q):
        # Pass-through: this "filter" never narrows the queryset.
        return q

    def expected_parameters(self):
        # Query parameters owned by this filter (consumed in __init__).
        return ['fields']
class BaseContactListView(NgwListView):
    '''
    Base view for contact list.
    That view should NOT be called directly since there is no user check.
    '''
    template_name = 'contact_list.html'
    # NOTE: trailing comma — this is a 1-tuple of filter classes.
    list_filter = CustomColumnsFilter,
    actions = (
        'action_csv_export',  # See NgwListView
        'action_vcard_export',
        'action_bcc',
        'add_to_group',
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Columns are (re)computed per request in get_root_queryset().
        self.list_display = []

    def get_root_queryset(self):
        """Build the contact queryset and the column list for this request.

        Columns come from the 'fields' GET parameter (falling back to the
        user's saved defaults); each column may add annotations (busy state,
        birthday, group membership, custom field values) to the query.
        Side effects: sets self.strfields, self.fields, self.list_display and,
        when a contact group is in context, self.group_status.
        """
        # Make sure self.contactgroup is defined:
        if not hasattr(self, 'contactgroup'):
            self.contactgroup = None
        q = ContactQuerySet(Contact._default_manager.model,
                            using=Contact._default_manager._db)
        current_cg = self.contactgroup
        list_display = []
        request = self.request
        user = request.user
        fields = request.GET.getlist('fields', None)
        if not fields:
            fields = get_default_columns(user)
        # Normalize: entries may themselves be comma-separated lists.
        strfields = ','.join(fields)
        fields = strfields.split(',')
        if request.GET.get('savecolumns', False):
            # Persist the chosen columns as the user's new default.
            user.set_fieldvalue(request, FIELD_COLUMNS, strfields)
        self.strfields = strfields
        self.fields = fields
        for prop in self.fields:
            if prop == 'name':
                # Name column also carries the busy/birthday icons, so the
                # query needs the matching annotations.
                if current_cg is not None and current_cg.date:
                    q.add_busy(current_cg.id)
                    q.add_birthday(current_cg)
                else:
                    q.add_busy()
                    q.add_birthday()
                list_display.append('name_with_relative_link')
            elif prop.startswith(DISP_GROUP_PREFIX):
                # Membership-in-group column.
                groupid = int(prop[len(DISP_GROUP_PREFIX):])
                if not perms.c_can_see_members_cg(user.id, groupid):
                    # just ignore groups that aren't allowed to be seen
                    continue
                q.add_group(groupid)
                cg = ContactGroup.objects.get(pk=groupid)
                # attribute_name = 'text_'+prop
                # setattr(self, attribute_name,
                #         membership_to_text_factory(groupid))
                # cols.append((cg.name, attribute_name, None))
                # Renderers are attached to the view instance so the
                # changelist can resolve them by name.
                attribute_name = 'html_'+prop
                attribute = membership_extended_widget_factory(request, cg)
                attribute.short_description = str(cg)
                setattr(self, attribute_name, attribute)
                list_display.append(attribute_name)
                # cols.append(('group_{}_flags'.format(groupid),
                #              'group_{}_flags'.format(groupid), None))
            elif prop.startswith(DISP_FIELD_PREFIX):
                # Custom contact-field column.
                fieldid = prop[len(DISP_FIELD_PREFIX):]
                cf = ContactField.objects.get(pk=fieldid)
                if not perms.c_can_view_fields_cg(
                        user.id, cf.contact_group_id):
                    continue  # Just ignore fields that can't be seen
                q.add_field(fieldid)
                attribute_name = 'html_'+prop
                attribute = field_widget_factory(cf)
                attribute.short_description = cf.name
                attribute.admin_order_field = prop
                # TODO: Investigate why there are so many warnings:
                # attribute.allow_tags = True
                setattr(self, attribute_name, attribute)
                list_display.append(attribute_name)
            elif prop == 'busy':
                # Agenda column only makes sense for dated groups (events).
                if current_cg is not None:
                    if current_cg.date:
                        q.add_busy(current_cg.id)
                        list_display.append('agenda')
            else:
                raise ValueError('Invalid field '+prop)
        if current_cg is not None:
            # Always expose membership status & message counts for the
            # group currently being browsed.
            q.add_group(current_cg.id)
            q.add_messages(current_cg.id)
            self.group_status = membership_extended_widget_factory(
                request, current_cg)
            self.group_status.short_description = _('Status')
            list_display.append('group_status')
            # cols.append(('group_{}_flags'.format(current_cg.id),
            #              'group_{}_flags'.format(current_cg.id), None))
            # cols.append(('group_{}_inherited_flags'.format(current_cg.id),
            #              'group_{}_inherited_flags'.format(current_cg.id),
            #              None))
            # cols.append(('group_{}_inherited_aflags'.format(current_cg.id),
            #              'group_{}_inherited_aflags'.format(current_cg.id),
            #              None))
        self.list_display = list_display
        return q

    def get_search_results(self, request, queryset, search_term):
        '''
        Contact list views handle the search in a very special way.

        The search term is a filter-language string (see parse_filterstring),
        not a plain substring.  Side effects: stores the raw string and its
        HTML rendering for use in get_context_data().

        Returns a tuple containing a queryset to implement the search,
        and a boolean indicating if the results may contain duplicates.
        '''
        self.filter_str = search_term
        filter = parse_filterstring(search_term, request.user.id)
        self.filter_html = filter.to_html()
        return filter.apply_filter_to_query(queryset), False

    def get_context_data(self, **kwargs):
        # Template context: title/navbar plus the column-selection form and
        # the current filter (raw + HTML) with a link to reset it.
        context = {}
        context['title'] = _('Contact list')
        context['objtype'] = Contact
        context['nav'] = Navbar(Contact.get_class_navcomponent())
        context.update(kwargs)
        result = super().get_context_data(**context)
        result['fields_form'] = FieldSelectForm(
            self.request.user, initial={'fields': self.fields})
        result['display'] = self.cl.params.get('display', 'mg')  # TODO
        result['filter'] = self.filter_str
        result['filter_html'] = self.filter_html
        result['reset_filter_link'] = self.cl.get_query_string({}, 'q')
        return result

    def name_with_relative_link(self, contact):
        """Render the contact name as a link, with birthday/busy icons.

        Relies on annotations added in get_root_queryset(): 'birthday' is an
        ISO date string when relevant, 'busy' is a flag bitfield.
        """
        current_cg = self.contactgroup
        flags = ''
        birthday = getattr(contact, 'birthday', None)
        if birthday is not None:
            birthday = date(*[int(c) for c in birthday.split('-')])
            if current_cg is not None and current_cg.date:
                # Dated group (event): show the icon if the anniversary
                # falls within a short-enough event.
                event_length = current_cg.end_date - current_cg.date
                bseml = Config.get_birthday_show_event_max_length()
                if event_length < timedelta(days=bseml):  # interval means +1
                    # Next aniversary after event start date:
                    anniversary = date(
                        current_cg.date.year,
                        birthday.month,
                        birthday.day)
                    if anniversary < current_cg.date:
                        try:
                            anniversary = date(
                                anniversary.year + 1,
                                anniversary.month,
                                anniversary.day)
                        except ValueError:  # Febuary 29th
                            # Fall back to Feb 28th on non-leap years.
                            anniversary = date(
                                anniversary.year + 1,
                                anniversary.month,
                                anniversary.day - 1)
                    age = anniversary.year - birthday.year
                    # Translators: This is the next birthday strftime(3)
                    # format, detailled, but without the year
                    stranniv = anniversary.strftime(_('%A %B %e'))
                    hint = _('{age} years on {date}').format(
                        date=stranniv,
                        age=age)
                    flags += (' <span class=iconbirthday title="{}"></span>'
                              .format(html.escape(hint)))
            else:
                # No event context: the annotation marks today's birthdays.
                age = date.today().year - birthday.year
                hint = _('{age} years today').format(age=age)
                flags += ' <span class=iconbirthday title="{}"></span>'.format(
                    html.escape(hint))
        busy = getattr(contact, 'busy', None)
        if busy is not None and busy & perms.MEMBER:
            hint = _('That contact is busy. Click here for details.')
            if current_cg:
                excluded_gid = current_cg.id
            else:
                excluded_gid = GROUP_EVERYBODY
            flags += ' <span class=iconbusy title="{}" ' \
                     'data-contactid="{}" data-groupid={}>' \
                     '</span>'.format(
                         html.escape(hint),
                         contact.id,
                         excluded_gid)
        return html.format_html(
            mark_safe('<a href="{id}/"><b>{name}</a></b> {flags}'),
            id=contact.id,
            name=html.escape(contact.name),
            flags=mark_safe(flags),
        )
    name_with_relative_link.short_description = ugettext_lazy('Name')
    name_with_relative_link.admin_order_field = 'name'

    def agenda(self, contact):
        # Column renderer: translate the 'busy' flag annotation into text.
        busy = getattr(contact, 'busy')
        if busy & perms.MEMBER:
            return _('Busy')
        elif busy & perms.INVITED:
            return _('Invited')
        elif busy == 0:
            return _('Available')
        else:
            return 'Error {}'.format(busy)
    agenda.short_description = ugettext_lazy('Agenda')
    agenda.admin_order_field = 'busy'

    def action_bcc(self, request, queryset):
        """Admin action: build a mailto: link BCC-ing the selected contacts.

        Contacts without an email address are reported in a warning message.
        """
        emails = []
        noemails = []
        for contact in queryset:
            # only the first email of each contact
            c_emails = contact.get_fieldvalues_by_type('EMAIL')
            if c_emails:
                emails.append(c_emails[0])
            else:
                noemails.append(contact.name)
        if emails:
            messages.add_message(
                request, messages.SUCCESS,
                mark_safe('<a href="{}">{}</a>'.format(
                    'mailto:?bcc=' + ', '.join(emails),
                    _('List generated. Click here.'))))
        if noemails:
            messages.add_message(
                request, messages.WARNING,
                _('The following people do not have an email address: {}')
                .format(', '.join(noemails)))
        return None
    action_bcc.short_description = ugettext_lazy(
        "Send email locally (thunderbird or similar)")

    def action_vcard_export(self, request, queryset):
        # Admin action: concatenate the vCards of the selected contacts.
        result = ''
        for contact in queryset:
            result += contact.vcard()
        return HttpResponse(result, content_type='text/x-vcard')
    action_vcard_export.short_description = ugettext_lazy(
        "Vcard format export")

    def add_to_group(self, request, queryset):
        # Admin action: hand the selected ids over to the add-to-group view.
        ids = request.POST.getlist('_selected_action')
        return HttpResponseRedirect(
            '/contacts/add_to_group?ids=' + ','.join(ids))
    add_to_group.short_description = ugettext_lazy("Add to another group")
class ContactListView(NgwUserAcl, BaseContactListView):
    '''
    Contact list restricted to the contacts the requesting user may see.
    '''

    def get_root_queryset(self):
        # Start from the unrestricted queryset, then join the visibility
        # view so only contacts visible to the current user remain.
        queryset = super().get_root_queryset()
        queryset.qry_from.append(
            'JOIN v_c_can_see_c ON contact.id=v_c_can_see_c.contact_id_2')
        queryset.filter('v_c_can_see_c.contact_id_1 = {}'.format(
            self.request.user.id))
        return queryset
#######################################################################
#
# Add to another group
#
#######################################################################
class GroupAddManyForm(forms.Form):
    """Form to add a selection of contacts to another group.

    Built dynamically per user: the target-group choices and the visible
    contacts both depend on the requesting user's permissions.
    """
    # Comma-separated contact ids, carried across the confirmation page.
    ids = forms.CharField(widget=forms.widgets.HiddenInput)

    def __init__(self, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.user = user
        # Target groups the user may change members of, split into
        # permanent groups (no date) and events (dated), newest first.
        self.fields['group'] = forms.ChoiceField(
            label=_('Target group'),
            choices=[
                ('', _('Choose a group')),
                (_('Permanent groups'), [
                    (group.id, group.name)
                    for group in ContactGroup
                    .objects
                    .filter(date__isnull=1)
                    .with_user_perms(user.id, perms.CHANGE_MEMBERS)
                    .order_by('name')]),
                (_('Events'), [
                    (group.id, str(group))
                    for group in ContactGroup
                    .objects
                    .filter(date__isnull=0)
                    .filter(perso_unavail=False)
                    .with_user_perms(user.id, perms.CHANGE_MEMBERS)
                    .order_by('-date', 'name')]),
            ],
        )
        self.fields['flags'] = FlagsField(label=ugettext_lazy('Membership'))
        contact_ids = kwargs['initial']['ids'].split(',')
        contacts = Contact.objects.filter(pk__in=contact_ids)
        # Restrict the selectable contacts to those visible to the user
        # (raw join against the v_c_can_see_c permission view).
        contacts = contacts.extra(
            tables=('v_c_can_see_c',),
            where=(
                'v_c_can_see_c.contact_id_1={}'.format(self.user.id),
                'v_c_can_see_c.contact_id_2=contact.id'))
        self.fields['contacts'] = forms.MultipleChoiceField(
            label=_('Contacts'),
            choices=[(contact.id, contact.name) for contact in contacts],
            initial=contact_ids,
            widget=forms.widgets.CheckboxSelectMultiple(
                attrs={'class': 'contactchoices'}))

    def clean(self):
        """Cross-field validation of the target group and membership flags."""
        data = super().clean()
        if 'group' in data:
            flags = data['flags']
            if (flags & ~perms.ADMIN_ALL):
                # Plain membership flags: the target must not be virtual.
                group = get_object_or_404(
                    ContactGroup, pk=self.cleaned_data['group'])
                if group.virtual:
                    self.add_error('group', _(
                        'This is a virtual group. It cannot have members.'))
            if (flags & perms.ADMIN_ALL
                    and not perms.c_operatorof_cg(self.user.id,
                                                  self.cleaned_data['group'])):
                # Admin flags require operator rights on the target group.
                self.add_error('group', _(
                    'You need to be operator of the target group to add this'
                    ' kind of membership.'))
        if data['flags'] == 0:
            self.add_error('flags', _('You must select at least one mode'))
        return data

    def add_them(self, request):
        """Apply the selected membership flags to the selected contacts."""
        group_id = self.cleaned_data['group']
        target_group = get_object_or_404(ContactGroup, pk=group_id)
        contact_ids = self.cleaned_data['contacts']
        contacts = Contact.objects.filter(pk__in=contact_ids)
        # Check selected contacts are visible
        contacts = contacts.extra(
            tables=('v_c_can_see_c',),
            where=(
                'v_c_can_see_c.contact_id_1={}'.format(self.user.id),
                'v_c_can_see_c.contact_id_2=contact.id'))
        # Build the '+flag' mode string understood by set_member_n from
        # the integer flag value.
        modes = ''
        intvalue = self.cleaned_data['flags']
        for flag, anint in perms.FLAGTOINT.items():
            if anint & intvalue:
                modes += '+' + flag
        target_group.set_member_n(request, contacts, modes)
class GroupAddManyView(NgwUserAcl, FormView):
    '''View that adds several contacts to another group.'''
    form_class = GroupAddManyForm
    template_name = 'group_add_contacts_to.html'

    def _querydict(self):
        # POST while submitting the form, GET on first display.
        if self.request.method == 'POST':
            return self.request.POST
        return self.request.GET

    def get_initial(self):
        return {'ids': self._querydict()['ids']}

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs

    def get_context_data(self, **kwargs):
        ids = [int(pk) for pk in self._querydict()['ids'].split(',')]
        context = {
            'title': _('Add {} contact(s) to a group').format(len(ids)),
            'nav': Navbar(Contact.get_class_navcomponent()),
            'json_ids': mark_safe(json.dumps(ids)),
        }
        context['nav'].add_component(('add_to_group', _('add contacts to')))
        context.update(kwargs)
        return super().get_context_data(**context)

    def form_valid(self, form):
        form.add_them(self.request)
        self.gid = form.cleaned_data['group']
        self.success_form = form  # Used by get_success_url
        return super().form_valid(form)

    def form_invalid(self, form):
        return super().form_invalid(form)

    def get_success_url(self):
        # self.gid was stored by form_valid()
        target_group = get_object_or_404(ContactGroup, pk=self.gid)
        return target_group.get_absolute_url() + 'members/'
class ContactCheckAvailableView(NgwUserAcl, View):
def post(self, request, *args, **kwargs):
if self.request.method == 'POST':
| |
<gh_stars>0
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
####
# File: txtAdventure_procedural_journey.py
# Project: random_journey
#-----
# Created Date: Saturday 18.07.2020, 12:27
# Author: Apop85
#-----
# Last Modified: Monday 20.07.2020, 21:17
#-----
# Copyright (c) 2020 Apop85
# This software is published under the MIT license.
# Check http://www.opensource.org/licenses/MIT for further information
#-----
# Description: A small text adventure which will create a procedural path to travel
####
import os
import msvcrt
import re
from random import randint as rng
from random import seed
def print_menu(title, options):
    """Draw a full-screen bordered menu and wait for a single keypress.

    title: list of strings (ASCII-art lines) centred at the top.
    options: dict mapping an int key to [english_label, german_label];
        key 0 (exit/back) is rendered last, visually separated.
    Returns (pressed_key, validated) as produced by check_choice().
    NOTE: uses msvcrt.getch(), so this is Windows-only, and reads the
    module-level global `lang` to pick the label language.
    """
    filler="█"
    filler2="▒"
    # Running count of printed lines; starts at the number of fixed
    # border/separator rows printed outside the loops below.
    print_height=9
    terminal_width=(os.get_terminal_size())[0]
    terminal_height=(os.get_terminal_size())[1]
    print(filler*terminal_width, end="")
    print(filler+filler2*(terminal_width-2)+filler, end="")
    for line in title:
        print(filler+filler2+line.center(terminal_width-4)+filler2+filler, end="")
        print_height+=1
    print(filler+filler2*(terminal_width-2)+filler, end="")
    print(filler*terminal_width, end="")
    print(filler+filler2*(terminal_width-2)+filler, end="")
    # Menu body: keys in the left third, labels in the rest; absorb any
    # rounding remainder from the integer division into the right side.
    right_side_width=int(2*terminal_width/3)
    left_side_width=int(terminal_width/3)
    while right_side_width+left_side_width < terminal_width:
        right_side_width+=1
    for key in options.keys():
        if key != 0:
            print_height+=1
            print(filler+filler2+str(key).center(left_side_width-2) + options[key][lang].ljust(right_side_width-2)+filler2+filler, end="")
    print(filler+filler2*(terminal_width-2)+filler, end="")
    # Option 0 (exit/back) is always printed last.
    print(filler+filler2+str(0).center(left_side_width-2) + options[0][lang].ljust(right_side_width-2)+filler2+filler, end="")
    print(filler+filler2*(terminal_width-2)+filler, end="")
    print(filler*terminal_width)
    # Pad with blank lines so the menu fills the whole terminal height.
    height_delta=terminal_height-print_height
    if height_delta > 1:
        for i in range(0,height_delta-2):
            print()
    pressed_key=(msvcrt.getch()).decode("ascii")
    validated, pressed_key=check_choice(options, pressed_key)
    return pressed_key, validated
def make_map(binary, width, height):
    """Render the travelled path (door choices) as box-drawing art.

    binary : str
        Sequence of '1' (left door, path turns upward) and '2' (right
        door, path turns downward) choices; only the last `width`
        characters are drawn.
    width : int
        Maximum number of steps (columns) to render.
    height : int
        Unused here; callers crop the returned rows to screen height.

    Returns a list of equal-length strings, one per map row; row 0 is
    the bottom of the drawing (callers print the list in reverse).
    """
    map_data = binary[width * -1:]
    # For each choice: [corner drawn on the current row,
    #                   corner drawn on the row the path continues on].
    direction = {1: ["╝", "╔"], 2: ["╗", "╚"]}
    max_length = 0
    # `rows` instead of the original `map`, which shadowed the builtin.
    rows = []
    for i in range(0, len(map_data)):
        if i == 0:
            rows += [""]
            last = 0
        if map_data[i] == "2":
            # Step "down": close the current row, continue one row lower
            # (inserting a fresh bottom row when already at the bottom).
            rows[last] += direction[2][0]
            last_len = len(rows[last])
            if last == 0:
                rows.insert(0, "")
            else:
                last -= 1
            if last_len - 1 > len(rows[last]):
                rows[last] += " " * (last_len - (len(rows[last]) + 1))
            rows[last] += direction[2][1]
        else:
            # Step "up": close the current row, continue one row higher
            # (inserting a fresh top row when already at the top).
            rows[last] += direction[1][0]
            last_len = len(rows[last])
            last += 1
            if last > len(rows) - 1:
                rows.insert(last, "")
            if last_len - 1 > len(rows[last]):
                rows[last] += " " * (last_len - (len(rows[last]) + 1))
            rows[last] += direction[1][1]
        if len(rows[last]) > max_length:
            max_length = len(rows[last])
    # Pad every row to the same length so the map is rectangular.
    return [row + " " * (max_length - len(row)) for row in rows]
def display_game(map,screen,inventory,options,game_turn):
    """Draw the whole game screen (map, room text, options) and read a key.

    map: the path string of past door choices ('1'/'2'), passed to
        make_map(). (Parameter name shadows the builtin `map`.)
    screen: room description text, re-wrapped to the content column.
    inventory: currently unused in the layout (reserved right column).
    options: dict of int key -> [english_label, german_label].
    game_turn: current turn number (not rendered yet).
    Returns (pressed_key, validated) from check_choice().
    Windows-only (msvcrt.getch()); reads the module global `lang`.
    """
    filler="█"
    filler2="▒"
    # Line counter
    counter=10
    # Get Terminal dimensions and calculate screen part sizes
    terminal_width=(os.get_terminal_size())[0]
    terminal_height=(os.get_terminal_size())[1]
    map_width=terminal_width-4
    map_height=int(terminal_height/3)-4
    map_visual=make_map(map, map_width, map_height)
    ls_width = int(terminal_width/3*2)
    rs_width = terminal_width-ls_width
    opt_height = terminal_height-(map_height*2)
    # Split content to lines with matching lengths
    # min_line_length=ls_width-int(ls_width*0.2)
    min_line_length=5
    max_line_length=ls_width-4
    pattern=r'.{'+str(min_line_length)+r','+str(max_line_length)+r'}[ \.]'
    line_pattern=re.compile(pattern, re.DOTALL)
    content_lines=line_pattern.findall(screen)
    # cut map to current height
    if len(map_visual) != map_height and len(map_visual) > 0:
        # Find the row holding the path's current end marker.
        # NOTE(review): if no row ends with "╔"/"╚", last_index and
        # last_line stay unbound and the code below raises — confirm
        # make_map() always produces such a row for non-empty paths.
        for line in map_visual:
            if line[-1] == "╔" or line[-1] == "╚":
                last_index=map_visual.index(line)
                last_line=line
                break
        last_index_line=int(map_height/2)
        # If the last index is 0 and the map is not yet tall enough,
        # grow it symmetrically around the current row.
        empty_space=" "*len(map_visual[0])
        if last_index == 0 and len(map_visual) < map_height:
            while len(map_visual) < map_height:
                map_visual.insert(0,empty_space)
                map_visual+=[empty_space]
                if len(map_visual) > map_height:
                    del map_visual[-1]
        # If there are too many map rows for the screen
        elif len(map_visual) > map_height:
            # If the target index is smaller than the current index,
            # drop rows from the bottom (index 0).
            while last_index_line < map_visual.index(last_line):
                del map_visual[0]
            # If the target index is larger than the current index,
            # insert empty rows at the bottom.
            while last_index_line > map_visual.index(last_line):
                map_visual.insert(0,empty_space)
            while len(map_visual) > map_height:
                del map_visual[-1]
            while len(map_visual) < map_height:
                map_visual+=[empty_space]
        elif len(map_visual) < map_height:
            # As long the new line is not in place add lines
            while last_index_line < map_visual.index(last_line):
                del map_visual[0]
            while last_index_line > map_visual.index(last_line):
                map_visual.insert(0,empty_space)
            while len(map_visual) > map_height:
                del map_visual[-1]
            while len(map_visual) < map_height:
                map_visual+=[empty_space]
    elif len(map_visual) == 0:
        # Empty path: show a blank map area of the right size.
        map_visual=[" "*map_width]*map_height
    print(filler*terminal_width, end="")
    print(filler+filler2*(terminal_width-2)+filler, end="")
    # Print map region (top row of the drawing first).
    for i in range(len(map_visual)-1,-1,-1):
        print(filler+filler2+map_visual[i].center(terminal_width-4)+filler2+filler, end="")
        counter+=1
    print(filler+filler2*(terminal_width-2)+filler, end="")
    print(filler*terminal_width, end="")
    print(filler+filler2*(terminal_width-2)+filler, end="")
    # Print content region
    for line in content_lines:
        print(filler+filler2+line.center(ls_width-3)+filler2+filler+filler2+" ".center(rs_width-4)+filler2+filler, end="")
        counter+=1
    # Pad the content column to the same height as the map region.
    if len(content_lines) < map_height:
        target_lines = map_height-len(content_lines)
        for i in range(0,target_lines):
            print(filler+filler2+" ".center(ls_width-3)+filler2+filler+filler2+" ".center(rs_width-4)+filler2+filler, end="")
            counter+=1
    print(filler+filler2*(terminal_width-2)+filler, end="")
    print(filler*terminal_width, end="")
    print(filler+filler2*(terminal_width-2)+filler, end="")
    # Print options
    ls_part=int(ls_width/3)
    ls_part2=ls_width-ls_part
    for key in options.keys():
        print(filler+filler2+str(key).rjust(ls_part-3)+options[key][lang].center(ls_part2)+filler2+filler+filler2+" ".center(rs_width-4)+filler2+filler, end="")
        counter+=1
    # Fill the rest of the terminal so the frame reaches the bottom.
    while counter+2 < terminal_height:
        print(filler+filler2+" ".center(ls_width-3)+filler2+filler+filler2+" ".center(rs_width-4)+filler2+filler, end="")
        counter+=1
    print(filler+filler2*(terminal_width-2)+filler, end="")
    print(filler*terminal_width)
    # Read and validate pressed key
    pressed_key=(msvcrt.getch()).decode("ascii")
    validated, pressed_key=check_choice(options, pressed_key)
    return pressed_key, validated
def check_choice(options, pressed_key):
    """Validate a raw keypress against the available menu options.

    options: dict whose keys are the valid (int) choices.
    pressed_key: the decoded character read from the keyboard.
    Returns (True, key_as_int) when the key is a digit naming a valid
    option, (False, pressed_key) when it is not, and (False, "X") when
    the input is not even a string (e.g. decoding failed upstream).
    """
    possible_choices = options.keys()
    try:
        if pressed_key.isdecimal() and int(pressed_key) in possible_choices:
            return True, int(pressed_key)
        return False, pressed_key
    except AttributeError:
        # pressed_key was not a string. Keep the original "X" sentinel,
        # but narrow the bare except so real bugs are no longer hidden.
        return False, "X"
def main_menu():
    """Top-level menu loop: new game / load / highscore / language.

    Loops until the player chooses "Exit" (0). Reads and writes the
    module-level globals `game_seed` (current world seed) and `lang`
    (0 = English, 1 = German).
    """
    while True:
        check=False
        # Define menu options in english and german
        title=[" _____ _ ", "| __ |___ ___ _| |___ _____ ",
               "| -| .'| | . | . | | ", "|__|__|__,|_|_|___|___|_|_|_| ",
               " __ ", " __| |___ _ _ ___ ___ ___ _ _ ",
               "| | | . | | | _| | -_| | |", "|_____|___|___|_| |_|_|___|_ |",
               " |___|"]
        options={0:["Exit", "Beenden"],
                 1:["New Game", "Neues Spiel"],
                 2:["Load Game", "Spiel Laden"],
                 3:["Highscore", "Highscore"],
                 4:["Language", "Sprache"]}
        # Redisplay the menu until a valid key is pressed.
        while not check:
            choice, check=print_menu(title, options)
        if choice == 0:
            return
        elif choice == 1:
            global game_seed
            game_seed=new_game()
            # BUGFIX: was `!= None or != ""`, which is always true, so a
            # game was started even when the player backed out of the
            # seed menu (seed None). Also dropped the unused `health`
            # local (start_game already defaults health to 100).
            if game_seed:
                current_seed=init_game(game_seed, 0)
                start_game(current_seed)
        elif choice == 2:
            pass  # "Load Game" is not implemented yet
        else:
            # Choices 3 and 4 both fall through to the language toggle;
            # only the toggle is implemented (Highscore is a stub).
            global lang
            if lang == 0:
                lang=1
            else:
                lang=0
def random_seed():
game_seed=""
usable_characters="abcdefghijklmnopqrstuvwxyz <>-_/*+!?)(&%ç@§°öäüÖÄÜéàèÉÀÈABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
for i in range(0,16):
game_seed+=usable_characters[rng(0,len(usable_characters)-1)]
return game_seed
def new_game():
    """Interactive "New Game" seed menu.

    Shows the current seed and lets the player keep it, type their own,
    or roll a new random one. Returns the chosen seed string, or None
    when the player backs out. Reads the module global `lang`.
    """
    game_seed=random_seed()
    while True:
        check=False
        content = {0:["Back", "Zurück"],
                   1:["Keep seed", "Seed beibehalten"],
                   2:["Own seed", "Eigener Seed"],
                   3:["New random seed", "Neuer Zufallsseed"]}
        # Rebuild the title on every pass so the displayed seed is current.
        while not check:
            title=[ " _____ _____ ", "| | |___ _ _ _ | __|___ _____ ___ ",
                    "| | | | -_| | | | | | | .'| | -_|", "|_|___|___|_____| |_____|__,|_|_|_|___|",
                    "", "SEED: {}".format(game_seed), ""]
            choice, check=print_menu(title, content)
        if choice == 0:
            return None
        elif choice == 1:
            return game_seed
        elif choice == 2:
            # Prompt for a custom seed in the current language.
            message=["Enter your Seed: ", "Gewünschten Seed eingeben: "]
            game_seed=input(message[lang])
        elif choice == 3:
            game_seed=random_seed()
            continue
def game_turn_seed(game_seed, game_turn, min=0, max=100):
    """Infinite generator of per-turn random numbers for a world seed.

    Re-seeds the RNG with `game_seed`, discards `game_turn` draws to
    fast-forward to the current turn, then yields one draw in
    [min, max] per iteration, forever. (`min`/`max` keep their original
    names for caller compatibility although they shadow the builtins.)
    """
    seed(game_seed)
    for _ in range(game_turn):
        rng(min, max)
    while True:
        yield rng(min, max)
def init_game(game_seed, game_turn):
    """Return the seed for the current turn.

    Equivalent to taking the first value yielded by game_turn_seed()
    after fast-forwarding `game_turn` draws.
    """
    return next(game_turn_seed(game_seed, game_turn))
def get_perks(content_list):
    """Extract display text and stat modifiers from a content entry.

    content_list: [english_text, german_text, perks_dict] where
        perks_dict may define "luck", "gear_chance" and "danger".
    Returns (text_in_current_language, luck, gear_chance, danger);
    missing perks default to 0. Reads the module-level `lang` global.
    """
    text = content_list[lang]
    perks = content_list[2]
    # dict.get() replaces the repeated `key in d.keys()` checks.
    return (text,
            perks.get("luck", 0),
            perks.get("gear_chance", 0),
            perks.get("danger", 0))
def start_game(current_seed, health=100, game_turn=0, path="", armor=0, luck=0, gear_chance=0, danger=0, inventory=None):
    """Main game loop: present rooms and doors until health runs out.

    current_seed: per-turn RNG seed produced by init_game().
    path: string of past door choices ('1'/'2'), used to draw the map.
    The remaining parameters carry player state between turns.
    NOTE: relies on the module-level `game_seed` global (set in
    main_menu) to derive the next turn's seed.
    """
    # BUGFIX: a mutable default argument ({}) would be shared across
    # calls; create a fresh inventory per game instead.
    if inventory is None:
        inventory = {}
    while health > 0:
        checked=False
        screen_content=[]
        intro, items, left_door, right_door=get_content(current_seed)
        # Fold the perks of the room intro and both doors into the
        # running modifiers and collect the display text.
        for item in [intro, left_door, right_door]:
            new_perks = get_perks(item)
            screen_content+=[new_perks[0]]
            luck+=new_perks[1]
            gear_chance+=new_perks[2]
            danger+=new_perks[3]
        for item in items:
            new_perks = get_perks(item)
            # Insert item text before the two door descriptions.
            screen_content.insert(-2,new_perks[0])
            luck+=new_perks[1]
            gear_chance+=new_perks[2]
            danger+=new_perks[3]
        screen_content=" ".join(screen_content)
        options = {0:["Exit", "Beenden"],
                   1:["Left door", "Linke Türe"],
                   2:["Right door", "Rechte Türe"]}
        while not checked:
            choice, checked = display_game(path,screen_content,inventory,options,game_turn)
        if choice in [1,2]:
            path+=str(choice)
            game_turn+=1
        elif choice == 0:
            exit()
        # Advance the deterministic RNG stream to this turn's seed
        # (next() replaces the original for/break over the generator).
        current_seed = next(game_turn_seed(game_seed, game_turn))
def get_content(current_seed):
seed(current_seed)
intros=[
["You enter a dark cavern. ","Du betritts eine dunkle Kammer. ",{"luck":0.2,"gear_chance":-0.3, "danger":0.3}],
["There's a big mess here. Dirt and stones everywhere. ","Es herrscht ein totales Chaos hier. Dreck und Steine überall. ", {"luck":-0.5,"gear_chance":0.5, "danger":-0.2}],
["I see some light over there. I might check this!","Da drüben ist Licht! Ich sollte das untersuchen!",{"luck":0.5, "gear_chance":0.6}],
["It's an empty room. It doesn't look like there is something interesting here. ","Ein leerer Raum. Es sieht nicht so aus als wäre hier etwas. ",{"gear_chance":0.5}],
["There is a strange smell in this Room. Where does it come from? ","In diesem Raum riecht es sehr komisch. Woher könnte das kommen? ",{"luck":0.2,"gear_chance":-0.3, "danger":0.3}]
# ["","",{}]
]
random=rng(0,len(intros)-1)
intro=intros[random]
# items=[]
room_items=[
["There is a wooden chair with restraints in the middle of the room. ","Da ist ein hölzerner Stuhl mit angebrachten Fesseln in der mitte des Raums. ",{"danger":0.2,"gear_chance":0.4}],
["There is no furniture in this room. ","Es gibt keine Möbel in diesem Raum. ",{"luck":-0.4, "gear_chance":-1}],
["A dark wooden table is pushed away from the wall. Seems like someone was searching something behind. ","Ein dunkler hölzerner Tisch wurde von der Wand weggestossen. Sieht so aus als ob da jemand etwas gesucht hat. ",{"luck":-0.1, "danger":0.2, "gear_chance":0.5}],
["There is a weird looking book on the floor. ","Es liegt ein komisch aussehendes Buch auf dem Boden. ",{"luck":0.3,"gear_chance":-0.2}],
["A rusty old shelve is attached to the left wall of the room. ","Eine rostige Ablage ist an der linken Wand im Raum befestigt. ",{"luck":0.6,"gear_chance":0.7}]
# ["","",{}],
# ["","",{}]
]
random=rng(0,len(room_items)-1)
# items+=[room_items[random]]
items=[room_items[random]]
directions=[["left", "linken"], ["right", "rechten"]]
choosable_directions=[directions[0][lang], directions[1][lang]]
cnt=0
for direction in choosable_directions:
doors=[
["On the {} side is a door made of metal. ".format(direction),"Auf der {} Seite ist eine Tür aus metall. ".format(direction),{}],
["There is a wooden door on the {} side. ".format(direction),"{} befindet sich ein hölzernes Tor. ".format((direction.capitalize()).rstrip("en")+"s"),{}],
["A loud scratching noise comes from the {} door. Something wants to get out. ".format(direction),"Ein lautes Kratzen ist hinter der {} Tür zu hören. Etwas möchte da raus. ".format(direction),{}],
["On the {} | |
the call.
call.return_value = dlp.DlpJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_dlp_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_dlp_job_flattened_error():
    """get_dlp_job must reject a request object combined with flattened args."""
    client = DlpServiceClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_dlp_job(
            dlp.GetDlpJobRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_dlp_job_flattened_async():
    """Async get_dlp_job should pass flattened kwargs into the request."""
    client = DlpServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.get_dlp_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (A redundant plain `DlpJob()` assignment was removed: it was
        # immediately overwritten by the awaitable fake below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dlp.DlpJob())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_dlp_job(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_dlp_job_flattened_error_async():
    """Async get_dlp_job must reject a request object combined with flattened args."""
    client = DlpServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_dlp_job(
            dlp.GetDlpJobRequest(), name="name_value",
        )
def test_delete_dlp_job(transport: str = "grpc", request_type=dlp.DeleteDlpJobRequest):
    """delete_dlp_job should send the request type and return None."""
    client = DlpServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.delete_dlp_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_dlp_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == dlp.DeleteDlpJobRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_dlp_job_from_dict():
    """delete_dlp_job should also accept a plain dict as the request."""
    test_delete_dlp_job(request_type=dict)
@pytest.mark.asyncio
async def test_delete_dlp_job_async(transport: str = "grpc_asyncio"):
    """Async delete_dlp_job should send the request and return None."""
    client = DlpServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = dlp.DeleteDlpJobRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.delete_dlp_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_dlp_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_dlp_job_field_headers():
    """delete_dlp_job should propagate the resource name as a routing header."""
    client = DlpServiceClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dlp.DeleteDlpJobRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.delete_dlp_job), "__call__") as call:
        call.return_value = None
        client.delete_dlp_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_dlp_job_field_headers_async():
    """Async delete_dlp_job should propagate the resource name as a routing header."""
    client = DlpServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dlp.DeleteDlpJobRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.delete_dlp_job), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_dlp_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_dlp_job_flattened():
    """delete_dlp_job should pass flattened kwargs into the request."""
    client = DlpServiceClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.delete_dlp_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_dlp_job(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_delete_dlp_job_flattened_error():
    """delete_dlp_job must reject a request object combined with flattened args."""
    client = DlpServiceClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_dlp_job(
            dlp.DeleteDlpJobRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_dlp_job_flattened_async():
    """Async delete_dlp_job should pass flattened kwargs into the request."""
    client = DlpServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.delete_dlp_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (A redundant `call.return_value = None` dead store was
        # removed: it was immediately overwritten by the fake below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_dlp_job(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_dlp_job_flattened_error_async():
    """Async delete_dlp_job must reject a request object combined with flattened args."""
    client = DlpServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_dlp_job(
            dlp.DeleteDlpJobRequest(), name="name_value",
        )
def test_cancel_dlp_job(transport: str = "grpc", request_type=dlp.CancelDlpJobRequest):
    """cancel_dlp_job should send the request type and return None."""
    client = DlpServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.cancel_dlp_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.cancel_dlp_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == dlp.CancelDlpJobRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_cancel_dlp_job_from_dict():
    """cancel_dlp_job should also accept a plain dict as the request."""
    test_cancel_dlp_job(request_type=dict)
@pytest.mark.asyncio
async def test_cancel_dlp_job_async(transport: str = "grpc_asyncio"):
    """Async cancel_dlp_job should send the request and return None."""
    client = DlpServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = dlp.CancelDlpJobRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.cancel_dlp_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.cancel_dlp_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert response is None
def test_cancel_dlp_job_field_headers():
    """cancel_dlp_job should propagate the resource name as a routing header."""
    client = DlpServiceClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dlp.CancelDlpJobRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.cancel_dlp_job), "__call__") as call:
        call.return_value = None
        client.cancel_dlp_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_dlp_job_field_headers_async():
client = DlpServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dlp.CancelDlpJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client._client._transport.cancel_dlp_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.cancel_dlp_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", | |
"""
Module for customising opensim segmented muscle points
"""
import os
import numpy as np
import copy
from gias2.fieldwork.field import geometric_field
from gias2.fieldwork.field.tools import fitting_tools
from gias2.common import transform3D
from gias2.registration import alignment_fitting as af
from gias2.musculoskeletal.bonemodels import bonemodels
from gias2.musculoskeletal import osim
import muscleVolumeCalculator
import re
import math
import json
from numpy import pi
from scipy.interpolate import interp1d
import pdb
# Directory containing this module; data files are bundled next to it.
SELF_DIR = os.path.split(__file__)[0]
DATA_DIR = os.path.join(SELF_DIR, 'data/node_numbers/')
TEMPLATE_OSIM_PATH = os.path.join(SELF_DIR, 'data', 'gait2392_simbody_wrap.osim')
# Body segments this module knows how to customise.
VALID_SEGS = set(['pelvis',
                  'femur-l', 'femur-r',
                  'tibia-l', 'tibia-r',
                  ])
OSIM_FILENAME = 'gait2392_simbody.osim'
# Length units accepted by dim_unit_scaling().
VALID_UNITS = ('nm', 'um', 'mm', 'cm', 'm', 'km')
# The combined tibia-fibula mesh splits into these named submeshes,
# using the element index ranges below.
TIBFIB_SUBMESHES = ('tibia', 'fibula')
TIBFIB_SUBMESH_ELEMS = {'tibia': range(0, 46),
                        'fibula': range(46,88),
                        }
# Basis types used when rebuilding the split geometric fields.
TIBFIB_BASISTYPES = {'tri10':'simplex_L3_L3','quad44':'quad_L3_L3'}
def dim_unit_scaling(in_unit, out_unit):
    """
    Calculate the scaling factor to convert from the input unit (in_unit) to
    the output unit (out_unit). in_unit and out_unit must be a string and one
    of ['nm', 'um', 'mm', 'cm', 'm', 'km'].

    inputs
    ======
    in_unit : str
        Input unit
    out_unit : str
        Output unit

    returns
    =======
    scaling_factor : float

    raises
    ======
    ValueError
        If either unit is not one of the supported units.
    """
    unit_vals = {
        'nm': 1e-9,
        'um': 1e-6,
        'mm': 1e-3,
        'cm': 1e-2,
        'm': 1.0,
        'km': 1e3,
    }
    if in_unit not in unit_vals:
        raise ValueError(
            'Invalid input unit {}. Must be one of {}'.format(
                in_unit, list(unit_vals.keys())
            )
        )
    if out_unit not in unit_vals:
        # BUGFIX: this branch previously said "Invalid input unit" and
        # formatted in_unit; report the offending *output* unit instead.
        raise ValueError(
            'Invalid output unit {}. Must be one of {}'.format(
                out_unit, list(unit_vals.keys())
            )
        )
    return unit_vals[in_unit]/unit_vals[out_unit]
def update_femur_opensim_acs(femur_model):
    """Re-align the femur model's anatomic coordinate system to the
    OpenSim convention, from the femoral head centre (HC) and the
    medial/lateral epicondyle (MEC/LEC) landmarks.
    Mutates femur_model.acs in place.
    """
    femur_model.acs.update(
        *bonemodels.model_alignment.createFemurACSOpenSim(
            femur_model.landmarks['femur-HC'],
            femur_model.landmarks['femur-MEC'],
            femur_model.landmarks['femur-LEC'],
            side=femur_model.side
        )
    )
def update_tibiafibula_opensim_acs(tibiafibula_model):
    """Re-align the tibia-fibula model's anatomic coordinate system to
    the OpenSim convention, from the medial/lateral malleolus (MM/LM)
    and medial/lateral condyle (MC/LC) landmarks.
    Mutates tibiafibula_model.acs in place.
    """
    tibiafibula_model.acs.update(
        *bonemodels.model_alignment.createTibiaFibulaACSOpenSim(
            tibiafibula_model.landmarks['tibiafibula-MM'],
            tibiafibula_model.landmarks['tibiafibula-LM'],
            tibiafibula_model.landmarks['tibiafibula-MC'],
            tibiafibula_model.landmarks['tibiafibula-LC'],
            side=tibiafibula_model.side
        )
    )
def splitTibiaFibulaGFs(tibfibGField):
    """Split a combined tibia-fibula geometric field into separate
    tibia and fibula fields, using the module-level element ranges
    (TIBFIB_SUBMESH_ELEMS) and basis types (TIBFIB_BASISTYPES).
    Returns (tibia_gf, fibula_gf).
    """
    submeshes = [
        tibfibGField.makeGFFromElements(
            name,
            TIBFIB_SUBMESH_ELEMS[name],
            TIBFIB_BASISTYPES,
        )
        for name in TIBFIB_SUBMESHES
    ]
    return submeshes[0], submeshes[1]
def _knee_spline_translation(knee, knee_angle):
    """Evaluate a knee joint's SimmSpline translations at `knee_angle`.

    Returns a length-3 array [tx, ty, 0]: the x and y components come
    from cubic interpolation of the joint's 'translation1' and
    'translation2' spline parameters; z stays 0.
    """
    trans = np.zeros(3)
    for axis, param_name in enumerate(('translation1', 'translation2')):
        params = knee.getSimmSplineParams(param_name)
        f = interp1d(params[0], params[1], kind='cubic')
        trans[axis] = f(knee_angle)
    return trans


def localOsim2Global(body, model):
    """Return the translation from `body`'s local frame to the model's
    global (pelvis) frame, accumulated down the gait2392 joint chain.

    body : str
        One of 'pelvis', 'femur_l/r', 'tibia_l/r', 'talus_l/r',
        'calcn_l/r', 'toes_l/r'.
    model : osim model wrapper with a `joints` mapping.

    Raises ValueError for an unknown body name (the original fell
    through to an UnboundLocalError).
    """
    # Knee translations depend on the current flexion angle via the
    # joint's SimmSpline parameters (duplicated code now in the helper).
    knee_l = model.joints['knee_l']
    knee_lTrans = _knee_spline_translation(
        knee_l, knee_l.coordSets['knee_angle_l'].defaultValue)
    knee_r = model.joints['knee_r']
    knee_rTrans = _knee_spline_translation(
        knee_r, knee_r.coordSets['knee_angle_r'].defaultValue)
    joints = model.joints
    # Accumulate locationInParent offsets down the kinematic chain.
    if body == 'pelvis':
        trans = np.zeros(3)
    elif body == 'femur_l':
        trans = joints['hip_l'].locationInParent
    elif body == 'femur_r':
        trans = joints['hip_r'].locationInParent
    elif body == 'tibia_l':
        trans = (joints['hip_l'].locationInParent +
                 knee_lTrans)
    elif body == 'tibia_r':
        trans = (joints['hip_r'].locationInParent +
                 knee_rTrans)
    elif body == 'talus_l':
        trans = (joints['hip_l'].locationInParent +
                 knee_lTrans +
                 joints['ankle_l'].locationInParent)
    elif body == 'talus_r':
        trans = (joints['hip_r'].locationInParent +
                 knee_rTrans +
                 joints['ankle_r'].locationInParent)
    elif body == 'calcn_l':
        trans = (joints['hip_l'].locationInParent +
                 knee_lTrans +
                 joints['ankle_l'].locationInParent +
                 joints['subtalar_l'].locationInParent)
    elif body == 'calcn_r':
        trans = (joints['hip_r'].locationInParent +
                 knee_rTrans +
                 joints['ankle_r'].locationInParent +
                 joints['subtalar_r'].locationInParent)
    elif body == 'toes_l':
        trans = (joints['hip_l'].locationInParent +
                 knee_lTrans +
                 joints['ankle_l'].locationInParent +
                 joints['subtalar_l'].locationInParent +
                 joints['mtp_l'].locationInParent)
    elif body == 'toes_r':
        trans = (joints['hip_r'].locationInParent +
                 knee_rTrans +
                 joints['ankle_r'].locationInParent +
                 joints['subtalar_r'].locationInParent +
                 joints['mtp_r'].locationInParent)
    else:
        raise ValueError('Unknown body: {}'.format(body))
    return trans
class gait2392MuscleCustomiser(object):
def __init__(self, config, ll=None, osimmodel=None, landmarks=None):
    """
    Class for customising gait2392 muscle points using host-mesh fitting

    inputs
    ======
    config : dict
        Dictionary of options. (work in progress) Example:
        {
        'osim_output_dir': '/path/to/output/model.osim',
        'in_unit': 'mm',
        'out_unit': 'm',
        'write_osim_file': True,
        'update_knee_splines': False,
        'static_vas': False,
        }
    ll : LowerLimbAtlas instance
        Model of lower limb bone geometry and pose
    osimmodel : opensim.Model instance
        The opensim model instance to customise
    landmarks : dict, optional
        Landmark coordinates keyed by name (stored as self.trcdata;
        presumably read from a TRC motion-capture file — confirm at caller).
    """
    self.config = config
    self.ll = ll
    self.trcdata = landmarks
    self.gias_osimmodel = None
    if osimmodel is not None:
        self.set_osim_model(osimmodel)
    # Scale factor converting config['in_unit'] (e.g. mm) to
    # config['out_unit'] (e.g. m); raises KeyError if either key is absent.
    self._unit_scaling = dim_unit_scaling(
        self.config['in_unit'], self.config['out_unit']
    )
def set_osim_model(self, model):
    """Wrap the given OpenSim model in an osim.Model and store it for customisation."""
    self.gias_osimmodel = osim.Model(model=model)
def cust_pelvis(self):
    """
    Customise pelvis muscle attachment sites.

    Maps the pre-selected pelvis attachment nodes from the fitted
    pelvis mesh into the OpenSim pelvis body frame (mm -> m) and
    updates the corresponding path points on the model's muscles.
    """
    pelvis = self.ll.models['pelvis']

    # Load the pelvis muscle-attachment node numbers, keyed by muscle name.
    with open(DATA_DIR + 'pelvisNodeNumbers.txt') as infile:
        pelvisData = json.load(infile)
    # list() the dict views: under Python 3 they are not sequences and
    # cannot be used directly as a numpy fancy index.
    pelvisAttachmentNodeNums = list(pelvisData.values())
    pelvisMuscleNames = [str(name) for name in pelvisData.keys()]

    # The muscle attachments were selected on a 24x24 discretisation.
    pelvisPoints, lhF = pelvis.gf.triangulate([24, 24])
    # Map the discretised pelvis points into the OpenSim pelvis local
    # coordinate system; /1000 converts mm to m.
    localPelvisPoints = pelvis.acs.map_local(pelvisPoints) / 1000
    pelvisAttachments = localPelvisPoints[pelvisAttachmentNodeNums]

    for name, attachment in zip(pelvisMuscleNames, pelvisAttachments):
        muscle = self.gias_osimmodel.muscles[name]
        pathPoints = muscle.path_points
        s = sorted(pathPoints.keys())
        # The attachment is the origin (first path point) if that point
        # lies on the pelvis, otherwise the insertion (last path point).
        if pathPoints[s[0]].body.name == 'pelvis':
            aSite = 0
        elif pathPoints[s[-1]].body.name == 'pelvis':
            aSite = -1
        else:
            # Previously fell through with an undefined (or stale, from a
            # prior iteration) aSite; fail explicitly instead.
            raise ValueError('muscle %s has no pelvis attachment' % name)
        # Update the location of the path point.
        pathPoints[s[aSite]].location = attachment
def cust_femur_l(self):
    """
    Customise left femur muscle attachment sites.

    Maps the pre-selected attachment nodes from the fitted left femur
    mesh into the OpenSim femur_l body frame (mm -> m) and updates the
    corresponding muscle path points.
    """
    leftFemur = self.ll.models['femur-l']

    # Load the femur muscle-attachment node numbers, keyed by muscle name.
    with open(DATA_DIR + 'leftFemurNodeNumbers.txt') as infile:
        leftFemurData = json.load(infile)
    # list() the dict views: py3 views cannot be used as a numpy index.
    leftFemurAttachmentNodeNums = list(leftFemurData.values())
    leftFemurMuscleNames = [str(name) for name in leftFemurData.keys()]

    # Update the geometric field coordinate system to match OpenSim's.
    update_femur_opensim_acs(leftFemur)

    # The muscle attachments were selected on a 24x24 discretisation.
    leftFemurPoints, lhF = leftFemur.gf.triangulate([24, 24])
    # Map into the OpenSim femur local coordinate system; /1000 is mm -> m.
    localLeftFemurPoints = leftFemur.acs.map_local(leftFemurPoints) / 1000
    leftFemurAttachments = localLeftFemurPoints[leftFemurAttachmentNodeNums]

    for name, attachment in zip(leftFemurMuscleNames, leftFemurAttachments):
        muscle = self.gias_osimmodel.muscles[name]
        pathPoints = muscle.path_points
        s = sorted(pathPoints.keys())
        # Origin (0) if the first path point is on femur_l, insertion (-1)
        # if the last one is.
        if pathPoints[s[0]].body.name == 'femur_l':
            aSite = 0
        elif pathPoints[s[-1]].body.name == 'femur_l':
            aSite = -1
        else:
            # Previously fell through with an undefined/stale aSite.
            raise ValueError('muscle %s has no femur_l attachment' % name)
        # Update the location of the path point.
        pathPoints[s[aSite]].location = attachment
def cust_femur_r(self):
    """
    Customise right femur muscle attachment sites.

    Maps the pre-selected attachment nodes from the fitted right femur
    mesh into the OpenSim femur_r body frame (mm -> m) and updates the
    corresponding muscle path points.
    """
    rightFemur = self.ll.models['femur-r']
    rightFemur.side = 'right'

    with open(DATA_DIR + 'rightFemurNodeNumbers.txt') as infile:
        rightFemurData = json.load(infile)
    # list() the dict views: py3 views cannot be used as a numpy index.
    rightFemurAttachmentNodeNums = list(rightFemurData.values())
    rightFemurMuscleNames = [str(name) for name in rightFemurData.keys()]

    # Update the geometric field coordinate system to match OpenSim's.
    update_femur_opensim_acs(rightFemur)

    rightFemurPoints, rhF = rightFemur.gf.triangulate([24, 24])
    # Map into the OpenSim femur local coordinate system; /1000 is mm -> m.
    localRightFemurPoints = rightFemur.acs.map_local(rightFemurPoints) / 1000
    rightFemurAttachments = localRightFemurPoints[rightFemurAttachmentNodeNums]

    # Update attachments.
    for name, attachment in zip(rightFemurMuscleNames, rightFemurAttachments):
        muscle = self.gias_osimmodel.muscles[name]
        pathPoints = muscle.path_points
        s = sorted(pathPoints.keys())
        # Origin (0) if the first path point is on femur_r, insertion (-1)
        # if the last one is.
        if pathPoints[s[0]].body.name == 'femur_r':
            aSite = 0
        elif pathPoints[s[-1]].body.name == 'femur_r':
            aSite = -1
        else:
            # Previously fell through with an undefined/stale aSite.
            raise ValueError('muscle %s has no femur_r attachment' % name)
        pathPoints[s[aSite]].location = attachment
def cust_tibia_l(self):
    """
    Customise left tibia, fibula and patella muscle attachment sites.

    The tibia, patella and fibula all use the same fieldwork model to
    align with OpenSim. All attachment points are mapped into the
    tibia/fibula local frame (mm -> m) and written back to the muscle
    path points; the original code matches patella attachments against
    the tibia_l body as well.
    """
    def _load_attachments(filename):
        # Load a muscle-name -> node-number mapping from a JSON file.
        with open(DATA_DIR + filename) as infile:
            data = json.load(infile)
        # list() the dict views: py3 views cannot be used as a numpy index.
        return list(data.values()), [str(name) for name in data.keys()]

    def _update_attachments(names, attachments):
        # Move each muscle's tibia_l origin or insertion to its
        # customised attachment point.
        for name, attachment in zip(names, attachments):
            muscle = self.gias_osimmodel.muscles[name]
            pathPoints = muscle.path_points
            s = sorted(pathPoints.keys())
            # Origin (0) if the first path point is on tibia_l,
            # insertion (-1) if the last one is.
            if pathPoints[s[0]].body.name == 'tibia_l':
                aSite = 0
            elif pathPoints[s[-1]].body.name == 'tibia_l':
                aSite = -1
            else:
                # Previously fell through with an undefined/stale aSite.
                raise ValueError('muscle %s has no tibia_l attachment' % name)
            pathPoints[s[aSite]].location = attachment

    leftTibFib = self.ll.models['tibiafibula-l']
    leftPatella = self.ll.models['patella-l']
    update_tibiafibula_opensim_acs(leftTibFib)
    leftTib, leftFib = splitTibiaFibulaGFs(leftTibFib.gf)
    # NOTE(review): this object appears unused below; kept in case the
    # constructor has side effects — confirm and remove.
    leftTibia = bonemodels.TibiaFibulaModel('tibia', leftTibFib.gf)

    # Load the muscle attachment node numbers for each bone.
    tibiaNodes, tibiaNames = _load_attachments('leftTibiaNodeNumbers.txt')
    fibulaNodes, fibulaNames = _load_attachments('leftFibulaNodeNumbers.txt')
    patellaNodes, patellaNames = _load_attachments('leftPatellaNodeNumbers.txt')

    # The muscle attachments were selected on 24x24 discretisations.
    leftTibiaPoints, lhF = leftTib.triangulate([24, 24])
    leftFibulaPoints, lhF = leftFib.triangulate([24, 24])
    leftPatellaPoints, lhf = leftPatella.gf.triangulate([24, 24])

    # Map all point sets into the tibia/fibula local frame; /1000 is mm -> m.
    localLeftTibiaPoints = leftTibFib.acs.map_local(leftTibiaPoints) / 1000
    localLeftFibulaPoints = leftTibFib.acs.map_local(leftFibulaPoints) / 1000
    localLeftPatellaPoints = leftTibFib.acs.map_local(leftPatellaPoints) / 1000

    # Update the tibia, fibula and patella attachments.
    _update_attachments(tibiaNames, localLeftTibiaPoints[tibiaNodes])
    _update_attachments(fibulaNames, localLeftFibulaPoints[fibulaNodes])
    _update_attachments(patellaNames, localLeftPatellaPoints[patellaNodes])
def cust_tibia_r(self):
rightTibFib = self.ll.models['tibiafibula-r']
rightPatella = self.ll.models['patella-r']
update_tibiafibula_opensim_acs(rightTibFib)
rightTib, rightFib = splitTibiaFibulaGFs(rightTibFib.gf)
rightTibia = bonemodels.TibiaFibulaModel('tibia', rightTibFib.gf)
#load in the tibia attachment node numbers
with open(DATA_DIR + 'rightTibiaNodeNumbers.txt') as infile:
rightTibiaData = json.load(infile)
rightTibiaAttachmentNodeNums = rightTibiaData.values()
rightTibiaMuscleNames = rightTibiaData.keys()
rightTibiaMuscleNames = [str(item) for item in rightTibiaMuscleNames]
#load in the fibula attachment node numbers
with open(DATA_DIR + 'rightFibulaNodeNumbers.txt') as infile:
rightFibulaData = json.load(infile)
rightFibulaAttachmentNodeNums = rightFibulaData.values()
rightFibulaMuscleNames = rightFibulaData.keys()
rightFibulaMuscleNames = [str(item) | |
cleanup code here would be to make a
# change to Slice, so that a Slice either doesn't have a
# reference to a Sheet, or doesn't hold onto that reference.
#
# 2: Some cleanup code will always be required here in
# Simulation.__new__: As well as removing references to Sheets
# from Slices, it is also necessary to remove references to
# sheets from Simulation's lists of EPs - otherwise the sheets
# are not garbage collected and memory usage will go up every
# time a new Simulation is created. This cleanup must be in
# Simulation.__new__ so that it runs whenever a simulation is
# created or unpickled (it can't be done e.g. in
# load_snapshot).
#
# 3: this particular implementation assumes the only instances
# of Slice are in ConnectionFields, which is true for our
# simulations. (This won't matter when the slice cleanup
# becomes unnecessary.)
if hasattr(n,'_cleanup'):
n._cleanup()
# if we don't collect() here (exactly here - not in _cleanup,
# and not later), gc seems to lose track of some objects and
# there is still a (smaller) memory increase with every call
# to load_snapshot()
import gc
gc.collect()
return n
# CEBALERT: see gc alert in __new__()
def _cleanup(self):
    """
    Break references held by this simulation so that its sheets and
    event processors can be garbage collected (see the gc alert in
    __new__).
    """
    # will always be required: in case eps haven't been started
    # so are still in the list
    if hasattr(self,'eps_to_start'):
        # Empty in place (slice assignment) rather than rebinding, so any
        # other holder of this list also sees it emptied.
        self.eps_to_start[:]=[]
    if hasattr(self,'_event_processors'):
        for name,EP in self._event_processors.items():
            # Let each incoming connection drop its own references first.
            for c in EP.in_connections:
                if hasattr(c,'_cleanup'):
                    c._cleanup()
            # will always be required
            self._event_processors[name]=None
            # (check when cleaning up existing mechanism for
            # adding sheets e.g. sim['x']=sheet could first set
            # sim['x'] to None if there is already a sheet with
            # name x...)
# CEBALERT: if we're keeping this, should have a better name
def convert_to_time_type(self,time):
    """
    Return *time* coerced to this Simulation's time_type, passing the
    special sentinel self.forever through unchanged.
    """
    if time == self.forever:
        return self.forever
    return self.time.time_type(time)
# Note that __init__ can still be called after the
# Simulation(register=True) instance has been created. E.g. with
# Simulation.register is True,
# Simulation(name='A'); Simulation(name='B')
#
# would leave the single Simulation(register=True) instance with
# name=='B'. This is because, as is usual in Python, __new__
# creates an instance of a class, while __init__ is subsequently
# given that instance (to initialize).
def __init__(self, *args,**params):
    """
    Initialize a Simulation instance.

    Resets the clock to 0, creates empty containers for views, event
    processors, events and not-yet-started EPs, and attaches a timer
    whose callable is this simulation's run() method.
    """
    param.Parameterized.__init__(self, **params)
    # Reset the simulation clock to zero.
    self.time(val=0.0)
    self.views = AttrDict()
    # Registry of EventProcessors, keyed by name (see __setitem__).
    self._event_processors = {}
    self._model = None
    if self.register:
        # Indicate that no specific name has been set
        self.name=params.get('name')
        # Set up debugging messages to include the simulator time
        param.parameterized.dbprint_prefix= \
            (lambda: "Time: "+self.timestr()+" ")
    self.events = [] # CB: consider collections.deque? (PEP 290)
    self._events_stack = []
    # EPs added to the simulation but not yet sent the start() message.
    self.eps_to_start = []
    self.item_scale=1.0 # this variable determines the size of each item in a diagram
    # CB (this comment applies to SomeTimer!): make this a
    # parameter for documentation? Otherwise nobody will know
    # about being able to adjust step.
    #
    # we set step to 2 so that by default timing doesn't slow simulation too much. but
    # e.g. leaving it as None would result in info at 2% increments of requested run duration,
    # no matter what duration (0.005 or 5, etc).
    self.timer = SomeTimer(func=self.run,
                           simulation_time_fn=self.time)
@property
def model(self):
    """The model object attached to this Simulation, or None if unset."""
    return self._model
@model.setter
def model(self, model):
    """
    Attach a model object to the simulation. Setting a Model object
    automatically calls its setup() method and adopts its name.
    Assigning None is a silent no-op; non-callable objects and Model
    classes (rather than instances) are rejected.
    """
    if model is None:
        return
    if not callable(model):
        raise Exception("Model object %r is not callable" % model)
    if isinstance(model, type):
        raise Exception("Please supply an instantiated model object and not a Model class.")
    self._model = model
    model.setup()
    self.name = model.name
def __getitem__(self,item_name):
    """
    Return the EventProcessor registered in this Simulation under
    *item_name*. See objects().
    """
    if not isinstance(item_name,str):
        raise TypeError("Expected string (objects in the Simulation are indexed by name); %s is a %s"%(item_name,type(item_name)))
    registry = self.objects()
    try:
        return registry[item_name]
    except KeyError:
        raise AttributeError("Simulation doesn't contain '"+item_name+"'.")
# CEBALERT: should this at least give a warning when an existing
# EP is replaced?
def __setitem__(self,ep_name,ep):
    """
    Add ep to the simulation, setting its name to ep_name.
    ep must be an EventProcessor.
    If ep_name already exists in the simulation, ep overwrites
    the original object (as for a dictionary).
    Note: EventProcessors do not necessarily have to be added to
    the simulation to be used, but otherwise they will not receive the
    start() message. Adding a node to the simulation also sets the
    backlink node.simulation, so that the node can enqueue events
    and read the simulation time.
    """
    if not isinstance(ep_name,str):
        raise TypeError("Expected string for item name (EPs in the Simulation are indexed by name).")
    if not isinstance(ep,EventProcessor):
        raise TypeError("Expected EventProcessor: objects in the Simulation must be EPs.")
    # Adding the very same EP object twice is refused (warning only).
    if ep in self._event_processors.values():
        self.warning("EventProcessor "+str(ep)+" () already exists in the simulation and will not be added.")
    else:
        # Toggle initialized around the rename — presumably to suppress
        # side effects of changing the name on a live object; confirm.
        ep.initialized=False
        ep.name=ep_name
        ep.initialized=True
        # deletes and overwrites any existing EP with the same name,
        # silently, as if a dictionary
        if ep.name in self._event_processors: del self[ep.name]
        self._event_processors[ep_name] = ep
        # Backlink so the EP can enqueue events and read the clock.
        ep.simulation = self
        # Queue the EP to receive start() later.
        self.eps_to_start.append(ep)
        if hasattr(ep,'views'):
            self.views[ep_name] = ep.views
def __delitem__(self,ep_name):
    """
    Dictionary-style deletion of an EP from the simulation (see
    __delete_ep()): removes the named EventProcessor along with the
    connections into and out of it.
    """
    if isinstance(ep_name,str):
        self.__delete_ep(ep_name)
    else:
        raise TypeError("Expected string for item name (EPs in the Simulation are indexed by name).")
def __delete_ep(self,ep_name):
    """
    Remove the named EventProcessor from the simulation, deleting the
    connections that come into it and the connections that go out of
    it. (Backs 'del sim[ep_name]', as for a dictionary.)
    """
    ep = self[ep_name]
    # Drop it from the simulation's EP registry.
    del self._event_processors[ep_name]
    # Remove every connection attached to this EP: first the incoming
    # ones, then the outgoing ones.
    for connection_list in (ep.in_connections, ep.out_connections):
        for conn in connection_list:
            conn.remove()
def __iter__(self):
    """Iterate over the names of the objects in the Simulation."""
    for name in self.objects():
        yield name
def __dir__(self):
    """
    Extend dir() to include simulation objects as well. Useful
    for software that examines the list of possible objects, such
    as tab completion in IPython.
    """
    default_dir = dir(type(self)) + list(self.__dict__)
    # list() the keys: under Python 3, dict views cannot be concatenated
    # to a list with '+', which would raise TypeError here.
    return sorted(set(default_dir + list(self.objects().keys())))
def __getattr__(self, name):
    """
    Provide a simpler attribute-like syntax for accessing objects
    within a Simulation (e.g. sim.obj1, for an EventProcessor
    "obj1" in sim).
    """
    # Avoid recursing if looked up before the registry attribute exists
    # (e.g. during unpickling).
    if name=='_event_processors':
        raise AttributeError(name)
    try:
        return self.objects()[name]
    except Exception:
        # Narrowed from a bare 'except:', which also converted
        # KeyboardInterrupt/SystemExit into AttributeError. Including
        # the name makes the resulting error message useful.
        raise AttributeError(name)
def timestr(self,specified_time=None):
    """
    Return the specified time (or the current simulation time, if none
    is given) formatted with time_printing_format, which lets users
    control how much precision, etc. is used for time displays.
    """
    # CEBALERT: I doubt this gets all attributes. Does it get
    # properties (not that there are any right now)?
    # Expose parameters and instance attributes to the format string.
    values = dict(self.get_param_values())
    values.update(self.__dict__)
    if specified_time is None:
        if self.time is None:
            raise Exception('No time object available.')
        values['_time'] = self.time()
    else:
        values['_time'] = specified_time
    return self.time_printing_format % values
@property
def timestr_prop(self):
    """
    Read-only property form of timestr(): simply returns
    self.timestr(). Useful for setting the interactive command-line
    prompt.
    """
    return self.timestr()
def basename(self):
    """
    Return a string suitable for labeling an object created by the
    current simulation at the current time: by default the simulation
    name plus the formatted time string, per basename_format.
    """
    # Expose parameters, instance attributes and the formatted time
    # to the format string.
    values = dict(self.get_param_values())
    values.update(self.__dict__)
    values['timestr'] = self.timestr()
    return self.basename_format % values
# Change current run() to _run(), and current run_and_time() to run()?
# CEBALERT: need to simplify duration/until code. Hiding 'until' option
# until it's fixed (presumably nobody's using it).
def run_and_time(self, duration=forever):
    """
    Run the simulation for *duration*, timing the run. Indefinite
    durations (self.forever) are run directly without timing, because
    the timing code is not set up to handle them.
    """
    if duration != self.forever:
        self.timer.call_and_time(duration)
    else:
        # CEBALERT: timing code not setup to handle indefinite durations
        # (e.g. 'self.forever')
        self.run(duration)
def __call__(self, load=True, setup_options=None,
instantiate_options=True, verbose=False):
"""
Optionally regenerate the simulation specification and
instantiate the model (when load is set to True). If
setup_options is not None, a new specification will be created
with the given options - see the docstring of Model.setup for
more information.
If load is set to True, the model is instantiated using | |
# -*- coding: utf-8 -*-
import codecs
import os, sys
import copy
import random
import json
import math
import decimal
import datetime
import threading
import exceptions
import time
import base64
import md5
from gevent import socket
import urllib, urllib2, urlparse
from socket import error
import errno
import subprocess
from multiprocessing import Process, Queue, current_process, freeze_support
import shutil
import re
#from PIL import Image
import StringIO
import cgi
import uuid
import copy
from contextlib import contextmanager
from gevent import pywsgi
import gevent
import gevent.fileobject
from gevent.local import local
from gevent.subprocess import check_output
import pymongo
import gridfs
from bson.objectid import ObjectId
try:
from geventhttpclient import HTTPClient, URL
except:
print('geventhttpclient import error')
try:
import geventwebsocket
from geventwebsocket.handler import WebSocketHandler
except:
print('geventwebsocket import error')
# try:
# from pysimplesoap.server import SoapDispatcher, WSGISOAPHandler
# from pysimplesoap.client import SoapClient, SoapFault
# except:
# print('pysimplesoap import error')
try:
from PIL import Image
except :
print('PIL import error')
try:
from lxml import etree
except:
print('lxml import error')
try:
import czml
except:
print('czml import error')
try:
from py3o.template import Template
except:
print('import py3o.template error')
import werkzeug
from werkzeug.wrappers import Request, BaseResponse
from werkzeug.local import LocalProxy
from werkzeug.contrib.sessions import FilesystemSessionStore
from werkzeug.utils import dump_cookie, parse_cookie
from werkzeug.routing import Map, Rule, BaseConverter, ValidationError, HTTPException
from sessions import MongoClient, MongodbSessionStore
import configobj
import db_util
import bayes_util
from module_locator import module_path, dec, dec1, enc, enc1
# Module-level state, populated by init_global() at startup.
# Text encodings and static-resource directory paths:
ENCODING = None
ENCODING1 = None
STATICRESOURCE_DIR = None
STATICRESOURCE_CSS_DIR = None
STATICRESOURCE_JS_DIR = None
STATICRESOURCE_IMG_DIR = None
UPLOAD_PHOTOS_DIR = None
UPLOAD_VOICE_DIR = None
# Parsed configuration object (see db_util.gConfig).
gConfig = None
# In-memory caches and per-connection registries.
gStaticCache = {}
gTileCache = {}
#deprecated
gSatTileCache = {}
gMapTileCache = {}
gTerrainCache = {}
gGreenlets = {}
gClusterProcess = {}
gLoginToken = {}
gSecurityConfig = {}
gWebSocketsMap = {}
gTcpReconnectCounter = 0
gTcpSock = None
gHttpClient = {}
gFormTemplate = []
# Regex fragments for parsing key=value options in Content-Type-like
# headers (RFC 2045-style tspecials).
_SPECIAL = re.escape('()<>@,;:\\"/[]?={} \t')
_RE_SPECIAL = re.compile('[%s]' % _SPECIAL)
_QSTR = '"(?:\\\\.|[^"])*"' # Quoted string
_VALUE = '(?:[^%s]+|%s)' % (_SPECIAL, _QSTR) # Bare or quoted string
_OPTION = '(?:;|^)\s*([^%s]+)\s*=\s*(%s)' % (_SPECIAL, _VALUE)
_RE_OPTION = re.compile(_OPTION) # key=value part of a Content-Type like header
# Session store and greenlet-local request objects (see session_manager).
gSessionStore = None
gRequests = None
gRequest = None
gProxyRequest = None
gJoinableQueue = None
class BooleanConverter(BaseConverter):
    """URL-rule converter matching the literal path segments 'true' and
    'false', converted to/from Python booleans."""

    # randomify is accepted for signature compatibility but is unused.
    def __init__(self, url_map, randomify=False):
        super(BooleanConverter, self).__init__(url_map)
        self.regex = '(?:true|false)'

    def to_python(self, value):
        return value == 'true'

    def to_url(self, value):
        return 'true' if value else 'false'
class Py3oItem(object):
    """Empty placeholder class used as a dynamic attribute container
    (name suggests use with py3o.template report data — confirm at callers)."""
    pass
# URL routing table: maps request paths to endpoint names dispatched by
# the WSGI application. Registers the custom 'bool' converter below.
gUrlMap = Map([
    Rule('/', endpoint='firstaccess'),
    Rule('/websocket', endpoint='handle_websocket'),
    # --- Authentication / account management ---
    #Rule('/auth_check/<username>/isnew/<bool:isnew>', endpoint='saveuser'),
    Rule('/get_salt', endpoint='get_salt'),
    Rule('/auth_check/<username>', endpoint='auth_check'),
    Rule('/auth_check', endpoint='auth_check'),
    Rule('/register/<username>/<password>', endpoint='user_add'),
    Rule('/register/<username>', endpoint='user_add'),
    Rule('/register', endpoint='user_add'),
    Rule('/unregister/<username>', endpoint='user_delete'),
    Rule('/unregister', endpoint='user_delete'),
    Rule('/login/<username>/<password>', endpoint='login'),
    Rule('/login/<username>', endpoint='login'),
    Rule('/login', endpoint='login'),
    Rule('/logout', endpoint='logout'),
    Rule('/reset_password/<username>/<password>', endpoint='reset_password'),
    Rule('/reset_password/<username>', endpoint='reset_password'),
    Rule('/reset_password', endpoint='reset_password'),
    Rule('/user_check', endpoint='user_check'),
    Rule('/user_query', endpoint='user_query'),
    Rule('/user_update', endpoint='user_update'),
    # --- Function / role administration ---
    Rule('/function_add', endpoint='function_add'),
    Rule('/function_query', endpoint='function_query'),
    Rule('/function_update', endpoint='function_update'),
    Rule('/function_delete', endpoint='function_delete'),
    Rule('/role_add', endpoint='role_add'),
    Rule('/role_update', endpoint='role_update'),
    Rule('/role_query', endpoint='role_query'),
    Rule('/role_delete', endpoint='role_delete'),
    Rule('/role_template_save', endpoint='role_template_save'),
    Rule('/role_template_get', endpoint='role_template_get'),
    # --- Workflow management ---
    Rule('/workflow_add', endpoint='workflow_add'),
    Rule('/workflow_query', endpoint='workflow_query'),
    Rule('/workflow_query/<_id>', endpoint='workflow_query'),
    Rule('/workflow_update', endpoint='workflow_update'),
    Rule('/workflow_delete', endpoint='workflow_delete'),
    Rule('/workflow_delete/<_id>', endpoint='workflow_delete'),
    Rule('/workflow_template_add', endpoint='workflow_template_add'),
    Rule('/workflow_template_query', endpoint='workflow_template_query'),
    Rule('/workflow_template_query/<_id>', endpoint='workflow_template_query'),
    Rule('/workflow_template_update', endpoint='workflow_template_update'),
    Rule('/workflow_template_delete', endpoint='workflow_template_delete'),
    Rule('/workflow_template_delete/<_id>', endpoint='workflow_template_delete'),
    Rule('/workflow_form_fill', endpoint='workflow_form_fill'),
    Rule('/workflow_form_blank', endpoint='workflow_form_blank'),
    # --- User / group / chat ---
    Rule('/user_add', endpoint='user_add'),
    Rule('/user_get', endpoint='user_get'),
    Rule('/all_user_get', endpoint='all_user_get'),
    Rule('/user_remove', endpoint='user_remove'),
    Rule('/group_add', endpoint='group_add'),
    Rule('/group_get', endpoint='group_get'),
    Rule('/group_update', endpoint='group_update'),
    Rule('/group_remove', endpoint='group_remove'),
    Rule('/user_group_get', endpoint='user_group_get'),
    Rule('/user_contact_get', endpoint='user_contact_get'),
    Rule('/chat_broadcast', endpoint='chat_broadcast'),
    Rule('/chat_log_query', endpoint='chat_log_query'),
    Rule('/chat_log_remove', endpoint='chat_log_remove'),
    # --- GridFS file storage (upload/download/thumbnails) ---
    Rule('/gridfs/upload', endpoint='gridfs_upload'),
    Rule('/gridfs/get', endpoint='gridfs_get'),
    Rule('/gridfs/get/<_id>', endpoint='gridfs_get'),
    Rule('/gridfs/get/<_id>/thumbnail/<width>/<height>', endpoint='gridfs_get'),
    Rule('/gridfs/query/<width>/<height>', endpoint='gridfs_query'),
    Rule('/gridfs/query/<width>/<height>/<limit>', endpoint='gridfs_query'),
    Rule('/gridfs/query/<width>/<height>/<limit>/<skip>', endpoint='gridfs_query'),
    Rule('/gridfs/delete', endpoint='gridfs_delete'),
    Rule('/gridfs/delete/<_id>', endpoint='gridfs_delete'),
    # --- Domain-specific endpoints (antibird / state examination / bayesian) ---
    Rule('/antibird/get_equip_list', endpoint='get_equip_list'),
    Rule('/antibird/get_latest_records_by_imei', endpoint='get_latest_records_by_imei'),
    Rule('/antibird/equip_tower_mapping', endpoint='equip_tower_mapping'),
    Rule('/state_examination/save', endpoint='state_examination_save'),
    Rule('/state_examination/query', endpoint='state_examination_query'),
    Rule('/state_examination/query/line_names', endpoint='state_examination_query_line_names'),
    Rule('/state_examination/delete', endpoint='state_examination_delete'),
    Rule('/state_examination/delete/<_id>', endpoint='state_examination_delete'),
    Rule('/bayesian/query/graphiz', endpoint='bayesian_query_graphiz'),
    Rule('/bayesian/query/node', endpoint='bayesian_query_node'),
    Rule('/bayesian/query/predict', endpoint='bayesian_query_predict'),
    Rule('/bayesian/save/node', endpoint='bayesian_save_node'),
    Rule('/bayesian/delete/node', endpoint='bayesian_delete_node'),
    Rule('/bayesian/delete/node/<_id>', endpoint='bayesian_delete_node'),
    Rule('/bayesian/query/domains_range', endpoint='bayesian_query_domains_range'),
    Rule('/bayesian/save/domains_range', endpoint='bayesian_save_domains_range'),
    Rule('/bayesian/delete/domains_range', endpoint='bayesian_delete_domains_range'),
    Rule('/bayesian/delete/domains_range/<_id>', endpoint='bayesian_delete_domains_range'),
    Rule('/bayesian/reset/unit', endpoint='bayesian_reset_unit'),
], converters={'bool': BooleanConverter})
@contextmanager
def session_manager(environ):
    """
    Context manager that binds a werkzeug Request for *environ* to the
    greenlet-local gRequests, so handler code can reach it through the
    gRequest proxy. The binding is cleared again on exit.
    """
    global gRequests, gRequest
    # Lazily create the greenlet-local storage and its proxy on first use.
    if gRequests is None:
        gRequests = local()
        gRequest = LocalProxy(lambda: gRequests.request)
    gRequests.request = Request(environ)
    try:
        yield
    finally:
        # Always clear the binding — even when the handler raises —
        # so a stale Request cannot leak into the next use of this
        # greenlet (previously skipped on exceptions).
        gRequests.request = None
def init_global():
    """
    Initialise the module-level globals from configuration: encodings,
    static-resource directories, and per-application security config /
    work queues. Must be called once at startup before handling requests.
    """
    global ENCODING, ENCODING1, STATICRESOURCE_DIR, STATICRESOURCE_CSS_DIR, STATICRESOURCE_JS_DIR, STATICRESOURCE_IMG_DIR, UPLOAD_PHOTOS_DIR, UPLOAD_VOICE_DIR
    global gConfig, gStaticCache, gGreenlets, gClusterProcess, gSecurityConfig, gJoinableQueue
    ENCODING = 'utf-8'
    ENCODING1 = 'gb18030'
    STATICRESOURCE_DIR = os.path.join(module_path(), 'static')
    #CONFIGFILE = os.path.join(module_path(), 'ogc-config.ini')
    #gConfig = configobj.ConfigObj(db_util.CONFIGFILE, encoding='UTF8')
    gConfig = db_util.gConfig
    # Allow the web root to be overridden from config if the path exists.
    if gConfig['web'].has_key('webroot') and len(gConfig['web']['webroot'])>0:
        if os.path.exists(gConfig['web']['webroot']):
            STATICRESOURCE_DIR = gConfig['web']['webroot']
    STATICRESOURCE_CSS_DIR = os.path.join(STATICRESOURCE_DIR, 'css')
    STATICRESOURCE_JS_DIR = os.path.join(STATICRESOURCE_DIR, 'js')
    STATICRESOURCE_IMG_DIR = os.path.join(STATICRESOURCE_DIR, 'img')
    UPLOAD_PHOTOS_DIR = os.path.join(STATICRESOURCE_DIR,'photos', 'upload')
    UPLOAD_VOICE_DIR = os.path.join(STATICRESOURCE_DIR,'voice')
    # Per-application setup, selected by the configured WSGI application.
    if gConfig['wsgi']['application'].lower() == 'authorize_platform':
        # Security configuration document from MongoDB (empty dict if absent).
        gSecurityConfig = db_util.mongo_find_one(gConfig['authorize_platform']['mongodb']['database'],
                                                 gConfig['authorize_platform']['mongodb']['collection_security_config'],
                                                 {},
                                                 'authorize_platform'
                                                 )
        if gSecurityConfig is None:
            gSecurityConfig = {}
    if gConfig['wsgi']['application'].lower() in ['pay_platform', 'fake_gateway_alipay']:
        gJoinableQueue = gevent.queue.JoinableQueue(maxsize=int(gConfig['pay_platform']['queue']['max_queue_size']))
        l = db_util.mongo_find(gConfig['pay_platform']['mongodb']['database'],
                               gConfig['pay_platform']['mongodb']['collection_config'],
                               {},
                               0,
                               'pay_platform'
                               )
        # Flatten the single-key config documents into gSecurityConfig.
        # NOTE(review): i.keys()[0] is Python 2 only (dict views are not
        # indexable on Python 3).
        for i in l:
            del i['_id']
            key = i.keys()[0]
            gSecurityConfig[key] = i[key]
        if len(l) == 0:
            gSecurityConfig = {}
    if gConfig['wsgi']['application'].lower() == 'chat_platform':
        gJoinableQueue = gevent.queue.JoinableQueue(maxsize=int(gConfig['chat_platform']['queue']['max_queue_size']))
def handle_static(environ, aUrl):
    """
    Serve a static file for the request path *aUrl*.

    Resolves the path under STATICRESOURCE_DIR (with fallbacks to the
    js/css/img subdirectories, and a special case mapping missing .3gp
    files to the latest uploaded voice recording), chooses text or
    binary mode from the configured MIME type, and returns a
    (statuscode, headers, body) triple.

    NOTE(review): SECURITY — aUrl is used to build a filesystem path
    without normalisation or a containment check, so a crafted path
    containing '..' may read files outside STATICRESOURCE_DIR. Consider
    os.path.normpath + prefix check before open().
    """
    global ENCODING, gConfig
    global STATICRESOURCE_DIR, STATICRESOURCE_JS_DIR, STATICRESOURCE_CSS_DIR, STATICRESOURCE_IMG_DIR, UPLOAD_VOICE_DIR
    statuscode, contenttype, body = '404 Not Found', 'text/plain;charset=' + ENCODING, '404 Not Found'
    surl = dec(aUrl)#.replace('//', '').replace('/', os.path.sep)
    # Strip leading slashes so os.path.join treats the URL as relative.
    if surl[0:2] == '//':
        surl = surl[2:]
    if surl[0] == '/':
        surl = surl[1:]
    p = os.path.join(STATICRESOURCE_DIR , surl)
    isBin = False
    # NOTE(review): this first assignment is immediately overwritten by
    # the if/else below — redundant.
    ext = os.path.splitext(p)[1]
    if '.' in surl:
        ext = surl[surl.rindex('.'):]
    else:
        ext = os.path.splitext(p)[1]
    print('handle_static p=%s' % p)
    if len(ext)>0:
        if gConfig['mime_type'].has_key(ext):
            # Binary mode for images, octet-streams and PDFs.
            if 'image/' in gConfig['mime_type'][ext]:
                isBin = True
            if '/octet-stream' in gConfig['mime_type'][ext]:
                isBin = True
            if '/pdf' in gConfig['mime_type'][ext]:
                isBin = True
            contenttype = gConfig['mime_type'][ext]
            # Fallback lookups in the type-specific resource directories.
            if ext == '.js':
                if not os.path.exists(p):
                    p = os.path.join(STATICRESOURCE_JS_DIR, aUrl[aUrl.rindex('/')+1:])
            elif ext == '.css':
                if not os.path.exists(p):
                    p = os.path.join(STATICRESOURCE_CSS_DIR, aUrl[aUrl.rindex('/')+1:])
            elif 'image/' in gConfig['mime_type'][ext]:
                if not os.path.exists(p):
                    p = os.path.abspath(os.path.join(STATICRESOURCE_IMG_DIR, aUrl[aUrl.rindex('/')+1:]))
                if not os.path.exists(p):
                    p = os.path.join(STATICRESOURCE_DIR , aUrl)
            #p = os.path.abspath(p)
            p = dec(p)
            if os.path.exists(p):
                statuscode = '200 OK'
                mode = 'r'
                if isBin:
                    mode = 'rb'
                # Read through gevent's threaded file object so blocking
                # disk I/O does not stall the event loop.
                with open(p, mode) as f:
                    f1 = gevent.fileobject.FileObjectThread(f, mode)
                    body = f1.read()
            else:
                statuscode = '404 Not Found'
                body = '404 Not Found'
        else:
            # Unknown extension: serve as a raw octet-stream if present.
            contenttype = 'application/octet-stream'
            if os.path.exists(p):
                statuscode = '200 OK'
                with open(p, 'rb') as f:
                    f1 = gevent.fileobject.FileObjectThread(f, 'rb')
                    body = f1.read()
            else:
                # Missing .3gp files fall back to the latest uploaded
                # voice recording for that id.
                if ext == '.3gp':
                    id = surl[surl.rindex('/') + 1:]
                    id = id.replace('.3gp', '')
                    fn = get_voice_file_latest(id)
                    if fn:
                        with open(os.path.join(UPLOAD_VOICE_DIR, fn), 'rb') as f:
                            f1 = gevent.fileobject.FileObjectThread(f, 'rb')
                            body = f1.read()
                        statuscode = '200 OK'
    else:
        # No file extension at all: treated as an error.
        contenttype = 'text/plain;charset=' + ENCODING
        statuscode = '500 Internal Server Error'
        body = '500 Internal Server Error'
    headers = {}
    headers['Content-Type'] = str(contenttype)
    return statuscode, headers, body
def handle_wfs_GetCapabilities(params):
    """Serve the WFS GetCapabilities document as an XML response."""
    body = create_wfs_GetCapabilities()
    headers = {'Content-Type': 'text/xml;charset=' + ENCODING}
    return '200 OK', headers, body
def handle_wfs_GetFeature(params):
    """Serve the WFS GetFeature document as an XML response."""
    body = create_wfs_GetFeature()
    headers = {'Content-Type': 'text/xml;charset=' + ENCODING}
    return '200 OK', headers, body
def create_wfs_GetCapabilities():
namespace = {'ows':"http://www.opengis.net/ows",
'ogc':"http://www.opengis.net/ogc",
'wfs':"http://www.opengis.net/wfs",
'gml':"http://www.opengis.net/gml",
'xlink':"http://www.w3.org/1999/xlink",
'xsi':"http://www.w3.org/2001/XMLSchema-instance",
'schemaLocation':"http://www.opengis.net/wfs/1.1.0/WFS.xsd",
'my':"http://localhost:88/my"
}
wfs = '{%s}' % namespace['wfs']
ogc = '{%s}' % namespace['ogc']
ows = '{%s}' % namespace['ows']
xlink = '{%s}' % namespace['xlink']
root = etree.Element(wfs+"WFS_Capabilites", xmlns="http://www.opengis.net/wfs", nsmap=namespace, version="1.1.0", updateSequence="0")
#ServiceIdentification
ServiceIdentification = etree.SubElement(root, ows + "ServiceIdentification")
Title = etree.SubElement(ServiceIdentification, ows + "Title").text = gConfig['wfs']['ServiceIdentification_Title']
ServiceType = etree.SubElement(ServiceIdentification, ows + "ServiceType").text = 'WFS'
ServiceTypeVersion = etree.SubElement(ServiceIdentification, ows + "ServiceTypeVersion").text = '1.1.0'
#OperationsMetadata
OperationsMetadata = etree.SubElement(root, ows + "OperationsMetadata")
Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="GetCapabilities")
DCP= etree.SubElement(Operation, ows + "DCP")
HTTP= etree.SubElement(DCP, ows + "HTTP")
href = xlink + 'href'
Get= etree.SubElement(HTTP, ows + "Get", {href:gConfig['wfs']['url']})
#Constraint= etree.SubElement(Get, ows + "Constraint", name="GetEncoding")
#AllowedValues= etree.SubElement(Constraint, ows + "AllowedValues")
#Value= etree.SubElement(AllowedValues, ows + "Value").text = 'KVP'
#Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="GetTile")
#DCP= etree.SubElement(Operation, ows + "DCP")
#HTTP= etree.SubElement(DCP, ows + "HTTP")
#Get= etree.SubElement(HTTP, ows + "Get", {href:gConfig['wmts']['url']})
Parameter = etree.SubElement(Operation, ows + "Parameter", name="AcceptVersions")
Value = etree.SubElement(Parameter, ows + "Value").text = "1.1.0"
Value = etree.SubElement(Parameter, ows + "Value").text = "1.0.0"
Parameter = etree.SubElement(Operation, ows + "Parameter", name="AcceptFormats")
Value = etree.SubElement(Parameter, ows + "Value").text = "text/xml"
Parameter = etree.SubElement(Operation, ows + "Parameter", name="Sections")
Value = etree.SubElement(Parameter, ows + "Value").text = "ServiceIdentification"
Value = etree.SubElement(Parameter, ows + "Value").text = "OperationsMetadata"
Value = etree.SubElement(Parameter, ows + "Value").text = "FeatureTypeList"
Value = etree.SubElement(Parameter, ows + "Value").text = "ServesGMLObjectTypeList"
Value = etree.SubElement(Parameter, ows + "Value").text = "SupportsGMLObjectTypeList"
Value = etree.SubElement(Parameter, ows + "Value").text = "Filter_Capabilities"
Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="DescribeFeatureType")
DCP= etree.SubElement(Operation, ows + "DCP")
HTTP= etree.SubElement(DCP, ows + "HTTP")
Get= etree.SubElement(HTTP, ows + "Get", {href:gConfig['wfs']['url']})#+'/wfs.cgi?'})
Post= etree.SubElement(HTTP, ows + "Post", {href:gConfig['wfs']['url']})#+'/wfs.cgi'})
Parameter = etree.SubElement(Operation, ows + "Parameter", name="outputFormat")
Value = etree.SubElement(Parameter, ows + "Value").text = "text/xml; subtype=gml/3.1.1"
Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="GetFeature")
DCP= etree.SubElement(Operation, ows + "DCP")
HTTP= etree.SubElement(DCP, ows + "HTTP")
Get= etree.SubElement(HTTP, ows + "Get", {href:gConfig['wfs']['url']})#+'/wfs.cgi?'})
Post= etree.SubElement(HTTP, ows + "Post", {href:gConfig['wfs']['url']})#+'/wfs.cgi'})
Parameter = etree.SubElement(Operation, ows + "Parameter", name="resultType")
Value = etree.SubElement(Parameter, ows + "Value").text = "results"
Value = etree.SubElement(Parameter, ows + "Value").text = "hits"
Parameter = etree.SubElement(Operation, ows + "Parameter", name="outputFormat")
Value = etree.SubElement(Parameter, ows + "Value").text = "text/xml; subtype=gml/3.1.1"
Operation= etree.SubElement(OperationsMetadata, | |
[u'h'] ,
u'婒' : [u't'] ,
u'㻕' : [u'j'] ,
u'齔' : [u'c'] ,
u'䏗' : [u'k', u'g'] ,
u'睤' : [u'b'] ,
u'寧' : [u'z', u'n'] ,
u'鳩' : [u'q', u'j', u'z'] ,
u'佴' : [u'm', u'e', u'n'] ,
u'遶' : [u'r'] ,
u'瓹' : [u'j'] ,
u'梆' : [u'b'] ,
u'刍' : [u'c'] ,
u'霏' : [u'f'] ,
u'㾔' : [u'l'] ,
u'䂖' : [u's'] ,
u'薘' : [u'd'] ,
u'漟' : [u't'] ,
u'墦' : [u'f'] ,
u'鶨' : [u'd'] ,
u'䜯' : [u'j'] ,
u'蠱' : [u'g'] ,
u'疸' : [u'd'] ,
u'弿' : [u'j'] ,
u'恁' : [u'r'] ,
u'車' : [u'c', u'j'] ,
u'㝏' : [u'j'] ,
u'硑' : [u'p'] ,
u'曚' : [u'm'] ,
u'偡' : [u'z'] ,
u'镣' : [u'l'] ,
u'绪' : [u'x'] ,
u'菬' : [u'z'] ,
u'浳' : [u'y'] ,
u'固' : [u'g'] ,
u'鯼' : [u'z'] ,
u'辉' : [u'h'] ,
u'瀌' : [u'b'] ,
u'咏' : [u'y'] ,
u'塚' : [u'z'] ,
u'茖' : [u'g'] ,
u'枙' : [u'e'] ,
u'䠜' : [u'r'] ,
u'鬦' : [u'd'] ,
u'義' : [u'y'] ,
u'躳' : [u'g'] ,
u'猶' : [u'y'] ,
u'垹' : [u'b'] ,
u'艀' : [u'f'] ,
u'曃' : [u't', u'd'] ,
u'䭆' : [u'n'] ,
u'驐' : [u'd'] ,
u'结' : [u'j'] ,
u'觝' : [u'd'] ,
u'牠' : [u't'] ,
u'団' : [u't'] ,
u'懭' : [u'k'] ,
u'䩰' : [u'x'] ,
u'镺' : [u'a'] ,
u'秽' : [u'h'] ,
u'鈋' : [u'e'] ,
u'涊' : [u'r', u'l', u'n'] ,
u'嬑' : [u'y'] ,
u'㪐' : [u'l'] ,
u'樛' : [u'j'] ,
u'钤' : [u'q'] ,
u'䈫' : [u'n'] ,
u'嶪' : [u'y'] ,
u'贵' : [u'g'] ,
u'沴' : [u'l'] ,
u'娻' : [u'd'] ,
u'㖺' : [u'c'] ,
u'故' : [u'g'] ,
u'䓄' : [u'y'] ,
u'韎' : [u'm'] ,
u'絕' : [u'j'] ,
u'峔' : [u'm'] ,
u'豟' : [u'e'] ,
u'濞' : [u'p', u'b'] ,
u'啥' : [u's'] ,
u'㓤' : [u'q', u'j'] ,
u'摯' : [u'z'] ,
u'䟮' : [u'f'] ,
u'雸' : [u'a'] ,
u'籿' : [u'c'] ,
u'忾' : [u'k'] ,
u'漈' : [u'j'] ,
u'压' : [u'y'] ,
u'邍' : [u'y'] ,
u'䜘' : [u'j'] ,
u'萚' : [u't'] ,
u'條' : [u't'] ,
u'弨' : [u'c'] ,
u'鰪' : [u'g'] ,
u'薯' : [u's'] ,
u'㜸' : [u'n'] ,
u'琺' : [u'f'] ,
u'墽' : [u'q'] ,
u'鶿' : [u'c', u'z'] ,
u'䱊' : [u'm'] ,
u'酌' : [u'z'] ,
u'痏' : [u'w'] ,
u'楜' : [u'h'] ,
u'鋡' : [u'h'] ,
u'㱪' : [u'm'] ,
u'䅬' : [u'a', u'y'] ,
u'虮' : [u'q', u'j'] ,
u'櫱' : [u'n'] ,
u'奼' : [u'c'] ,
u'鹾' : [u'c'] ,
u'䐅' : [u'y'] ,
u'複' : [u'f'] ,
u'皎' : [u'j'] ,
u'尕' : [u'g'] ,
u'愗' : [u'm'] ,
u'亞' : [u'y'] ,
u'鎠' : [u'g'] ,
u'礧' : [u'l'] ,
u'殰' : [u'd'] ,
u'儷' : [u'l'] ,
u'阹' : [u'q'] ,
u'䏀' : [u'l'] ,
u'胂' : [u's', u'c'] ,
u'湉' : [u't'] ,
u'寐' : [u'm'] ,
u'飒' : [u's'] ,
u'䙙' : [u'c'] ,
u'譛' : [u'z'] ,
u'烢' : [u'z'] ,
u'幩' : [u'f'] ,
u'捫' : [u'm'] ,
u'䣲' : [u'f'] ,
u'跴' : [u'c'] ,
u'㙹' : [u'm'] ,
u'筻' : [u'g'] ,
u'薁' : [u'y'] ,
u'戄' : [u'j'] ,
u'稔' : [u'r'] ,
u'褞' : [u'y'] ,
u'疡' : [u'y'] ,
u'判' : [u'p'] ,
u'蒫' : [u'c'] ,
u'愮' : [u'y'] ,
u'䶱' : [u't'] ,
u'鲻' : [u'z'] ,
u'社' : [u's'] ,
u'衈' : [u'e'] ,
u'瓋' : [u'z'] ,
u'兎' : [u't'] ,
u'恘' : [u'q'] ,
u'䳛' : [u'y'] ,
u'鏥' : [u'x'] ,
u'硨' : [u'c'] ,
u'轲' : [u'k'] ,
u'毵' : [u's'] ,
u'偸' : [u't'] ,
u'㳻' : [u'z'] ,
u'考' : [u'k'] ,
u'枂' : [u'y'] ,
u'䤉' : [u'r', u'm'] ,
u'頓' : [u'z', u'd'] ,
u'羒' : [u'f'] ,
u'躜' : [u'z'] ,
u'瀣' : [u'x'] ,
u'垢' : [u'g'] ,
u'㤩' : [u'k'] ,
u'蜭' : [u'h'] ,
u'暬' : [u'x'] ,
u'鼽' : [u'q'] ,
u'纼' : [u'y', u'z'] ,
u'跆' : [u't'] ,
u'睍' : [u'x'] ,
u'囌' : [u's'] ,
u'虗' : [u'x'] ,
u'旖' : [u'y'] ,
u'佝' : [u'k', u'g'] ,
u'鹧' : [u'z'] ,
u'緦' : [u's'] ,
u'賰' : [u's'] ,
u'嗶' : [u'b'] ,
u'㽽' : [u'g'] ,
u'攀' : [u'p'] ,
u'䆃' : [u'd'] ,
u'芅' : [u'y'] ,
u'紐' : [u'n'] ,
u'妓' : [u'j'] ,
u'骕' : [u's'] ,
u'唠' : [u'l'] ,
u'阢' : [u'w'] ,
u'犥' : [u'p'] ,
u'渲' : [u'x'] ,
u'䪵' : [u'y'] ,
u'鞷' : [u'g'] ,
u'荄' : [u'g'] ,
u'濇' : [u's'] ,
u'幒' : [u'z'] ,
u'魔' : [u'm'] ,
u'䟗' : [u's'] ,
u'胙' : [u'z'] ,
u'㙢' : [u'm'] ,
u'忧' : [u'y'] ,
u'飩' : [u'z', u't'] ,
u'䭴' : [u'y', u'h'] ,
u'㟷' : [u'd'] ,
u'鑶' : [u'c'] ,
u'烹' : [u'p'] ,
u'沆' : [u'h', u'k'] ,
u'嘍' : [u'l'] ,
u'錏' : [u'y'] ,
u'㮔' : [u't'] ,
u'䒖' : [u'x'] ,
u'膘' : [u'p', u'b'] ,
u'岦' : [u'l'] ,
u'馨' : [u'x'] ,
u'䌯' : [u'q', u'g'] ,
u'谱' : [u'p'] ,
u'熸' : [u'j'] ,
u'嬿' : [u'y'] ,
u'摁' : [u'e'] ,
u'諊' : [u'j'] ,
u'籑' : [u'x', u'z'] ,
u'拚' : [u'p', u'f'] ,
u'呡' : [u'w'] ,
u'酣' : [u'h'] ,
u'㧨' : [u'q'] ,
u'竪' : [u's'] ,
u'蟬' : [u'c', u's'] ,
u'楳' : [u'm'] ,
u'勺' : [u's', u'z'] ,
u'讉' : [u'y'] ,
u'琌' : [u'l'] ,
u'傏' : [u't'] ,
u'㴒' : [u'y'] ,
u'蜖' : [u'h'] ,
u'掙' : [u'z'] ,
u'䰜' : [u'l'] ,
u'鼦' : [u'd'] ,
u'箩' : [u'l'] ,
u'誳' : [u'q'] ,
u'眶' : [u'k'] ,
u'厹' : [u'q'] ,
u'㰼' : [u'q'] ,
u'虀' : [u'j'] ,
u'拃' : [u'z'] ,
u'但' : [u'd'] ,
u'鹐' : [u'q'] ,
u'距' : [u'j'] ,
u'癠' : [u'j'] ,
u'勣' : [u'j'] ,
u'㽦' : [u'x'] ,
u'旭' : [u'x'] ,
u'买' : [u'm'] ,
u'酺' : [u'p'] ,
u'緽' : [u'c'] ,
u'䜁' : [u'x'] ,
u'阋' : [u'x'] ,
u'弑' : [u's'] ,
u'㺐' : [u'z'] ,
u'減' : [u'j'] ,
u'䆚' : [u't'] ,
u'䘫' : [u'r', u'n'] ,
u'妪' : [u'y'] ,
u'褵' : [u'l'] ,
u'梴' : [u'c'] ,
u'帻' : [u'z'] ,
u'慅' : [u's', u'c'] ,
u'鏎' : [u'b'] ,
u'祕' : [u'm'] ,
u'壔' : [u'd'] ,
u'衟' : [u'd'] ,
u'毞' : [u'p'] ,
u'入' : [u'r'] ,
u'息' : [u'x'] ,
u'䏮' : [u'x'] ,
u'鋸' : [u'j'] ,
u'硿' : [u'k'] ,
u'対' : [u'd'] ,
u'欈' : [u'w'] ,
u'型' : [u'x'] ,
u'钍' : [u't'] ,
u'䌘' : [u'm'] ,
u'耚' : [u'p'] ,
u'沝' : [u'z'] ,
u'嬨' : [u'c'] ,
u'頪' : [u'l'] ,
u'䒭' : [u'd'] ,
u'膯' : [u't'] ,
u'瀺' : [u'c'] ,
u'岽' : [u'd'] ,
u'馿' : [u'l'] ,
u'䡊' : [u'p', u'f'] ,
u'镌' : [u'j'] ,
u'燏' : [u'y'] ,
u'浜' : [u'b'] ,
u'䧟' : [u'x'] ,
u'雡' : [u'l'] ,
u'㡪' : [u'n'] ,
u'艮' : [u'h', u'g'] ,
u'滱' : [u'k'] ,
u'嵼' : [u'c'] ,
u'驾' : [u'j'] ,
u'䀅' : [u's', u'z'] ,
u'贇' : [u'y'] ,
u'犎' : [u'f'] ,
u'堕' : [u'h', u'd'] ,
u'攗' : [u'm'] ,
u'䪞' : [u'z'] ,
u'鞠' : [u'q', u'j'] ,
u'紧' : [u'j'] ,
u'澰' : [u'l'] ,
u'唷' : [u'y'] ,
u'鈹' : [u'p'] ,
u'㪾' : [u'l', u'g'] ,
u'蓂' : [u'm'] ,
u'橉' : [u'l'] ,
u'忐' : [u't'] ,
u'鳒' : [u'j'] ,
u'䉙' : [u'y'] ,
u'轛' : [u'd'] ,
u'㟠' : [u'g'] ,
u'瓢' : [u'p'] ,
u'婩' : [u'y'] ,
u'杫' : [u's'] ,
u'䳲' : [u'z'] ,
u'觴' : [u's'] ,
u'罻' : [u'y', u'w'] ,
u'要' : [u'y'] ,
u'渄' : [u'f'] ,
u'䊇' : [u'b'] ,
u'醑' : [u'x'] ,
u'瘔' : [u'k'] ,
u'蔞' : [u'j', u'l'] ,
u'禡' : [u'm'] ,
u'帤' : [u'r'] ,
u'被' : [u'p', u'b'] ,
u'洮' : [u'y', u't', u'd'] ,
u'䆱' : [u't'] ,
u'邻' : [u'l'] ,
u'甾' : [u'z'] ,
u'葈' : [u'x'] ,
u'磋' : [u'c'] ,
u'嵎' : [u'y'] ,
u'汘' : [u'q'] ,
u'䃛' : [u'q', u'l'] ,
u'瑨' : [u'j'] ,
u'荲' : [u'c'] ,
u'柵' : [u'c', u'z', u's'] ,
u'屸' : [u'l'] ,
u'调' : [u'z', u'd', u't'] ,
u'殂' : [u'c'] ,
u'䔉' : [u'l'] ,
u'鐓' : [u'd'] ,
u'玒' : [u'h'] ,
u'芜' : [u'w'] ,
u'簣' : [u'k'] ,
u'客' : [u'k'] ,
u'㔩' : [u'e'] ,
u'謭' : [u'j'] ,
u'檬' : [u'm'] ,
u'䐳' : [u'y'] ,
u'錽' : [u'w'] ,
u'犼' : [u'h'] ,
u'臆' : [u'y'] ,
u'筍' : [u's'] ,
u'嫌' : [u'x'] ,
u'詗' : [u'x'] ,
u'槖' : [u't'] ,
u'䍝' : [u'z', u't'] ,
u'鉧' : [u'm'] ,
u'燦' : [u'c'] ,
u'胰' : [u'y'] ,
u'穷' : [u'q'] ,
u'姶' : [u'y'] ,
u'椀' : [u'w'] ,
u'䶃' : [u'h'] ,
u'躅' : [u'z'] ,
u'㰎' : [u'z'] ,
u'焐' : [u'w'] ,
u'喓' : [u'y'] ,
u'隕' : [u'y'] ,
u'夠' : [u'g'] ,
u'㶣' : [u'c'] ,
u'騢' : [u'x'] ,
u'纥' : [u'h', u'g'] ,
u'戲' : [u'x'] ,
u'䚵' : [u't'] ,
u'鮷' : [u't'] ,
u'轄' : [u'x', u'h'] ,
u'揇' : [u'n'] ,
u'剒' : [u'c'] ,
u'㛕' : [u'y'] ,
u'靔' : [u't'] ,
u'䯗' : [u'b'] ,
u'賙' : [u'z'] ,
u'㩢' : [u'm'] ,
u'罤' : [u't'] ,
u'叧' : [u'g'] ,
u'铩' : [u's'] ,
u'㯷' : [u'p'] ,
u'顶' : [u'd'] ,
u'糹' : [u's'] ,
u'悆' : [u'y'] ,
u'娍' : [u'c'] ,
u'鼏' : [u'm'] ,
u'㞔' : [u'y'] ,
u'䢖' : [u'y', u'l'] ,
u'趘' : [u'x'] ,
u'期' : [u'q', u'j'] ,
u'閨' : [u'g'] ,
u'伯' : [u'b'] | |
from rdfframes.dataset.expandable_dataset import ExpandableDataset
from rdfframes.dataset.rdfpredicate import PredicateDirection
__author__ = """
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
class KnowledgeGraph:
    """
    High-level representation of one or more knowledge graphs (URIs).

    Holds a mapping of graph names to graph URIs plus per-graph SPARQL
    prefix tables, and provides convenience methods to initialize datasets
    before applying any operations on them.
    """
    # Well-known graphs that may be registered by name alone (no URI needed).
    # Keys are user-facing graph names, values are the graph URIs.
    default_graphs = {
        'dbpedia': 'http://dbpedia.org',
        'dblp': 'http://dblp.l3s.de'
    }
    # Built-in prefix tables for the default graphs, used when a default
    # graph is added without explicit prefixes: graph name -> {prefix: URI}.
    default_graph_prefixes = {
        'dbpedia': {
            'dcterms': 'http://purl.org/dc/terms/',
            'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
            'dbpp': 'http://dbpedia.org/property/',
            'dbpr': 'http://dbpedia.org/resource/',
            'dbpo': 'http://dbpedia.org/ontology/'},
        'dblp': {
            "xsd": "http://www.w3.org/2001/XMLSchema#",
            "swrc": "http://swrc.ontoware.org/ontology#",
            "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
            "dc": "http://purl.org/dc/elements/1.1/",
            "dcterm": "http://purl.org/dc/terms/",
            "dblprc": "http://dblp.l3s.de/d2r/resource/conferences/"}
    }
def __init__(self, graph_name=None, graph_uri=None, prefixes=None):
"""
Initializes the object with one graph. Other graphs can be added using
add_graph method.
:param graph_name: graph user defined name
:type graph_name: string
:param graph_uri: graph URI
:type graph_uri: string
:param prefixes: a dictionary of the prefixes to use in this graph. Keys
are the prefixes and values are the URIs.
:type prefixes: a dictionary where the key and value are strings.
"""
self.graphs = {}
self.graph_prefixes = {}
self._add_graph(graph_name, graph_uri, prefixes)
def add_graph(self, graph_name=None, graph_uri=None, prefixes=None):
"""
add more knowledge graph URIs to this KnowledgeGraph instance
:param graph_name: graph user defined name
:type graph_name: string
:param graph_uri: graph URI
:type graph_uri: string
:param prefixes: a dictionary of the prefixes to use in this graph. Keys
are the prefixes and values are the URIs.
:type prefixes: a dictionary where the key and value are strings.
:return: None or raise Exception
"""
self._add_graph(graph_name, graph_uri, prefixes)
def _add_graph(self, graph_name=None, graph_uri=None, prefixes=None):
"""
add a knowledge graph URI or a set of prefixes to this KnowledgeGraph instance.
:param graph_name: graph user defined name
:type graph_name: string
:param graph_uri: graph URI
:type graph_uri: string
:param prefixes: a dictionary of the prefixes to use in this graph. Keys
are the prefixes and values are the URIs.
:type prefixes: a dictionary where the key and value are strings.
:return:
"""
if graph_name is not None:
if len(graph_name) <= 0:
raise Exception("Graph name cannot be an empty string.")
elif graph_uri is not None:
self.graphs[graph_name] = graph_uri
if prefixes is not None:
self.__add_graph_prefixes(graph_name, prefixes)
else:
self.__load_default_prefixes(graph_name)
elif graph_name in KnowledgeGraph.default_graphs:
self.graphs[graph_name] = KnowledgeGraph.default_graphs[graph_name]
if graph_name in KnowledgeGraph.default_graph_prefixes:
self.__add_graph_prefixes(graph_name, KnowledgeGraph.default_graph_prefixes[graph_name])
else:
self.__load_default_prefixes(graph_name)
else:
raise Exception("Graph {} is not one of the default graphs.".format(graph_name))
elif graph_uri is not None:
graph_name = "graph{}".format(len(self.graphs))
self.graphs[graph_name] = graph_uri
if prefixes is not None:
self.__add_graph_prefixes(graph_name, prefixes)
else:
self.__load_default_prefixes(graph_name)
else:
graph_name = ""
if prefixes is not None:
self.__add_graph_prefixes(graph_name, prefixes)
else:
self.__load_default_prefixes(graph_name)
def __add_graph_prefixes(self, graph_name, graph_prefixes):
"""
add prefixes to be used with the graph specified by graph_name
:param graph_name: graph user defined name
:type graph_name: string
:param graph_prefixes: a dictionary of the prefixes to use in this graph. Keys
are the prefixes and values are the prefix URIs.
:type graph_prefixes: a dictionary where the key and value are strings.
:return:
"""
if graph_name not in self.graph_prefixes:
self.graph_prefixes[graph_name] = {}
for prefix, prefix_uri in graph_prefixes.items():
self.graph_prefixes[graph_name][prefix] = prefix_uri
def __load_default_prefixes(self, graph_name):
"""
if no prefixes are given with the graph, load the default list of
prefixes to be used for this graph
:param graph_name: graph name
:type graph_name: string
:return:
"""
default_prefixes = {
"foaf": "http://xmlns.com/foaf/0.1/",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"xsd": "http://www.w3.org/2001/XMLSchema#",
"xtypes": "http://purl.org/xtypes/",
"dcterms": "http://purl.org/dc/terms/",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"dc": "http://purl.org/dc/elements/1.1/",
}
if graph_name not in KnowledgeGraph.default_graph_prefixes:
self.__add_graph_prefixes(graph_name, default_prefixes)
else:
self.__add_graph_prefixes(graph_name, KnowledgeGraph.default_graph_prefixes[graph_name])
def entities(self, class_name, new_dataset_name='dataset', entities_col_name='entity'):
"""
Retrieves all entities in the predefined graphs whose type is the passed class_name.
Equivalent to the following sparql query:
select distinct ?e
where {
?e rdf:type ?class_class
}
:param class_name: the name of the class
:type class_name: string
:param new_dataset_name: the name of the created dataset holding the entities
:type new_dataset_name: string
:param entities_col_name: entities column name in the returned dataset
:type entities_col_name: string
:return: new dataset with one column of the URIs entities of the class
:rtype: Dataset
"""
for graph in self.graph_prefixes:
if "rdf" not in self.graph_prefixes[graph]:
self.graph_prefixes[graph]['rdf'] = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
return ExpandableDataset(self, new_dataset_name, class_name, class_name) \
.expand(class_name, [
('rdf:type', entities_col_name, False, PredicateDirection.INCOMING)])
def features(self, class_name, new_dataset_name='dataset', features_col_name='feature_uri'):
"""
Retrieves all features in my graphs for all entities whose type is class_name
Equivalent to the following sparql query:
select distinct ?p
where {
?e type ?class.
?e ?p ?o.
}
:param new_dataset_name: the name of the created dataset holding the entities
:type new_dataset_name: string
:param class_name: class that are part of my graphs
:type class_name: string
:param features_col_name: features column name in the returned dataset
:type features_col_name: string
:return: new dataset with two columns mapping each class URI to the
matching features
:rtype: Dataset
"""
for graph in self.graph_prefixes:
if "rdf" not in self.graph_prefixes[graph]:
self.graph_prefixes[graph]['rdf'] = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
return ExpandableDataset(self, new_dataset_name, class_name, class_name)\
.expand(class_name, [('rdf:type', "entity", False, PredicateDirection.INCOMING)])\
.expand("entity", [(features_col_name, "feature_value", False, PredicateDirection.OUTGOING)])
def entities_and_features(self, class_name, features, new_dataset_name='dataset', entities_col_name='entity'):
"""
Retrieves all entities in my graphs whose types are in the passed
classes and their specified features.
When an entity has two values for a specific features, two rows are returned for the same entity.
Equivalent to the following query:
select ?e ?o1 ?o2 ..
where {
?e type ?class
?e ?p1 ?o1
?e ?p2 ?o2
..
}
:param new_dataset_name: the name of the created dataset holding the entities
:type new_dataset_name: string
:param class_name: class that are part of my graphs
:type class_name: string
:param features: a list 2-tuples (feature_uri, new_col_name) where each tuple represents a feature.
:type features: a list of tuples of strings
:param entities_col_name: entities column name in the returned dataset
:type entities_col_name: string
:return: new dataset with at least two columns mapping each class URI to
the matching entities and their features
:rtype: Dataset
"""
for graph in self.graph_prefixes:
if "rdf" not in self.graph_prefixes[graph]:
self.graph_prefixes[graph]['rdf'] = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
ds = ExpandableDataset(self, new_dataset_name, class_name, class_name)\
.expand(class_name, [('rdf:type', entities_col_name, False, PredicateDirection.INCOMING)])
predicate_list = []
for (pred_uri, col_name) in features:
predicate_list.append((pred_uri, col_name, False, PredicateDirection.OUTGOING))
ds.expand(entities_col_name, predicate_list)
return ds
def classes_and_freq(self, new_dataset_name='dataset', classes_col_name='class', frequency_col_name='frequency'):
"""
retrieves all classes in my graph and their number of instances.
Equivalent to the following query:
select ?class count(distinct ?e)
where {
?e type ?class.
}
group by ?class
:param new_dataset_name: the name of the created dataset holding the entities
:type new_dataset_name: string
:param classes_col_name: class column name in the returned dataset
:type classes_col_name: string
:param frequency_col_name: frequency column name in the returned dataset
:type frequency_col_name: string
:return: new dataset with two columns mapping each class URI to the
number of entities of this type
:rtype: Dataset
"""
for graph in self.graph_prefixes:
if "rdf" not in self.graph_prefixes[graph]:
self.graph_prefixes[graph]['rdf'] = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
return ExpandableDataset(self, new_dataset_name, "instance", "instance")\
.expand("instance", [('rdf:type', classes_col_name, False, PredicateDirection.OUTGOING)])\
.group_by([classes_col_name])\
.count('instance', frequency_col_name)
def features_and_freq(self, class_name, new_dataset_name='dataset', features_col_name="feature",
frequency_col_name='frequency'):
"""
retrieves all features of the specified class and their frequency.
equivalent to the following query:
select ?class ?p count(distinct ?e)
where {
?e type ?class.
?e ?p ?o
}
group by ?class, ?p
:param class_name: class that are part of my graphs
:type class_name: string
:param new_dataset_name: the name of the created dataset holding the entities
:type new_dataset_name: string
:param features_col_name: features column name in the returned dataset
:type features_col_name: string
:param frequency_col_name: frequency column name in the returned dataset
:type frequency_col_name: string
:return: new dataset with three columns mapping each class URI to the
matching features and their frequency
:rtype: Dataset
"""
for graph in self.graph_prefixes:
if "rdf" not in self.graph_prefixes[graph]:
self.graph_prefixes[graph]['rdf'] = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
return ExpandableDataset(self, new_dataset_name, class_name, class_name)\
.expand(class_name, [('rdf:type', 'instance', False, PredicateDirection.INCOMING)])\
.expand('instance', [(features_col_name, 'feature_value', False, PredicateDirection.OUTGOING)])\
.group_by([features_col_name]).\
count('feature_value', frequency_col_name, unique=True)
def num_entities(self, class_name, new_dataset_name='dataset', num_entities_col_name='num_entities'):
"""
Counts all entities in the predefined graphs whose type is the passed classes.
Equivalent to the following query:
foreach class in classes:
select ?class count(distinct ?e)
where {
?e type ?class
}
:param class_name: class that are part of my graphs
:type class_name: string
:param new_dataset_name: the name of the created dataset holding the entities
:type new_dataset_name: string
:param num_entities_col_name: count of entities column name in | |
<filename>lauetoolsnn/lauetools/dict_LaueTools.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Dictionary of several parameters concerning Detectors, Materials, Transforms etc
that are used in LaueTools and in LaueToolsGUI module
Lauetools project
April 2019
"""
__author__ = "<NAME>, CRG-IF BM32 @ ESRF"
import copy
import re
import numpy as np
##------------------------------------------------------------------------
# --- ----------- Element-materials library
#-------------------------------------------------------------------------
# label, a,b,c,alpha, beta, gamma in real space lattice, extinction rules label
dict_Materials = {
    # Each entry maps a material key to:
    #   [label, [a, b, c, alpha, beta, gamma], extinction-rules tag]
    # with a, b, c the direct-space lattice lengths (Angstroms), the three
    # angles in degrees, and the tag naming the systematic-extinction rule
    # set applied when simulating reflections ("no" = no extinction filtering;
    # numeric strings like "14" or "166" look like space-group numbers — TODO confirm).
    "ZrO2_1250C": ["ZrO2_1250C", [3.6423954,3.6423954,5.28113,90.0,90,90], "h+k+l=2n"],
    "ZrO2_1200C": ["ZrO2_1200C", [3.640666542,3.640666542,5.27828,90,90,90], "h+k+l=2n"],
    "alm": ["alm", [3.60500, 9.44500, 9.65300, 90, 90, 90], "no"],
    "Ag": ["Ag", [4.085, 4.085, 4.085, 90, 90, 90], "fcc"], # confirmed by IM2NP
    "Al2O3": ["Al2O3", [4.785, 4.785, 12.991, 90, 90, 120], "Al2O3"],
    "Al2O3_all": ["Al2O3_all", [4.785, 4.785, 12.991, 90, 90, 120], "no"],
    "Al": ["Al", [4.05, 4.05, 4.05, 90, 90, 90], "fcc"],
    "Al2Cu": ["Al2Cu", [6.063, 6.063, 4.872, 90, 90, 90], "no"],
    "AlN": ["AlN", [3.11, 3.11, 4.98, 90.0, 90.0, 120.0], "wurtzite"],
    "Fe": ["Fe", [2.856, 2.856, 2.856, 90, 90, 90], "bcc"],
    "FeAl": ["FeAl", [5.871, 5.871, 5.871, 90, 90, 90], "fcc"],
    "Fe2Ta": ["Fe2Ta", [4.83, 4.83, 0.788, 90, 90, 120], "no"],
    "Si": ["Si", [5.4309, 5.4309, 5.4309, 90, 90, 90], "dia"],
    "CdHgTe": ["CdHgTe", [6.46678, 6.46678, 6.46678, 90, 90, 90], "dia"],
    "CdHgTe_fcc": ["CdHgTe_fcc", [6.46678, 6.46678, 6.46678, 90, 90, 90], "fcc"],
    "Ge": ["Ge", [5.6575, 5.6575, 5.6575, 90, 90, 90], "dia"],
    "Getest": ["Getest", [5.6575, 5.6575, 5.6574, 90, 90, 90], "dia", ], # c is slightly lower
    "Au": ["Au", [4.078, 4.078, 4.078, 90, 90, 90], "fcc"],
    "Ge_s": ["Ge_s", [5.6575, 5.6575, 5.6575, 90, 90, 89.5], "dia", ], # Ge a bit strained
    "Ge_compressedhydro": ["Ge_compressedhydro", [5.64, 5.64, 5.64, 90, 90, 90.0], "dia", ], # Ge compressed hydrostatically
    "GaAs": ["GaAs", [5.65325, 5.65325, 5.65325, 90, 90, 90], "dia"], # AsGa
    "GaAs_wurtz": ["GaAs_wurtz", [5.65325, 5.65325, 5.9, 90, 90, 90], "wurtzite"], # AsGa
    "ZrUO2_corium": ["ZrUO2_corium", [5.47, 5.47, 5.47, 90, 90, 90], "fcc"],
    "Cu": ["Cu", [3.6, 3.6, 3.6, 90, 90, 90], "fcc"],
    "Crocidolite": ["Crocidolite", [9.811, 18.013, 5.326, 90, 103.68, 90], "no", ], # a= 9.811, b=18.013, c= 5.326A, beta=103,68°
    "Crocidolite_2": ["Crocidolite_2", [9.76, 17.93, 5.35, 90, 103.6, 90], "no", ], # a= 9.811, b=18.013, c= 5.326A, beta=103,68°
    # NOTE(review): label field below reuses "Crocidolite_2" and beta is 76.4
    # (= 180 - 103.6), presumably the same cell with the complementary
    # monoclinic-angle convention — confirm key/label mismatch is intentional.
    "Crocidolite_2_72deg": ["Crocidolite_2", [9.76, 17.93, 5.35, 90, 76.4, 90], "no", ], # a= 9.811, b=18.013, c= 5.326A, beta=103,68°
    "Crocidolite_whittaker_1949": ["Crocidolite_whittaker_1949", [9.89, 17.85, 5.31, 90, 180 - 72.5, 90], "no", ],
    "CCDL1949": ["CCDL1949", [9.89, 17.85, 5.31, 90, 180 - 72.5, 90], "h+k=2n"],
    "Crocidolite_small": ["Crocidolite_small", [9.76 / 3, 17.93 / 3, 5.35 / 3, 90, 103.6, 90], "no", ], # a= 9.811, b=18.013, c= 5.326A, beta=103,68°
    "Hematite": ["Hematite", [5.03459, 5.03459, 13.7533, 90, 90, 120], "no", ], # extinction for h+k+l=3n and always les l=2n
    "Magnetite_fcc": ["Magnetite_fcc", [8.391, 8.391, 8.391, 90, 90, 90], "fcc", ], # GS 225 fcc extinction
    "Magnetite": ["Magnetite", [8.391, 8.391, 8.391, 90, 90, 90], "dia"], # GS 227
    "Magnetite_sc": ["Magnetite_sc", [8.391, 8.391, 8.391, 90, 90, 90], "no", ], # no extinction
    "NiTi": ["NiTi", [3.5506, 3.5506, 3.5506, 90, 90, 90], "fcc"],
    "Ni": ["Ni", [3.5238, 3.5238, 3.5238, 90, 90, 90], "fcc"],
    "NiO": ["NiO", [2.96, 2.96, 7.23, 90, 90, 120], "no"],
    "dummy": ["dummy", [4.0, 8.0, 2.0, 90, 90, 90], "no"],
    "CdTe": ["CdTe", [6.487, 6.487, 6.487, 90, 90, 90], "fcc"],
    "CdTeDiagB": ["CdTeDiagB", [4.5721, 7.9191, 11.1993, 90, 90, 90], "no"],
    "DarinaMolecule": ["DarinaMolecule", [9.4254, 13.5004, 13.8241, 61.83, 84.555, 75.231], "no", ],
    # 'NbSe3' :['NbSe3',[10.006, 3.48, 15.629],'cubic'], # monoclinic structure, angle beta = 109.5 must be input in grain definition
    "UO2": ["UO2", [5.47, 5.47, 5.47, 90, 90, 90], "fcc"],
    "ZrO2Y2O3": ["ZrO2Y2O3", [5.1378, 5.1378, 5.1378, 90, 90, 90], "fcc"],
    "ZrO2": ["ZrO2", [5.1505, 5.2116, 5.3173, 90, 99.23, 90], "VO2_mono"],
    "ZrO2_SG": ["ZrO2_SG", [5.1505, 5.2116, 5.3173, 90, 99.23, 90], "14"],
    "ZrO2fake1": ["ZrO2fake1", [5.1505, 5.048116, 4.988933, 90, 99.23, 90], "VO2_mono"],
    "DIA": ["DIA", [5.0, 5.0, 5.0, 90, 90, 90], "dia", ], # small lattice Diamond like Structure
    "DIAs": ["DIAs", [3.56683, 3.56683, 3.56683, 90, 90, 90], "dia", ], # small lattice Diamond material Structure
    "FCC": ["FCC", [5.0, 5.0, 5.0, 90, 90, 90], "fcc"], # small lattice fcc Structure
    "SC": ["SC", [1.0, 1.0, 1.0, 90, 90, 90], "no"], # 1Ang simple cubic Structure
    "SC5": ["SC5", [5.0, 5.0, 5.0, 90, 90, 90], "no"], # 5Ang simple cubic Structure
    "SC7": ["SC7", [7.0, 7.0, 7.0, 90, 90, 90], "no"], # 7Ang simple cubic Structure
    "W": ["W", [3.1652, 3.1652, 3.1652, 90, 90, 90], "bcc"],
    "testindex": ["testindex", [2.0, 1.0, 4.0, 90, 90, 90], "no"],
    "testindex2": ["testindex2", [2.0, 1.0, 4.0, 75, 90, 120], "no"],
    "Ti": ["Ti", [2.95, 2.95, 4.68, 90, 90, 120], "no"],
    "Ti2AlN_w": ["Ti2AlN_w", [2.989, 2.989, 13.624, 90, 90, 120], "wurtzite"],
    "Ti2AlN": ["Ti2AlN", [2.989, 2.989, 13.624, 90, 90, 120], "Ti2AlN"],
    "Ti_beta": ["Ti_beta", [3.2587, 3.2587, 3.2587, 90, 90, 90], "bcc"],
    "Ti_omega": ["Ti_omega", [4.6085, 4.6085, 2.8221, 90, 90, 120], "no"],
    "alphaQuartz": ["alphaQuartz", [4.9, 4.9, 5.4, 90, 90, 120], "no"],
    "betaQuartznew": ["betaQuartznew", [4.9, 4.9, 6.685, 90, 90, 120], "no"],
    "GaN": ["GaN", [3.189, 3.189, 5.185, 90, 90, 120], "wurtzite"],
    "GaN_all": ["GaN_all", [3.189, 3.189, 5.185, 90, 90, 120], "no"],
    "In": ["In", [3.2517, 3.2517, 4.9459, 90, 90, 90], "h+k+l=2n"],
    "In_distorted": ["In_distorted", [3.251700, 3.251133, 4.818608, 89.982926, 90.007213, 95.379102],"h+k+l=2n"],
    "InN": ["InN", [3.533, 3.533, 5.693, 90, 90, 120], "wurtzite"],
    "In2Bi": ["In2Bi", [5.496, 5.496, 6.585, 90, 90, 120], "194"], # GS 194
    "In_epsilon": ["In_epsilon", [3.47, 3.47, 4.49, 90, 90, 90], "139"], #"h+k+l=2n"], # GS
    "InGaN": ["InGaN", [(3.533 + 3.189) / 2.0, (3.533 + 3.189) / 2.0, (5.693 + 5.185) / 2.0, 90, 90, 120, ], "wurtzite", ], # wegard's law
    "Ti_s": ["Ti_s", [3.0, 3.0, 4.7, 90.5, 89.5, 120.5], "no"], # Ti strained
    "inputB": ["inputB", [1.0, 1.0, 1.0, 90, 90, 90], "no"],
    "bigpro": ["bigpro", [112.0, 112.0, 136.0, 90, 90, 90], "no"], # big protein
    "smallpro": ["smallpro", [20.0, 4.8, 49.0, 90, 90, 90], "no"], # small protein
    "Nd45": ["Nd45", [5.4884, 5.4884, 5.4884, 90, 90, 90], "fcc"],
    "YAG": ["YAG", [9.2, 9.2, 9.2, 90, 90, 90], "no"],
    "Cu6Sn5_tetra": ["Cu6Sn5_tetra", [3.608, 3.608, 5.037, 90, 90, 90], "no"],
    "Cu6Sn5_monoclinic": ["Cu6Sn5_monoclinic", [11.02, 7.28, 9.827, 90, 98.84, 90], "no", ],
    "Sn_beta": ["Sn_beta", [5.83, 5.83, 3.18, 90, 90, 90], "SG141"],
    # NOTE(review): label "Sn_all" does not match key "Sn_beta_all" — confirm intentional.
    "Sn_beta_all": ["Sn_all", [5.83, 5.83, 3.18, 90, 90, 90], "no"],
    "Sb": ["Sb", [4.3, 4.3, 11.3, 90, 90, 120], "no"],
    "quartz_alpha": ["quartz_alpha", [4.913, 4.913, 5.404, 90, 90, 120], "no"],
    "ferrydrite": ["ferrydrite", [2.96, 2.96, 9.4, 90, 90, 120], "no"],
    "feldspath": ["feldspath", [8.59, 12.985, 7.213, 90, 116., 90], 'no'],
    "hexagonal": ["hexagonal", [1.0, 1.0, 3.0, 90, 90, 120.0], "no"],
    "ZnO": ["ZnO", [3.252, 3.252, 5.213, 90, 90, 120], "wurtzite"],
    "test_reference": ["test_reference", [3.2, 4.5, 5.2, 83, 92.0, 122], "wurtzite"],
    "test_solution": ["test_solution", [3.252, 4.48, 5.213, 83.2569, 92.125478, 122.364], "wurtzite",],
    "Y2SiO5": ["Y2SiO5", [10.34, 6.689, 12.38, 90.0, 102.5, 90.0], "no", ], # SG 15 I2/a
    "VO2M1": ["VO2M1", [5.75175, 4.52596, 5.38326, 90.0, 122.6148, 90.0], "VO2_mono", ], # SG 14
    "VO2M2": ["VO2M2", [4.5546, 4.5546, 2.8514, 90.0, 90, 90.0], "no" ], # SG 136 (87 deg Celsius) Rutile
    "VO2R": ["VO2R", [4.5546, 4.5546, 2.8514, 90.0, 90, 90.0], "rutile"], # SG 136 (87 deg Celsius) Rutile
    "ZnCuOCl": ["ZnCuOCl", [6.83972, 6.83972, 14.08845, 90.0, 90, 120.0], "SG166"],
    "ZnCuOCl_all": ["ZnCuOCl_all", [6.83972, 6.83972, 14.08845, 90.0, 90, 120.0], "no"],
    # NOTE(review): label "ZnCuOCl_all" does not match key "ZnCuOCl_SG" — confirm intentional.
    "ZnCuOCl_SG": ["ZnCuOCl_all", [6.83972, 6.83972, 14.08845, 90.0, 90, 120.0], "166"],
    "Al2TiO5_all": ["Al2TiO5_all", [3.60500, 9.44500, 9.65300, 90.0, 90, 90], "no"],
    "Al2TiO5": ["Al2TiO5", [3.60500, 9.44500, 9.65300, 90.0, 90, 90], "63"],
    "Al2TiO5_new": ["Al2TiO5_new", [3.595, 9.473, 9.677, 90.0, 90, 90], "63"],
    }
dict_Materials_short = {
"Al2O3": ["Al2O3", [4.785, 4.785, 12.991, 90, 90, 120], "Al2O3"],
"Al2O3_all": ["Al2O3_all", [4.785, 4.785, 12.991, 90, 90, 120], "no"],
"Al": ["Al", [4.05, 4.05, 4.05, 90, 90, 90], "fcc"],
"Al2Cu": ["Al2Cu", [6.063, 6.063, 4.872, 90, 90, 90], "no"],
"AlN": ["AlN", [3.11, 3.11, 4.98, 90.0, 90.0, 120.0], "wurtzite"],
"Fe": ["Fe", [2.856, 2.856, 2.856, 90, 90, 90], "bcc"],
"Si": ["Si", [5.4309, 5.4309, 5.4309, 90, 90, 90], "dia"],
"CdHgTe": ["CdHgTe", [6.46678, 6.46678, 6.46678, 90, 90, 90], "dia"],
"CdHgTe_fcc": ["CdHgTe_fcc", [6.46678, 6.46678, 6.46678, 90, 90, 90], "fcc"],
"Ge": ["Ge", [5.6575, 5.6575, 5.6575, 90, 90, 90], "dia"],
"Au": ["Au", [4.078, 4.078, 4.078, 90, 90, 90], "fcc"],
"GaAs": ["GaAs", [5.65325, 5.65325, 5.65325, 90, 90, 90], "dia"], # AsGa
"Cu": ["Cu", [3.6, 3.6, 3.6, 90, 90, 90], "fcc"],
"Crocidolite_whittaker_1949": ["Crocidolite_whittaker_1949", [9.89, 17.85, 5.31, 90, 180 - 72.5, 90], "no", ],
"Hematite": ["Hematite", [5.03459, 5.03459, 13.7533, 90, 90, 120], "no", ], # extinction for h+k+l=3n and always les l=2n
"Magnetite_fcc": ["Magnetite_fcc", [8.391, 8.391, 8.391, 90, 90, 90], "fcc", ], | |
864,
864, 2620, 864, 2620, 864, 2620, 864, 2620, 864, 2620, 864, 864,
864, 2620, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864, 864,
864, 864, 864, 864, 2620, 864, 864, 864, 864, 864, 864, 864, 864,
864, 864, 864, 864,
3485, 3512, 864, 13996,
3485, 3512, 864, 864, 864, 864, 864, 2620, 864, 864, 864, 2620,
864, 2620, 864, 2620, 864, 2620, 864, 864, 864, 864, 864, 2620,
864, 864, 864, 2620, 864, 2620, 864, 2620, 864, 2620, 864, 864,
864, 2620, 864, 2620, 864, 864, 864, 2620, 864, 2620, 864, 864,
864, 864, 864, 864, 864, 2620, 864, 2620, 864, 864, 864, 2620,
864, 2620, 864, 864, 864, 864,
3485, 3512, 864, 864, 864, 864, 864, 2620, 864, 864, 864, 2620,
864, 2620, 864, 2620, 864, 2620, 864, 864, 864, 864, 864, 2620,
864, 864, 864, 2620, 864, 2620, 864, 2620, 864, 2620, 864, 864,
864, 2620, 864, 2620, 864, 864, 864, 2620, 864, 2620, 864, 864,
864, 864, 864, 864, 864, 2620, 864, 2620, 864, 864, 864, 2620,
864, 2620, 864, 864, 864, 864, 3485, 3512, 864, 13996};"""
analyse.parse_and_report(input_str, 200, True, "FOO", output)
self.assertEqual(
output.getvalue(), 'Found 272 timing entries.\n'
'Potential Mark Candidates:\n'
'[3485, 864]\n'
'Potential Space Candidates:\n'
'[13996, 3512, 2620, 864]\n'
'\n'
'Guessing encoding type:\n'
'Looks like it uses space encoding. Yay!\n'
'\n'
'Guessing key value:\n'
'kFOOHdrMark = 3485\n'
'kFOOHdrSpace = 3512\n'
'kFOOBitMark = 864\n'
'kFOOOneSpace = 2620\n'
'kFOOZeroSpace = 864\n'
'kFOOSpaceGap = 13996\n'
'\n'
'Decoding protocol based on analysis so far:\n'
'\n'
'kFOOHdrMark+kFOOHdrSpace+01011111010111110100000001000000\n'
' Bits: 32\n'
' Hex: 0x5F5F4040 (MSB first)\n'
' 0x0202FAFA (LSB first)\n'
' Dec: 1600077888 (MSB first)\n'
' 33749754 (LSB first)\n'
' Bin: 0b01011111010111110100000001000000 (MSB first)\n'
' 0b00000010000000101111101011111010 (LSB first)\n'
'kFOOHdrMark+kFOOHdrSpace+01011111010111110100000001000000\n'
' Bits: 32\n'
' Hex: 0x5F5F4040 (MSB first)\n'
' 0x0202FAFA (LSB first)\n'
' Dec: 1600077888 (MSB first)\n'
' 33749754 (LSB first)\n'
' Bin: 0b01011111010111110100000001000000 (MSB first)\n'
' 0b00000010000000101111101011111010 (LSB first)\n'
'kFOOHdrMark+kFOOHdrSpace+GAP(13996)kFOOHdrMark+kFOOHdrSpace+0010111100'
'1011110110110001101100\n'
' Bits: 32\n'
' Hex: 0x2F2F6C6C (MSB first)\n'
' 0x3636F4F4 (LSB first)\n'
' Dec: 791637100 (MSB first)\n'
' 909571316 (LSB first)\n'
' Bin: 0b00101111001011110110110001101100 (MSB first)\n'
' 0b00110110001101101111010011110100 (LSB first)\n'
'kFOOHdrMark+kFOOHdrSpace+00101111001011110110110001101100\n'
' Bits: 32\n'
' Hex: 0x2F2F6C6C (MSB first)\n'
' 0x3636F4F4 (LSB first)\n'
' Dec: 791637100 (MSB first)\n'
' 909571316 (LSB first)\n'
' Bin: 0b00101111001011110110110001101100 (MSB first)\n'
' 0b00110110001101101111010011110100 (LSB first)\n'
'kFOOHdrMark+kFOOHdrSpace+GAP(13996)\n'
'Total Nr. of suspected bits: 128\n'
'\n'
'Generating a VERY rough code outline:\n'
'\n'
'// Copyright 2019 <NAME> (crankyoldgit)\n'
'// Support for FOO protocol\n'
'\n'
'#include "IRrecv.h"\n'
'#include "IRsend.h"\n'
'#include "IRutils.h"\n'
'\n'
"// WARNING: This probably isn't directly usable. It's a guide only.\n"
'\n'
'// See https://github.com/crankyoldgit/IRremoteESP8266/wiki/'
'Adding-support-for-a-new-IR-protocol\n'
'// for details of how to include this in the library.\n'
'const uint16_t kFOOHdrMark = 3485;\n'
'const uint16_t kFOOBitMark = 864;\n'
'const uint16_t kFOOHdrSpace = 3512;\n'
'const uint16_t kFOOOneSpace = 2620;\n'
'const uint16_t kFOOZeroSpace = 864;\n'
'const uint16_t kFOOSpaceGap = 13996;\n'
'const uint16_t kFOOFreq = 38000; // Hz. (Guessing the most common'
' frequency.)\n'
'const uint16_t kFOOBits = 128; // Move to IRremoteESP8266.h\n'
'const uint16_t kFOOStateLength = 16; // Move to IRremoteESP8266.h\n'
'const uint16_t kFOOOverhead = 16;\n'
"// DANGER: More than 64 bits detected. A uint64_t for 'data' won't"
' work!\n'
'#if SEND_FOO\n'
'// Function should be safe up to 64 bits.\n'
'void IRsend::sendFOO(const uint64_t data, const uint16_t nbits,'
' const uint16_t repeat) {\n'
' enableIROut(kFOOFreq);\n'
' for (uint16_t r = 0; r <= repeat; r++) {\n'
' uint64_t send_data = data;\n'
' // Header\n'
' mark(kFOOHdrMark);\n'
' space(kFOOHdrSpace);\n'
' // Data Section #1\n'
' // e.g. data = 0x5F5F4040, nbits = 32\n'
' sendData(kFOOBitMark, kFOOOneSpace, kFOOBitMark, kFOOZeroSpace,'
' send_data, 32, true);\n'
' send_data >>= 32;\n'
' // Header\n'
' mark(kFOOHdrMark);\n'
' space(kFOOHdrSpace);\n'
' // Data Section #2\n'
' // e.g. data = 0x5F5F4040, nbits = 32\n'
' sendData(kFOOBitMark, kFOOOneSpace, kFOOBitMark, kFOOZeroSpace,'
' send_data, 32, true);\n'
' send_data >>= 32;\n'
' // Header\n'
' mark(kFOOHdrMark);\n'
' space(kFOOHdrSpace);\n'
' // Gap\n'
' mark(kFOOBitMark);\n'
' space(kFOOSpaceGap);\n'
' // Header\n'
' mark(kFOOHdrMark);\n'
' space(kFOOHdrSpace);\n'
' // Data Section #3\n'
' // e.g. data = 0x2F2F6C6C, nbits = 32\n'
' sendData(kFOOBitMark, kFOOOneSpace, kFOOBitMark, kFOOZeroSpace,'
' send_data, 32, true);\n'
' send_data >>= 32;\n'
' // Header\n'
' mark(kFOOHdrMark);\n'
' space(kFOOHdrSpace);\n'
' // Data Section #4\n'
' // e.g. data = 0x2F2F6C6C, nbits = 32\n'
' sendData(kFOOBitMark, kFOOOneSpace, kFOOBitMark, kFOOZeroSpace,'
' send_data, 32, true);\n'
' send_data >>= 32;\n'
' // Header\n'
' mark(kFOOHdrMark);\n'
' space(kFOOHdrSpace);\n'
' // Gap\n'
' mark(kFOOBitMark);\n'
' space(kFOOSpaceGap);\n'
' space(kDefaultMessageGap); // A 100% made up guess of the gap'
' between messages.\n'
' }\n'
'}\n'
'#endif // SEND_FOO\n'
'\n'
'#if SEND_FOO\n'
'// Alternative >64bit function to send FOO messages\n'
'// Where data is:\n'
'// uint8_t data[kFOOStateLength] = {0x5F, 0x5F, 0x40, 0x40, 0x5F,'
' 0x5F, 0x40, 0x40, 0x2F, 0x2F, 0x6C, 0x6C, 0x2F, 0x2F, 0x6C, 0x6C};\n'
'//\n'
'// Args:\n'
'// data: An array of bytes containing the IR command.\n'
'// It is assumed to be in MSB order for this code.\n'
'// nbytes: Nr. of bytes of data in the array. (>=kFOOStateLength)\n'
'// repeat: Nr. of times the message is to be repeated.\n'
'//\n'
'// Status: ALPHA / Untested.\n'
'void IRsend::sendFOO(const uint8_t data[], const uint16_t nbytes,'
' const uint16_t repeat) {\n'
' for (uint16_t r = 0; r <= repeat; r++) {\n'
' uint16_t pos = 0;\n'
' // Data Section #1\n'
' // e.g.\n'
' // bits = 32; bytes = 4;\n'
' // *(data + pos) = {0x5F, 0x5F, 0x40, 0x40};\n'
' sendGeneric(kFOOHdrMark, kFOOHdrSpace,\n'
' kFOOBitMark, kFOOOneSpace,\n'
' kFOOBitMark, kFOOZeroSpace,\n'
' kFOOHdrMark, kFOOHdrSpace,\n'
' data + pos, 4, // Bytes\n'
' kFOOFreq, true, kNoRepeat, kDutyDefault);\n'
' pos += 4; // Adjust by how many bytes of data we sent\n'
' // Data Section #2\n'
' // e.g.\n'
' // bits = 32; bytes = 4;\n'
' // *(data + pos) = {0x5F, 0x5F, 0x40, 0x40};\n'
' sendGeneric(kFOOHdrMark, kFOOHdrSpace,\n'
' kFOOBitMark, kFOOOneSpace,\n'
' kFOOBitMark, kFOOZeroSpace,\n'
' kFOOHdrMark, kFOOHdrSpace,\n'
' data + pos, 4, // Bytes\n'
' kFOOFreq, true, kNoRepeat, kDutyDefault);\n'
' pos += 4; // Adjust by how many bytes of data we sent\n'
' // Data Section #3\n'
' // e.g.\n'
' // bits = 32; bytes = 4;\n'
' // *(data + pos) = {0x2F, 0x2F, 0x6C, 0x6C};\n'
' sendGeneric(kFOOHdrMark, kFOOHdrSpace,\n'
' kFOOBitMark, kFOOOneSpace,\n'
' kFOOBitMark, kFOOZeroSpace,\n'
' kFOOHdrMark, kFOOHdrSpace,\n'
' data + pos, 4, // Bytes\n'
' kFOOFreq, true, kNoRepeat, kDutyDefault);\n'
' pos += 4; // Adjust by how many bytes of data we sent\n'
' // Data Section #4\n'
' // e.g.\n'
' // bits = 32; bytes = 4;\n'
' // *(data + pos) = {0x2F, 0x2F, 0x6C, 0x6C};\n'
' sendGeneric(kFOOHdrMark, kFOOHdrSpace,\n'
' kFOOBitMark, kFOOOneSpace,\n'
' kFOOBitMark, kFOOZeroSpace,\n'
' kFOOHdrMark, kFOOHdrSpace,\n'
' data + pos, 4, // Bytes\n'
' kFOOFreq, true, kNoRepeat, kDutyDefault);\n'
' pos += 4; // Adjust by how many bytes of data we sent\n'
' }\n'
'}\n'
'#endif // SEND_FOO\n'
'\n'
"// DANGER: More than 64 bits detected. A uint64_t for 'data' won't "
'work!\n'
'#if DECODE_FOO\n'
'// Function should be safe up to 64 bits.\n'
'bool IRrecv::decodeFOO(decode_results *results, const uint16_t nbits,'
' const bool strict) {\n'
' if (results->rawlen < 2 * nbits + kFOOOverhead)\n'
' return false; // Too short a message to match.\n'
' if (strict && nbits != kFOOBits)\n'
' return false;\n'
'\n'
' uint16_t offset = kStartOffset;\n'
' uint64_t data = 0;\n'
' match_result_t data_result;\n'
'\n'
' // Header\n'
' if (!matchMark(results->rawbuf[offset++], kFOOHdrMark))\n'
' return false;\n'
' if (!matchSpace(results->rawbuf[offset++], kFOOHdrSpace))\n'
' return false;\n'
'\n'
' // Data Section #1\n'
' // e.g. data_result.data = 0x5F5F4040, nbits = 32\n'
' data_result = matchData(&(results->rawbuf[offset]), 32,\n'
' kFOOBitMark, kFOOOneSpace,\n'
' kFOOBitMark, kFOOZeroSpace);\n'
' offset += data_result.used;\n'
' if (data_result.success == false) return false; // Fail\n'
' data <<= 32; // Make room for the new bits of data.\n'
' data |= data_result.data;\n'
'\n'
' // Header\n'
' if (!matchMark(results->rawbuf[offset++], kFOOHdrMark))\n'
' | |
"""Provides COM objects with version-independent access to the System.Reflection.EventInfo.MemberType property.
Get: MemberType(self: _EventInfo) -> MemberTypes
"""
Name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MemberInfo.Name property.
Get: Name(self: _EventInfo) -> str
"""
ReflectedType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Provides COM objects with version-independent access to the System.Reflection.MemberInfo.ReflectedType property.
Get: ReflectedType(self: _EventInfo) -> Type
"""
class _Exception:
    """Exposes the public members of the .NET System.Exception class to unmanaged (COM) code.

    Interop stub: every method body is empty (calls return None from
    Python) and the properties are placeholder accessors. The docstrings
    describe the contracts of the underlying .NET members.
    """

    def Equals(self, obj):
        """Version-independent access to System.Object.Equals(System.Object).

        obj: the object to compare with the current instance.
        Returns True when *obj* is equal to the current instance, else False.
        """

    def GetBaseException(self):
        """Version-independent access to System.Exception.GetBaseException.

        Returns the first exception thrown in a chain of exceptions; when
        InnerException is a null reference, that is the current exception
        itself.
        """

    def GetHashCode(self):
        """Version-independent access to System.Object.GetHashCode.

        Returns the hash code for the current instance.
        """

    def GetObjectData(self, info, context):
        """Version-independent access to System.Exception.GetObjectData(SerializationInfo, StreamingContext).

        info: SerializationInfo holding the serialized data about the
            exception being thrown.
        context: StreamingContext with contextual information about the
            source or destination.
        """

    def GetType(self):
        """Version-independent access to System.Exception.GetType.

        Returns a System.Type representing the exact runtime type of the
        current instance.
        """

    def ToString(self):
        """Version-independent access to System.Exception.ToString.

        Returns a string that represents the current exception object.
        """

    def __eq__(self, *args):  # cannot find CLR method
        """x.__eq__(y) <==> x==y"""

    def __init__(self, *args):  # cannot find CLR method
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""

    def __str__(self, *args):  # cannot find CLR method
        """x.__str__() <==> str(x)"""

    # Placeholder accessors mirroring the generated interop layout: the
    # getter yields a fresh object, the setter and deleter are no-ops.
    HelpLink = property(fget=lambda self: object(),
                        fset=lambda self, value: None,
                        fdel=lambda self: None)
    """Access to the System.Exception.HelpLink property (str, read/write)."""
    InnerException = property(fget=lambda self: object(),
                              fset=lambda self, value: None,
                              fdel=lambda self: None)
    """Access to the System.Exception.InnerException property (Exception)."""
    Message = property(fget=lambda self: object(),
                       fset=lambda self, value: None,
                       fdel=lambda self: None)
    """Access to the System.Exception.Message property (str)."""
    Source = property(fget=lambda self: object(),
                      fset=lambda self, value: None,
                      fdel=lambda self: None)
    """Access to the System.Exception.Source property (str, read/write)."""
    StackTrace = property(fget=lambda self: object(),
                          fset=lambda self, value: None,
                          fdel=lambda self: None)
    """Access to the System.Exception.StackTrace property (str)."""
    TargetSite = property(fget=lambda self: object(),
                          fset=lambda self, value: None,
                          fdel=lambda self: None)
    """Access to the System.Exception.TargetSite property (MethodBase)."""
class _FieldBuilder:
    """Exposes the System.Reflection.Emit.FieldBuilder class to unmanaged (COM) code.

    Interop stub: the IDispatch-style members below are placeholders whose
    bodies are empty (calls return None from Python); the docstrings
    describe the contracts of the underlying members.
    """

    def GetIDsOfNames(self, riid, rgszNames, cNames, lcid, rgDispId):
        """Map a set of names to a corresponding set of dispatch identifiers.

        riid: reserved for future use; must be IID_NULL.
        rgszNames: array of names to be mapped.
        cNames: count of the names to be mapped.
        lcid: locale context in which to interpret the names.
        rgDispId: caller-allocated array receiving the identifiers that
            correspond to the names.
        """

    def GetTypeInfo(self, iTInfo, lcid, ppTInfo):
        """Retrieve the type information for the object.

        Can be used to get the type information for an interface.

        iTInfo: the type information to return.
        lcid: locale identifier for the type information.
        ppTInfo: pointer to the requested type information object.
        """

    def GetTypeInfoCount(self, pcTInfo):
        """Retrieve the number of type information interfaces the object provides (0 or 1)."""

    def Invoke(self, dispIdMember, riid, lcid, wFlags, pDispParams, pVarResult, pExcepInfo, puArgErr):
        """Provide access to properties and methods exposed by the object.

        dispIdMember: identifier of the member to invoke.
        riid: reserved for future use; must be IID_NULL.
        lcid: locale context in which to interpret arguments.
        wFlags: flags describing the context of the call.
        pDispParams: pointer to a structure holding the argument array, the
            named-argument DISPID array, and their element counts.
        pVarResult: pointer to the location where the result will be stored.
        pExcepInfo: pointer to a structure that receives exception information.
        puArgErr: index of the first argument that has an error.
        """

    def __init__(self, *args):  # cannot find CLR method
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
class _FieldInfo:
""" Exposes the public members of the System.Reflection.FieldInfo class to unmanaged code. """
def Equals(self, other):
"""
Equals(self: _FieldInfo, other: object) -> bool
Provides COM objects with version-independent access to the
System.Object.Equals(System.Object) method.
other: The System.Object to compare with the current System.Object.
Returns: true if the specified System.Object is equal to the current System.Object;
otherwise, false.
"""
pass
def GetCustomAttributes(self, *__args):
"""
GetCustomAttributes(self: _FieldInfo, inherit: bool) -> Array[object]
Provides COM objects with version-independent access to the
System.Reflection.MemberInfo.GetCustomAttributes(System.Boolean) method.
inherit: Specifies whether to search this member's inheritance chain to find the
attributes.
Returns: An array that contains all the custom attributes, or an array with zero
elements if no attributes are defined.
GetCustomAttributes(self: _FieldInfo, attributeType: Type, inherit: bool) -> Array[object]
Provides COM objects with version-independent access to the
System.Reflection.MemberInfo.GetCustomAttributes(System.Type,System.Boolean)
method.
attributeType: The type of attribute to search for. Only attributes that are assignable to
this type are returned.
inherit: Specifies whether to search this member's inheritance chain to find the
attributes.
Returns: An array of custom attributes applied to this member, or an array with zero (0)
elements if no attributes have been applied.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: _FieldInfo) -> int
Provides COM objects with version-independent access to the
System.Object.GetHashCode method.
Returns: The hash code for the current instance.
"""
pass
def GetIDsOfNames(self, riid, rgszNames, cNames, lcid, rgDispId):
"""
GetIDsOfNames(self: _FieldInfo, riid: Guid, rgszNames: IntPtr, cNames: UInt32, lcid: UInt32, rgDispId: IntPtr) -> Guid
Maps a set of names to a corresponding set of dispatch identifiers.
riid: Reserved for future use. Must be IID_NULL.
rgszNames: Passed-in array of names to be mapped.
cNames: Count of the names to be mapped.
lcid: The locale context in which to interpret the names.
rgDispId: Caller-allocated array that receives the IDs corresponding to the names.
"""
pass
def GetType(self):
"""
GetType(self: _FieldInfo) -> Type
Provides COM objects with version-independent access to the
System.Object.GetType method.
Returns: A System.Type object.
"""
pass
def GetTypeInfo(self, iTInfo, lcid, ppTInfo):
"""
GetTypeInfo(self: _FieldInfo, iTInfo: UInt32, lcid: UInt32, ppTInfo: IntPtr)
| |
<reponame>truthiswill/usaspending-api<filename>usaspending_api/download/v2/download_column_historical_lookups.py
from collections import OrderedDict
"""
Sets up mappings from column names used in downloads to the query paths used to get the data from django.
Not in use while we pull CSV data from the non-historical tables. Until we switch to pulling CSV downloads from the
historical tables TransactionFPDS and TransactionFABS, import download_column_lookups.py instead.
"""
"""
Code to generate these from spreadsheets:
tail -n +3 'usaspending_api/data/DAIMS_IDD_Resorted+DRW+KB+GGv7/D2-Award (Financial Assistance)-Table 1.csv' >
d2_columns.csv
def find_column(col_name, model_classes):
for (model_class, prefix) in model_classes:
if hasattr(model_class, col_name):
return '{}{}'.format(prefix, col_name)
return None
query_paths = {'transaction': {'d1': {}, 'd2': {}}, 'award': {'d1': {}, 'd2': {}}}
human_names = {'transaction': {'d1': [], 'd2': []}, 'award': {'d1': [], 'd2': []}}
models_award_d1 = ((Award, ''), (TransactionNormalized, 'latest_transaction__'),
(TransactionFPDS, 'latest_transaction__contract_data__'))
models_transaction_d1 = ((TransactionNormalized, ''), (TransactionFPDS, 'contract_data__'), (Award, 'award__'))
models_award_d2 = ((Award, ''), (TransactionNormalized, 'latest_transaction__'),
(TransactionFABS, 'latest_transaction__assistance_data__'))
models_transaction_d2 = ((TransactionNormalized, ''), (TransactionFABS, 'assistance_data__'), (Award, 'award__'))
def set_if_found(dl_name, path, model, file):
if path:
if dl_name in ('fain', 'piid', 'uri'):
dl_name = 'award_id_' + dl_name
query_paths[model][file][dl_name] = path
human_names[model][file].append(dl_name)
else:
print('Not found: {}: {}: {}'.format(model, file, dl_name))
for row in d1:
if len(row['Download Name']) <= 1:
continue
if len(row['Database Tag']) <= 1:
continue
if row['Award Level'] == 'Y':
path = find_column(row['Database Tag'], models_award_d1)
set_if_found(row['Download Name'], path, 'award', 'd1')
if row['Transaction Level'] == 'Y':
path = find_column(row['Database Tag'], models_transaction_d1)
set_if_found(row['Download Name'], path, 'transaction', 'd1')
# no database tags supplied for d2
for row in d2:
if len(row['Download Name']) <= 1:
continue
if row['Award Level?'] == 'Y':
path = find_column(row['Download Name'], models_award_d2)
if not path:
# try what it was for D1
path = query_paths['award']['d1'].get(row['Download Name'])
if path:
col_name = path.split('__')[-1]
path = find_column(col_name, models_award_d2)
set_if_found(row['Download Name'], path, 'award', 'd2')
if row['Transaction Level?'] == 'Y':
path = find_column(row['Download Name'], models_transaction_d2)
if not path:
# try what it was for D1
path = query_paths['transaction']['d1'].get(row['Download Name'])
if path:
col_name = path.split('__')[-1]
path = find_column(col_name, models_transaction_d2)
set_if_found(row['Download Name'], path, 'transaction', 'd2')
"""
query_paths = {
'award': {
'd1': OrderedDict([
('award_id_piid', 'award__piid'),
('parent_award_agency_id', 'award__latest_transaction__contract_data__referenced_idv_agency_iden'),
('parent_award_agency_name', 'award__latest_transaction__contract_data__referenced_idv_agency_desc'),
('parent_award_id', 'award__parent_award_piid'),
('obligated_amount', 'award__total_obligation'),
('current_total_value_of_award', 'award__latest_transaction__contract_data__current_total_value_award'),
('potential_total_value_of_award', 'award__latest_transaction__contract_data__potential_total_value_awar'),
('period_of_performance_start_date',
'award__latest_transaction__contract_data__period_of_performance_star'),
('period_of_performance_current_end_date',
'award__latest_transaction__contract_data__period_of_performance_curr'),
('period_of_performance_potential_end_date',
'award__latest_transaction__contract_data__period_of_perf_potential_e'),
('ordering_period_end_date', 'award__latest_transaction__contract_data__ordering_period_end_date'),
('awarding_agency_code', 'award__latest_transaction__contract_data__awarding_agency_code'),
('awarding_agency_name', 'award__latest_transaction__contract_data__awarding_agency_name'),
('awarding_sub_agency_code', 'award__latest_transaction__contract_data__awarding_sub_tier_agency_c'),
('awarding_sub_agency_name', 'award__latest_transaction__contract_data__awarding_sub_tier_agency_n'),
('awarding_office_code', 'award__latest_transaction__contract_data__awarding_office_code'),
('awarding_office_name', 'award__latest_transaction__contract_data__awarding_office_name'),
('funding_agency_code', 'award__latest_transaction__contract_data__funding_agency_code'),
('funding_agency_name', 'award__latest_transaction__contract_data__funding_agency_name'),
('funding_sub_agency_code', 'award__latest_transaction__contract_data__funding_sub_tier_agency_co'),
('funding_sub_agency_name', 'award__latest_transaction__contract_data__funding_sub_tier_agency_na'),
('funding_office_code', 'award__latest_transaction__contract_data__funding_office_code'),
('funding_office_name', 'award__latest_transaction__contract_data__funding_office_name'),
('foreign_funding', 'award__latest_transaction__contract_data__foreign_funding'),
('foreign_funding_description', 'award__latest_transaction__contract_data__foreign_funding_desc'),
('sam_exception', 'award__latest_transaction__contract_data__sam_exception'),
('sam_exception_description', 'award__latest_transaction__contract_data__sam_exception_description'),
('recipient_duns', 'award__latest_transaction__contract_data__awardee_or_recipient_uniqu'),
('recipient_name', 'award__latest_transaction__contract_data__awardee_or_recipient_legal'),
('recipient_doing_business_as_name',
'award__latest_transaction__contract_data__vendor_doing_as_business_n'),
('cage_code', 'award__latest_transaction__contract_data__cage_code'),
('recipient_parent_name', 'award__latest_transaction__contract_data__ultimate_parent_legal_enti'),
('recipient_parent_duns', 'award__latest_transaction__contract_data__ultimate_parent_unique_ide'),
('recipient_country_code', 'award__latest_transaction__contract_data__legal_entity_country_code'),
('recipient_country_name', 'award__latest_transaction__contract_data__legal_entity_country_name'),
('recipient_address_line_1', 'award__latest_transaction__contract_data__legal_entity_address_line1'),
('recipient_address_line_2', 'award__latest_transaction__contract_data__legal_entity_address_line2'),
('recipient_city_name', 'award__latest_transaction__contract_data__legal_entity_city_name'),
('recipient_state_code', 'award__latest_transaction__contract_data__legal_entity_state_code'),
('recipient_state_name', 'award__latest_transaction__contract_data__legal_entity_state_descrip'),
('recipient_zip_4_code', 'award__latest_transaction__contract_data__legal_entity_zip4'),
('recipient_congressional_district',
'award__latest_transaction__contract_data__legal_entity_congressional'),
('recipient_phone_number', 'award__latest_transaction__contract_data__vendor_phone_number'),
('recipient_fax_number', 'award__latest_transaction__contract_data__vendor_fax_number'),
('primary_place_of_performance_country_code',
'award__latest_transaction__contract_data__place_of_perform_country_c'),
('primary_place_of_performance_country_name',
'award__latest_transaction__contract_data__place_of_perf_country_desc'),
('primary_place_of_performance_city_name',
'award__latest_transaction__contract_data__place_of_perform_city_name'),
('primary_place_of_performance_county_name',
'award__latest_transaction__contract_data__place_of_perform_county_na'),
('primary_place_of_performance_state_code',
'award__latest_transaction__contract_data__place_of_performance_state'),
('primary_place_of_performance_state_name',
'award__latest_transaction__contract_data__place_of_perfor_state_desc'),
('primary_place_of_performance_zip_4',
'award__latest_transaction__contract_data__place_of_performance_zip4a'),
('primary_place_of_performance_congressional_district',
'award__latest_transaction__contract_data__place_of_performance_congr'),
('award_or_idv_flag', 'award__latest_transaction__contract_data__pulled_from'),
('award_type_code', 'award__latest_transaction__contract_data__contract_award_type'),
('award_type', 'award__latest_transaction__contract_data__contract_award_type_desc'),
('idv_type_code', 'award__latest_transaction__contract_data__idv_type'),
('idv_type', 'award__latest_transaction__contract_data__idv_type_description'),
('multiple_or_single_award_idv_code',
'award__latest_transaction__contract_data__multiple_or_single_award_i'),
('multiple_or_single_award_idv', 'award__latest_transaction__contract_data__multiple_or_single_aw_desc'),
('type_of_idc_code', 'award__latest_transaction__contract_data__type_of_idc'),
('type_of_idc', 'award__latest_transaction__contract_data__type_of_idc_description'),
('type_of_contract_pricing_code', 'award__latest_transaction__contract_data__type_of_contract_pricing'),
('type_of_contract_pricing', 'award__latest_transaction__contract_data__type_of_contract_pric_desc'),
('award_description', 'award__latest_transaction__contract_data__award_description'),
('solicitation_identifier', 'award__latest_transaction__contract_data__solicitation_identifier'),
('number_of_actions', 'award__latest_transaction__contract_data__number_of_actions'),
('inherently_governmental_functions',
'award__latest_transaction__contract_data__inherently_government_func'),
('inherently_governmental_functions_description',
'award__latest_transaction__contract_data__inherently_government_desc'),
('product_or_service_code', 'award__latest_transaction__contract_data__product_or_service_code'),
('product_or_service_code_description',
'award__latest_transaction__contract_data__product_or_service_co_desc'),
('contract_bundling_code', 'award__latest_transaction__contract_data__contract_bundling'),
('contract_bundling', 'award__latest_transaction__contract_data__contract_bundling_descrip'),
('dod_claimant_program_code', 'award__latest_transaction__contract_data__dod_claimant_program_code'),
('dod_claimant_program_description',
'award__latest_transaction__contract_data__dod_claimant_prog_cod_desc'),
('naics_code', 'award__latest_transaction__contract_data__naics'),
('naics_description', 'award__latest_transaction__contract_data__naics_description'),
('recovered_materials_sustainability_code',
'award__latest_transaction__contract_data__recovered_materials_sustai'),
('recovered_materials_sustainability',
'award__latest_transaction__contract_data__recovered_materials_s_desc'),
('domestic_or_foreign_entity_code', 'award__latest_transaction__contract_data__domestic_or_foreign_entity'),
('domestic_or_foreign_entity', 'award__latest_transaction__contract_data__domestic_or_foreign_e_desc'),
('dod_acquisition_program_code', 'award__latest_transaction__contract_data__program_system_or_equipmen'),
('dod_acquisition_program_description',
'award__latest_transaction__contract_data__program_system_or_equ_desc'),
('information_technology_commercial_item_category_code',
'award__latest_transaction__contract_data__information_technology_com'),
('information_technology_commercial_item_category',
'award__latest_transaction__contract_data__information_technolog_desc'),
('epa_designated_product_code', 'award__latest_transaction__contract_data__epa_designated_product'),
('epa_designated_product', 'award__latest_transaction__contract_data__epa_designated_produc_desc'),
('country_of_product_or_service_origin_code',
'award__latest_transaction__contract_data__country_of_product_or_serv'),
('country_of_product_or_service_origin',
'award__latest_transaction__contract_data__country_of_product_or_desc'),
('place_of_manufacture_code', 'award__latest_transaction__contract_data__place_of_manufacture'),
('place_of_manufacture', 'award__latest_transaction__contract_data__place_of_manufacture_desc'),
('subcontracting_plan_code', 'award__latest_transaction__contract_data__subcontracting_plan'),
('subcontracting_plan', 'award__latest_transaction__contract_data__subcontracting_plan_desc'),
('extent_competed_code', 'award__latest_transaction__contract_data__extent_competed'),
('extent_competed', 'award__latest_transaction__contract_data__extent_compete_description'),
('solicitation_procedures_code', 'award__latest_transaction__contract_data__solicitation_procedures'),
('solicitation_procedures', 'award__latest_transaction__contract_data__solicitation_procedur_desc'),
('type_of_set_aside_code', 'award__latest_transaction__contract_data__type_set_aside'),
('type_of_set_aside', 'award__latest_transaction__contract_data__type_set_aside_description'),
('evaluated_preference_code', 'award__latest_transaction__contract_data__evaluated_preference'),
('evaluated_preference', 'award__latest_transaction__contract_data__evaluated_preference_desc'),
('research_code', 'award__latest_transaction__contract_data__research'),
('research', 'award__latest_transaction__contract_data__research_description'),
('fair_opportunity_limited_sources_code',
'award__latest_transaction__contract_data__fair_opportunity_limited_s'),
('fair_opportunity_limited_sources',
'award__latest_transaction__contract_data__fair_opportunity_limi_desc'),
('other_than_full_and_open_competition_code',
'award__latest_transaction__contract_data__other_than_full_and_open_c'),
('other_than_full_and_open_competition',
'award__latest_transaction__contract_data__other_than_full_and_o_desc'),
('number_of_offers_received', 'award__latest_transaction__contract_data__number_of_offers_received'),
('commercial_item_acquisition_procedures_code',
'award__latest_transaction__contract_data__commercial_item_acquisitio'),
('commercial_item_acquisition_procedures',
'award__latest_transaction__contract_data__commercial_item_acqui_desc'),
('small_business_competitiveness_demonstration_program',
'award__latest_transaction__contract_data__small_business_competitive'),
('commercial_item_test_program_code',
'award__latest_transaction__contract_data__commercial_item_test_progr'),
('commercial_item_test_program', 'award__latest_transaction__contract_data__commercial_item_test_desc'),
('a76_fair_act_action_code', 'award__latest_transaction__contract_data__a_76_fair_act_action'),
('a76_fair_act_action', 'award__latest_transaction__contract_data__a_76_fair_act_action_desc'),
('fed_biz_opps_code', 'award__latest_transaction__contract_data__fed_biz_opps'),
('fed_biz_opps', 'award__latest_transaction__contract_data__fed_biz_opps_description'),
('local_area_set_aside_code', 'award__latest_transaction__contract_data__local_area_set_aside'),
('local_area_set_aside', 'award__latest_transaction__contract_data__local_area_set_aside_desc'),
('clinger_cohen_act_planning_code', 'award__latest_transaction__contract_data__clinger_cohen_act_planning'),
('clinger_cohen_act_planning', 'award__latest_transaction__contract_data__clinger_cohen_act_pla_desc'),
('materials_supplies_articles_equipment_code',
'award__latest_transaction__contract_data__materials_supplies_article'),
('materials_supplies_articles_equipment',
'award__latest_transaction__contract_data__materials_supplies_descrip'),
('labor_standards_code', 'award__latest_transaction__contract_data__labor_standards'),
('labor_standards', 'award__latest_transaction__contract_data__labor_standards_descrip'),
('construction_wage_rate_requirements_code',
'award__latest_transaction__contract_data__construction_wage_rate_req'),
('construction_wage_rate_requirements',
'award__latest_transaction__contract_data__construction_wage_rat_desc'),
('interagency_contracting_authority_code',
'award__latest_transaction__contract_data__interagency_contracting_au'),
('interagency_contracting_authority',
'award__latest_transaction__contract_data__interagency_contract_desc'),
('other_statutory_authority', 'award__latest_transaction__contract_data__other_statutory_authority'),
('program_acronym', 'award__latest_transaction__contract_data__program_acronym'),
('parent_award_type_code', 'award__latest_transaction__contract_data__referenced_idv_type'),
('parent_award_type', 'award__latest_transaction__contract_data__referenced_idv_type_desc'),
('parent_award_single_or_multiple_code',
'award__latest_transaction__contract_data__referenced_mult_or_single'),
('parent_award_single_or_multiple', 'award__latest_transaction__contract_data__referenced_mult_or_si_desc'),
('major_program', 'award__latest_transaction__contract_data__major_program'),
('national_interest_action_code', 'award__latest_transaction__contract_data__national_interest_action'),
('national_interest_action', 'award__latest_transaction__contract_data__national_interest_desc'),
('cost_or_pricing_data_code', 'award__latest_transaction__contract_data__cost_or_pricing_data'),
('cost_or_pricing_data', 'award__latest_transaction__contract_data__cost_or_pricing_data_desc'),
('cost_accounting_standards_clause_code',
'award__latest_transaction__contract_data__cost_accounting_standards'),
('cost_accounting_standards_clause',
'award__latest_transaction__contract_data__cost_accounting_stand_desc'),
('gfe_gfp_code', 'award__latest_transaction__contract_data__government_furnished_prope'),
('gfe_gfp', 'award__latest_transaction__contract_data__government_furnished_prope'),
('sea_transportation_code', 'award__latest_transaction__contract_data__sea_transportation'),
('sea_transportation', 'award__latest_transaction__contract_data__sea_transportation_desc'),
('consolidated_contract_code', 'award__latest_transaction__contract_data__consolidated_contract'),
('consolidated_contract', 'award__latest_transaction__contract_data__consolidated_contract_desc'),
('performance_based_service_acquisition_code',
'award__latest_transaction__contract_data__performance_based_service'),
('performance_based_service_acquisition',
'award__latest_transaction__contract_data__performance_based_se_desc'),
('multi_year_contract_code', 'award__latest_transaction__contract_data__multi_year_contract'),
('multi_year_contract', 'award__latest_transaction__contract_data__multi_year_contract_desc'),
('contract_financing_code', 'award__latest_transaction__contract_data__contract_financing'),
('contract_financing', 'award__latest_transaction__contract_data__contract_financing_descrip'),
('purchase_card_as_payment_method_code',
'award__latest_transaction__contract_data__purchase_card_as_payment_m'),
('purchase_card_as_payment_method', 'award__latest_transaction__contract_data__purchase_card_as_paym_desc'),
('contingency_humanitarian_or_peacekeeping_operation_code',
'award__latest_transaction__contract_data__contingency_humanitarian_o'),
('contingency_humanitarian_or_peacekeeping_operation',
'award__latest_transaction__contract_data__contingency_humanitar_desc'),
('alaskan_native_owned_corporation_or_firm',
'award__latest_transaction__contract_data__alaskan_native_owned_corpo'),
('american_indian_owned_business', 'award__latest_transaction__contract_data__american_indian_owned_busi'),
('indian_tribe_federally_recognized',
'award__latest_transaction__contract_data__indian_tribe_federally_rec'),
('native_hawaiian_owned_business', 'award__latest_transaction__contract_data__native_hawaiian_owned_busi'),
('tribally_owned_business', 'award__latest_transaction__contract_data__tribally_owned_business'),
('veteran_owned_business', 'award__latest_transaction__contract_data__veteran_owned_business'),
('service_disabled_veteran_owned_business',
'award__latest_transaction__contract_data__service_disabled_veteran_o'),
('woman_owned_business', 'award__latest_transaction__contract_data__woman_owned_business'),
('women_owned_small_business', 'award__latest_transaction__contract_data__women_owned_small_business'),
('economically_disadvantaged_women_owned_small_business',
'award__latest_transaction__contract_data__economically_disadvantaged'),
('joint_venture_women_owned_small_business',
'award__latest_transaction__contract_data__joint_venture_women_owned'),
('joint_venture_economic_disadvantaged_women_owned_small_bus',
'award__latest_transaction__contract_data__joint_venture_economically'),
('minority_owned_business', 'award__latest_transaction__contract_data__minority_owned_business'),
('subcontinent_asian_asian_indian_american_owned_business',
'award__latest_transaction__contract_data__subcontinent_asian_asian_i'),
('asian_pacific_american_owned_business',
'award__latest_transaction__contract_data__asian_pacific_american_own'),
('black_american_owned_business', 'award__latest_transaction__contract_data__black_american_owned_busin'),
('hispanic_american_owned_business',
'award__latest_transaction__contract_data__hispanic_american_owned_bu'),
('native_american_owned_business', 'award__latest_transaction__contract_data__native_american_owned_busi'),
('other_minority_owned_business', 'award__latest_transaction__contract_data__other_minority_owned_busin'),
('contracting_officers_determination_of_business_size',
'award__latest_transaction__contract_data__contracting_officers_desc'),
('contracting_officers_determination_of_business_size_code',
'award__latest_transaction__contract_data__contracting_officers_deter'),
('emerging_small_business', 'award__latest_transaction__contract_data__emerging_small_business'),
('community_developed_corporation_owned_firm',
'award__latest_transaction__contract_data__community_developed_corpor'),
('labor_surplus_area_firm', 'award__latest_transaction__contract_data__labor_surplus_area_firm'),
('us_federal_government', 'award__latest_transaction__contract_data__us_federal_government'),
('federally_funded_research_and_development_corp',
'award__latest_transaction__contract_data__federally_funded_research'),
('federal_agency', 'award__latest_transaction__contract_data__federal_agency'),
('us_state_government', 'award__latest_transaction__contract_data__us_state_government'),
('us_local_government', 'award__latest_transaction__contract_data__us_local_government'),
('city_local_government', 'award__latest_transaction__contract_data__city_local_government'),
('county_local_government', 'award__latest_transaction__contract_data__county_local_government'),
('inter_municipal_local_government',
'award__latest_transaction__contract_data__inter_municipal_local_gove'),
('local_government_owned', 'award__latest_transaction__contract_data__local_government_owned'),
('municipality_local_government', 'award__latest_transaction__contract_data__municipality_local_governm'),
('school_district_local_government',
'award__latest_transaction__contract_data__school_district_local_gove'),
('township_local_government', 'award__latest_transaction__contract_data__township_local_government'),
('us_tribal_government', 'award__latest_transaction__contract_data__us_tribal_government'),
('foreign_government', 'award__latest_transaction__contract_data__foreign_government'),
('organizational_type', 'award__latest_transaction__contract_data__organizational_type'),
('corporate_entity_not_tax_exempt', 'award__latest_transaction__contract_data__corporate_entity_not_tax_e'),
('corporate_entity_tax_exempt', 'award__latest_transaction__contract_data__corporate_entity_tax_exemp'),
('partnership_or_limited_liability_partnership',
'award__latest_transaction__contract_data__partnership_or_limited_lia'),
('sole_proprietorship', 'award__latest_transaction__contract_data__sole_proprietorship'),
('small_agricultural_cooperative', 'award__latest_transaction__contract_data__small_agricultural_coopera'),
('international_organization', 'award__latest_transaction__contract_data__international_organization'),
('us_government_entity', 'award__latest_transaction__contract_data__us_government_entity'),
('community_development_corporation',
'award__latest_transaction__contract_data__community_development_corp'),
('domestic_shelter', 'award__latest_transaction__contract_data__domestic_shelter'),
('educational_institution', 'award__latest_transaction__contract_data__educational_institution'),
('foundation', 'award__latest_transaction__contract_data__foundation'),
('hospital_flag', 'award__latest_transaction__contract_data__hospital_flag'),
('manufacturer_of_goods', 'award__latest_transaction__contract_data__manufacturer_of_goods'),
('veterinary_hospital', 'award__latest_transaction__contract_data__veterinary_hospital'),
('hispanic_servicing_institution', 'award__latest_transaction__contract_data__hispanic_servicing_institu'),
('receives_contracts', 'award__latest_transaction__contract_data__contracts'),
('receives_grants', 'award__latest_transaction__contract_data__grants'),
('receives_contracts_and_grants', 'award__latest_transaction__contract_data__receives_contracts_and_gra'),
('airport_authority', 'award__latest_transaction__contract_data__airport_authority'),
('council_of_governments', 'award__latest_transaction__contract_data__council_of_governments'),
('housing_authorities_public_tribal',
'award__latest_transaction__contract_data__housing_authorities_public'),
('interstate_entity', 'award__latest_transaction__contract_data__interstate_entity'),
('planning_commission', 'award__latest_transaction__contract_data__planning_commission'),
('port_authority', 'award__latest_transaction__contract_data__port_authority'),
('transit_authority', 'award__latest_transaction__contract_data__transit_authority'),
('subchapter_scorporation', 'award__latest_transaction__contract_data__subchapter_s_corporation'),
('limited_liability_corporation', 'award__latest_transaction__contract_data__limited_liability_corporat'),
('foreign_owned_and_located', 'award__latest_transaction__contract_data__foreign_owned_and_located'),
('for_profit_organization', 'award__latest_transaction__contract_data__for_profit_organization'),
('nonprofit_organization', 'award__latest_transaction__contract_data__nonprofit_organization'),
('other_not_for_profit_organization',
'award__latest_transaction__contract_data__other_not_for_profit_organ'),
('the_ability_one_program', 'award__latest_transaction__contract_data__the_ability_one_program'),
('number_of_employees', 'award__latest_transaction__contract_data__number_of_employees'),
('annual_revenue', 'award__latest_transaction__contract_data__annual_revenue'),
('private_university_or_college', 'award__latest_transaction__contract_data__private_university_or_coll'),
('state_controlled_institution_of_higher_learning',
'award__latest_transaction__contract_data__state_controlled_instituti'),
('1862_land_grant_college', 'award__latest_transaction__contract_data__c1862_land_grant_college'),
('1890_land_grant_college', 'award__latest_transaction__contract_data__c1890_land_grant_college'),
('1994_land_grant_college', 'award__latest_transaction__contract_data__c1994_land_grant_college'),
('minority_institution', 'award__latest_transaction__contract_data__minority_institution'),
('historically_black_college', 'award__latest_transaction__contract_data__historically_black_college'),
('tribal_college', 'award__latest_transaction__contract_data__tribal_college'),
('alaskan_native_servicing_institution',
'award__latest_transaction__contract_data__alaskan_native_servicing_i'),
('native_hawaiian_servicing_institution',
'award__latest_transaction__contract_data__native_hawaiian_servicing'),
('school_of_forestry', 'award__latest_transaction__contract_data__school_of_forestry'),
('veterinary_college', 'award__latest_transaction__contract_data__veterinary_college'),
('dot_certified_disadvantage', 'award__latest_transaction__contract_data__dot_certified_disadvantage'),
('self_certified_small_disadvantaged_business',
'award__latest_transaction__contract_data__self_certified_small_disad'),
('small_disadvantaged_business', 'award__latest_transaction__contract_data__small_disadvantaged_busine'),
('c8a_program_participant', 'award__latest_transaction__contract_data__c8a_program_participant'),
('historically_underutilized_business_zone_hubzone_firm',
'award__latest_transaction__contract_data__historically_underutilized'),
('sba_certified_8a_joint_venture', 'award__latest_transaction__contract_data__sba_certified_8_a_joint_ve'),
('last_modified_date', 'award__latest_transaction__contract_data__last_modified')
]),
'd2': OrderedDict([
('award_id_fain', 'award__fain'),
('award_id_uri', 'award__uri'),
('sai_number', 'award__latest_transaction__assistance_data__sai_number'),
('obligated_amount', 'award__total_obligation'),
('non_federal_funding_amount', 'award__non_federal_funding_amount'),
('total_funding_amount', 'award__total_funding_amount'),
('face_value_of_loan', 'award__latest_transaction__assistance_data__face_value_loan_guarantee'),
('original_subsidy_cost', 'award__latest_transaction__original_loan_subsidy_cost'),
('total_subsidy_cost', 'award__total_subsidy_cost'),
('total_loan_value', 'award__total_loan_value'),
('period_of_performance_start_date', 'award__period_of_performance_start_date'),
('period_of_performance_current_end_date', 'award__period_of_performance_current_end_date'),
('awarding_agency_code', 'award__latest_transaction__assistance_data__awarding_agency_code'),
('awarding_agency_name', 'award__latest_transaction__assistance_data__awarding_agency_name'),
('awarding_sub_agency_code', 'award__latest_transaction__assistance_data__awarding_sub_tier_agency_c'),
('awarding_sub_agency_name', 'award__latest_transaction__assistance_data__awarding_sub_tier_agency_n'),
('awarding_office_code', 'award__latest_transaction__assistance_data__awarding_office_code'),
('awarding_office_name', 'award__latest_transaction__assistance_data__awarding_office_name'),
('funding_agency_code', 'award__latest_transaction__assistance_data__funding_agency_code'),
('funding_agency_name', 'award__latest_transaction__assistance_data__funding_agency_name'),
('funding_sub_agency_code', 'award__latest_transaction__assistance_data__funding_sub_tier_agency_co'),
('funding_sub_agency_name', 'award__latest_transaction__assistance_data__funding_sub_tier_agency_na'),
('funding_office_code', 'award__latest_transaction__assistance_data__funding_office_code'),
('funding_office_name', 'award__latest_transaction__assistance_data__funding_office_name'),
('recipient_duns', 'award__latest_transaction__assistance_data__awardee_or_recipient_uniqu'),
('recipient_name', 'award__latest_transaction__assistance_data__awardee_or_recipient_legal'),
('recipient_parent_duns', 'award__latest_transaction__assistance_data__ultimate_parent_unique_ide'),
('recipient_parent_name', 'award__latest_transaction__assistance_data__ultimate_parent_legal_enti'),
('recipient_country_code', 'award__latest_transaction__assistance_data__legal_entity_country_code'),
('recipient_country_name', 'award__latest_transaction__assistance_data__legal_entity_country_name'),
('recipient_address_line_1', 'award__latest_transaction__assistance_data__legal_entity_address_line1'),
('recipient_address_line_2', 'award__latest_transaction__assistance_data__legal_entity_address_line2'),
('recipient_city_code', 'award__latest_transaction__assistance_data__legal_entity_city_code'),
('recipient_city_name', 'award__latest_transaction__assistance_data__legal_entity_city_name'),
('recipient_county_code', 'award__latest_transaction__assistance_data__legal_entity_county_code'),
('recipient_county_name', 'award__latest_transaction__assistance_data__legal_entity_county_name'),
('recipient_state_code', 'award__latest_transaction__assistance_data__legal_entity_state_code'),
('recipient_state_name', 'award__latest_transaction__assistance_data__legal_entity_state_name'),
('recipient_zip_code', 'award__latest_transaction__assistance_data__legal_entity_zip5'),
('recipient_zip_last_4_code', 'award__latest_transaction__assistance_data__legal_entity_zip_last4'),
('recipient_congressional_district',
'award__latest_transaction__assistance_data__legal_entity_congressional'),
('recipient_foreign_city_name', 'award__latest_transaction__assistance_data__legal_entity_foreign_city'),
('recipient_foreign_province_name',
'award__latest_transaction__assistance_data__legal_entity_foreign_provi'),
('recipient_foreign_postal_code',
'award__latest_transaction__assistance_data__legal_entity_foreign_posta'),
('primary_place_of_performance_country_code',
'award__latest_transaction__assistance_data__place_of_perform_country_c'),
('primary_place_of_performance_country_name',
'award__latest_transaction__assistance_data__place_of_perform_country_n'),
('primary_place_of_performance_code',
'award__latest_transaction__assistance_data__place_of_performance_code'),
('primary_place_of_performance_city_name',
'award__latest_transaction__assistance_data__place_of_performance_city'),
('primary_place_of_performance_county_code',
'award__latest_transaction__assistance_data__place_of_perform_county_co'),
('primary_place_of_performance_county_name',
'award__latest_transaction__assistance_data__place_of_perform_county_na'),
('primary_place_of_performance_state_name',
'award__latest_transaction__assistance_data__place_of_perform_state_nam'),
('primary_place_of_performance_zip_4',
'award__latest_transaction__assistance_data__place_of_performance_zip4a'),
('primary_place_of_performance_congressional_district',
'award__latest_transaction__assistance_data__place_of_performance_congr'),
('primary_place_of_performance_foreign_location',
'award__latest_transaction__assistance_data__place_of_performance_forei'),
('cfda_number', 'award__latest_transaction__assistance_data__cfda_number'),
('cfda_title', 'award__latest_transaction__assistance_data__cfda_title'),
('assistance_type_code', 'award__latest_transaction__assistance_data__assistance_type'),
('assistance_type_description', 'award__latest_transaction__assistance_data__assistance_type_desc'),
('award_description', 'award__latest_transaction__assistance_data__award_description'),
('business_funds_indicator_code', 'award__latest_transaction__assistance_data__business_funds_indicator'),
('business_funds_indicator_description',
'award__latest_transaction__assistance_data__business_funds_ind_desc'),
('business_types_code', 'award__latest_transaction__assistance_data__business_types'),
('business_types_description', 'award__latest_transaction__assistance_data__business_types_desc'),
('record_type_code', 'award__latest_transaction__assistance_data__record_type'),
('record_type_description', 'award__latest_transaction__assistance_data__record_type_description'),
('last_modified_date', 'award__latest_transaction__assistance_data__modified_at')
])
},
'transaction': {
'd1': OrderedDict([
('award_id_piid', 'transaction__contract_data__piid'),
('modification_number', 'transaction__contract_data__award_modification_amendme'),
('transaction_number', 'transaction__contract_data__transaction_number'),
('parent_award_agency_id', 'transaction__contract_data__referenced_idv_agency_iden'),
('parent_award_agency_name', 'transaction__contract_data__referenced_idv_agency_desc'),
('parent_award_id', 'transaction__contract_data__parent_award_id'),
('parent_award_modification_number', 'transaction__contract_data__referenced_idv_modificatio'),
('federal_action_obligation', 'transaction__federal_action_obligation'),
('total_dollars_obligated', 'transaction__contract_data__total_obligated_amount'),
('base_and_exercised_options_value', 'transaction__contract_data__base_exercised_options_val'),
('current_total_value_of_award', 'transaction__contract_data__current_total_value_award'),
('base_and_all_options_value', 'transaction__contract_data__base_and_all_options_value'),
('potential_total_value_of_award', 'transaction__contract_data__potential_total_value_awar'),
('action_date', 'transaction__action_date'),
('period_of_performance_start_date', 'transaction__contract_data__period_of_performance_star'),
('period_of_performance_current_end_date', 'transaction__contract_data__period_of_performance_curr'),
('period_of_performance_potential_end_date', 'transaction__contract_data__period_of_perf_potential_e'),
('ordering_period_end_date', 'transaction__contract_data__ordering_period_end_date'),
('awarding_agency_code', 'transaction__contract_data__awarding_agency_code'),
('awarding_agency_name', 'transaction__contract_data__awarding_agency_name'),
('awarding_sub_agency_code', 'transaction__contract_data__awarding_sub_tier_agency_c'),
('awarding_sub_agency_name', 'transaction__contract_data__awarding_sub_tier_agency_n'),
('awarding_office_code', 'transaction__contract_data__awarding_office_code'),
('awarding_office_name', 'transaction__contract_data__awarding_office_name'),
('funding_agency_code', 'transaction__contract_data__funding_agency_code'),
('funding_agency_name', 'transaction__contract_data__funding_agency_name'),
('funding_sub_agency_code', 'transaction__contract_data__funding_sub_tier_agency_co'),
('funding_sub_agency_name', 'transaction__contract_data__funding_sub_tier_agency_na'),
('funding_office_code', 'transaction__contract_data__funding_office_code'),
('funding_office_name', 'transaction__contract_data__funding_office_name'),
('foreign_funding', 'transaction__contract_data__foreign_funding'),
('foreign_funding_description', 'transaction__contract_data__foreign_funding_desc'),
('sam_exception', 'transaction__contract_data__sam_exception'),
('sam_exception_description', 'transaction__contract_data__sam_exception_description'),
('recipient_duns', 'transaction__contract_data__awardee_or_recipient_uniqu'),
('recipient_name', 'transaction__contract_data__awardee_or_recipient_legal'),
('recipient_doing_business_as_name', 'transaction__contract_data__vendor_doing_as_business_n'),
('cage_code', 'transaction__contract_data__cage_code'),
('recipient_parent_name', 'transaction__contract_data__ultimate_parent_legal_enti'),
('recipient_parent_duns', 'transaction__contract_data__ultimate_parent_unique_ide'),
('recipient_country_code', 'transaction__contract_data__legal_entity_country_code'),
('recipient_country_name', 'transaction__contract_data__legal_entity_country_name'),
('recipient_address_line_1', 'transaction__contract_data__legal_entity_address_line1'),
('recipient_address_line_2', 'transaction__contract_data__legal_entity_address_line2'),
('recipient_city_name', 'transaction__contract_data__legal_entity_city_name'),
('recipient_state_code', 'transaction__contract_data__legal_entity_state_code'),
('recipient_state_name', 'transaction__contract_data__legal_entity_state_descrip'),
('recipient_zip_4_code', 'transaction__contract_data__legal_entity_zip4'),
('recipient_congressional_district', 'transaction__contract_data__legal_entity_congressional'),
('recipient_phone_number', 'transaction__contract_data__vendor_phone_number'),
('recipient_fax_number', 'transaction__contract_data__vendor_fax_number'),
('primary_place_of_performance_country_code', 'transaction__contract_data__place_of_perform_country_c'),
('primary_place_of_performance_country_name', 'transaction__contract_data__place_of_perf_country_desc'),
('primary_place_of_performance_city_name', 'transaction__contract_data__place_of_perform_city_name'),
('primary_place_of_performance_county_name', 'transaction__contract_data__place_of_perform_county_na'),
('primary_place_of_performance_state_code', 'transaction__contract_data__place_of_performance_state'),
('primary_place_of_performance_state_name', 'transaction__contract_data__place_of_perfor_state_desc'),
('primary_place_of_performance_zip_4', 'transaction__contract_data__place_of_performance_zip4a'),
('primary_place_of_performance_congressional_district',
'transaction__contract_data__place_of_performance_congr'),
('award_or_idv_flag', 'transaction__contract_data__pulled_from'),
('award_type_code', 'transaction__contract_data__contract_award_type'),
('award_type', 'transaction__contract_data__contract_award_type_desc'),
('idv_type_code', 'transaction__contract_data__idv_type'),
('idv_type', 'transaction__contract_data__idv_type_description'),
('multiple_or_single_award_idv_code', 'transaction__contract_data__multiple_or_single_award_i'),
('multiple_or_single_award_idv', 'transaction__contract_data__multiple_or_single_aw_desc'),
('type_of_idc_code', 'transaction__contract_data__type_of_idc'),
('type_of_idc', 'transaction__contract_data__type_of_idc_description'),
('type_of_contract_pricing_code', 'transaction__contract_data__type_of_contract_pricing'),
('type_of_contract_pricing', 'transaction__contract_data__type_of_contract_pric_desc'),
('award_description', 'transaction__contract_data__award_description'),
('action_type_code', 'transaction__action_type'),
('action_type', 'transaction__action_type_description'),
('solicitation_identifier', 'transaction__contract_data__solicitation_identifier'),
('number_of_actions', 'transaction__contract_data__number_of_actions'),
('inherently_governmental_functions', 'transaction__contract_data__inherently_government_func'),
('inherently_governmental_functions_description', 'transaction__contract_data__inherently_government_desc'),
('product_or_service_code', 'transaction__contract_data__product_or_service_code'),
('product_or_service_code_description', 'transaction__contract_data__product_or_service_co_desc'),
('contract_bundling_code', 'transaction__contract_data__contract_bundling'),
('contract_bundling', 'transaction__contract_data__contract_bundling_descrip'),
('dod_claimant_program_code', 'transaction__contract_data__dod_claimant_program_code'),
('dod_claimant_program_description', 'transaction__contract_data__dod_claimant_prog_cod_desc'),
('naics_code', 'transaction__contract_data__naics'),
('naics_description', 'transaction__contract_data__naics_description'),
('recovered_materials_sustainability_code', 'transaction__contract_data__recovered_materials_sustai'),
('recovered_materials_sustainability', 'transaction__contract_data__recovered_materials_s_desc'),
('domestic_or_foreign_entity_code', 'transaction__contract_data__domestic_or_foreign_entity'),
('domestic_or_foreign_entity', 'transaction__contract_data__domestic_or_foreign_e_desc'),
('dod_acquisition_program_code', 'transaction__contract_data__program_system_or_equipmen'),
('dod_acquisition_program_description', 'transaction__contract_data__program_system_or_equ_desc'),
('information_technology_commercial_item_category_code',
'transaction__contract_data__information_technology_com'),
('information_technology_commercial_item_category',
'transaction__contract_data__information_technolog_desc'),
('epa_designated_product_code', 'transaction__contract_data__epa_designated_product'),
('epa_designated_product', 'transaction__contract_data__epa_designated_produc_desc'),
('country_of_product_or_service_origin_code', 'transaction__contract_data__country_of_product_or_serv'),
('country_of_product_or_service_origin', 'transaction__contract_data__country_of_product_or_desc'),
('place_of_manufacture_code', 'transaction__contract_data__place_of_manufacture'),
('place_of_manufacture', 'transaction__contract_data__place_of_manufacture_desc'),
('subcontracting_plan_code', 'transaction__contract_data__subcontracting_plan'),
('subcontracting_plan', 'transaction__contract_data__subcontracting_plan_desc'),
('extent_competed_code', 'transaction__contract_data__extent_competed'),
('extent_competed', 'transaction__contract_data__extent_compete_description'),
('solicitation_procedures_code', 'transaction__contract_data__solicitation_procedures'),
('solicitation_procedures', 'transaction__contract_data__solicitation_procedur_desc'),
('type_of_set_aside_code', 'transaction__contract_data__type_set_aside'),
('type_of_set_aside', 'transaction__contract_data__type_set_aside_description'),
('evaluated_preference_code', 'transaction__contract_data__evaluated_preference'),
('evaluated_preference', 'transaction__contract_data__evaluated_preference_desc'),
('research_code', 'transaction__contract_data__research'),
('research', 'transaction__contract_data__research_description'),
('fair_opportunity_limited_sources_code', 'transaction__contract_data__fair_opportunity_limited_s'),
('fair_opportunity_limited_sources', 'transaction__contract_data__fair_opportunity_limi_desc'),
('other_than_full_and_open_competition_code', 'transaction__contract_data__other_than_full_and_open_c'),
('other_than_full_and_open_competition', 'transaction__contract_data__other_than_full_and_o_desc'),
('number_of_offers_received', 'transaction__contract_data__number_of_offers_received'),
('commercial_item_acquisition_procedures_code', 'transaction__contract_data__commercial_item_acquisitio'),
('commercial_item_acquisition_procedures', 'transaction__contract_data__commercial_item_acqui_desc'),
('small_business_competitiveness_demonstration_program',
'transaction__contract_data__small_business_competitive'),
('commercial_item_test_program_code', 'transaction__contract_data__commercial_item_test_progr'),
('commercial_item_test_program', 'transaction__contract_data__commercial_item_test_desc'),
('a76_fair_act_action_code', 'transaction__contract_data__a_76_fair_act_action'),
('a76_fair_act_action', 'transaction__contract_data__a_76_fair_act_action_desc'),
('fed_biz_opps_code', 'transaction__contract_data__fed_biz_opps'),
('fed_biz_opps', 'transaction__contract_data__fed_biz_opps_description'),
('local_area_set_aside_code', 'transaction__contract_data__local_area_set_aside'),
('local_area_set_aside', 'transaction__contract_data__local_area_set_aside_desc'),
('price_evaluation_adjustment_preference_percent_difference',
'transaction__contract_data__price_evaluation_adjustmen'),
('clinger_cohen_act_planning_code', 'transaction__contract_data__clinger_cohen_act_planning'),
('clinger_cohen_act_planning', 'transaction__contract_data__clinger_cohen_act_pla_desc'),
('materials_supplies_articles_equipment_code', 'transaction__contract_data__materials_supplies_article'),
('materials_supplies_articles_equipment', 'transaction__contract_data__materials_supplies_descrip'),
('labor_standards_code', 'transaction__contract_data__labor_standards'),
('labor_standards', 'transaction__contract_data__labor_standards_descrip'),
('construction_wage_rate_requirements_code', 'transaction__contract_data__construction_wage_rate_req'),
('construction_wage_rate_requirements', 'transaction__contract_data__construction_wage_rat_desc'),
('interagency_contracting_authority_code', 'transaction__contract_data__interagency_contracting_au'),
('interagency_contracting_authority', 'transaction__contract_data__interagency_contract_desc'),
('other_statutory_authority', 'transaction__contract_data__other_statutory_authority'),
('program_acronym', 'transaction__contract_data__program_acronym'),
('parent_award_type_code', 'transaction__contract_data__referenced_idv_type'),
('parent_award_type', 'transaction__contract_data__referenced_idv_type_desc'),
('parent_award_single_or_multiple_code', 'transaction__contract_data__referenced_mult_or_single'),
('parent_award_single_or_multiple', 'transaction__contract_data__referenced_mult_or_si_desc'),
('major_program', 'transaction__contract_data__major_program'),
('national_interest_action_code', 'transaction__contract_data__national_interest_action'),
('national_interest_action', 'transaction__contract_data__national_interest_desc'),
('cost_or_pricing_data_code', 'transaction__contract_data__cost_or_pricing_data'),
('cost_or_pricing_data', 'transaction__contract_data__cost_or_pricing_data_desc'),
('cost_accounting_standards_clause_code', 'transaction__contract_data__cost_accounting_standards'),
('cost_accounting_standards_clause', 'transaction__contract_data__cost_accounting_stand_desc'),
('gfe_gfp_code', 'transaction__contract_data__government_furnished_prope'),
('gfe_gfp', 'transaction__contract_data__government_furnished_desc'),
('sea_transportation_code', 'transaction__contract_data__sea_transportation'),
('sea_transportation', 'transaction__contract_data__sea_transportation_desc'),
('undefinitized_action_code', 'transaction__contract_data__undefinitized_action'),
('undefinitized_action', 'transaction__contract_data__undefinitized_action_desc'),
('consolidated_contract_code', 'transaction__contract_data__consolidated_contract'),
('consolidated_contract', 'transaction__contract_data__consolidated_contract_desc'),
('performance_based_service_acquisition_code', 'transaction__contract_data__performance_based_service'),
('performance_based_service_acquisition', 'transaction__contract_data__performance_based_se_desc'),
('multi_year_contract_code', 'transaction__contract_data__multi_year_contract'),
('multi_year_contract', 'transaction__contract_data__multi_year_contract_desc'),
('contract_financing_code', 'transaction__contract_data__contract_financing'),
('contract_financing', 'transaction__contract_data__contract_financing_descrip'),
('purchase_card_as_payment_method_code', 'transaction__contract_data__purchase_card_as_payment_m'),
('purchase_card_as_payment_method', 'transaction__contract_data__purchase_card_as_paym_desc'),
('contingency_humanitarian_or_peacekeeping_operation_code',
'transaction__contract_data__contingency_humanitarian_o'),
('contingency_humanitarian_or_peacekeeping_operation',
'transaction__contract_data__contingency_humanitar_desc'),
('alaskan_native_owned_corporation_or_firm', 'transaction__contract_data__alaskan_native_owned_corpo'),
('american_indian_owned_business', 'transaction__contract_data__american_indian_owned_busi'),
('indian_tribe_federally_recognized', | |
<reponame>mikiec84/aiida-bigdft-plugin
"""
This module is useful to process a logfile of BigDFT run, in yaml format.
It also provides some tools to extract typical informations about the run,
like the energy, the eigenvalues and so on.
"""
# This module needs: os, yaml, futile, matplotlib, numpy, BZ, DoS
# Keys of the per-quantity descriptor dicts in BUILTIN, and of the
# 'instructions' dictionaries consumed by process_logfiles().
EVAL = "eval"  # instruction key: expression evaluated for each document
SETUP = "let"  # instruction key: variable setup section
INITIALIZATION = "globals"  # instruction key: global variable initialization
PATH = 'path'  # list of alternative key-paths to search in the parsed logfile
PRINT = 'print'  # label used when reporting the quantity (or a bool flag)
GLOBAL = 'global'  # whether the quantity is global to the run
FLOAT_SCALAR = 'scalar'  # marks values that are float scalars (presumably
#   parsed via floatify to cope with Fortran notation -- TODO confirm)
PRE_POST = [EVAL, SETUP, INITIALIZATION]  # instruction keys that are not quantities
# Builtin paths to define the search paths.  Each entry maps a quantity name
# to a descriptor: PATH lists alternative key sequences tried in order by
# document_quantities(); PRINT/GLOBAL/FLOAT_SCALAR are optional metadata.
BUILTIN = {
    'number_of_orbitals': {PATH: [['Total Number of Orbitals']],
                           PRINT: "Total Number of Orbitals", GLOBAL: True},
    'posinp_file': {PATH: [['posinp', 'properties', 'source', ]],
                    PRINT: "source:", GLOBAL: True},
    'XC_parameter': {PATH: [['dft', 'ixc'], ['DFT parameters:', 'XC ID:']],
                     PRINT: "ixc:", GLOBAL: True, FLOAT_SCALAR: True},
    'grid_spacing': {PATH: [["dft", "hgrids"]],
                     PRINT: "hgrids:", GLOBAL: True},
    'spin_polarization': {PATH: [["dft", "nspin"]],
                          PRINT: "nspin:", GLOBAL: True},
    'total_magn_moment': {PATH: [["dft", "mpol"]],
                          PRINT: "mpol:", GLOBAL: True},
    'system_charge': {PATH: [["dft", "qcharge"]],
                      PRINT: "qcharge:", GLOBAL: True},
    'rmult': {PATH: [["dft", "rmult"]],
              PRINT: "rmult:", GLOBAL: True},
    # 'up_elec'::{PATH: [["occupation:","K point 1:","up:","Orbital \d+"]],
    # PRINT: "Orbital \d+", GLOBAL: True},
    'astruct': {PATH: [['Atomic structure']]},
    'data_directory': {PATH: [['Data Writing directory']]},
    'dipole': {PATH: [['Electric Dipole Moment (AU)', 'P vector']],
               PRINT: "Dipole (AU)"},
    'electrostatic_multipoles': {PATH: [['Multipole coefficients']]},
    'energy': {PATH: [["Last Iteration", "FKS"], ["Last Iteration", "EKS"],
                      ["Energy (Hartree)"]],
               PRINT: "Energy", GLOBAL: False},
    'evals': {PATH: [["Complete list of energy eigenvalues"],
                     ["Ground State Optimization", -1, "Orbitals"],
                     ["Ground State Optimization", -1,
                      "Hamiltonian Optimization", -1, "Subspace Optimization",
                      "Orbitals"]]},
    'fermi_level': {PATH: [["Ground State Optimization", -1, "Fermi Energy"],
                           ["Ground State Optimization", -1,
                            "Hamiltonian Optimization", -1,
                            "Subspace Optimization", "Fermi Energy"]],
                    PRINT: True, GLOBAL: False},
    'forcemax': {PATH: [["Geometry", "FORCES norm(Ha/Bohr)", "maxval"],
                        ['Clean forces norm (Ha/Bohr)', 'maxval']],
                 PRINT: "Max val of Forces"},
    'forcemax_cv': {PATH: [['geopt', 'forcemax']],
                    PRINT: 'Convergence criterion on forces',
                    GLOBAL: True, FLOAT_SCALAR: True},
    'force_fluct': {PATH: [["Geometry", "FORCES norm(Ha/Bohr)", "fluct"]],
                    PRINT: "Threshold fluctuation of Forces"},
    'forces': {PATH: [['Atomic Forces (Ha/Bohr)']]},
    'gnrm_cv': {PATH: [["dft", "gnrm_cv"]],
                PRINT: "Convergence criterion on Wfn. Residue", GLOBAL: True},
    'kpts': {PATH: [["K points"]],
             PRINT: False, GLOBAL: True},
    'kpt_mesh': {PATH: [['kpt', 'ngkpt']], PRINT: True, GLOBAL: True},
    'magnetization': {PATH: [["Ground State Optimization", -1,
                              "Total magnetization"],
                             ["Ground State Optimization", -1,
                              "Hamiltonian Optimization", -1,
                              "Subspace Optimization", "Total magnetization"]],
                      PRINT: "Total magnetization of the system"},
    'memory_run': {PATH: [
        ['Accumulated memory requirements during principal run stages (MiB.KiB)']
    ]},
    'memory_quantities': {PATH: [
        ['Memory requirements for principal quantities (MiB.KiB)']]},
    'memory_peak': {PATH: [['Estimated Memory Peak (MB)']]},
    'nat': {PATH: [['Atomic System Properties', 'Number of atoms']],
            PRINT: "Number of Atoms", GLOBAL: True},
    'pressure': {PATH: [['Pressure', 'GPa']], PRINT: True},
    'sdos': {PATH: [['SDos files']], GLOBAL: True},
    'support_functions': {PATH: [["Gross support functions moments",
                                  'Multipole coefficients', 'values']]},
    'symmetry': {PATH: [['Atomic System Properties', 'Space group']],
                 PRINT: "Symmetry group", GLOBAL: True}}
def get_logs(files):
    """
    Load every YAML logfile named in *files*.

    :param files: List of filenames indicating the logfiles
    :returns: List of Logfile instances associated to filename
    """
    from futile import YamlIO
    loaded = []
    for path in files:
        # YamlIO.load with doc_lists=True yields a list of documents per file.
        loaded.extend(YamlIO.load(path, doc_lists=True, safe_mode=True))
    return loaded
def floatify(scalar):
    """
    Convert a possibly Fortran-style literal into a Python float.

    Args:
        scalar (str, float): a string representing a float, possibly using
            the Fortran exponent markers 'd'/'D', otherwise an
            already-numeric value which is returned untouched
    Returns:
        float. The value associated to scalar as a floating point number
    Example:
        >>> floatify('1.d-4')  # same as floatify('1.e-4') or floatify(0.0001)
        0.0001
    """
    if not isinstance(scalar, str):
        return scalar
    # Fortran writes exponents as 'd'/'D'; Python's float() wants 'e'/'E'.
    return float(scalar.replace('d', 'e').replace('D', 'E'))
# This is a tentative function written to extract information from the runs
def document_quantities(doc, to_extract):
    """
    Extract information from the runs.

    For each requested quantity, try the candidate key-paths (either the
    ones registered in BUILTIN or the literal path given) until one of
    them resolves inside *doc*; unresolved quantities map to None.

    .. warning::
        This routine was designed for the previous parse_log.py script and it
        is here only for backward compatibility purposes.
    """
    analysis = {}
    for quantity, field in to_extract.items():
        # Instruction sections (eval/let/globals) are not quantities.
        if quantity in PRE_POST:
            continue
        # A scalar field naming a BUILTIN entry expands to its search paths;
        # anything else is treated as a literal path.
        if isinstance(field, (list, dict)) or field not in BUILTIN:
            candidate_paths = [field]
        else:
            candidate_paths = BUILTIN[field][PATH]
        # Try each alternative in order; keep the first that resolves.
        value = None
        for candidate in candidate_paths:
            value = doc
            for key in candidate:
                try:
                    value = value[key]
                except (KeyError, TypeError):
                    # Any lookup failure voids this candidate path.
                    value = None
                    break
            if value is not None:
                break
        analysis[quantity] = value
    return analysis
def perform_operations(variables, ops, debug=False):
    """
    Perform operations given by 'ops'.
    'variables' is a dictionary of variables i.e. key=value.

    :param variables: mapping of names to values; each pair is executed as
        an assignment statement before 'ops' runs
    :param ops: Python source code to execute after the assignments
    :param debug: if True, print each generated command and 'ops'
    .. warning::
        This routine was designed for the previous parse_log.py script and it is
        here only for backward compatibility purposes.
    .. warning::
        Executes arbitrary strings via exec() -- never feed it untrusted
        input.  NOTE(review): in Python 3 names assigned by exec() inside a
        function do not become real local variables; whether 'ops' can see
        the variables set here depends on implementation details of
        locals() -- verify on the targeted interpreter.
    """
    # glstr=''
    # if globs is not None:
    #     for var in globs:
    #         glstr+= "global "+var+"\n"
    # if debug: print '###Global Strings: \n',glstr
    # first evaluate the given variables
    for key in variables:
        # Build an assignment statement such as "energy=-12.3".
        command = key+"="+str(variables[key])
        if debug:
            print(command)
        exec(command)
    # then evaluate the given expression
    if debug:
        print(ops)
    # exec(glstr+ops, globals(), locals())
    exec(ops, globals(), locals())
def process_logfiles(files, instructions, debug=False):
    """
    Process the logfiles in files with the dictionary 'instructions'.

    :param files: list of logfile paths, loaded one at a time via get_logs()
    :param instructions: instruction dictionary; the INITIALIZATION section
        declares module-level globals, the EVAL section is executed for each
        parsed document via perform_operations()
    :param debug: forwarded to perform_operations()
    .. warning::
        This routine was designed for the previous parse_log.py script and it is
        here only for backward compatibility purposes.
    .. warning::
        Executes instruction strings via exec(); do not use on untrusted input.
    """
    import sys
    # __LAST_FILE__ exposes the number of files to the executed instructions.
    glstr = 'global __LAST_FILE__ \n'
    glstr += '__LAST_FILE__='+str(len(files))+'\n'
    if INITIALIZATION in instructions:
        # Declare and initialize each requested global in the generated code.
        for var in instructions[INITIALIZATION]:
            glstr += "global "+var+"\n"
            glstr += var + " = " + str(instructions[INITIALIZATION][var])+"\n"
            # exec var +" = "+ str(instructions[INITIALIZATION][var])
    exec(glstr, globals(), locals())
    for f in files:
        sys.stderr.write("#########processing "+f+"\n")
        datas = get_logs([f])
        for doc in datas:
            # Extract the requested quantities, then run the EVAL section.
            doc_res = document_quantities(doc, instructions)
            # print doc_res,instructions
            if EVAL in instructions:
                perform_operations(doc_res, instructions[EVAL], debug=debug)
def find_iterations(log):
    """
    Identify the different block of the iterations of the wavefunctions
    optimization.

    .. todo::
        Should be generalized and checked for mixing calculation and O(N)
        logfiles
    :param log: logfile load
    :type log: dictionary
    :returns: wavefunction residue per iterations, per each subspace
        diagonalization
    :rtype: numpy array of rank two
    """
    import numpy
    # NOTE(review): rpnrm is reset for every 'Ground State Optimization'
    # entry, so only the last entry's residues survive the loop -- confirm
    # that logs are expected to carry a single entry here.
    for itrp in log['Ground State Optimization']:
        rpnrm = []
        for itsp in itrp['Hamiltonian Optimization']:
            gnrm_sp = []
            for it in \
                    itsp['Subspace Optimization']['Wavefunctions Iterations']:
                # Collect the residue only when reported for this iteration.
                if 'gnrm' in it:
                    gnrm_sp.append(it['gnrm'])
            rpnrm.append(numpy.array(gnrm_sp))
    rpnrm = numpy.array(rpnrm)
    return rpnrm
def plot_wfn_convergence(wfn_it, gnrm_cv, label=None):
    """
    Plot the convergence of the wavefunction coming from the find_iterations
    function. Cumulates the plot in matplotlib.pyplot module

    :param wfn_it: list coming from :func:`find_iterations`
    :param gnrm_cv: convergence criterion for the residue of the wfn_it list
    :param label: label for the given plot
    """
    import matplotlib.pyplot as plt
    import numpy
    # Residues on a log scale, flattened across all diagonalization blocks.
    plt.semilogy(numpy.ravel(wfn_it), label=label)
    plt.legend(loc="upper right")
    # Horizontal marker at the convergence threshold.
    plt.axhline(gnrm_cv, color='k', linestyle='--')
    # Vertical separators at the end of each block.
    boundary = 0
    for block in wfn_it:
        boundary += len(block)
        plt.axvline(boundary, color='k', linestyle='--')
class Logfile():
"""
Import a Logfile from a filename in yaml format, a list of filenames,
an archive (compressed tar file), a dictionary or a list of dictionaries.
:param args: logfile names to be parsed
:type args: strings
:param kwargs: keyword arguments
* archive: name of the archive from which retrieve the logfiles
* member: name of the logfile within the archive. If absent, all the
files of the archive will be considered as args
* label: the label of the logfile instance
* dictionary: parsed logfile given as a dictionary, serialization of the
yaml logfile
:Example:
>>> l = Logfile('one.yaml','two.yaml')
>>> l = Logfile(archive='calc.tgz')
>>> l = Logfile(archive='calc.tgz',member='one.yaml')
>>> l = Logfile(dictionary=dict1)
>>> l = Logfile(dictionary=[dict1, dict2])
.. todo::
Document the automatically generated attributes, perhaps via an inner
function in futile python module
"""
def __init__(self, *args, **kwargs):
"""
Initialize the class
"""
import os
dicts = []
# Read the dictionary kwargs
arch = kwargs.get("archive")
member = kwargs.get("member")
label = kwargs.get("label")
dictionary = kwargs.get("dictionary")
if arch:
# An archive is detected
import tarfile
from futile import YamlIO
tar = tarfile.open(arch)
members = [tar.getmember(member)] if member else tar.getmembers()
# print members
for memb in members:
f = tar.extractfile(memb)
dicts.append(YamlIO.load(stream=f.read()))
# Add the label (name of the file)
# dicts[-1]['label'] = memb.name
srcdir = os.path.dirname(arch)
label = label if label is not None else arch
elif dictionary:
# Read the dictionary or a list of dictionaries or from a generator
# Need to return a list
| |
<reponame>nwfsc-fram/pyFieldSoftware<filename>py/observer/ImportBiospeciesProtocols.py
# -----------------------------------------------------------------------------
# Name: ImportBiospecProtocols.py
# Purpose: Import CSV file into biospecimens protocols
#
# Author: <NAME> <<EMAIL>>
#
# Created: August 23, 2016
# License: MIT
# ------------------------------------------------------------------------------
# TO RUN THE IMPORT, ENABLE test_perform_import UNIT TEST BELOW
import logging
import unittest
from apsw import ConstraintError
from playhouse.csv_loader import *
# noinspection PyPackageRequirements
from py.observer.ObserverDBModels import StratumLu, StratumGroups, Programs, ProgramStratumGroupMtx, \
FisheryStratumGroupsMtx, GeartypeStratumGroupMtx, SpeciesSamplingPlanLu, Lookups, Species, \
ProtocolGroups, ProtocolGroupMtx
# noinspection PyPackageRequirements
class BiospeciesProtocolsLoader:
"""
Create an in-memory CSV database and use it to populate protocols
"""
# csv_db = None
BioProtocols = None
    def __init__(self, db_filename):
        """
        Load the CSV file into an in-memory SQLite database exposed as the
        BioProtocols peewee model.

        :param db_filename: path to the CSV file of biospecimen protocol
            definitions (16 columns expected)
        """
        # Shut up peewee's SQL logging, if desired
        # logger = logging.getLogger('peewee')
        # logger.setLevel(logging.WARNING)
        self.csv_db = SqliteDatabase(':memory:')
        col_count = 16
        # Load every column as nullable text; typed parsing happens during import.
        fields = [TextField(null=True) for _ in range(0, col_count)]
        self.BioProtocols = load_csv(self.csv_db, db_filename, fields=fields)
    def import_to_db(self, clear_old=False):
        """
        Import data into observer DB

        Walks every CSV row, building the protocol/fishery/geartype groups,
        the stratum, and finally the species sampling plan for that row.
        @param clear_old: clear old tables (and reset their AUTOINCREMENT
            counters) before importing
        """
        logging.info('Importing {rec_cnt} records.'.format(rec_cnt=len(self.BioProtocols.select())))
        if clear_old:
            # Delete SQLITE_SEQUENCE entries so new row IDs restart at 1,
            # then empty each table.
            SpeciesSamplingPlanLu.raw('DELETE FROM SQLITE_SEQUENCE WHERE NAME="SPECIES_SAMPLING_PLAN_LU"').execute()
            SpeciesSamplingPlanLu.delete().execute()
            StratumLu.raw('DELETE FROM SQLITE_SEQUENCE WHERE NAME="STRATUM_LU"').execute()
            StratumLu.delete().execute()
            StratumGroups.raw('DELETE FROM SQLITE_SEQUENCE WHERE NAME="STRATUM_GROUPS"').execute()
            StratumGroups.delete().execute()
            ProtocolGroups.raw('DELETE FROM SQLITE_SEQUENCE WHERE NAME="PROTOCOL_GROUPS"').execute()
            ProtocolGroups.delete().execute()
            ProtocolGroupMtx.raw('DELETE FROM SQLITE_SEQUENCE WHERE NAME="PROTOCOL_GROUP_MTX"').execute()
            ProtocolGroupMtx.delete().execute()
            ProgramStratumGroupMtx.raw('DELETE FROM SQLITE_SEQUENCE WHERE NAME="PROGRAM_STRATUM_GROUP_MTX"').execute()
            ProgramStratumGroupMtx.delete().execute()
            FisheryStratumGroupsMtx.raw('DELETE FROM SQLITE_SEQUENCE WHERE NAME="FISHERY_STRATUM_GROUP_MTX"').execute()
            FisheryStratumGroupsMtx.delete().execute()
            GeartypeStratumGroupMtx.raw('DELETE FROM SQLITE_SEQUENCE WHERE NAME="GEARTYPE_STRATUM_GROUP_MTX"').execute()
            GeartypeStratumGroupMtx.delete().execute()
        # Seed the fixed Programs groups (16/17/18) before row processing.
        self.create_default_groups()
        for rec in self.BioProtocols.select():  # all rows in CSV
            # noinspection PyUnusedLocal
            protocol_id = self.build_protocol_group(protocol_list=rec.protocol)
            fishery_id = self.build_fishery_group(fishery_list=rec.fishery, name=rec.fishery_group_name)
            geartype_id = self.build_geartype_group(geartype_list=rec.gear_type, name=rec.gear_type_group_name)
            program_id = self.get_program_group_id(rec.uber_program)
            biolist_id = self.get_biolist_id(name=rec.biosampling_list)
            disposition = 'D' if rec.disposition.lower()[0] == 'd' else 'R'  # Translate for consistency
            stratum_id = self.build_stratum(depth_name=rec.depth,
                                            program_id=program_id,
                                            fishery_id=fishery_id,
                                            geartype_id=geartype_id,
                                            disposition=disposition)
            # Species codes come as a comma-separated string, or empty.
            species_ids = rec.species_codes.split(',') if rec.species_codes else None
            if species_ids is None:
                logging.warning(f'No species codes for {rec.species_species_group}')
            species_ids = [int(s.strip()) for s in species_ids] if species_ids else None
            self.build_species_sampling_plan(species_ids=species_ids,
                                             common_name=rec.species_species_group,
                                             disposition=disposition,
                                             protocol_group_id=protocol_id,
                                             stratum_id=stratum_id,
                                             biosample_list_lu_id=biolist_id)
@staticmethod
def build_stratum(depth_name, program_id, fishery_id, geartype_id, disposition):
"""
Build STRATUM_LU
@param depth_name: all, <30 fathoms, >30 fathoms -> determines RANGE_MIN, MAX, UNITS
@param program_id: Program Group FK
@param fishery_id: Fishery Group FK
@param geartype_id: Gear Type FK
@param disposition: 'DISCARD' /'RETAINED'
@return: STRATUM_ID
"""
depth_name_nosp = depth_name.lower().replace(' ', '')
range_min = 0.
range_max = -1.0
if depth_name_nosp == '<30fathoms':
range_max = 30.0
elif depth_name_nosp == '>30fathoms':
range_min = 30.001
new_stratum = StratumLu.create(name=depth_name,
program_group=program_id,
fishery_group=fishery_id,
gear_type_group=geartype_id,
disposition=disposition,
range_min=range_min,
range_max=range_max,
range_units='fathoms')
return new_stratum.stratum
@staticmethod
def get_program_group_id(program_str):
"""
Translate string to
@param program_str: program string, e.g. 'ncs,cs'
@return: record ID
"""
try:
program_str = program_str.lower()
if program_str == 'ncs,cs':
return StratumGroups.get(StratumGroups.group == 18).group
elif program_str == 'cs':
return StratumGroups.get(StratumGroups.group == 17).group
elif program_str == 'cs':
return StratumGroups.get(StratumGroups.group == 16).group
except StratumGroups.DoesNotExist:
logging.error('** cannot look up program id {}'.format(program_str))
    @staticmethod
    def create_default_groups():
        """
        Automatically create groups in STRATUM_GROUPS

        Seeds the three fixed Programs groups (16 = Non Catchshares,
        17 = Catchshares, 18 = All) and links every program in PROGRAMS to
        group 18 plus either 17 (Catch Shares) or 16 (everything else).
        """
        # Fisheries, Programs, Gear Types
        premade = [
            {'group': 16, 'name': 'Non Catchshares', 'group_type': 'Programs'},
            {'group': 17, 'name': 'Catchshares', 'group_type': 'Programs'},
            {'group': 18, 'name': 'All', 'group_type': 'Programs'},
        ]
        for g in premade:
            StratumGroups.create(**g)
        for prog in Programs.select():
            # Every program belongs to the 'All' group.
            ProgramStratumGroupMtx.create(group=18, program=prog.program)
            if 'Catch Shares' == prog.program_name:
                ProgramStratumGroupMtx.create(group=17, program=prog.program)
            else:
                ProgramStratumGroupMtx.create(group=16, program=prog.program)
    def build_protocol_group(self, protocol_list):
        """
        Build a PROTOCOL_GROUP

        Reuses an existing group with the same normalized name, otherwise
        creates one and links each protocol ID via PROTOCOL_GROUP_MTX.
        @param protocol_list: e.g. 'FL,FC,FORM'
        @return: ID of protocol group, or None if any step fails
            (errors are logged, not raised)
        """
        try:
            protocol_list = protocol_list.strip().upper().replace(' ', '')  # ensure format like 'FL,FC,FORM'
            query = ProtocolGroups.select().where(ProtocolGroups.name == protocol_list)
            if query.exists():
                item = ProtocolGroups.get(ProtocolGroups.name == protocol_list)
                logging.debug('{} group already created, id = {}'.format(protocol_list, item.group))
                return item.group  # ID
            # use NAME as protocol list for easy reference
            pgroup = ProtocolGroups.create(name=protocol_list)
            protocol_ids = self.lookup_protocols(protocol_list)
            # Link ID's to this group
            # noinspection PyTypeChecker
            for prot_id in protocol_ids:
                ProtocolGroupMtx.create(group=pgroup.group, protocol_lu=prot_id)
            logging.info('Created {} -> {}'.format(protocol_list, protocol_ids))
            return pgroup.group  # ID
        except Exception as err:
            # NOTE(review): broad except -- any failure logs and returns None;
            # callers must tolerate a None group ID.
            logging.error(f'Error building protocol group for {protocol_list}: {err}')
@staticmethod
def get_biolist_id(name):
    """
    Given a biolist name lookup or create new entry in STRATUM_GROUPS
    @param name: e.g. 'Biosample list 1'
    @return: group_id of new/existing biolist name, or None for empty input
    """
    if not name:
        return None
    normalized = name.lower().strip()
    record, _created = StratumGroups.get_or_create(name=normalized, group_type='Biolist')
    return record.group
def build_fishery_group(self, fishery_list, name):
    """
    Build FISHERY_STRATUM_GROUPS_MTX and STRATUM_GROUPS
    NAME will be composed of fishery_list and nonunique name on spreadsheet
    @param fishery_list: e.g. '1,2,3'
    @param name: name of fishery group from sheet, e.g. 'Nearshore', 'Trawl'
    @return: ID of fishery group
    """
    try:
        fishery_list = fishery_list.strip().replace(' ', '')  # ensure format like '1,2,3'
        group_name = '{name} ({fishery_list})'.format(name=name, fishery_list=fishery_list)
        existing = StratumGroups.select().where(StratumGroups.name == group_name)
        if existing.exists():
            found = StratumGroups.get(StratumGroups.name == group_name)
            logging.debug('{} group already created, id = {}'.format(group_name, found.group))
            return found.group  # ID
        fgroup = StratumGroups.create(name=group_name, group_type='Fishery')
        fishery_ids = self.lookup_fisheries(fishery_list)
        # Link each fishery lookup ID to the new group.
        # noinspection PyTypeChecker
        for fishery_lu_id in fishery_ids:
            FisheryStratumGroupsMtx.create(group=fgroup.group, fishery_lu=fishery_lu_id)
        logging.info('Created fisheries {} -> {}'.format(fishery_list, fishery_ids))
        return fgroup.group  # ID
    except Exception as err:
        logging.error('Error building fisheries group: {}'.format(err))
def build_geartype_group(self, geartype_list, name):
    """
    Build GEARTYPE_STRATUM_GROUPS_MTX and STRATUM_GROUPS
    NAME will be composed of geartype_list and nonunique name on spreadsheet
    @param geartype_list: e.g. '1,2,3'
    @param name: name of gear type group from sheet
    @return: ID of geartype group
    """
    try:
        geartype_list = geartype_list.strip().replace(' ', '')  # ensure format like '1,2,3'
        group_name = '{name} ({geartype_list})'.format(name=name, geartype_list=geartype_list)
        existing = StratumGroups.select().where(StratumGroups.name == group_name)
        if existing.exists():
            found = StratumGroups.get(StratumGroups.name == group_name)
            logging.debug('{} group already created, id = {}'.format(group_name, found.group))
            return found.group  # ID
        gear_group = StratumGroups.create(name=group_name, group_type='Gear Type')
        geartype_ids = self.lookup_geartypes(geartype_list)
        # Link each gear type lookup ID to the new group.
        # noinspection PyTypeChecker
        for gear_lu_id in geartype_ids:
            GeartypeStratumGroupMtx.create(group=gear_group.group, geartype_lu=gear_lu_id)
        logging.info('Created gear types {} -> {}'.format(geartype_list, geartype_ids))
        return gear_group.group  # STRATUM_GROUPS GROUP_ID
    except Exception as err:
        logging.error('Error building gear type group: {}'.format(err))
@staticmethod
def build_species_sampling_plan(species_ids, common_name, disposition, protocol_group_id, stratum_id, biosample_list_lu_id):
    """
    Create SPECIES_SAMPLE_PLAN_LU entries, one per species ID.
    @param species_ids: actual ID's of species to create plan for
    @param common_name: name or special category to build
    @param disposition: 'R' or 'D'
    @param protocol_group_id: PROTOCOL_GROUPS FK used to name and link the plan
    @param stratum_id: STRATUM_LU FK used to name and link the plan
    @param biosample_list_lu_id: biosample list group FK (may be None)
    @return: None
    """
    # Fall back to a common-name lookup when explicit species IDs were not supplied.
    ids = species_ids if species_ids else BiospeciesProtocolsLoader.get_species_id(common_name)
    if ids:
        for species_id in ids:
            if not protocol_group_id:
                raise Exception(f'NO PROTOCOL GROUP ID for name {common_name} {species_ids}')
            # Compose a descriptive plan name: "<common name> (<protocol group>, <stratum>)"
            plan_name = '{name} ({protname}, {stratname})'. \
                format(name=common_name,
                       protname=ProtocolGroups.get(ProtocolGroups.group == protocol_group_id).name,
                       stratname=StratumLu.get(StratumLu.stratum == stratum_id).name
                       )
            logging.info('Building SpeciesSamplingPlanLu {}'.format(plan_name))
            try:
                SpeciesSamplingPlanLu.create(plan_name=plan_name,
                                             display_name=plan_name,
                                             disposition=disposition,
                                             species=species_id,
                                             protocol_group=protocol_group_id,
                                             stratum=stratum_id,
                                             biosample_list_lu=biosample_list_lu_id
                                             )
            except ConstraintError:
                # Constraint hit: this species is already part of the plan; skip it.
                logging.warning('Species already in plan')
            # possibly TODO(?): weight method, count, biosample_list_lu
@staticmethod
def get_species_id(common_name):
    """
    Return list of ID's for common name, or special cases such as Corals
    @param common_name: common name to try and match
    @return: [id, id...], None on fail
    """
    try:
        found = Species.get(fn.lower(Species.common_name) == common_name.lower())
        # Single case-insensitive match -> one-element list of its ID.
        return [found.species]
    except Species.DoesNotExist:
        logging.warning('TODO: determine species for {}'.format(common_name))
        return None
@staticmethod
def lookup_protocols(protocols_str):
    """
    Given protocols, convert to list and return list of ID's
    @param protocols_str: e.g. 'FL,WS'
    @return: e.g. [8, 19]
    """
    found_ids = []
    try:
        for pa in protocols_str.strip().replace(' ', '').split(','):
            row = Lookups.get((Lookups.lookup_type == 'PROTOCOL') &
                              (Lookups.lookup_value == pa.strip()))
            found_ids.append(row.lookup)
        return found_ids
    except Lookups.DoesNotExist:
        # noinspection PyUnboundLocalVariable
        logging.error('*** Unable to look up protocol {} in group {} '
                      '(does not exist, check spreadsheet.)'.format(pa, protocols_str))
    except Exception as e:
        logging.error(f'{e}')
@staticmethod
def lookup_fisheries(fisheries_str):
    """
    Given fisheries, convert to list and return list of ID's from LOOKUPS
    @param fisheries_str: e.g. '15,16'
    @return: e.g. [608, 688]
    """
    found_ids = []
    try:
        for fish in fisheries_str.strip().replace(' ', '').split(','):
            row = Lookups.get((Lookups.lookup_type == 'FISHERY') &
                              (Lookups.lookup_value == fish.strip()))
            found_ids.append(row.lookup)
        return found_ids
    except Lookups.DoesNotExist:
        # noinspection PyUnboundLocalVariable
        logging.error('*** Unable to look up fishery {} in group {} '
                      '(does not exist, check db.)'.format(fish, fisheries_str))
@staticmethod
def lookup_geartypes(geartypes_str):
    """
    Given gear types, convert to list and return list of ID's from LOOKUPS
    @param geartypes_str: e.g. '15,16'
    @return: e.g. [608, 688]
    """
    found_ids = []
    try:
        for geartype in geartypes_str.strip().replace(' ', '').split(','):
            # Gear types can live under either the fixed-gear or trawl lookup type.
            row = Lookups.get(((Lookups.lookup_type == 'FG_GEAR_TYPE') |
                               (Lookups.lookup_type == 'TRAWL_GEAR_TYPE')) &
                              (Lookups.lookup_value == geartype.strip()))
            found_ids.append(row.lookup)
        return found_ids
    except Lookups.DoesNotExist:
        # noinspection PyUnboundLocalVariable
        logging.error('*** Unable to look up gear type {} in group {} '
                      '(does not exist, check db.)'.format(geartype, geartypes_str))
def get_matching_species(self):
"""
Find species that match, return list of non matching
@return: [list of matching id #'s], [list of non matching name strings]
"""
matching_species = []
non_matching_species = []
for species in [x.species_species_group for x in self.BioProtocols.select()]:
try:
matched = Species.get(fn.lower(Species.common_name) == species.lower())
matching_species.append(matched.species) # | |
TRP n
1 250 PRO n
1 251 LYS n
1 252 ASP n
1 253 ARG n
1 254 ALA n
1 255 PRO n
1 256 LEU n
1 257 ILE n
1 258 LEU n
1 259 VAL n
1 260 THR n
1 261 TYR n
1 262 PHE n
1 263 THR n
1 264 GLN n
1 265 PRO n
1 266 GLN n
1 267 PRO n
1 268 LYS n
1 269 ALA n
1 270 GLU n
1 271 SER n
1 272 ARG n
1 273 ARG n
1 274 ASP n
1 275 VAL n
1 276 LEU n
1 277 ALA n
1 278 SER n
1 279 ALA n
1 280 ALA n
1 281 LYS n
1 282 ILE n
1 283 VAL n
1 284 THR n
1 285 ASP n
1 286 GLY n
1 287 LEU n
#
_entity_src_gen.entity_id 1
_entity_src_gen.pdbx_src_id 1
_entity_src_gen.pdbx_alt_source_flag sample
_entity_src_gen.pdbx_seq_type 'Biological sequence'
_entity_src_gen.pdbx_beg_seq_num 1
_entity_src_gen.pdbx_end_seq_num 287
_entity_src_gen.gene_src_common_name ?
_entity_src_gen.gene_src_genus ?
_entity_src_gen.pdbx_gene_src_gene blaCTX-M-64
_entity_src_gen.gene_src_species ?
_entity_src_gen.gene_src_strain ?
_entity_src_gen.gene_src_tissue ?
_entity_src_gen.gene_src_tissue_fraction ?
_entity_src_gen.gene_src_details ?
_entity_src_gen.pdbx_gene_src_fragment ?
_entity_src_gen.pdbx_gene_src_scientific_name 'Escherichia coli'
_entity_src_gen.pdbx_gene_src_ncbi_taxonomy_id 562
_entity_src_gen.pdbx_gene_src_variant ?
_entity_src_gen.pdbx_gene_src_cell_line ?
_entity_src_gen.pdbx_gene_src_atcc ?
_entity_src_gen.pdbx_gene_src_organ ?
_entity_src_gen.pdbx_gene_src_organelle ?
_entity_src_gen.pdbx_gene_src_cell ?
_entity_src_gen.pdbx_gene_src_cellular_location ?
_entity_src_gen.host_org_common_name ?
_entity_src_gen.pdbx_host_org_scientific_name
;Escherichia coli 'BL21-Gold(DE3)pLysS AG'
;
_entity_src_gen.pdbx_host_org_ncbi_taxonomy_id 866768
_entity_src_gen.host_org_genus ?
_entity_src_gen.pdbx_host_org_gene ?
_entity_src_gen.pdbx_host_org_organ ?
_entity_src_gen.host_org_species ?
_entity_src_gen.pdbx_host_org_tissue ?
_entity_src_gen.pdbx_host_org_tissue_fraction ?
_entity_src_gen.pdbx_host_org_strain ?
_entity_src_gen.pdbx_host_org_variant ?
_entity_src_gen.pdbx_host_org_cell_line ?
_entity_src_gen.pdbx_host_org_atcc ?
_entity_src_gen.pdbx_host_org_culture_collection ?
_entity_src_gen.pdbx_host_org_cell ?
_entity_src_gen.pdbx_host_org_organelle ?
_entity_src_gen.pdbx_host_org_cellular_location ?
_entity_src_gen.pdbx_host_org_vector_type ?
_entity_src_gen.pdbx_host_org_vector ?
_entity_src_gen.host_org_details ?
_entity_src_gen.expression_system_id ?
_entity_src_gen.plasmid_name ?
_entity_src_gen.plasmid_details ?
_entity_src_gen.pdbx_description ?
#
_struct_ref.id 1
_struct_ref.db_name UNP
_struct_ref.db_code C8CP57_ECOLX
_struct_ref.pdbx_db_accession C8CP57
_struct_ref.pdbx_db_isoform ?
_struct_ref.entity_id 1
_struct_ref.pdbx_seq_one_letter_code
;QTADVQQKLAELERQSGGRLGVALINTADNSQILYRADERFPMCSTSKVMAAAAVLKQSETQKQLLNQPVEIKPADLVNY
NPIAEKHVNGTMTLAELSAAALQYSDNTAMNKLIAQLGGPGGVTAFARAIGDETFRLDRTEPTLNTAIPGDPRDTTTPRA
MAQTLRQLTLGHALGETQRAQLVTWLKGNTTGAASIRAGLPASWVVGDKTGSGGYGTTNDIAVIWPKDRAPLILVTYFTQ
PQPKAESRRDVLASAAKIVTDGL
;
_struct_ref.pdbx_align_begin 29
#
loop_
_struct_ref_seq.align_id
_struct_ref_seq.ref_id
_struct_ref_seq.pdbx_PDB_id_code
_struct_ref_seq.pdbx_strand_id
_struct_ref_seq.seq_align_beg
_struct_ref_seq.pdbx_seq_align_beg_ins_code
_struct_ref_seq.seq_align_end
_struct_ref_seq.pdbx_seq_align_end_ins_code
_struct_ref_seq.pdbx_db_accession
_struct_ref_seq.db_align_beg
_struct_ref_seq.pdbx_db_align_beg_ins_code
_struct_ref_seq.db_align_end
_struct_ref_seq.pdbx_db_align_end_ins_code
_struct_ref_seq.pdbx_auth_seq_align_beg
_struct_ref_seq.pdbx_auth_seq_align_end
1 1 5ZB7 A 25 ? 287 ? C8CP57 29 ? 291 ? 25 289
2 1 5ZB7 B 25 ? 287 ? C8CP57 29 ? 291 ? 25 289
#
loop_
_struct_ref_seq_dif.align_id
_struct_ref_seq_dif.pdbx_pdb_id_code
_struct_ref_seq_dif.mon_id
_struct_ref_seq_dif.pdbx_pdb_strand_id
_struct_ref_seq_dif.seq_num
_struct_ref_seq_dif.pdbx_pdb_ins_code
_struct_ref_seq_dif.pdbx_seq_db_name
_struct_ref_seq_dif.pdbx_seq_db_accession_code
_struct_ref_seq_dif.db_mon_id
_struct_ref_seq_dif.pdbx_seq_db_seq_num
_struct_ref_seq_dif.details
_struct_ref_seq_dif.pdbx_auth_seq_num
_struct_ref_seq_dif.pdbx_ordinal
1 5ZB7 HIS A 1 ? UNP C8CP57 ? ? 'expression tag' 1 1
1 5ZB7 HIS A 2 ? UNP C8CP57 ? ? 'expression tag' 2 2
1 5ZB7 HIS A 3 ? UNP C8CP57 ? ? 'expression tag' 3 3
1 5ZB7 HIS A 4 ? UNP C8CP57 ? ? 'expression tag' 4 4
1 5ZB7 HIS A 5 ? UNP C8CP57 ? ? 'expression tag' 5 5
1 5ZB7 HIS A 6 ? UNP C8CP57 ? ? 'expression tag' 6 6
1 5ZB7 SER A 7 ? UNP C8CP57 ? ? 'expression tag' 7 7
1 5ZB7 SER A 8 ? UNP C8CP57 ? ? 'expression tag' 8 8
1 5ZB7 GLY A 9 ? UNP C8CP57 ? ? 'expression tag' 9 9
1 5ZB7 LEU A 10 ? UNP C8CP57 ? ? 'expression tag' 10 10
1 5ZB7 VAL A 11 ? UNP C8CP57 ? ? 'expression tag' 11 11
1 5ZB7 PRO A 12 ? UNP C8CP57 ? ? 'expression tag' 12 12
1 5ZB7 ARG A 13 ? UNP C8CP57 ? ? 'expression tag' 13 13
1 5ZB7 GLY A 14 ? UNP C8CP57 ? ? 'expression tag' 14 14
1 5ZB7 SER A 15 ? UNP C8CP57 ? ? 'expression tag' 15 15
1 5ZB7 HIS A 16 ? UNP C8CP57 ? ? 'expression tag' 16 16
1 5ZB7 MET A 17 ? UNP C8CP57 ? ? 'expression tag' 17 17
1 5ZB7 ALA A 18 ? UNP C8CP57 ? ? 'expression tag' 18 18
1 5ZB7 SER A 19 ? UNP C8CP57 ? ? 'expression tag' 19 19
1 5ZB7 GLY A 20 ? UNP C8CP57 ? ? 'expression tag' 20 20
1 5ZB7 GLY A 21 ? UNP C8CP57 ? ? 'expression tag' 21 21
1 5ZB7 THR A 22 ? UNP C8CP57 ? ? 'expression tag' 22 22
1 5ZB7 GLU A 23 ? UNP C8CP57 ? ? 'expression tag' 23 23
1 5ZB7 LEU A 24 ? UNP C8CP57 ? ? 'expression tag' 24 24
2 5ZB7 HIS B 1 ? UNP C8CP57 ? ? 'expression tag' 1 25
2 5ZB7 HIS B 2 ? UNP C8CP57 ? ? 'expression tag' 2 26
2 5ZB7 HIS B 3 ? UNP C8CP57 ? ? 'expression tag' 3 27
2 5ZB7 HIS B 4 ? UNP C8CP57 ? ? 'expression tag' 4 28
2 5ZB7 HIS B 5 ? UNP C8CP57 ? ? 'expression tag' 5 29
2 5ZB7 HIS B 6 ? UNP C8CP57 ? ? 'expression tag' 6 30
2 5ZB7 SER B 7 ? UNP C8CP57 ? ? 'expression tag' 7 31
2 5ZB7 SER B 8 ? UNP C8CP57 ? ? 'expression tag' 8 32
2 5ZB7 GLY B 9 ? UNP C8CP57 ? ? 'expression tag' 9 33
2 5ZB7 LEU B 10 ? UNP C8CP57 ? ? 'expression tag' 10 34
2 5ZB7 VAL B 11 ? UNP C8CP57 ? ? 'expression tag' 11 35
2 5ZB7 PRO B 12 ? UNP C8CP57 ? ? 'expression tag' 12 36
2 5ZB7 ARG B 13 ? UNP C8CP57 ? ? 'expression tag' 13 37
2 5ZB7 GLY B 14 ? UNP C8CP57 ? ? 'expression tag' 14 38
2 5ZB7 SER B 15 ? UNP C8CP57 ? ? 'expression tag' 15 39
2 5ZB7 HIS B 16 ? UNP C8CP57 ? ? 'expression tag' 16 40
2 5ZB7 MET B 17 ? UNP C8CP57 ? ? 'expression tag' 17 41
2 5ZB7 ALA B 18 ? UNP C8CP57 ? ? 'expression tag' 18 42
2 5ZB7 SER B 19 ? UNP C8CP57 ? ? 'expression tag' 19 43
2 5ZB7 GLY B 20 ? UNP C8CP57 ? ? 'expression tag' 20 44
2 5ZB7 GLY B 21 ? UNP C8CP57 ? ? 'expression tag' 21 45
2 5ZB7 THR B 22 ? UNP C8CP57 ? ? 'expression tag' 22 46
2 5ZB7 GLU B 23 ? UNP C8CP57 ? ? 'expression tag' 23 47
2 5ZB7 LEU B 24 ? UNP C8CP57 ? ? 'expression tag' 24 48
#
loop_
_chem_comp.id
_chem_comp.type
_chem_comp.mon_nstd_flag
_chem_comp.name
_chem_comp.pdbx_synonyms
_chem_comp.formula
_chem_comp.formula_weight
ALA 'L-peptide linking' y ALANINE ? 'C3 H7 N O2' 89.093
ARG 'L-peptide linking' y ARGININE ? 'C6 H15 N4 O2 1' 175.209
ASN 'L-peptide linking' y ASPARAGINE ? 'C4 H8 N2 O3' 132.118
ASP 'L-peptide linking' y 'ASPARTIC ACID' ? 'C4 H7 N O4' 133.103
CYS 'L-peptide linking' y CYSTEINE ? 'C3 H7 N O2 S' 121.158
GLN 'L-peptide linking' y GLUTAMINE ? 'C5 H10 N2 O3' 146.144
GLU 'L-peptide linking' y 'GLUTAMIC ACID' ? 'C5 H9 N O4' 147.129
GLY 'peptide linking' y GLYCINE ? 'C2 H5 N O2' 75.067
HIS 'L-peptide linking' y HISTIDINE ? 'C6 H10 N3 O2 1' 156.162
HOH non-polymer . WATER ? 'H2 O' 18.015
ILE 'L-peptide linking' y ISOLEUCINE ? 'C6 H13 N O2' 131.173
LEU 'L-peptide linking' y LEUCINE ? 'C6 H13 N O2' 131.173
LYS 'L-peptide linking' y LYSINE ? 'C6 H15 N2 O2 1' 147.195
MET 'L-peptide linking' y METHIONINE ? 'C5 H11 N O2 S' 149.211
PHE 'L-peptide linking' y PHENYLALANINE ? 'C9 H11 N O2' 165.189
PRO 'L-peptide linking' y PROLINE ? 'C5 H9 N O2' 115.130
SER 'L-peptide linking' y SERINE ? 'C3 H7 N O3' 105.093
THR 'L-peptide linking' y THREONINE ? 'C4 H9 N O3' 119.119
TRP 'L-peptide linking' y TRYPTOPHAN ? 'C11 H12 N2 O2' 204.225
TYR 'L-peptide linking' y TYROSINE ? 'C9 H11 N O3' 181.189
VAL 'L-peptide linking' y VALINE ? 'C5 H11 N O2' 117.146
#
_exptl.absorpt_coefficient_mu ?
_exptl.absorpt_correction_T_max ?
_exptl.absorpt_correction_T_min ?
_exptl.absorpt_correction_type ?
_exptl.absorpt_process_details ?
_exptl.entry_id 5ZB7
_exptl.crystals_number 1
_exptl.details ?
_exptl.method 'X-RAY DIFFRACTION'
_exptl.method_details ?
#
_exptl_crystal.colour ?
_exptl_crystal.density_diffrn ?
_exptl_crystal.density_Matthews 1.84
_exptl_crystal.density_method ?
_exptl_crystal.density_percent_sol 33.20
_exptl_crystal.description ?
_exptl_crystal.F_000 | |
Delete a record from the reference dataset table
:param internal_id: the django id for the record
:return:
"""
self.increment_minor_version()
self.get_record_by_internal_id(internal_id).delete()
if self.external_database is not None:
self.sync_to_external_database(self.external_database.memorable_name)
self.modified_date = datetime.utcnow()
self.save()
@transaction.atomic
def delete_all_records(self):
    """
    Delete all records from the reference dataset table
    :return:
    """
    self.increment_minor_version()
    self.get_records().delete()
    external = self.external_database
    if external is not None:
        # Mirror the deletion to the configured external database.
        self.sync_to_external_database(external.memorable_name)
    self.modified_date = datetime.utcnow()
    self.save()
def sync_to_external_database(self, external_database):
    """
    Run a full sync of records from the local django db to `external_database`
    :param external_database: alias of the target database connection
    :return:
    """
    model_class = self.get_record_model_class()
    saved_ids = []
    for record in self.get_records():
        # Build a column -> value dict for this record; foreign key fields
        # are keyed by their relationship name instead of a column name.
        record_data = {}
        for field in self.fields.all():
            if field.data_type != ReferenceDatasetField.DATA_TYPE_FOREIGN_KEY:
                record_data[field.column_name] = getattr(record, field.column_name)
            else:
                record_data[field.relationship_name] = getattr(record, field.relationship_name)
        # Update in place when the record already exists remotely, otherwise insert.
        if model_class.objects.using(external_database).filter(pk=record.id).exists():
            with external_model_class(model_class) as mc:
                mc.objects.using(external_database).filter(pk=record.id).update(**record_data)
        else:
            with external_model_class(model_class) as mc:
                mc.objects.using(external_database).create(
                    id=record.id, reference_dataset_id=self.id, **record_data
                )
        saved_ids.append(record.id)
    # Finally, remove any remote records that no longer exist locally.
    with external_model_class(model_class) as mc:
        mc.objects.using(external_database).exclude(pk__in=saved_ids).delete()
def increment_schema_version(self):
    """Bump the schema version by one and persist the change."""
    self.schema_version = self.schema_version + 1
    self.save()
def increment_major_version(self):
    """Advance the major version, reset the minor version to zero, and save."""
    self.major_version = self.major_version + 1
    self.minor_version = 0
    self.save()
def increment_minor_version(self):
    """Bump the minor version by one and persist the change."""
    self.minor_version = self.minor_version + 1
    self.save()
def get_database_names(self):
    """Return the database aliases this dataset writes to: always
    "default", plus the external mirror's memorable name when configured."""
    names = ["default"]
    if self.external_database is not None:
        names.append(self.external_database.memorable_name)
    return names
def get_absolute_url(self):
    """URL of the dataset detail page, anchored at this dataset's slug."""
    detail_url = reverse("datasets:dataset_detail", args=(self.uuid,))
    return "{}#{}".format(detail_url, self.slug)
def get_admin_edit_url(self):
    """URL of the django admin change page for this reference dataset."""
    url_args = (self.id,)
    return reverse("admin:datasets_referencedataset_change", args=url_args)
@staticmethod
def get_type_display():
    """
    Allow for reference dataset type name display in api responses to match datasets.
    """
    # Fixed label: reference datasets have exactly one type.
    return "Reference Dataset"
def user_has_bookmarked(self, user):
    """Return True if ``user`` has bookmarked this reference dataset."""
    bookmarks = self.referencedatasetbookmark_set.filter(user=user)
    return bookmarks.exists()
def toggle_bookmark(self, user):
    """Remove the user's bookmark if present, otherwise create one."""
    bookmarks = self.referencedatasetbookmark_set
    if self.user_has_bookmarked(user):
        bookmarks.filter(user=user).delete()
    else:
        bookmarks.create(user=user)
def bookmark_count(self):
    """Number of users who have bookmarked this dataset."""
    bookmarks = self.referencedatasetbookmark_set
    return bookmarks.count()
def get_column_config(self):
    """
    Return column configuration for the reference dataset in the
    format expected by ag-grid.
    """
    column_definitions = []
    for field in self.fields.all():
        # Linked (FK) fields surface the linked field's column and type
        # under a combined "<relationship>_<linked column>" name.
        if field.data_type == field.DATA_TYPE_FOREIGN_KEY:
            linked = field.linked_reference_dataset_field
            column_name = f"{field.relationship_name}_{linked.column_name}"
            data_type = linked.data_type
        else:
            column_name = field.column_name
            data_type = field.data_type
        # Default to a plain text filter, then specialize for numeric/date types.
        filter_type = "agTextColumnFilter"
        if data_type in (field.DATA_TYPE_INT, field.DATA_TYPE_FLOAT):
            filter_type = "agNumberColumnFilter"
        elif data_type in (field.DATA_TYPE_DATE, field.DATA_TYPE_DATETIME):
            filter_type = "agDateColumnFilter"
        column_definitions.append(
            {
                "headerName": field.name,
                "field": column_name,
                "sortable": True,
                "filter": filter_type,
            }
        )
    return column_definitions
def get_grid_data(self):
    """
    Return all records of this reference dataset in a JSON
    serializable format for use by ag-grid.

    Foreign key fields are flattened into "<relationship>_<linked column>"
    keys; datetime values are converted to ISO-format strings.
    """
    records = []
    for record in self.get_records():
        record_data = {}
        for field in self.fields.all():
            if field.data_type != ReferenceDatasetField.DATA_TYPE_FOREIGN_KEY:
                record_data[field.column_name] = getattr(record, field.column_name)
                # ISO format dates for js compatibility
                if isinstance(record_data[field.column_name], datetime):
                    record_data[field.column_name] = record_data[field.column_name].isoformat()
            else:
                # Resolve the related record (may be None) and flatten the
                # linked field's value under the combined key.
                relationship = getattr(record, field.relationship_name)
                record_data[
                    f"{field.relationship_name}_{field.linked_reference_dataset_field.column_name}"
                ] = (
                    getattr(
                        relationship,
                        field.linked_reference_dataset_field.column_name,
                    )
                    if relationship
                    else None
                )
        records.append(record_data)
    return records
def get_metadata_table_hash(self):
    """
    Hash reference dataset records as the user would see them. This allows
    us to include linked dataset fields in the hash.

    :return: md5 digest (bytes) over the JSON-serialized, stringified records
    """
    # md5 is used for change detection here, not for anything security-sensitive.
    hashed_data = hashlib.md5()
    for record in self.get_records():
        data = {}
        for field in self.fields.all():
            if field.data_type != ReferenceDatasetField.DATA_TYPE_FOREIGN_KEY:
                data[field.column_name] = str(getattr(record, field.column_name))
            else:
                # For linked fields, hash the linked record's displayed value
                # (stringified), or None when the relationship is unset.
                relationship = getattr(record, field.relationship_name)
                data[field.linked_reference_dataset_field.column_name] = (
                    str(
                        getattr(
                            relationship,
                            field.linked_reference_dataset_field.column_name,
                        )
                    )
                    if relationship
                    else None
                )
        hashed_data.update(json.dumps(data).encode("utf-8"))
    return hashed_data.digest()
class ReferenceDataSetBookmark(models.Model):
    """Records that a user has bookmarked a reference dataset."""
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
    reference_dataset = models.ForeignKey(ReferenceDataset, on_delete=models.CASCADE)

    class Meta:
        db_table = "app_referencedatasetbookmark"
        # A user can bookmark a given dataset at most once.
        unique_together = ("user", "reference_dataset")
class ReferenceDatasetRecordBase(models.Model):
    """Abstract base for per-dataset record models."""
    reference_dataset = models.ForeignKey(
        ReferenceDataset, on_delete=models.CASCADE, related_name="records"
    )
    updated_date = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True

    def __str__(self):
        # Display records using the dataset's configured display-name field.
        return str(getattr(self, self.reference_dataset.display_name_field.column_name, None))
class ReferenceDatasetField(TimeStampedUserModel):
    """A single column definition for a reference dataset's dynamic record table."""
    # Enumeration of supported column data types.
    DATA_TYPE_CHAR = 1
    DATA_TYPE_INT = 2
    DATA_TYPE_FLOAT = 3
    DATA_TYPE_DATE = 4
    DATA_TYPE_TIME = 5
    DATA_TYPE_DATETIME = 6
    DATA_TYPE_BOOLEAN = 7
    DATA_TYPE_FOREIGN_KEY = 8
    DATA_TYPE_UUID = 9
    # (value, label) choices for the ``data_type`` model field below.
    _DATA_TYPES = (
        (DATA_TYPE_CHAR, "Character field"),
        (DATA_TYPE_INT, "Integer field"),
        (DATA_TYPE_FLOAT, "Float field"),
        (DATA_TYPE_DATE, "Date field"),
        (DATA_TYPE_TIME, "Time field"),
        (DATA_TYPE_DATETIME, "Datetime field"),
        (DATA_TYPE_BOOLEAN, "Boolean field"),
        (DATA_TYPE_FOREIGN_KEY, "Linked Reference Dataset field"),
        (DATA_TYPE_UUID, "Universal unique identifier field"),
    )
    # SQL column types used for the dynamic table (FK fields store an integer id).
    DATA_TYPE_MAP = {
        DATA_TYPE_CHAR: "varchar(255)",
        DATA_TYPE_INT: "integer",
        DATA_TYPE_FLOAT: "float",
        DATA_TYPE_DATE: "date",
        DATA_TYPE_TIME: "time",
        DATA_TYPE_DATETIME: "timestamp",
        DATA_TYPE_BOOLEAN: "boolean",
        DATA_TYPE_FOREIGN_KEY: "integer",
        DATA_TYPE_UUID: "uuid",
    }
    # Django form field classes used when editing records of each type.
    _DATA_TYPE_FORM_FIELD_MAP = {
        DATA_TYPE_CHAR: forms.CharField,
        DATA_TYPE_INT: forms.IntegerField,
        DATA_TYPE_FLOAT: forms.FloatField,
        DATA_TYPE_DATE: forms.DateField,
        DATA_TYPE_TIME: forms.TimeField,
        DATA_TYPE_DATETIME: forms.DateTimeField,
        DATA_TYPE_BOOLEAN: forms.BooleanField,
        DATA_TYPE_FOREIGN_KEY: forms.ModelChoiceField,
        DATA_TYPE_UUID: forms.UUIDField,
    }
    # Django model field classes used when building the dynamic record model.
    _DATA_TYPE_MODEL_FIELD_MAP = {
        DATA_TYPE_CHAR: models.CharField,
        DATA_TYPE_INT: models.IntegerField,
        DATA_TYPE_FLOAT: models.FloatField,
        DATA_TYPE_DATE: models.DateField,
        DATA_TYPE_TIME: models.TimeField,
        DATA_TYPE_DATETIME: models.DateTimeField,
        DATA_TYPE_BOOLEAN: models.BooleanField,
        DATA_TYPE_FOREIGN_KEY: models.ForeignKey,
        DATA_TYPE_UUID: models.UUIDField,
    }
    # Types users may edit directly; every type except UUID.
    EDITABLE_DATA_TYPES = (
        DATA_TYPE_CHAR,
        DATA_TYPE_INT,
        DATA_TYPE_FLOAT,
        DATA_TYPE_DATE,
        DATA_TYPE_TIME,
        DATA_TYPE_DATETIME,
        DATA_TYPE_BOOLEAN,
        DATA_TYPE_FOREIGN_KEY,
    )
    reference_dataset = models.ForeignKey(
        ReferenceDataset, on_delete=models.CASCADE, related_name="fields"
    )
    data_type = models.IntegerField(choices=_DATA_TYPES)
    is_identifier = models.BooleanField(
        default=False, help_text="This field is the unique identifier for the record"
    )
    is_display_name = models.BooleanField(
        default=False,
        help_text="This field is the name that will be displayed in the upload "
        "record form when referenced by other datasets",
    )
    name = models.CharField(max_length=255, help_text="The display name for the field")
    column_name = models.CharField(
        max_length=255,
        blank=True,
        null=True,
        help_text="Descriptive name for the field. This name is used in the Data Workspace "
        "database. Leave blank for linked reference dataset fields",
        validators=[
            RegexValidator(
                regex=r"^[a-z][a-z0-9_\.]*$",
                message="Column names must be lowercase and must "
                "start with a letter and contain only "
                "letters, numbers, underscores and full stops.",
            )
        ],
    )
    description = models.TextField(blank=True, null=True)
    required = models.BooleanField(default=False)
    linked_reference_dataset = models.ForeignKey(
        ReferenceDataset,
        on_delete=models.PROTECT,
        related_name="linked_fields",
        null=True,
        blank=True,
    )  # No longer used
    linked_reference_dataset_field = models.ForeignKey(
        "self",
        on_delete=models.PROTECT,
        related_name="linked_dataset_fields",
        null=True,
        blank=True,
    )
    relationship_name = models.CharField(
        max_length=255,
        blank=True,
        null=True,
        help_text="For use with linked reference dataset fields only. Give a name for the "
        'linked reference dataset, which will be appended with "_id" to form a foreign key '
        "in the database table. Where multiple fields are selected from the same linked "
        "reference dataset, the same name should be used",
        validators=[
            RegexValidator(
                regex=r"^[a-zA-Z][a-zA-Z0-9_\.]*$",
                message="Relationship names must start with a letter and contain only "
                "letters, numbers, underscores and full stops.",
            )
        ],
    )
    sort_order = models.PositiveIntegerField(default=0, blank=False, null=False)

    class Meta:
        db_table = "app_referencedatasetfield"
        unique_together = (
            ("reference_dataset", "name"),
            ("reference_dataset", "column_name"),
        )
        verbose_name = "Reference dataset field"
        ordering = ("sort_order",)
def __init__(self, *args, **kwargs):
    """Initialize the field and remember its persisted data type / column name.

    The stashed originals are compared on save to detect whether the
    underlying table needs a column rename or a type change.
    """
    super().__init__(*args, **kwargs)
    # Stash the current data type and name so they can be compared on save
    self._original_data_type = self.data_type
    self._original_column_name = self.column_name
def __str__(self):
    """Render as '<dataset name>: <field name>'."""
    dataset_name = self.reference_dataset.name
    return "{}: {}".format(dataset_name, self.name)
def _add_column_to_db(self):
    """
    Add a column to the refdata table in the db

    Saves this field, bumps the dataset schema version, then adds the
    matching column to the dynamic record table in every configured database.
    :return:
    """
    super().save()
    self.reference_dataset.increment_schema_version()
    model_class = self.reference_dataset.get_record_model_class()
    for database in self.reference_dataset.get_database_names():
        with connections[database].schema_editor() as editor:
            if self.data_type != self.DATA_TYPE_FOREIGN_KEY:
                editor.add_field(model_class, model_class._meta.get_field(self.column_name))
            else:
                # Linked fields are stored as a foreign key named after the
                # relationship, not after the (blank) column name.
                editor.add_field(
                    model_class,
                    model_class._meta.get_field(self.relationship_name),
                )
def _update_db_column_name(self):
    """
    Alter the db column name in the associated table

    Captures the field under its old name, saves the rename, then alters
    the column in every configured database inside one transaction.
    :return:
    """
    # Get a copy of the existing model class (pre-save)
    model_class = self.reference_dataset.get_record_model_class()
    # Get a copy of the current field
    from_field = model_class._meta.get_field(self._original_column_name)
    # Save the changes to the field
    super().save()
    # Increment the schema version
    self.reference_dataset.increment_schema_version()
    # Get a copy of the updated model class (post-save)
    model_class = self.reference_dataset.get_record_model_class()
    # Get a copy of the new field
    to_field = model_class._meta.get_field(self.column_name)
    # Migrate from old field to new field
    with transaction.atomic():
        for database in self.reference_dataset.get_database_names():
            with connections[database].schema_editor() as editor:
                editor.alter_field(model_class, from_field, to_field)
def _update_db_column_data_type(self):
    """Persist a data type change and ALTER the column in every database.

    Values are cast via text (``old::text::new``) so conversions between
    otherwise-incompatible column types can succeed.
    """
    super().save()
    self.reference_dataset.increment_schema_version()
    for database in self.reference_dataset.get_database_names():
        with connections[database].cursor() as cursor:
            # Identifiers are quoted via sql.Identifier; the target type is
            # taken from the dataset field's configured postgres type.
            cursor.execute(
                sql.SQL(
                    """
                    ALTER TABLE {table_name}
                    ALTER COLUMN {column_name} TYPE {data_type}
                    USING {column_name}::text::{data_type}
                    """
                ).format(
                    table_name=sql.Identifier(self.reference_dataset.table_name),
                    column_name=sql.Identifier(self.column_name),
                    data_type=sql.SQL(self.get_postgres_datatype()),
                )
            )
@transaction.atomic
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
"""
On ReferenceDatasetField save update the associated table.
:param force_insert:
:param force_update:
:param using:
:param update_fields:
:return:
"""
ref_dataset = self.reference_dataset
# Ensure a reference dataset field cannot link to a field that is itself linked
# to another reference dataset field
if (
self.linked_reference_dataset_field
and self.linked_reference_dataset_field.data_type == self.DATA_TYPE_FOREIGN_KEY
):
raise ValidationError(
"Unable to link reference dataset fields to another field that is itself linked"
)
# Ensure a reference dataset field cannot link to a field in a dataset that has a
# linked field pointing to a field in the current dataset (circular link)
circular_reference_datasets = ReferenceDatasetField.objects.filter(
linked_reference_dataset_field__reference_dataset=self.reference_dataset
).values_list("reference_dataset_id", flat=True)
if (
self.linked_reference_dataset_field
and self.linked_reference_dataset_field.reference_dataset.id
in circular_reference_datasets
):
raise ValidationError(
"Unable to link reference dataset fields to another field that points back to this dataset (circular link)"
)
# If this is a newly created field add it to the db
if self.id is None:
# For linked reference dataset fields, the foreign key column to be
# added is derived from the field's relationship_name. As we allow
# multiple fields with the same relationship_name, the column may
# already exist so we catch the database error.
try:
with transaction.atomic():
self._add_column_to_db()
except DatabaseError:
pass
else:
# Otherwise update where necessary
if self._original_column_name != | |
predicted and extracted limb
location in the image and used to compute the extracted limb location.
The resulting predicted surface points, predicted image points, observed image points, and scan directions
in the camera frame are then all returned as numpy arrays.
:param image_interpolator: A callable which returns the interpolated image values for provides [y,x] locations
in the image
:param camera_temperature: The temperature of the camera in degrees at the time the image was captured
:param target: The target we are looking for limb points for
:param scan_center: The center where all of our scan lines will start
:param line_of_sight_sun: The line of sight of the sun in the image
:return: The predicted surface points in the camera frame as a 3xn array, the predicted limbs in the image as a
2xn array, the observed limbs in the image as a 2xn array, and the scan directions in the camera frame
as a 3xn array of unit vectors where n is the :attr:`number_of_scan_lines`
"""
# predict the limb locations
predicted_limbs_camera, scan_dirs, scan_dirs_camera = self.predict_limbs(scan_center, line_of_sight_sun,
target, camera_temperature)
predicted_limbs_pixels = self.camera.model.project_onto_image(predicted_limbs_camera,
temperature=camera_temperature)
# set the distance to search along each scan line 2 times the apparent radius of the target in the image
# noinspection PyArgumentList
apparent_radius_pixels = np.linalg.norm(predicted_limbs_pixels - scan_center.reshape(2, 1), axis=0).max()
search_dist = 2 * apparent_radius_pixels
# Create an array of where we want to interpolate the image at/shoot rays through
search_distance_array = np.linspace(-search_dist, search_dist, self.number_of_sample_points)
# Create an interpolator to figure out the distance from the scan center for the subpixel locations of
# the correlation
distance_interpolator = interp1d(np.arange(self.number_of_sample_points), search_distance_array)
# Get the center of each scan line
center = (self.number_of_sample_points - 1) // 2
# Only take the middle of the predicted scan lines since we know the limb will lie in that region
template_selection = self.number_of_sample_points // 4
# Determine the deltas to apply to the limb locations
search_deltas = scan_dirs[:2].T.reshape((-1, 2, 1), order='F') * search_distance_array
# Get the pixels that we are sampling in the image along each scan line
search_points_image = search_deltas + predicted_limbs_pixels.reshape((1, 2, -1), order='F')
# Flatten everything to just 2d matrices instead of nd matrices
sp_flat = np.hstack(search_points_image)
# Select the template portion
sp_flat_template = np.hstack(search_points_image[...,
center - template_selection:center + template_selection + 1])
# Compute the direction vector through each pixel we are sampling in the template
direction_vectors = self.camera.model.pixels_to_unit(sp_flat_template, temperature=camera_temperature)
# Build the rays we are going to trace to determine our predicted scan lines
render_rays = Rays(np.zeros(3), direction_vectors)
# Get the predicted scan line illumination inputs
illum_inputs = self.scene.get_illumination_inputs(render_rays)
# Compute the scan line illuminations
illums = self.brdf(illum_inputs).reshape(search_points_image.shape[0], 2 * template_selection + 1)
# Apply the psf to the predicted illuminations and store the scan lines
self.predicted_illums = self.psf(illums)
# Extract the scan line DN values from the image
self.extracted_illums = image_interpolator(sp_flat[::-1].T).reshape(search_points_image.shape[0],
search_points_image.shape[-1])
# Do the 1d correlations between the extracted and predicted scan lines
self.correlation_lines = fft_correlator_1d(self.extracted_illums, self.predicted_illums)
# Find the peak of each correlation line
self.correlation_peaks = self.peak_finder(self.correlation_lines)
distances = distance_interpolator(self.correlation_peaks.ravel()).reshape(self.correlation_peaks.shape)
observed_limbs_pixels = distances.reshape(1, -1) * scan_dirs + predicted_limbs_pixels
return predicted_limbs_camera, predicted_limbs_pixels, observed_limbs_pixels, scan_dirs_camera
class EllipseMatching(RelNavEstimator):
"""
This class implements GIANT's version of limb based OpNav for regular bodies.
The class provides an interface to perform limb based OpNav for each target body that is predicted to be in an
image. It does this by looping through each target object contained in the :attr:`.Scene.target_objs` attribute
that is requested. For each of the targets, the algorithm:
#. If using limb scanning to extract the limbs, and requested with :attr:`recenter`, identifies the center of
brightness for each target using the :mod:`.moment_algorithm` and moves the a priori target to be along that line
of sight
#. Extracts the observed limbs from the image and pairs them to the target
#. Estimates the relative position between the target and the image using the observed limbs and the steps discussed
in the :mod:`.ellipse_matching` documentation
#. Uses the estimated position to get the predicted limb surface location and predicted limb locations in the image
When all of the required data has been successfully loaded into an instance of this class, the :meth:`estimate`
method is used to perform the estimation for the requested image. The results are stored into the
:attr:`observed_bearings` attribute for the observed limb locations and the :attr:`observed_positions` attribute for
the estimated relative position between the target and the camera. In addition, the predicted location for the limbs
for each target are stored in the :attr:`computed_bearings` attribute and the a priori relative position between the
target and the camera is stored in the :attr:`computed_positions` attribute. Finally, the details about the fit are
stored as a dictionary in the appropriate element in the :attr:`details` attribute. Specifically, these
dictionaries will contain the following keys.
=========================== ========================================================================================
Key Description
=========================== ========================================================================================
``'Covariance'`` The 3x3 covariance matrix for the estimated relative position in the camera frame based
on the residuals. This is only available if successful
``'Surface Limb Points'`` The surface points that correspond to the limb points in the target fixed target
centered frame.
``'Failed'``                A message indicating why the fit failed.  This will only be present if the fit failed
                            (so you could do something like ``'Failed' in limb_matching.details[target_ind]`` to
                            check if something failed).  The message should be a human readable description of what
                            caused the failure.
=========================== ========================================================================================
.. warning::
Before calling the :meth:`estimate` method be sure that the scene has been updated to correspond to the correct
image time. This class does not update the scene automatically.
"""
technique: str = 'ellipse_matching'
"""
The name of the technique identifier in the :class:`.RelativeOpNav` class.
"""
observable_type: List[RelNavObservablesType] = [RelNavObservablesType.LIMB, RelNavObservablesType.RELATIVE_POSITION]
"""
The type of observables this technique generates.
"""
def __init__(self, scene: Scene, camera: Camera, image_processing: ImageProcessing,
limb_scanner: Optional[LimbScanner] = None,
extraction_method: Union[LimbExtractionMethods, str] = LimbExtractionMethods.EDGE_DETECTION,
interpolator: type = RegularGridInterpolator,
recenter: bool = True):
"""
:param scene: The :class:`.Scene` object containing the target, light, and obscuring objects.
:param camera: The :class:`.Camera` object containing the camera model and images to be utilized
:param image_processing: The :class:`.ImageProcessing` object to be used to process the images
:param limb_scanner: The :class:`.LimbScanner` object containing the limb scanning settings.
:param extraction_method: The method to use to extract the observed limbs from the image. Should be
``'LIMB_SCANNING'`` or ``'EDGE_DETECTION'``. See :class:`.LimbExtractionMethods` for
details.
:param interpolator: The type of image interpolator to use if the extraction method is set to LIMB_SCANNING.
:param recenter: A flag to estimate the center using the moment algorithm to get a fast rough estimate of the
center-of-figure
"""
# store the scene and camera in the class instance using the super class's init method
super().__init__(scene, camera, image_processing)
# interpret the limb extraction method into the enum
if isinstance(extraction_method, str):
extraction_method = extraction_method.upper()
self.extraction_method: LimbExtractionMethods = LimbExtractionMethods(extraction_method)
"""
The method to use to extract observed limb points from the image.
The valid options are provided in the :class:`LimbExtractionMethods` enumeration
"""
self._limb_scanner: LimbScanner = limb_scanner
"""
The limb scanning instance to use.
"""
self.interpolator: type = interpolator
"""
The type of interpolator to use for the image.
This is ignored if the :attr:`extraction_method` is not set to ``'LIMB_SCANNING'``.
"""
self._edge_detection_limbs: List[NONEARRAY] = [None] * len(self.scene.target_objs)
"""
The extracted limbs from the image in pixels before they have been paired to a target
Until :meth:`estimate` is called this list will be filled with ``None``.
"""
self.limbs_camera: List[NONEARRAY] = [None] * len(self.scene.target_objs)
"""
The limb surface points with respect to the center of the target
Until :meth:`estimate` is called this list will be filled with ``None``.
Each element of this list corresponds to the same element in the :attr:`.Scene.target_objs` list.
"""
self._image_interp: Optional[Callable] = None
"""
The interpolator for the image to use.
This is set on the call to estimate
"""
self._limbs_extracted: bool = False
"""
This flag specifies | |
<filename>encore/events/tests/test_event_manager.py
#
# (C) Copyright 2011 Enthought, Inc., Austin, TX
# All right reserved.
#
# This file is open source software distributed according to the terms in LICENSE.txt
#
# Standard library imports.
import unittest
import mock
import weakref
import threading
# Local imports.
from encore.events.event_manager import EventManager, BaseEvent
from encore.events.api import (get_event_manager, set_event_manager,
BaseEventManager)
import encore.events.package_globals as package_globals
class TestEventManager(unittest.TestCase):
    def setUp(self):
        """Create a fresh EventManager so each test starts with no listeners."""
        self.evt_mgr = EventManager()
def test_register(self):
""" Test if event is successfully registered.
"""
self.evt_mgr.register(BaseEvent)
self.assertTrue(BaseEvent in self.evt_mgr.get_event())
    def test_emit(self):
        """ Test if events are successfully emitted.
        """
        # A single connected listener is called exactly once per emit,
        # receiving the event instance as the sole positional argument.
        callback = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback)
        evt1 = BaseEvent()
        self.evt_mgr.emit(evt1)
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback.call_args, ((evt1, ), {}))
        # A listener connected later is notified alongside the first one.
        callback2 = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback2)
        evt2 = BaseEvent()
        self.evt_mgr.emit(evt2)
        self.assertEqual(callback.call_count, 2)
        self.assertEqual(callback.call_args, ((evt2, ), {}))
        self.assertEqual(callback2.call_count, 1)
        self.assertEqual(callback2.call_args, ((evt2, ), {}))
        # Exceptions in listeners should still propagate events.
        def callback3(evt):
            raise RuntimeError('i\'m just like this')
        callback3 = mock.Mock(wraps=callback3)
        callback4 = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback3)
        self.evt_mgr.connect(BaseEvent, callback4)
        evt3 = BaseEvent()
        self.evt_mgr.emit(evt3)
        # Every listener was called once more, including callback4 which is
        # connected *after* the raising listener.
        self.assertEqual(callback.call_count, 3)
        self.assertEqual(callback2.call_count, 2)
        self.assertEqual(callback3.call_count, 1)
        self.assertEqual(callback4.call_count, 1)
def test_connect(self):
""" Test if adding connections works.
"""
callback = mock.Mock()
self.evt_mgr.connect(BaseEvent, callback)
self.assertEqual(
list(self.evt_mgr.get_listeners(BaseEvent)), [callback])
callback2 = mock.Mock()
self.evt_mgr.connect(BaseEvent, callback2)
self.assertEqual(
list(self.evt_mgr.get_listeners(BaseEvent)),
[callback, callback2])
    def test_listeners(self):
        """ Test if correct listeners are returned.
        """
        # Nothing connected yet.
        self.assertEqual(list(self.evt_mgr.get_listeners(BaseEvent)), [])
        class MyEvt(BaseEvent):
            def __init__(self, name=1):
                super(MyEvt, self).__init__()
                self.name = name
            def callback_bound(self, evt):
                pass
            def callback_unbound(self):
                pass
        callback = mock.Mock()
        obj = MyEvt()
        # Connect a plain callable (to the base class), an unbound method
        # and a bound method (to the subclass).
        self.evt_mgr.connect(BaseEvent, callback)
        self.evt_mgr.connect(MyEvt, MyEvt.callback_unbound)
        self.evt_mgr.connect(MyEvt, obj.callback_bound)
        # Querying with the subclass also returns BaseEvent listeners.
        self.assertEqual(
            list(self.evt_mgr.get_listeners(MyEvt)),
            [callback, MyEvt.callback_unbound, obj.callback_bound])
        callback2 = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback2, filter={'name': 0})
        # get listeners with filtering
        # Passing an event *instance* (rather than a class) applies the
        # listener filters against that instance's attributes.
        self.assertEqual(
            list(self.evt_mgr.get_listeners(MyEvt(0))),
            [callback, MyEvt.callback_unbound, obj.callback_bound, callback2])
        self.assertEqual(
            list(self.evt_mgr.get_listeners(MyEvt(1))),
            [callback, MyEvt.callback_unbound, obj.callback_bound])
def test_disconnect(self):
""" Test if disconnecting listeners works.
"""
callback = mock.Mock()
self.evt_mgr.connect(BaseEvent, callback)
evt1 = BaseEvent()
self.evt_mgr.emit(evt1)
self.assertEqual(callback.call_count, 1)
self.assertEqual(callback.call_args, ((evt1, ), {}))
self.evt_mgr.disconnect(BaseEvent, callback)
self.evt_mgr.emit(BaseEvent())
self.assertEqual(callback.call_count, 1)
self.assertEqual(callback.call_args, ((evt1, ), {}))
    def test_disable(self):
        """ Test if temporarily disabling an event works.

        Disabling suppresses notification for the event class and its
        subclasses; enabling restores it.
        """
        class MyEvt(BaseEvent):
            def __init__(self):
                super(MyEvt, self).__init__()
        callback = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback)
        callback2 = mock.Mock()
        self.evt_mgr.connect(MyEvt, callback2)
        evt1 = BaseEvent()
        self.evt_mgr.emit(evt1)
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback.call_args, ((evt1, ), {}))
        # Disabling BaseEvent.
        self.evt_mgr.disable(BaseEvent)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(callback.call_count, 1)
        # Disabling BaseEvent should also disable MyEvt.
        self.evt_mgr.emit(MyEvt())
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback2.call_count, 0)
        # Reenabling BaseEvent should fire notifications.
        self.evt_mgr.enable(BaseEvent)
        self.evt_mgr.emit(MyEvt())
        self.assertEqual(callback.call_count, 2)
        self.assertEqual(callback2.call_count, 1)
        # Disabling MyEvt should not disable BaseEvent but only MyEvt.
        self.evt_mgr.disable(MyEvt)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(callback.call_count, 3)
        # Emitting the disabled MyEvt notifies no one, not even the
        # BaseEvent listener.
        self.evt_mgr.emit(MyEvt())
        self.assertEqual(callback.call_count, 3)
        self.assertEqual(callback2.call_count, 1)
        # Reenabling MyEvent should notify callback2.
        self.evt_mgr.enable(MyEvt)
        self.evt_mgr.emit(MyEvt())
        self.assertEqual(callback.call_count, 4)
        self.assertEqual(callback2.call_count, 2)
        # Test for disable before any method is registered.
        class MyEvt2(BaseEvent):
            pass
        self.evt_mgr.disable(MyEvt2)
        callback = mock.Mock()
        self.evt_mgr.connect(MyEvt2, callback)
        self.evt_mgr.emit(MyEvt2())
        self.assertFalse(callback.called)
    def test_mark_as_handled(self):
        """ Test if mark_as_handled() works.

        A listener that marks the event as handled stops propagation to
        lower-priority listeners.
        """
        class MyEvent(BaseEvent):
            def __init__(self, veto=False):
                super(MyEvent, self).__init__()
                self.veto = veto
        def callback(evt):
            if evt.veto:
                evt.mark_as_handled()
        callback = mock.Mock(wraps=callback)
        # Higher-priority listener runs first and may veto the event.
        self.evt_mgr.connect(MyEvent, callback, priority=2)
        callback2 = mock.Mock()
        self.evt_mgr.connect(MyEvent, callback2, priority=1)
        evt1 = MyEvent()
        self.evt_mgr.emit(evt1)
        # No veto: both listeners are notified.
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback2.call_count, 1)
        evt2 = MyEvent(veto=True)
        self.evt_mgr.emit(evt2)
        # Vetoed: callback saw evt2 but callback2 never did — its last
        # call is still evt1.
        self.assertEqual(callback.call_count, 2)
        self.assertEqual(callback.call_args, ((evt2, ), {}))
        self.assertEqual(callback2.call_count, 1)
        self.assertEqual(callback2.call_args, ((evt1, ), {}))
    def test_filtering(self):
        """ Test if event filtering on arguments works.
        """
        depth = 5
        # A() builds a chain of 5 nested instances via ``self.a``; the
        # deepest link sets ``self.a = 0``, so only the fifth dotted
        # access (prop1.a.a.a.a.a) reaches the value 0.
        class A(object):
            count = depth
            def __init__(self):
                A.count -= 1
                if A.count:
                    self.a = A()
                else:
                    self.a = 0
        class MyEvent(BaseEvent):
            def __init__(self, prop1="f0", prop2=True, prop3=None):
                super(MyEvent, self).__init__()
                self.prop1 = prop1
                self.prop2 = prop2
                self.prop3 = prop3
        callbacks = [mock.Mock() for i in range(8)]
        # 0: unfiltered; 1-4: filters on plain attributes;
        # 5-7: dotted filters that traverse attribute chains.
        self.evt_mgr.connect(MyEvent, callbacks[0])
        self.evt_mgr.connect(MyEvent, callbacks[1], filter={'prop1': 'f2'})
        self.evt_mgr.connect(MyEvent, callbacks[2], filter={'prop2': False})
        self.evt_mgr.connect(
            MyEvent, callbacks[3], filter={'prop3': BaseEvent})
        self.evt_mgr.connect(
            MyEvent, callbacks[4], filter={'prop1': 'f2',
                                           'prop2': False})
        self.evt_mgr.connect(MyEvent, callbacks[5], filter={'prop1.real': 0})
        self.evt_mgr.connect(
            MyEvent, callbacks[6], filter={'prop1.a.a.a.a.a': 0})
        self.evt_mgr.connect(
            MyEvent, callbacks[7], filter={'prop1.a.a.a.a': 0})
        def check_count(evt, *counts):
            # Emit ``evt`` and check each callback's *cumulative* count.
            self.evt_mgr.emit(evt)
            for callback, count in zip(callbacks, counts):
                self.assertEqual(callback.call_count, count)
        # Notify only 0,1
        check_count(MyEvent(prop1='f2'), 1, 1, 0, 0, 0, 0, 0, 0)
        # Notify only 0, 1, 2, 4
        check_count(MyEvent(prop1='f2', prop2=False), 2, 2, 1, 0, 1, 0, 0, 0)
        # Notify only 0, 3
        check_count(MyEvent(prop3=BaseEvent), 3, 2, 1, 1, 1, 0, 0, 0)
        # Notify only 0; (extended filter fail on AttributeError for 5)
        check_count(MyEvent(prop1=1), 4, 2, 1, 1, 1, 0, 0, 0)
        # Notify only 0 and 5 (extended attribute filter: (1j).real == 0)
        check_count(MyEvent(prop1=1j), 5, 2, 1, 1, 1, 1, 0, 0)
        # Notify only 0 and 6 (five dotted hops reach the terminal 0)
        check_count(MyEvent(prop1=A()), 6, 2, 1, 1, 1, 1, 1, 0)
def test_exception(self):
""" Test if exception in handler causes subsequent notifications.
"""
class MyEvt(BaseEvent):
def __init__(self, err=False):
super(MyEvt, self).__init__()
self.err = err
def callback(evt):
if evt.err:
raise Exception('you did it')
callback = mock.Mock(wraps=callback)
self.evt_mgr.connect(MyEvt, callback)
callback2 = mock.Mock()
self.evt_mgr.connect(MyEvt, callback2)
self.evt_mgr.emit(MyEvt(err=False))
self.assertEqual(callback.call_count, 1)
self.assertEqual(callback2.call_count, 1)
self.evt_mgr.emit(MyEvt(err=True))
self.assertEqual(callback.call_count, 2)
self.assertEqual(callback2.call_count, 2)
    def test_priority(self):
        """ Test if setting priority of handlers works.
        """
        # ``calls`` is a *class* attribute, so all Callback instances
        # append into the same list — recording the global call order.
        class Callback(object):
            calls = []
            def __init__(self, name):
                self.name = name
            def __call__(self, evt):
                self.calls.append(self.name)
        callback = mock.Mock(wraps=Callback(name=1))
        self.evt_mgr.connect(BaseEvent, callback, priority=1)
        callback2 = mock.Mock(wraps=Callback(name=2))
        self.evt_mgr.connect(BaseEvent, callback2, priority=2)
        callback3 = mock.Mock(wraps=Callback(name=3))
        self.evt_mgr.connect(BaseEvent, callback3, priority=0)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback2.call_count, 1)
        self.assertEqual(callback3.call_count, 1)
        # Listeners run in descending priority order (2, then 1, then 0),
        # regardless of connection order.
        self.assertEqual(Callback.calls, [2, 1, 3])
    def test_subclass(self):
        """ Test if subclass event notifies superclass listeners.
        Cases to test:
        1. subclass event should notify superclass listeners
        even when the subclass event is not registered/connected
        even when the superclass event is added before/after subclass
        2. superclass event should not notify subclass listeners
        """
        class MyEvt(BaseEvent):
            pass
        class MyEvt2(MyEvt):
            pass
        callback = mock.Mock()
        callback2 = mock.Mock()
        self.evt_mgr.connect(MyEvt, callback)
        self.evt_mgr.connect(MyEvt2, callback2)
        # No callback called on BaseEvent (superclass of both)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(callback.call_count, 0)
        self.assertEqual(callback2.call_count, 0)
        # Only callback called on MyEvt
        self.evt_mgr.emit(MyEvt())
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback2.call_count, 0)
        # Both callbacks called on MyEvt2
        self.evt_mgr.emit(MyEvt2())
        self.assertEqual(callback.call_count, 2)
        self.assertEqual(callback2.call_count, 1)
        # Add a new subclass event
        class MyEvt3(MyEvt2):
            pass
        # Subclass event not registered
        # Both callbacks called on MyEvt3
        self.evt_mgr.emit(MyEvt3())
        self.assertEqual(callback.call_count, 3)
        self.assertEqual(callback2.call_count, 2)
def test_event_hierarchy(self):
""" Test whether the correct hierarchy of event classes is returned.
"""
class MyEvt(BaseEvent):
pass
class MyEvt2(MyEvt):
pass
class MyEvt3(MyEvt):
pass
class MyEvt4(MyEvt2, MyEvt3):
pass
self.assertEqual(
self.evt_mgr.get_event_hierarchy(BaseEvent), (BaseEvent, ))
self.assertEqual(
self.evt_mgr.get_event_hierarchy(MyEvt), (MyEvt, BaseEvent))
self.assertEqual(
self.evt_mgr.get_event_hierarchy(MyEvt2),
(MyEvt2, MyEvt, BaseEvent))
self.assertEqual(
self.evt_mgr.get_event_hierarchy(MyEvt3),
(MyEvt3, MyEvt, BaseEvent))
self.assertEqual(
self.evt_mgr.get_event_hierarchy(MyEvt4),
(MyEvt4, MyEvt2, MyEvt3, MyEvt, BaseEvent))
def test_prepost_emit(self):
""" Test whether pre/post methods of event are called correctly on emit.
"""
call_seq = []
class MyEvt(BaseEvent):
def pre_emit(self):
call_seq.append(0)
def post_emit(self):
call_seq.append(2)
def callback(evt):
call_seq.append(1)
evt = MyEvt()
self.evt_mgr.connect(BaseEvent, callback)
self.evt_mgr.emit(evt)
self.assertEqual(call_seq, list(range(3)))
    def test_reentrant_disconnect_emit(self):
        """ Test listener is called even if it is disconnected before notify.

        Disconnecting from *inside* a listener during an emit must not
        prevent the already-in-flight notification from reaching the
        other listeners.
        """
        data = []
        def callback(evt):
            data.append(0)
            # Reentrant disconnects of listeners still queued for this
            # same emit.
            self.evt_mgr.disconnect(BaseEvent, callback2)
            self.evt_mgr.disconnect(BaseEvent, callback3)
            data.append(1)
        def callback2(evt):
            data.append(2)
        def callback3(evt):
            data.append(3)
        self.evt_mgr.connect(BaseEvent, callback)
        self.evt_mgr.connect(BaseEvent, callback2)
        self.evt_mgr.connect(BaseEvent, callback3)
        self.evt_mgr.emit(BaseEvent())
        # All three listeners ran for this emit despite the disconnects.
        self.assertEqual(data, [0, 1, 2, 3])
def test_lambda_connect(self):
""" Test if lambda functions w/o references are not garbage collected.
"""
data = []
self.evt_mgr.connect(BaseEvent, lambda evt: data.append(1))
self.evt_mgr.emit(BaseEvent())
self.assertEqual(data, [1])
def test_method_weakref(self):
""" Test if methods do not prevent garbage collection of objects.
"""
data = []
class MyHeavyObject(object):
def callback(self, evt):
data.append(1)
obj = MyHeavyObject()
obj_wr = weakref.ref(obj)
self.evt_mgr.connect(BaseEvent, obj.callback)
del obj
# Now there should be no references to obj.
self.assertEqual(obj_wr(), None)
self.evt_mgr.emit(BaseEvent())
self.assertEqual(data, [])
def test_method_call(self):
""" Test if instance methods are called.
"""
data = []
class MyHeavyObject(BaseEvent):
def callback(self, evt):
data.append(1)
def callback_unbound(self):
data.append(2)
obj = MyHeavyObject()
obj_wr = weakref.ref(obj)
self.evt_mgr.connect(BaseEvent, obj.callback)
self.evt_mgr.connect(BaseEvent, MyHeavyObject.callback_unbound)
self.assertTrue(obj_wr() is not None)
self.evt_mgr.emit(obj)
self.assertEqual(data, [1, 2])
def test_method_collect(self):
""" Test if object garbage collection disconnects listener method.
"""
data = []
class MyHeavyObject(object):
def callback(self, evt):
data.append(1)
obj = MyHeavyObject()
obj_wr = weakref.ref(obj)
self.evt_mgr.connect(BaseEvent, obj.callback)
del obj
# Now there should be no references to obj.
self.assertEqual(obj_wr(), None)
self.evt_mgr.emit(BaseEvent())
self.assertEqual(data, [])
self.assertEqual(len(list(self.evt_mgr.get_listeners(BaseEvent))), 0)
def test_method_disconnect(self):
""" Test if method disconnect works.
"""
data = []
class MyHeavyObject(object):
def callback(self, evt):
data.append(1)
obj = MyHeavyObject()
obj_wr = weakref.ref(obj)
self.evt_mgr.connect(BaseEvent, obj.callback)
self.evt_mgr.disconnect(BaseEvent, obj.callback)
del obj
# Now there should be no references to obj.
self.assertEqual(obj_wr(), None)
self.evt_mgr.emit(BaseEvent())
self.assertEqual(data, [])
def test_method_disconnect2(self):
""" Test if method disconnect on unconnected method fails.
"""
data = []
class MyHeavyObject(object):
def callback(self, evt):
data.append(1)
def callback2(self, evt):
data.append(2)
obj = MyHeavyObject()
self.evt_mgr.connect(BaseEvent, obj.callback)
with self.assertRaises(Exception):
self.evt_mgr.disconnect(BaseEvent, obj.callback2)
self.evt_mgr.emit(BaseEvent())
self.assertEqual(data, [1])
def test_no_block(self):
""" Test if non-blocking emit works.
"""
data = []
lock = threading.Lock()
lock.acquire()
def callback(evt):
# callback will wait until | |
not found.')
path = request.data.get('p', '')
file_id = seafile_api.get_file_id_by_path(repo_id, path)
if not path or not file_id:
return api_error(status.HTTP_400_BAD_REQUEST,
'Path is missing or invalid.')
username = request.user.username
# check file access permission
parent_dir = os.path.dirname(path)
if check_folder_permission(request, repo_id, parent_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
# check file lock
try:
is_locked, locked_by_me = check_file_lock(repo_id, path, username)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
operation = request.data.get('operation', '')
if operation.lower() == 'lock':
if is_locked:
return api_error(status.HTTP_403_FORBIDDEN, 'File is already locked')
# lock file
expire = request.data.get('expire', FILE_LOCK_EXPIRATION_DAYS)
try:
seafile_api.lock_file(repo_id, path.lstrip('/'), username, expire)
return Response('success', status=status.HTTP_200_OK)
except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')
if operation.lower() == 'unlock':
if not is_locked:
return api_error(status.HTTP_403_FORBIDDEN, 'File is not locked')
if not locked_by_me:
return api_error(status.HTTP_403_FORBIDDEN, 'You can not unlock this file')
# unlock file
try:
seafile_api.unlock_file(repo_id, path.lstrip('/'))
return Response('success', status=status.HTTP_200_OK)
except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')
else:
return api_error(status.HTTP_400_BAD_REQUEST,
"Operation can only be lock or unlock")
def delete(self, request, repo_id, format=None):
# delete file
repo = get_repo(repo_id)
if not repo:
return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')
path = request.GET.get('p', None)
if not path:
return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing.')
parent_dir = os.path.dirname(path)
if check_folder_permission(request, repo_id, parent_dir) != 'rw':
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
parent_dir = os.path.dirname(path)
file_name = os.path.basename(path)
try:
seafile_api.del_file(repo_id, parent_dir,
file_name, request.user.username)
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
"Failed to delete file.")
return reloaddir_if_necessary(request, repo, parent_dir)
class FileDetailView(APIView):
    """Return metadata for a single file: id, size, mtime, last modifier,
    star/comment status and (for markdown) draft information.
    """
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated,)
    throttle_classes = (UserRateThrottle,)
    def get(self, request, repo_id, format=None):
        """Get detail info for the file at query parameter ``p``.

        Optional ``commit_id`` selects the file as of that commit.
        Returns 400/403/404/500 api_error responses on failure.
        """
        # argument check
        path = request.GET.get('p', None)
        if not path:
            error_msg = 'p invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        path = normalize_file_path(path)
        # resource check
        repo = seafile_api.get_repo(repo_id)
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        commit_id = request.GET.get('commit_id', None)
        try:
            if commit_id:
                # historical revision of the file
                obj_id = seafile_api.get_file_id_by_commit_and_path(repo_id,
                        commit_id, path)
            else:
                obj_id = seafile_api.get_file_id_by_path(repo_id, path)
        except Exception as e:
            logger.error(e)
            error_msg = 'Internal Server Error'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        if not obj_id:
            error_msg = 'File %s not found.' % path
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        # permission check (any permission on the parent dir suffices)
        parent_dir = os.path.dirname(path)
        permission = check_folder_permission(request, repo_id, parent_dir)
        if not permission:
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)
        # get real path for sub repo
        if repo.is_virtual:
            real_path = posixpath.join(repo.origin_path, path.lstrip('/'))
            real_repo_id = repo.origin_repo_id
        else:
            real_path = path
            real_repo_id = repo_id
        file_name = os.path.basename(path)
        entry = {}
        entry["type"] = "file"
        entry["id"] = obj_id
        entry["name"] = file_name
        entry["permission"] = permission
        file_type, file_ext = get_file_type_and_ext(file_name)
        if file_type == MARKDOWN:
            # markdown files carry draft/review workflow information
            is_draft = is_draft_file(repo_id, path)
            has_draft = False
            if not is_draft:
                has_draft = has_draft_file(repo_id, path)
            draft = get_file_draft(repo_id, path, is_draft, has_draft)
            entry['is_draft'] = is_draft
            entry['has_draft'] = has_draft
            entry['draft_file_path'] = draft['draft_file_path']
            entry['draft_id'] = draft['draft_id']
        # fetch file contributors and latest contributor
        try:
            # get real path for sub repo
            dirent = seafile_api.get_dirent_by_path(real_repo_id, real_path)
        except Exception as e:
            logger.error(e)
            dirent = None
        # fall back to empty strings when the dirent lookup failed
        last_modified = dirent.mtime if dirent else ''
        latest_contributor = dirent.modifier if dirent else ''
        entry["mtime"] = last_modified
        entry["last_modified"] = timestamp_to_isoformat_timestr(last_modified)
        entry["last_modifier_email"] = latest_contributor
        entry["last_modifier_name"] = email2nickname(latest_contributor)
        entry["last_modifier_contact_email"] = email2contact_email(latest_contributor)
        try:
            file_size = get_file_size(real_repo_id, repo.version, obj_id)
        except Exception as e:
            logger.error(e)
            file_size = 0
        entry["size"] = file_size
        starred_files = UserStarredFiles.objects.filter(repo_id=repo_id,
                                                        path=path)
        entry["starred"] = True if len(starred_files) > 0 else False
        file_comments = FileComment.objects.get_by_file_path(repo_id, path)
        comment_total = file_comments.count()
        entry["comment_total"] = comment_total
        entry["can_edit"], _ = can_edit_file(file_name, file_size, repo)
        return Response(entry)
class FileRevert(APIView):
    """Revert a file to the state it had in a given commit."""
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated,)
    throttle_classes = (UserRateThrottle, )
    def put(self, request, repo_id, format=None):
        """Restore the file at ``p`` to its content as of ``commit_id``."""
        path = request.data.get('p', None)
        commit_id = request.data.get('commit_id', None)
        # argument checks
        if not path:
            return api_error(status.HTTP_400_BAD_REQUEST, 'path invalid.')
        if not commit_id:
            return api_error(status.HTTP_400_BAD_REQUEST, 'commit_id invalid.')
        # resource checks
        if not seafile_api.get_repo(repo_id):
            return api_error(status.HTTP_404_NOT_FOUND,
                             'library %s not found.' % repo_id)
        if not seafile_api.get_file_id_by_commit_and_path(repo_id, commit_id, path):
            return api_error(status.HTTP_404_NOT_FOUND,
                             'file %s not found.' % path)
        # permission check: reverting needs write access at the repo root
        if check_folder_permission(request, repo_id, '/') != 'rw':
            return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
        try:
            seafile_api.revert_file(repo_id, commit_id, path,
                                    request.user.username)
        except SearpcError as e:
            logger.error(e)
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
                             'Internal Server Error')
        return Response({'success': True})
class FileRevision(APIView):
    """Download a specific historical revision of a file."""
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated,)
    throttle_classes = (UserRateThrottle, )
    def get(self, request, repo_id, format=None):
        """Return a download response for the file at ``p`` as of ``commit_id``."""
        path = request.GET.get('p', None)
        if path is None:
            return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing.')
        file_name = os.path.basename(path)
        commit_id = request.GET.get('commit_id', None)
        try:
            obj_id = seafserv_threaded_rpc.get_file_id_by_commit_and_path(
                repo_id, commit_id, path)
        except Exception as e:
            # Was a bare ``except:`` which also swallowed SystemExit /
            # KeyboardInterrupt and hid the error; catch Exception and log.
            logger.error(e)
            return api_error(status.HTTP_404_NOT_FOUND, 'Revision not found.')
        return get_repo_file(request, repo_id, obj_id, file_name, 'download')
class FileHistory(APIView):
    """List the modification history (commits) of a single file."""
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    throttle_classes = (UserRateThrottle,)
    def get(self, request, repo_id, format=None):
        """ Get file history.

        ``p`` query parameter gives the file path. Returns a JSON object
        with a ``commits`` list, each commit annotated with resolved
        creator user info.
        """
        path = request.GET.get('p', None)
        if path is None:
            error_msg = 'p invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        repo = seafile_api.get_repo(repo_id)
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        file_id = seafile_api.get_file_id_by_path(repo_id, path)
        if not file_id:
            error_msg = 'File %s not found.' % path
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        permission = check_folder_permission(request, repo_id, path)
        if permission not in get_available_repo_perms():
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)
        try:
            # revision list that follows the file across renames
            commits = get_file_revisions_after_renamed(repo_id, path)
        except Exception as e:
            logger.error(e)
            error_msg = 'Internal Server Error'
            return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
        # attach resolved user info for each commit's creator
        for commit in commits:
            creator_name = commit.creator_name
            user_info = {}
            user_info['email'] = creator_name
            user_info['name'] = email2nickname(creator_name)
            user_info['contact_email'] = Profile.objects.get_contact_email_by_user(creator_name)
            commit._dict['user_info'] = user_info
        return HttpResponse(json.dumps({"commits": commits},
                                       cls=SearpcObjEncoder), status=200, content_type=json_content_type)
class FileSharedLinkView(APIView):
    """
    Support uniform interface for file shared link.
    """
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated, )
    throttle_classes = (UserRateThrottle, )

    def put(self, request, repo_id, format=None):
        """Create (or reuse) a share link for a file/directory in ``repo_id``.

        ``share_type`` in the request body selects 'download' or 'upload'.
        On success returns 201 with the generated link in the ``Location``
        header.  Existing links for the same owner/path are reused.
        """
        repo = seaserv.get_repo(repo_id)
        if not repo:
            return api_error(status.HTTP_404_NOT_FOUND, "Library does not exist")

        # Encrypted libraries may not be shared via public links.
        if repo.encrypted:
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)

        path = request.data.get('p', None)
        if not path:
            return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing')

        username = request.user.username
        password = request.data.get('password', None)
        share_type = request.data.get('share_type', 'download')

        # Password is optional; when supplied it must meet the configured
        # minimum length.
        if password and len(password) < config.SHARE_LINK_PASSWORD_MIN_LENGTH:
            return api_error(status.HTTP_400_BAD_REQUEST, 'Password is too short')

        if share_type.lower() == 'download':
            # Requester needs download permission on the path and the global
            # ability to generate share links.
            if parse_repo_perm(check_folder_permission(request, repo_id, path)).can_download is False:
                return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied')

            if not request.user.permissions.can_generate_share_link():
                error_msg = 'Can not generate share link.'
                return api_error(status.HTTP_403_FORBIDDEN, error_msg)

            try:
                expire_days = int(request.data.get('expire', 0))
            except ValueError:
                error_msg = 'expire invalid.'
                return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

            # No explicit expiry requested: fall back to the site default.
            if expire_days <= 0:
                if SHARE_LINK_EXPIRE_DAYS_DEFAULT > 0:
                    expire_days = SHARE_LINK_EXPIRE_DAYS_DEFAULT

            # Enforce the configured min/max bounds on the expiry.
            if SHARE_LINK_EXPIRE_DAYS_MIN > 0:
                if expire_days < SHARE_LINK_EXPIRE_DAYS_MIN:
                    error_msg = _('Expire days should be greater or equal to %s') % \
                            SHARE_LINK_EXPIRE_DAYS_MIN
                    return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

            if SHARE_LINK_EXPIRE_DAYS_MAX > 0:
                if expire_days > SHARE_LINK_EXPIRE_DAYS_MAX:
                    error_msg = _('Expire days should be less than or equal to %s') % \
                            SHARE_LINK_EXPIRE_DAYS_MAX
                    return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

            if expire_days <= 0:
                expire_date = None
            else:
                expire_date = timezone.now() + relativedelta(days=expire_days)

            # Decide whether the target is a directory: '/' always is;
            # otherwise stat the dirent (mapping through origin_path for
            # virtual/sub-repos).
            is_dir = False
            if path == '/':
                is_dir = True
            else:
                try:
                    real_path = repo.origin_path + path if repo.origin_path else path
                    dirent = seafile_api.get_dirent_by_path(repo.store_id, real_path)
                except SearpcError as e:
                    logger.error(e)
                    return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, "Internal Server Error")

                if not dirent:
                    return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid path')

                if stat.S_ISDIR(dirent.mode):
                    is_dir = True

            if is_dir:
                # generate dir download link
                fs = FileShare.objects.get_dir_link_by_path(username, repo_id, path)
                if fs is None:
                    fs = FileShare.objects.create_dir_link(username, repo_id, path,
                                                           password, expire_date)
                    # Record the org ownership of newly created links.
                    if is_org_context(request):
                        org_id = request.user.org.org_id
                        OrgFileShare.objects.set_org_file_share(org_id, fs)

            else:
                # generate file download link
                fs = FileShare.objects.get_file_link_by_path(username, repo_id, path)
                if fs is None:
                    fs = FileShare.objects.create_file_link(username, repo_id, path,
                                                            password, expire_date)
                    if is_org_context(request):
                        org_id = request.user.org.org_id
                        OrgFileShare.objects.set_org_file_share(org_id, fs)

            token = fs.token
            shared_link = gen_shared_link(token, fs.s_type)
        elif share_type.lower() == 'upload':
            # Upload links require the target to be an existing directory with
            # read-write permission.
            if not seafile_api.get_dir_id_by_path(repo_id, path):
                return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid path')

            if check_folder_permission(request, repo_id, path) != 'rw':
                return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied')

            if not request.user.permissions.can_generate_upload_link():
                error_msg = 'Can not generate upload link.'
                return api_error(status.HTTP_403_FORBIDDEN, error_msg)

            # generate upload link
            uls = UploadLinkShare.objects.get_upload_link_by_path(username, repo_id, path)
            if uls is None:
                uls = UploadLinkShare.objects.create_upload_link_share(
                    username, repo_id, path, password)

            token = uls.token
            shared_link = gen_shared_upload_link(token)

        else:
            return api_error(status.HTTP_400_BAD_REQUEST,
                             "Operation can only be download or upload.")

        resp = Response(status=status.HTTP_201_CREATED)
        resp['Location'] = shared_link
        return resp
########## Directory related
class DirMetaDataView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, repo_id, format=None):
# recource check
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
path = request.GET.get('p', '/')
path = normalize_dir_path(path)
dir_id = seafile_api.get_dir_id_by_path(repo_id, | |
<reponame>erteck/textHighlighter
#!/usr/bin/env python3
""" Handles the visible area of the :class:`~tools.manual.faceviewer.frame.FacesViewer` canvas. """
import logging
import tkinter as tk
import cv2
import numpy as np
from PIL import Image, ImageTk
from lib.align import AlignedFace
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Viewport():
""" Handles the display of faces and annotations in the currently viewable area of the canvas.
Parameters
----------
canvas: :class:`tkinter.Canvas`
The :class:`~tools.manual.faceviewer.frame.FacesViewer` canvas
tk_edited_variable: :class:`tkinter.BooleanVar`
The variable that indicates that a face has been edited
"""
    def __init__(self, canvas, tk_edited_variable):
        """Initialize the viewport and wire it to the owning canvas.

        Parameters
        ----------
        canvas: :class:`tkinter.Canvas`
            The :class:`~tools.manual.faceviewer.frame.FacesViewer` canvas
        tk_edited_variable: :class:`tkinter.BooleanVar`
            The variable that indicates that a face has been edited
        """
        logger.debug("Initializing: %s: (canvas: %s, tk_edited_variable: %s)",
                     self.__class__.__name__, canvas, tk_edited_variable)
        self._canvas = canvas
        self._grid = canvas.grid
        # Thumbnails are always generated/displayed at "face" centering.
        self._centering = "face"
        self._tk_selected_editor = canvas._display_frame.tk_selected_action
        # (start, end) index ranges into the aligned landmark array for each
        # facial area (ranges up to 68 — assumes a 68-point landmark set;
        # TODO confirm).
        self._landmark_mapping = dict(mouth_inner=(60, 68),
                                      mouth_outer=(48, 60),
                                      right_eyebrow=(17, 22),
                                      left_eyebrow=(22, 27),
                                      right_eye=(36, 42),
                                      left_eye=(42, 48),
                                      nose=(27, 36),
                                      jaw=(0, 17),
                                      chin=(8, 11))
        # Caches keyed by "<frame_idx>_<face_idx>" (see get_tk_face /
        # get_landmarks).
        self._landmarks = dict()
        self._tk_faces = dict()
        self._objects = VisibleObjects(self)
        self._hoverbox = HoverBox(self)
        self._active_frame = ActiveFrame(self, tk_edited_variable)
        # Re-draw the active frame's annotations whenever the selected editor
        # changes.
        self._tk_selected_editor.trace(
            "w", lambda *e: self._active_frame.reload_annotations())
@property
def face_size(self):
""" int: The pixel size of each thumbnail """
return self._grid.face_size
@property
def mesh_kwargs(self):
""" dict: The color and state keyword arguments for the objects that make up a single
face's mesh annotation based on the current user selected options. Key is the object
type (`polygon` or `line`), value are the keyword arguments for that type. """
state = "normal" if self._canvas.optional_annotations["mesh"] else "hidden"
color = self._canvas.control_colors["Mesh"]
kwargs = dict(polygon=dict(fill="", outline=color, state=state),
line=dict(fill=color, state=state))
return kwargs
@property
def hover_box(self):
""" :class:`HoverBox`: The hover box for the viewport. """
return self._hoverbox
@property
def selected_editor(self):
""" str: The currently selected editor. """
return self._tk_selected_editor.get().lower()
def toggle_mesh(self, state):
""" Toggles the mesh optional annotations on and off.
Parameters
----------
state: ["hidden", "normal"]
The state to set the mesh annotations to
"""
logger.debug("Toggling mesh annotations to: %s", state)
self._canvas.itemconfig("viewport_mesh", state=state)
self.update()
    def toggle_mask(self, state, mask_type):
        """ Toggles the mask optional annotation on and off.

        Parameters
        ----------
        state: ["hidden", "normal"]
            Whether the mask should be displayed or hidden
        mask_type: str
            The type of mask to overlay onto the face
        """
        logger.debug("Toggling mask annotations to: %s. mask_type: %s", state, mask_type)
        # Pair each visible (frame_idx, face_idx) grid cell with its detected
        # face object.  The first two planes of visible_grid hold the frame
        # and face indices; transposing/reshaping flattens them into rows of
        # (frame_idx, face_idx).
        for (frame_idx, face_idx), det_face in zip(
                self._objects.visible_grid[:2].transpose(1, 2, 0).reshape(-1, 2),
                self._objects.visible_faces.flatten()):
            # A frame index of -1 marks a padding cell with no face.
            if frame_idx == -1:
                continue

            # Cache key format matches the one used by get_tk_face.
            key = "_".join([str(frame_idx), str(face_idx)])
            # Hiding clears the mask entirely; showing fetches the requested
            # mask type (may be None if the face has no such mask).
            mask = None if state == "hidden" else self._obtain_mask(det_face, mask_type)
            self._tk_faces[key].update_mask(mask)
        self.update()
@classmethod
def _obtain_mask(cls, detected_face, mask_type):
""" Obtain the mask for the correct "face" centering that is used in the thumbnail display.
Parameters
-----------
detected_face: :class:`lib.align.DetectedFace`
The Detected Face object to obtain the mask for
mask_type: str
The type of mask to obtain
Returns
-------
:class:`numpy.ndarray` or ``None``
The single channel mask of requested mask type, if it exists, otherwise ``None``
"""
mask = detected_face.mask.get(mask_type)
if not mask:
return None
if mask.stored_centering != "face":
face = AlignedFace(detected_face.landmarks_xy)
mask.set_sub_crop(face.pose.offset["face"] - face.pose.offset[mask.stored_centering],
centering="face")
return mask.mask.squeeze()
def reset(self):
""" Reset all the cached objects on a face size change. """
self._landmarks = dict()
self._tk_faces = dict()
def update(self, refresh_annotations=False):
""" Update the viewport.
Parameters
----------
refresh_annotations: bool, optional
``True`` if mesh annotations should be re-calculated otherwise ``False``.
Default: ``False``
Obtains the objects that are currently visible. Updates the visible area of the canvas
and reloads the active frame's annotations. """
self._objects.update()
self._update_viewport(refresh_annotations)
self._active_frame.reload_annotations()
    def _update_viewport(self, refresh_annotations):
        """ Update the viewport

        Parameters
        ----------
        refresh_annotations: bool
            ``True`` if mesh annotations should be re-calculated otherwise ``False``

        Clear out cached objects that are not currently in view. Populate the cache for any
        faces that are now in view. Populate the correct face image and annotations for each
        object in the viewport based on current location. If optional mesh annotations are
        enabled, then calculates newly displayed meshes. """
        if not self._grid.is_valid:
            return
        # Evict cached TKFace objects that scrolled out of view.
        self._discard_tk_faces()

        if self._canvas.optional_annotations["mesh"]:  # Display any hidden end of row meshes
            self._canvas.itemconfig("viewport_mesh", state="normal")
        # Each collection is one on-screen row: grid cells, image item ids,
        # mesh item ids and detected face objects, iterated in lockstep.
        for collection in zip(self._objects.visible_grid.transpose(1, 2, 0),
                              self._objects.images,
                              self._objects.meshes,
                              self._objects.visible_faces):
            for (frame_idx, face_idx, pnt_x, pnt_y), image_id, mesh_ids, face in zip(*collection):
                top_left = np.array((pnt_x, pnt_y))
                # The active frame is handled by ActiveFrame, not here.
                if frame_idx == self._active_frame.frame_index:
                    logger.trace("Skipping active frame: %s", frame_idx)
                    continue
                # -1 marks a padding cell: blank the image and hide its mesh.
                if frame_idx == -1:
                    logger.debug("Blanking non-existant face")
                    self._canvas.itemconfig(image_id, image="")
                    for area in mesh_ids.values():
                        for mesh_id in area:
                            self._canvas.itemconfig(mesh_id, state="hidden")
                    continue

                tk_face = self.get_tk_face(frame_idx, face_idx, face)
                self._canvas.itemconfig(image_id, image=tk_face.photo)

                # Only place mesh points when meshes are shown (or for the
                # active frame, though that case was skipped above).
                if (self._canvas.optional_annotations["mesh"]
                        or frame_idx == self._active_frame.frame_index):
                    landmarks = self.get_landmarks(frame_idx, face_idx, face, top_left,
                                                   refresh=refresh_annotations)
                    self._locate_mesh(mesh_ids, landmarks)
def _discard_tk_faces(self):
""" Remove any :class:`TKFace` objects from the cache that are not currently displayed. """
keys = ["{}_{}".format(pnt_x, pnt_y)
for pnt_x, pnt_y in self._objects.visible_grid[:2].T.reshape(-1, 2)]
for key in list(self._tk_faces):
if key not in keys:
del self._tk_faces[key]
logger.trace("keys: %s allocated_faces: %s", keys, len(self._tk_faces))
    def get_tk_face(self, frame_index, face_index, face):
        """ Obtain the :class:`TKFace` object for the given face from the cache. If the face does
        not exist in the cache, then it is generated and added prior to returning.

        Parameters
        ----------
        frame_index: int
            The frame index to obtain the face for
        face_index: int
            The face index of the face within the requested frame
        face: :class:`~lib.align.DetectedFace`
            The detected face object, containing the thumbnail jpg

        Returns
        -------
        :class:`TKFace`
            An object for displaying in the faces viewer canvas populated with the aligned mesh
            landmarks and face thumbnail
        """
        is_active = frame_index == self._active_frame.frame_index
        key = "_".join([str(frame_index), str(face_index)])
        # The active frame is always regenerated (never served from cache) so
        # that in-progress edits are reflected immediately.
        if key not in self._tk_faces or is_active:
            logger.trace("creating new tk_face: (key: %s, is_active: %s)", key, is_active)
            if is_active:
                # Extract from the full frame currently being displayed.
                image = AlignedFace(face.landmarks_xy,
                                    image=self._active_frame.current_frame,
                                    centering=self._centering,
                                    size=self.face_size).face
            else:
                # Decode the stored jpg thumbnail (already aligned).
                image = AlignedFace(face.landmarks_xy,
                                    image=cv2.imdecode(face.thumbnail, cv2.IMREAD_UNCHANGED),
                                    centering=self._centering,
                                    size=self.face_size,
                                    is_aligned=True).face
            tk_face = self._get_tk_face_object(face, image, is_active)
            self._tk_faces[key] = tk_face
        else:
            logger.trace("tk_face exists: %s", key)
            tk_face = self._tk_faces[key]
        return tk_face
def _get_tk_face_object(self, face, image, is_active):
""" Obtain an existing unallocated, or a newly created :class:`TKFace` and populate it with
face information from the requested frame and face index.
If the face is currently active, then the face is generated from the currently displayed
frame, otherwise it is generated from the jpg thumbnail.
Parameters
----------
face: :class:`lib.align.DetectedFace`
A detected face object to create the :class:`TKFace` from
image: :class:`numpy.ndarray`
The jpg thumbnail or the 3 channel image for the face
is_active: bool
``True`` if the face in the currently active frame otherwise ``False``
Returns
-------
:class:`TKFace`
An object for displaying in the faces viewer canvas populated with the aligned face
image with a mask applied, if required.
"""
get_mask = (self._canvas.optional_annotations["mask"] or
(is_active and self.selected_editor == "mask"))
mask = self._obtain_mask(face, self._canvas.selected_mask) if get_mask else None
tk_face = TKFace(image, size=self.face_size, mask=mask)
logger.trace("face: %s, tk_face: %s", face, tk_face)
return tk_face
    def get_landmarks(self, frame_index, face_index, face, top_left, refresh=False):
        """ Obtain the landmark points for each mesh annotation.

        First tries to obtain the aligned landmarks from the cache. If the landmarks do not exist
        in the cache, or a refresh has been requested, then the landmarks are calculated from the
        detected face object.

        Parameters
        ----------
        frame_index: int
            The frame index to obtain the face for
        face_index: int
            The face index of the face within the requested frame
        face: :class:`lib.align.DetectedFace`
            The detected face object to obtain landmarks for
        top_left: tuple
            The top left (x, y) points of the face's bounding box within the viewport
        refresh: bool, optional
            Whether to force a reload of the face's aligned landmarks, even if they already exist
            within the cache. Default: ``False``

        Returns
        -------
        dict
            The key is the tkinter canvas object type for each part of the mesh annotation
            (`polygon`, `line`). The value is a list containing the (x, y) coordinates of each
            part of the mesh annotation, from the top left corner location.
        """
        key = "{}_{}".format(frame_index, face_index)
        landmarks = self._landmarks.get(key, None)
        if not landmarks or refresh:
            aligned = AlignedFace(face.landmarks_xy,
                                  centering=self._centering,
                                  size=self.face_size)
            landmarks = dict(polygon=[], line=[])
            for area, val in self._landmark_mapping.items():
                # NOTE: top_left is baked into the cached points, so cached
                # entries are only valid while the face stays at the same
                # viewport position — pass refresh=True after a re-layout.
                points = aligned.landmarks[val[0]:val[1]] + top_left
                # Eyes and mouth are closed shapes; everything else is an
                # open poly-line.
                shape = "polygon" if area.endswith("eye") or area.startswith("mouth") else "line"
                landmarks[shape].append(points)
            self._landmarks[key] = landmarks
        return landmarks
def _locate_mesh(self, mesh_ids, landmarks):
""" Place the mesh annotation canvas objects in the correct location.
Parameters
----------
mesh_ids: list
The list of mesh id objects to set coordinates for
landmarks: dict
The mesh point groupings and whether each group should be a line or a polygon
"""
for key, area in landmarks.items():
for coords, mesh_id in zip(area, mesh_ids[key]):
self._canvas.coords(mesh_id, *coords.flatten())
def | |
-- If the word begins the string OR is preceded by a space,
# (User|words|here) -- AND it appears in the list exactly,
# (?=\s|$) -- AND it is followed by a space OR ends the string...
pattern = re.compile(r'(^|\s)(' + remove_string + r')(?=\s|$)',
re.UNICODE)
# ...Then swap the word and the preceding (but not following) space for
# an empty string
text = pattern.sub("", text)
return text
def remove_stopwords(text: str, removal_string: str) -> str:
    """Strips the user's chosen stopwords out of the text.

    :param text: A unicode string representing the whole text that is being
        manipulated.
    :param removal_string: A unicode string representing the list of stopwords.
    :return: A unicode string representing the text that has been stripped of
        the stopwords chosen by the user.
    """
    stop_words = split_stop_keep_word_string(input_string=removal_string)
    return delete_words(text, stop_words)
def keep_words(text: str, non_removal_string: str) -> str:
    """Removes words that are not in non_removal_string from the text.

    :param text: A unicode string representing the whole text that is being
        manipulated.
    :param non_removal_string: A unicode string representing the list of keep
        words.
    :return: A unicode string representing the text that has been stripped of
        everything but the words chosen by the user.
    """
    # A list containing the words in non_removal_string.
    keep_list = split_stop_keep_word_string(input_string=non_removal_string)

    split_lines = text.split("\n")

    # A list of words in the user's text. Words are case-sensitive and include
    # punctuation if those scrubbing options were not selected beforehand.
    # BUG FIX: the previous implementation called
    #     re.split(r'\s', line, re.UNICODE)
    # which passed re.UNICODE (== 32) as the positional *maxsplit* argument
    # rather than as a flag, so whitespace beyond the 32nd split was left
    # inside tokens.  A follow-up "unsplit spans" pass compensated, but could
    # itself inject empty strings into the word list.  Passing flags= by
    # keyword splits correctly in one pass; empty tokens from consecutive
    # whitespace are dropped by the filter below.
    word_list = [word
                 for line in split_lines
                 for word in re.split(r'\s', line, flags=re.UNICODE)
                 if word != '']

    # remove_list is a copy of word_list without the keep words.
    remove_list = [word for word in word_list if word not in keep_list]

    scrubbed_text = delete_words(text, remove_list)
    return scrubbed_text
def get_remove_whitespace_map(spaces: bool,
                              tabs: bool,
                              new_lines: bool) -> Dict[int, type(None)]:
    """Builds the whitespace removal map.

    :param spaces: A boolean indicating whether spaces should be removed.
    :param tabs: A boolean indicating whether or not tabs should be removed.
    :param new_lines: A boolean indicating whether new lines should be removed.
    :return: A dictionary that contains all the whitespaces that should be
        removed (tabs, spaces or newlines) mapped to None.
    """
    # Unicode code points for every space, tab and line-break character that
    # can be scrubbed, each mapped to None for use with str.translate().
    space_codes = (32, 160, 5760, 8192, 8193, 8194, 8195, 8196, 8197,
                   8198, 8199, 8200, 8201, 8202, 8239, 8287, 12288)
    tab_codes = (9,)
    new_line_codes = (10, 11, 12, 13, 133, 8232, 8233)

    selected = []
    if spaces:
        selected.extend(space_codes)
    if tabs:
        selected.extend(tab_codes)
    if new_lines:
        selected.extend(new_line_codes)
    return {code: None for code in selected}
def save_character_deletion_map(deletion_map: Dict[int, type(None)],
                                storage_folder: str, filename: str):
    """Writes a character deletion map into the storage folder.

    :param deletion_map: A character deletion map to be saved.
    :param storage_folder: A string representing the path of the storage
        folder.
    :param filename: A string representing the name of the file the map
        should be saved in.
    """
    general_functions.write_file_to_disk(contents=deletion_map,
                                         dest_folder=storage_folder,
                                         filename=filename)
def load_character_deletion_map(storage_folder: str,
                                filename: str) -> Dict[int, type(None)]:
    """Reads back a character map previously saved in the storage folder.

    :param storage_folder: A string representing the path of the storage
        folder.
    :param filename: A string representing the name of the file that is being
        loaded.
    :return: The character deletion map that was saved in the folder (empty
        if there is no map to load).
    """
    deletion_map = general_functions.load_file_from_disk(
        loc_folder=storage_folder, filename=filename)
    return deletion_map
def save_scrub_optional_upload(file_string: str, storage_folder: str,
                               filename: str):
    """Writes the contents of a user option file into the storage folder.

    :param file_string: A string representing a whole file to be saved.
    :param storage_folder: A string representing the path of the storage
        folder.
    :param filename: A string representing the name of the file that is being
        saved.
    """
    general_functions.write_file_to_disk(contents=file_string,
                                         dest_folder=storage_folder,
                                         filename=filename)
def load_scrub_optional_upload(storage_folder: str, filename: str) -> str:
    """Reads back an option file previously saved in the storage folder.

    :param storage_folder: A string representing the path of the storage
        folder.
    :param filename: A string representing the name of the file that is being
        loaded.
    :return: The file string that was saved in the folder (empty if there is
        no string to load).
    """
    try:
        file_string = general_functions.load_file_from_disk(
            loc_folder=storage_folder, filename=filename)
    except FileNotFoundError:
        # Nothing was previously saved: treat as an empty upload.
        file_string = ""
    return file_string
def handle_gutenberg(text: str) -> str:
    """Strips Project Gutenberg boilerplate from a text.

    :param text: A Project Gutenberg document.
    :return: The input text document without the Gutenberg boilerplate.
    """
    # Locate the end of the opening boilerplate, normally of the form:
    #     *** START OF THIS PROJECT GUTENBERG EBOOK FRANKENSTEIN ***
    # The non-greedy ".*?" stops at the first closing "***" so a stray "***"
    # in the body is never swallowed.  Older files without that marker may
    # instead carry a copyright notice followed by three newlines, so that
    # pattern is tried as a fallback.
    start_patterns = (
        re.compile(r"\*\*\* START OF THIS PROJECT GUTENBERG.*?\*\*\*",
                   re.IGNORECASE | re.UNICODE | re.MULTILINE),
        re.compile(r"Copyright.*\n\n\n", re.IGNORECASE | re.UNICODE))
    for pattern in start_patterns:
        header = pattern.search(text)
        if header:
            # Keep only what follows the front boilerplate.
            text = text[header.end():]
            break

    # Cut everything from the start of the closing boilerplate onwards.
    re_end_gutenberg = re.compile(
        r"End of.*?Project Gutenberg",
        re.IGNORECASE | re.UNICODE | re.MULTILINE)
    footer = re_end_gutenberg.search(text)
    if footer:
        text = text[:footer.start()]

    return text
def prepare_additional_options(opt_uploads: Dict[str, FileStorage],
                               storage_options: List[str], storage_folder: str,
                               storage_filenames: List[str]) -> List[str]:
    """Gathers all the strings used by the "Additional Options" scrub section.

    :param opt_uploads: A dictionary (specifically ImmutableMultiDict)
        containing the additional scrubbing option files that have been
        uploaded.
    :param storage_options: A list of strings representing additional options
        that have been chosen by the user.
    :param storage_folder: A string representing the path of the storage
        folder.
    :param storage_filenames: A list of filename strings that will be used to
        load and save the user's selections.
    :return: An array containing strings of all the additional scrubbing
        option text fields and files.
    """
    file_strings = {'consolidations_file[]': '', 'lemmas_file[]': '',
                    'special_characters_file[]': '', 'stop_words_file[]': '',
                    'consolidations': '', 'lemmas': '',
                    'special_characters': '', 'stop_words': ''}

    # NOTE(review): iteration is over sorted(opt_uploads) and storage_filenames
    # is indexed by the same position — assumes storage_filenames is ordered to
    # match the alphabetically sorted upload keys; confirm against callers.
    for index, key in enumerate(sorted(opt_uploads)):
        if opt_uploads[key].filename:
            # A fresh upload: read it and rewind the stream so it can be
            # re-read (e.g. for saving) later.
            file_content = opt_uploads[key].read()
            file_strings[key] = general_functions.decode_bytes(file_content)
            opt_uploads[key].seek(0)
        elif key.strip('[]') in storage_options:
            # No new upload, but a previously stored file was selected.
            file_strings[key] = load_scrub_optional_upload(
                storage_folder, storage_filenames[index])
        else:
            # Neither uploaded nor stored: clear the session record too.
            session['scrubbingoptions']['file_uploads'][key] = ''
            file_strings[key] = ""

    # Create an array of option strings:
    # cons_file_string, lem_file_string, sc_file_string, sw_kw_file_string,
    # cons_manual, lem_manual, sc_manual, and sw_kw_manual
    all_options = [file_strings.get('consolidations_file[]'),
                   file_strings.get('lemmas_file[]'),
                   file_strings.get('special_characters_file[]'),
                   file_strings.get('stop_words_file[]'),
                   request.form['consolidations'],
                   request.form['lemmas'],
                   request.form['special_characters'],
                   request.form['stop_words']]

    return all_options
def scrub(text: str, gutenberg: bool, lower: bool, punct: bool, apos: bool,
hyphen: bool, amper: bool, digits: bool, tags: bool,
spaces: bool, tabs: bool, new_lines: bool,
opt_uploads: Dict[str, FileStorage], storage_options: List[str],
storage_folder: str, previewing: bool = False) -> str:
"""Scrubs the text according to the specifications chosen by the user.
This function calls call_rlhandler, handle_tags(), remove_punctuation(),
and remove_stopwords(), which manipulate the text.
:param text: A unicode string representing the whole text that is being
manipulated.
:param gutenberg: A boolean indicating whether the text is a Project
Gutenberg file.
:param lower: A boolean indicating whether or not the text is converted to
lowercase.
:param punct: A boolean indicating whether to remove punctuation from the
text.
:param apos: A boolean indicating whether to keep apostrophes in the text.
:param hyphen: A boolean indicating whether to keep hyphens in the text.
:param amper: A boolean indicating whether to keep ampersands in the text.
:param digits: A boolean indicating whether to remove digits from the text.
:param tags: A boolean indicating whether Scrub Tags has been checked.
:param spaces: A boolean indicating whether spaces should be removed.
:param tabs: A boolean indicating whether tabs should be removed.
:param new_lines: A boolean indicating whether newlines should be removed.
:param opt_uploads: A dictionary (specifically ImmutableMultiDict)
containing the additional scrubbing option files that have been
uploaded.
:param storage_options: A list of strings representing additional options
that have been chosen by the user.
:param storage_folder: A string | |
# Console banner identifying the script and its author.
print("######################################################################")
print("# Parallel n-split k-stratified-fold continuous SVM Scikitlearn MVPA #")
print("# (c) <NAME> 2012, jeanremi.king [at] gmail [dot] com #")
print("######################################################################")
# Implementation of a multivariate pattern analysis based on the scikit-learn
# toolbox (http://scikit-learn.org/stable/). It reads two .mat files
# (filenameX, filenamey) created by 'jr_classify.m'
#
# Function:
# skl_king_parallel.py filenameX filenamey [number_of_cores]
#
# Inputs:
# in filenameX:
# Xm: samples x features x classification matrix (e.g. trials x
# chans x time)
# in filenamey:
# y: vector indicating the class of each sample. Negative values
# will be used for generalization only. 0 indicates to-be-
# ignored samples.
# y2: cost/weights applied on each sample
# path: export directory
# nameX: export filename X
# namey: export filename y
# folding:type of folding(e.g. stratified)
# n_splits:number of splits
# n_folds: number of folds
# C: SVM penalization parameter
# compute_probas: compute logit fit
# compute_predict: compute traditional SVM
# fs_n: number of univariate features selected for classification
# dims: classification performed on dims dimensions
# dims_tg:classification generalized on dims_tg dimensions
#
# Ouputs:
# predict: prediction matrix (split x samples x dims x dimsg)
# predictg:same as predict for generalized samples
# probas: probas matrix (split x samples x dims x dimsg x class)
# probasg: same as probas for generalized samples
# coef: weight hyperplan vector
# all_folds:folding report (split x fold x samples)
# y_all: original y
# y: training y
# yg: generalized y
# filenameX:
# filenamey:
#
# Results are reported in: path + nameX + '_' + namey + "_results.mat"
###############################################################################
# (c) <NAME>: jeanremi.king [at] gmail [dot] com
###############################################################################
# update 2012 11 29: fix 3rd dimension issue
# update 2012 11 13: fix bug str output on some python versions
# update 2012 11 02: change stratified kfolding y by y2
# update 2012 11 02: add np.copy to Xtrain and Xtest
# update 2012 11 01: correct feature selection coef bug when at 100 %
# update 2012 10 23: correct leaveoneout bug
# update 2012 10 23: correct major n_split new_order error
# update 2012 10 18: correct python/matlab dim incompatibility
# update 2012 10 18: correct error fs between 99 and 100 && remove Kbest
# update 2012 10 17: correct error n_features shape and add nice
# update 2012 10 01: correct prediction error+change loading results option
# update 2012 09 14: handle fs float error
# update 2012 09 14: pass n_cores to sys.arg
# version 2012 09 13: implementation of parallelization
###############################################################################
print("LIBRARY")
import sys as sys
import numpy as np
from scipy import stats
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold, LeaveOneOut, KFold
from sklearn.feature_selection import SelectPercentile, SelectKBest, f_classif
from sklearn.externals.joblib import Parallel, delayed
import scipy.io as sio
from sklearn.preprocessing import Scaler
import cudaica as ci # GPU
###############################################################################
print("INPUT DATA")
#-- get command line arguments: the two .mat files produced by jr_classify.m
#   and an optional worker count (-1 = use all cores, joblib convention)
filenameX = str(sys.argv[1])
filenamey = str(sys.argv[2])
if len(sys.argv) <= 3:
    n_cores = -1
else:
    n_cores = int(sys.argv[3])
print("cores: " + str(n_cores))
print(filenameX)
print(filenamey)
#-- Load data into python
mat = sio.loadmat(filenameX)
Xm_all = mat["Xm"]  # data: samples x features x classification matrix
if np.size(Xm_all.shape) == 2:  # fix 3rd dimension issue: promote 2D input to
    X = np.zeros(np.append(Xm_all.shape, 1))  # a single-slice 3D array
    X[:, :, 0] = Xm_all
    Xm_all = X
#-- load classification parameters
mat = sio.loadmat(filenamey)
dims = mat["dims"]  # select time windows to compute
dims = np.reshape(dims, dims.size) - 1  # MATLAB 1-based -> 0-based, flattened
dims_tg = mat["dims_tg"] - 1  # generalization dimensions (also 0-based)
# Re-load with squeeze_me=True so scalar/str fields come out as plain values.
mat = sio.loadmat(filenamey, squeeze_me=True)
path = mat["path"]
nameX = mat["nameX"]
namey = mat["namey"]
folding = mat["folding"]
n_splits = mat["n_splits"]  # number of shuffle splits
n_folds = mat["n_folds"]  # fold number
svm_C = mat["C"]  # svm penalization parameter
compute_probas = mat["compute_probas"]  # whether to compute the logit fit
compute_predict = mat["compute_predict"]  # whether to compute raw SVM output
fs_n = mat["fs"]  # feature selection percentage
y_all = mat["y"]  # class used for train and test
print(Xm_all.shape)
print(y_all.shape)
y2_all = mat["y2"]  # class used for sample weights
#-- build training and generalizing classes:
#   y > 0 -> train/test samples, y < 0 -> generalization-only, y == 0 ignored
Xm = Xm_all[y_all > 0, :, :]  # training categories
Xmg = Xm_all[y_all < 0, :, :]  # generalization categories
y = y_all[y_all > 0]
yg = y_all[y_all < 0]
y2 = y2_all[y_all > 0]
n_samples, n_features, unused = Xm.shape
n_samplesg, unused, unused = Xmg.shape
n_featuresg = n_features
n_dims = dims.shape[0]
n_dimsg = n_dims
n_dims_tg = dims_tg.shape[1]
n_dimsg_tg = dims_tg.shape[1]
n_classes = np.unique(y).shape[0]
#-- deal with sample_weight: weight each y2 class inversely to its frequency
#   so that unbalanced classes contribute equally
sample_weight = np.ones(y.shape[0])
classes = np.unique(y2)
for c in range(classes.shape[0]):
    sample_weight[y2 == classes[c]] = 1. / (np.sum(y2 == classes[c]))
###############################################################################
print("PREPARE CLASSIFICATION")
#-- classifier: linear SVM with probability estimates enabled
clf = svm.SVC(kernel='linear', probability=True, C=svm_C)
#-- normalizer
scaler = Scaler()
#-- feature selection: percentile-based below 99, keep-all at ~100
if fs_n < 99.00:
    fs = SelectPercentile(f_classif, percentile=fs_n)
elif fs_n > 99 and fs_n < 101:
    fs = SelectKBest(f_classif, k=n_features)
else:
    # NOTE(review): out-of-range fs_n only prints a warning; `fs` is left
    # undefined and the script will fail later — confirm intended behaviour.
    print("cfg.fs / fs_n must be > 0 and <= 100")
#-- results initialization
# NOTE(review): `np.zeros(...) ** np.nan` relies on 0.0 ** NaN == NaN to
# produce NaN-filled arrays, and `np.empty(...) ** 0` on x ** 0 == 1 to
# produce ones — confirm these idioms on the target numpy version.
if compute_predict:
    predict = np.zeros([n_splits, n_samples, n_dims, n_dims_tg]) ** np.nan
    predictg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_folds]) ** np.nan
else:
    predict = []
    predictg = []
if compute_probas:
    probas = np.zeros([n_splits, n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
    probasg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_classes, n_folds]) ** np.nan
else:
    probas = []
    probasg = []
# NOTE(review): `n_classes * (n_classes - 1) / 2` (pairwise-classifier count)
# is a float under Python 3 and modern numpy rejects float dimensions — this
# script appears to date from the Python 2 era; confirm target interpreter.
coef = np.empty([n_splits, n_folds, n_dims, n_classes * (n_classes - 1) / 2, n_features]) ** 0
all_folds = np.zeros([n_splits, n_folds, n_samples]) ** np.nan
###############################################################################
#-- Define parallel cross validation
def my_pipeline(train, test,
                Xm_shfl, y_shfl, sw_shfl, Xmg,
                dims, fs, scaler, clf,
                n_samples, n_dims, n_dims_tg, n_classes, wts, sph):
    """Run one cross-validation fold of the temporal-generalization decoder.

    Applies the (wts, sph) component transform to the epochs, then for each
    training time sample in `dims` fits a feature-selection -> scaling -> SVM
    pipeline on `train` and generalizes the fitted classifier across the time
    samples in `dims_tg`, both on the `test` split and on the separate
    generalization set `Xmg`.

    Relies on several globals from the enclosing script: compute_predict,
    compute_probas, n_samplesg, n_dimsg, n_dimsg_tg, dims_tg, fs_n, stats.
    Returns a dict with keys: coef, predict, predictg, probas, probasg.
    """
    # component transformation
    # NOTE(review): the parameter `n_samples` is immediately overwritten by
    # the unpacking below; the incoming value is never used.
    [n_trials, n_features, n_samples] = Xm_shfl.shape
    Xm_shfl = Xm_shfl.transpose([1, 2, 0])
    Xm_shfl = np.reshape(Xm_shfl, [n_features, n_samples * n_trials])
    # assumes wts and sph are np.matrix objects so `*` is a matrix product
    # (unmixing/sphering of the sensor data) -- TODO confirm with the caller
    Xm_shfl = sph * wts * Xm_shfl
    Xm_shfl = np.reshape(Xm_shfl, [n_features, n_samples, n_trials])
    Xm_shfl = Xm_shfl.transpose([2, 0, 1])
    # NOTE(review): the reshape below reuses n_trials taken from Xm_shfl;
    # this only works if Xmg has the same number of trials -- verify.
    Xmg = Xmg.transpose([1, 2, 0])
    Xmg = np.reshape(Xmg, [n_features, n_samples * n_trials])
    Xmg = sph * wts * Xmg
    Xmg = np.reshape(Xmg, [n_features, n_samples, n_trials])
    Xmg = Xmg.transpose([2, 0, 1])
    # indicate opened fold
    sys.stdout.write("<")
    sys.stdout.flush()
    # initialize results within a given fold
    # (0 ** np.nan == nan, so these arrays start out NaN-filled)
    if compute_predict:
        predict = np.zeros([n_samples, n_dims, n_dims_tg]) ** np.nan
        predictg = np.zeros([n_samplesg, n_dimsg, n_dimsg_tg]) ** np.nan
    else:
        predict = []
        predictg = []
    if compute_probas:
        probas = np.zeros([n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
        probasg = np.zeros([n_samplesg, n_dimsg, n_dimsg_tg, n_classes]) ** np.nan
    else:
        probas = []
        probasg = []
    # one hyperplane per pairwise class combination; x ** 0 fills with 1.0
    coef = np.empty([n_dims, n_classes * (n_classes - 1) / 2, n_features]) ** 0
    # apply different classification along dimension 0
    for d in range(0, dims.shape[0]):
        Xtrain = np.copy(Xm_shfl[train, :, dims[d]])
        ytrain = y_shfl[train]
        sw_train = sw_shfl[train]
        # (deal with NaN samples in training)
        ytrain = ytrain[~np.isnan(np.nansum(Xtrain, axis=1))]
        sw_train = sw_train[~np.isnan(np.nansum(Xtrain, axis=1))]
        Xtrain = Xtrain[~np.isnan(np.nansum(Xtrain, axis=1)), :]
        # need at least two classes left after NaN removal to fit anything
        if np.unique(ytrain).shape[0] > 1:
            # feature selection
            fs.fit(Xtrain, ytrain)
            Xtrain = fs.transform(Xtrain)
            # normalization
            scaler.fit(Xtrain)
            Xtrain = scaler.transform(Xtrain)
            # SVM fit
            clf.fit(Xtrain, ytrain, sample_weight=sw_train)
            # retrieve features selected during univariate selection
            if fs_n > 99 and fs_n < 101:
                # fs_n ~= 100 means "keep everything": all columns selected
                #uni_features = sorted(range(len(fs.pvalues_)),key=lambda x:fs.pvalues_[x])
                uni_features = range(0, clf.coef_.shape[1])
            else:
                uni_features = fs.pvalues_ <= stats.scoreatpercentile(fs.pvalues_, fs.percentile)
            # retrieve hyperplane (unselected features as 0)
            coef[d, :, uni_features] = scaler.inverse_transform(clf.coef_).T
            # generalize across all time points
            for d_tg in range(0, n_dims_tg):
                # select data
                Xtest = np.copy(Xm_shfl[test, :, dims_tg[d, d_tg]])
                # handles NaNs
                test_nan = np.isnan(np.nansum(Xtest, axis=1))
                Xtest = Xtest[~test_nan, :]
                # feature selection from training
                Xtest = fs.transform(Xtest)
                # normalize from training
                Xtest = scaler.transform(Xtest)
                # generalize test samples
                if (Xtest.shape[0] - np.sum(test_nan)) > 0:
                    if compute_predict:
                        predict[test[~test_nan], d, d_tg] = clf.predict(Xtest)
                    if compute_probas:
                        probas[test[~test_nan], d, d_tg, :] = clf.predict_proba(Xtest)
                # predict on generalization sample
                # select data
                Xtestg = Xmg[:, :, dims_tg[d, d_tg]]
                # handles NaNs
                test_nan = np.isnan(np.nansum(Xtestg, axis=1))
                if (Xtestg.shape[0] - np.sum(test_nan)) > 0:
                    Xtestg = Xtestg[~test_nan, :]
                    # preproc feature selection and normalization
                    Xtestg = fs.transform(Xtestg)
                    Xtestg = scaler.transform(Xtestg)
                    # compute prediction
                    if compute_predict:
                        predictg[~test_nan, d, d_tg] = clf.predict(Xtestg)
                    if compute_probas:
                        probasg[~test_nan, d, d_tg, :] = clf.predict_proba(Xtestg)
    # summarize fold results
    out = {
        'coef': coef,
        'predict': predict,
        'predictg': predictg,
        'probas': probas,
        'probasg': probasg}
    # indicate end of fold
    sys.stdout.write(">")
    sys.stdout.flush()
    return out
###############################################################################
print("CLASSIFY")
#-- Shuffle split
for split in range(n_splits):
print("split " + str(split))
#-- shuffle order in case this is not the first split
new_order = np.array(range(y.shape[0]))
if split > 0:
np.random.shuffle(new_order)
y_shfl = np.copy(y)
y_shfl = y_shfl[new_order]
y2_shfl = np.copy(y2)
y2_shfl = y2_shfl[new_order]
Xm_shfl = np.copy(Xm)
Xm_shfl = Xm_shfl[new_order, :, :]
sw_shfl = np.copy(sample_weight)
sw_shfl = sw_shfl[new_order]
else:
y_shfl = np.copy(y)
y2_shfl = np.copy(y2)
Xm_shfl = np.copy(Xm)
sw_shfl = np.copy(sample_weight)
#-- define crossvalidation
if folding == 'stratified':
cv = StratifiedKFold(y2_shfl, k=n_folds)
elif folding == 'kfolding':
cv = KFold(n=y2_shfl.shape[0], k=n_folds)
elif folding == 'leaveoneout':
n_folds = y_shfl.shape[0]
cv = LeaveOneOut(n=y_shfl.shape[0])
else:
print("unknown crossvalidation method!")
# GPU transform
print "GPU ICA"
wtss = np.ndarray(shape=(n_features, n_features, n_folds), dtype=np.float64, order='F')
sphs = np.ndarray(shape=(n_features, n_features, n_folds), dtype=np.float64, order='F')
for fold, (train, test) in enumerate(cv):
print fold
| |
<filename>test/test_utils/ec2.py
import os
import time
import re
from inspect import signature
import boto3
from retrying import retry
from fabric import Connection
from botocore.config import Config
from botocore.exceptions import ClientError
from test.test_utils import is_pr_context, is_mainline_context
from . import DEFAULT_REGION, UL_AMI_LIST, LOGGER, BENCHMARK_RESULTS_S3_BUCKET
EC2_INSTANCE_ROLE_NAME = "ec2TestInstanceRole"
# List of instance types for which if instance spin-up fails, the test is skipped instead of failing.
ICE_SKIP_INSTANCE_LIST = ["p3dn.24xlarge"]
# List of instance types which are too powerful for minor tests
HEAVY_INSTANCE_LIST = ["p3dn.24xlarge", "p4d.24xlarge"]
def filter_only_multi_gpu(instance_type_list):
    """Keep only the instance types that expose more than one GPU."""
    return [
        candidate
        for candidate in instance_type_list
        if get_instance_num_gpus(instance_type=candidate) > 1
    ]
def filter_only_single_gpu(instance_type_list):
    """Keep only the instance types that expose exactly one GPU."""
    return [
        candidate
        for candidate in instance_type_list
        if get_instance_num_gpus(instance_type=candidate) == 1
    ]
def filter_not_heavy_instance_types(instance_type_list):
    """Drop instance types considered too powerful for minor tests."""
    return [
        candidate
        for candidate in instance_type_list
        if candidate not in HEAVY_INSTANCE_LIST
    ]
def get_ec2_instance_type(default, processor, filter_function=lambda x: x, efa=False, arch_type=""):
    """
    Get EC2 instance type from associated EC2_[CPU|GPU]_INSTANCE_TYPE env variable, or set it to a default
    for contexts where the variable is not present (i.e. PR, Nightly, local testing)
    :param default: Default instance type to use - Should never be p3dn (unless efa is True)
    :param processor: "cpu", "gpu", "neuron" or "hpu"
    :param filter_function: filter_function(instance_type_list) A function that takes the candidate list and
        keeps only "acceptable" instances (e.g. only multi-gpu instance types)
    :param efa: when True, heavy (EFA-capable) defaults such as p4d.24xlarge are permitted
    :param arch_type: e.g. "graviton" to read the EC2_[CPU|GPU]_GRAVITON_INSTANCE_TYPE variable instead
    :return: one item list of instance type -- this is used to parametrize tests, and parameter is required to be
    a list.
    """
    allowed_processors = ("cpu", "gpu", "neuron", "hpu")
    if processor not in allowed_processors:
        raise RuntimeError(
            f"Aborting EC2 test run. Unrecognized processor type {processor}. "
            f"Please choose from {allowed_processors}"
        )
    if default in HEAVY_INSTANCE_LIST and not efa:
        raise RuntimeError(f"Default instance type should never be one of {HEAVY_INSTANCE_LIST}, but it is {default}")
    # Graviton runs read a dedicated env variable; everything else reads the
    # plain per-processor variable.
    env_key = f"EC2_{processor.upper()}_INSTANCE_TYPE"
    if arch_type == "graviton":
        env_key = f"EC2_{processor.upper()}_{arch_type.upper()}_INSTANCE_TYPE"
    requested = os.getenv(env_key)
    # In mainline, no explicit override means "skip" (empty parametrization).
    if not requested and is_mainline_context():
        return []
    acceptable = filter_function([requested] if requested else [])
    return acceptable if acceptable else [default]
def get_ec2_accelerator_type(default, processor):
    """
    Get EC2 accelerator type from the EC2_EIA_INSTANCE_TYPE env variable, or fall back to a default
    for contexts where the variable is not present (i.e. PR, Nightly, local testing)
    :param default: Default accelerator instance type to use
    :param processor: "eia"
    :return: one item list of instance type -- this is used to parametrize tests, and parameter is required to be
    a list.
    """
    allowed_processors = ("eia",)
    if processor not in allowed_processors:
        raise RuntimeError(
            f"Aborting EC2 test run. Unrecognized processor type {processor}. "
            f"Please choose from {allowed_processors}"
        )
    override = os.getenv(f"EC2_{processor.upper()}_INSTANCE_TYPE")
    if override:
        return [override]
    # No override: mainline runs skip (empty list), everything else uses default.
    return [] if is_mainline_context() else [default]
def launch_instance(
    ami_id,
    instance_type,
    ei_accelerator_type,
    ec2_key_name=None,
    region=DEFAULT_REGION,
    user_data=None,
    iam_instance_profile_name=None,
    instance_name="",
):
    """
    Launch an instance
    :param ami_id: AMI ID to be used for launched instance
    :param instance_type: Instance type of launched instance
    :param ei_accelerator_type: Elastic Inference accelerator spec, or falsy to skip
    :param ec2_key_name: Name of the EC2 key pair to attach (required)
    :param region: Region where instance will be launched
    :param user_data: Script to run when instance is launched as a str
    :param iam_instance_profile_name: EC2 Role to be attached
    :param instance_name: Tag to display as Name on EC2 Console
    :return: <dict> Information about the instance that was launched
    :raises Exception: when ami_id/ec2_key_name are missing or launch fails
    """
    if not ami_id:
        raise Exception("No ami_id provided")
    if not ec2_key_name:
        raise Exception("Ec2 Key name must be provided")
    client = boto3.Session(region_name=region).client("ec2")
    # Construct the dictionary with the arguments for API call
    arguments_dict = {
        "KeyName": ec2_key_name,
        "ImageId": ami_id,
        "InstanceType": instance_type,
        "MaxCount": 1,
        "MinCount": 1,
        "TagSpecifications": [
            {"ResourceType": "instance", "Tags": [{"Key": "Name", "Value": f"CI-CD {instance_name}"}],},
        ],
        "BlockDeviceMappings": [{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 70,}}]
    }
    if user_data:
        arguments_dict["UserData"] = user_data
    if iam_instance_profile_name:
        arguments_dict["IamInstanceProfile"] = {"Name": iam_instance_profile_name}
    if ei_accelerator_type:
        arguments_dict["ElasticInferenceAccelerators"] = ei_accelerator_type
    # NOTE(review): these keys look like region *prefixes* ("us-west"), while
    # `region` defaults to DEFAULT_REGION (plausibly a full region name such
    # as "us-west-2"); if so the lookup below raises KeyError -- confirm.
    availability_zones = {
        "us-west": ["us-west-2a", "us-west-2b", "us-west-2c"],
        "us-east": ["us-east-1a", "us-east-1b", "us-east-1c"],
    }
    # Try each availability zone in turn; break out on the first success.
    for a_zone in availability_zones[region]:
        arguments_dict["Placement"] = {"AvailabilityZone": a_zone}
        try:
            response = client.run_instances(**arguments_dict)
            if response and len(response["Instances"]) >= 1:
                break
        except ClientError as e:
            print(f"Failed to launch in {a_zone} with Error: {e}")
            continue
    else:
        # for/else: no zone succeeded via break -- one last attempt (note the
        # Placement from the final zone is still set in arguments_dict).
        response = client.run_instances(**arguments_dict)
    if not response or len(response["Instances"]) < 1:
        raise Exception(
            "Unable to launch the instance. \
            Did not return any response"
        )
    return response["Instances"][0]
def get_ec2_client(region):
    """Return a boto3 EC2 client for *region* configured with up to 10 API retries."""
    retry_config = Config(retries={"max_attempts": 10})
    return boto3.client("ec2", region_name=region, config=retry_config)
def get_instance_from_id(instance_id, region=DEFAULT_REGION):
    """
    Get instance information using instance ID
    :param instance_id: Instance ID to be queried
    :param region: Region where query will be performed
    :return: <dict> Information about instance with matching instance ID
    :raises Exception: when instance_id is empty or the API returns no data
    """
    if not instance_id:
        raise Exception("No instance id provided")
    client = boto3.Session(region_name=region).client("ec2")
    instance = client.describe_instances(InstanceIds=[instance_id])
    if not instance:
        # Fixed: the previous message said "Unable to launch the instance",
        # which was misleading for a describe call.
        raise Exception(
            "Unable to describe the instance. \
            Did not return any reservations object"
        )
    return instance["Reservations"][0]["Instances"][0]
@retry(stop_max_attempt_number=16, wait_fixed=60000)
def get_public_ip(instance_id, region=DEFAULT_REGION):
    """
    Get Public IP of instance using instance ID
    :param instance_id: Instance ID to be queried
    :param region: Region where query will be performed
    :return: <str> IP Address of instance with matching instance ID
    :raises Exception: while the address is unassigned (consumed by @retry)
    """
    described = get_instance_from_id(instance_id, region)
    public_ip = described["PublicIpAddress"]
    if not public_ip:
        raise Exception("IP address not yet available")
    return public_ip
@retry(stop_max_attempt_number=16, wait_fixed=60000)
def get_public_ip_from_private_dns(private_dns, region=DEFAULT_REGION):
    """
    Get Public IP of instance using private DNS
    :param private_dns: private DNS name of the instance
    :param region: Region where query will be performed
    :return: <str> IP Address of instance with matching private DNS
    """
    client = boto3.Session(region_name=region).client("ec2")
    # Fixed: describe_instances expects Filters to be a *list* of
    # {"Name": ..., "Values": [...]} dicts; the previous code passed a single
    # dict with a singular "Value" key, which the EC2 API rejects.
    response = client.describe_instances(
        Filters=[{"Name": "private-dns-name", "Values": [private_dns]}]
    )
    return response.get("Reservations")[0].get("Instances")[0].get("PublicIpAddress")
@retry(stop_max_attempt_number=16, wait_fixed=60000)
def get_instance_user(instance_id, region=DEFAULT_REGION):
    """
    Get "ubuntu" or "ec2-user" based on AMI used to launch instance
    :param instance_id: Instance ID to be queried
    :param region: Region where query will be performed
    :return: <str> user name
    """
    if get_instance_from_id(instance_id, region)["ImageId"] in UL_AMI_LIST:
        return "ubuntu"
    return "ec2-user"
def get_instance_state(instance_id, region=DEFAULT_REGION):
    """
    Get state of instance using instance ID
    :param instance_id: Instance ID to be queried
    :param region: Region where query will be performed
    :return: <str> State of instance with matching instance ID
    """
    described = get_instance_from_id(instance_id, region)
    return described["State"]["Name"]
@retry(stop_max_attempt_number=16, wait_fixed=60000)
def check_instance_state(instance_id, state="running", region=DEFAULT_REGION):
    """
    Compares the instance state with the state argument.
    Retries 16 times with 60 seconds gap between retries (per the decorator;
    an earlier version of this docstring said 8 x 120s).
    :param instance_id: Instance ID to be queried
    :param state: Expected instance state
    :param region: Region where query will be performed
    :return: <str> State of instance with matching instance ID
    :raises Exception: when the state does not match (consumed by @retry)
    """
    instance_state = get_instance_state(instance_id, region)
    if state != instance_state:
        raise Exception(f"Instance {instance_id} not in {state} state")
    return instance_state
def get_system_state(instance_id, region=DEFAULT_REGION):
    """
    Returns health checks state for instances
    :param instance_id: Instance ID to be queried
    :param region: Region where query will be performed
    :return: <tuple> System state and Instance state of instance with matching instance ID
    :raises Exception: when no instance id is given or the status query is empty
    """
    if not instance_id:
        raise Exception("No instance id provided")
    client = boto3.Session(region_name=region).client("ec2")
    response = client.describe_instance_status(InstanceIds=[instance_id])
    if not response:
        # Fixed: the previous message said "Unable to launch the instance",
        # which was misleading for a status query.
        raise Exception(
            "Unable to describe instance status. \
            Did not return any response"
        )
    instance_status_list = response["InstanceStatuses"]
    # Fixed: merged the previous two redundant checks (`not list` and
    # `len(list) < 1` are the same condition) into one.
    if not instance_status_list:
        raise Exception(
            "The instance id seems to be incorrect {}. \
            reservations seems to be empty".format(
                instance_id
            )
        )
    instance_status = instance_status_list[0]
    return (
        instance_status["SystemStatus"]["Status"],
        instance_status["InstanceStatus"]["Status"],
    )
@retry(stop_max_attempt_number=96, wait_fixed=10000)
def check_system_state(instance_id, system_status="ok", instance_status="ok", region=DEFAULT_REGION):
    """
    Compares the system state (Health Checks).
    Retries 96 times with 10 seconds gap between retries
    :param instance_id: Instance ID to be queried
    :param system_status: Expected system state
    :param instance_status: Expected instance state
    :param region: Region where query will be performed
    :return: <tuple> System state and Instance state of instance with matching instance ID
    :raises Exception: when either state differs (consumed by @retry)
    """
    observed = get_system_state(instance_id, region=region)
    observed_system, observed_instance = observed
    if observed_system != system_status or observed_instance != instance_status:
        raise Exception(
            "Instance {} not in \
            required state".format(
                instance_id
            )
        )
    return observed
def terminate_instance(instance_id, region=DEFAULT_REGION):
    """
    Terminate EC2 instances with matching instance ID
    :param instance_id: Instance ID to be terminated
    :param region: Region where instance is located
    :raises Exception: when the API gives no/empty/mismatching confirmation
    """
    if not instance_id:
        raise Exception("No instance id provided")
    ec2_client = boto3.Session(region_name=region).client("ec2")
    response = ec2_client.terminate_instances(InstanceIds=[instance_id])
    if not response:
        raise Exception("Unable to terminate instance. No response received.")
    terminated = response["TerminatingInstances"]
    if not terminated:
        raise Exception("Failed to terminate instance.")
    # Confirm the API acted on the instance we asked for.
    if terminated[0]["InstanceId"] != instance_id:
        raise Exception("Failed to terminate instance. Unknown error.")
def get_instance_type_details(instance_type, region=DEFAULT_REGION):
"""
Get instance type details for a given instance type
:param instance_type: Instance type to be queried
:param region: Region where query will be performed
:return: <dict> | |
#!/Users/fa/anaconda/bin/python
'''
Evaluation code for the SICK dataset (SemEval 2014 Task 1)
'''
import sys
#sys.path = ['../gensim', '../models', '../utils'] + sys.path
sys.path = ['../', '../featuremodels', '../utils', '../monolingual-word-aligner'] + sys.path
# Local imports
import gensim, utils
from featuremodels import models as md
import math
#from gensim.models.fastsent import FastSent
from string import punctuation
from sklearn.preprocessing import normalize
import sklearn
from gensim.models import Word2Vec
from gensim import utils, matutils
import numpy as np
import copy
from sklearn.metrics import mean_squared_error as mse
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import Adam
from keras.models import model_from_json
from keras.models import load_model
from scipy.stats.stats import pearsonr
from sklearn import svm
from sklearn.linear_model import Ridge
import itertools
import pandas as pd
import pickle
import csv
## This flag is used to mark cases (sentences or sentence pairs) that a model cannot successfully vectorize
errorFlag = ["error flag"]
## lower case and removes punctuation from the input text
## (Python 2 str.translate: translate(None, deletechars) deletes those chars)
def process(s): return [i.lower().translate(None, punctuation).strip() for i in s]
## find features (a vector) describing the relation between two sentences.
## For each pair in zip(a, b), concatenates the per-model feature vectors;
## pairs whose extraction raises are marked with the errorFlag sentinel so
## callers can filter them out afterwards.
def pairFeatures(models, a,b):
    print "using method pairFeatures!"
    result = list()
    for sentenceA,sentenceB in itertools.izip(a,b):
        try:
            vector = list()
            for index , model in enumerate(models):
                # NOTE(review): these assignments rebind (shadow) the input
                # lists `a` and `b` inside the loop -- confirm intentional.
                a = "".join(sentenceA.split()).lower()
                b = "".join(sentenceB.split()).lower()
                # identical sentences short-circuit the aligner model with a
                # perfect-similarity feature pair
                if a==b and isinstance(model, md.align):
                    part = [1.0, 1.0]
                else:
                    part = model.pairFeatures(sentenceA, sentenceB)
                vector.extend(part)
                #print sentenceA, " & " , sentenceB , " Model " , index , ":" , part
            result.append(vector)
        except Exception, e:
            #print("ERROR: " + sentenceA + " & " + sentenceB)
            print "Couldn't do it: %s" % e
            print "sentence A: %s" % sentenceA
            # keep the sentinel so output stays aligned with the input pairs
            result.append(errorFlag)
    return result
def train(models, trainSet, devSet, df, seed=1234):
    ## Takes an input model that can calculate similarity features for sentence pairs
    ## Returns a regressor fitted on provided (gold) similarity scores, plus df
    ## extended with this model's predictions on the training set.
    ## NOTE(review): only models[0] is used. The dev-set feature computation
    ## below is commented out, so branches that reference devF/devY/devS
    ## (e.g. the md.bow branch) would raise NameError if taken -- confirm.
    #trainSet[0], trainSet[1], trainSet[2] = shuffle(trainSet[0], trainSet[1], trainSet[2], random_state=seed)
    print 'Computing feature vectors directly through model.pairFeatures() ...'
    trainF = np.asarray( pairFeatures([models[0]], process(trainSet[0]), process(trainSet[1])) )
    trainY = encode_labels(trainSet[2])
    # drop pairs whose feature extraction failed (marked with errorFlag)
    index = [i for i, j in enumerate(trainF) if j == errorFlag]
    trainF = np.asarray([x for i, x in enumerate(trainF) if i not in index])
    trainY = np.asarray([x for i, x in enumerate(trainY) if i not in index])
    trainAlign = np.asarray([x for i, x in enumerate(trainSet[2]) if i not in index])
    trainS = np.asarray([x for i, x in enumerate(trainSet[0]) if i not in index])
    #devF = np.asarray( pairFeatures(models, process(devSet[0]), process(devSet[1])) )
    #devY = encode_labels(devSet[2])
    #index = [i for i, j in enumerate(devF) if j == errorFlag]
    #devF = np.asarray([x for i, x in enumerate(devF) if i not in index])
    #devY = np.asarray([x for i, x in enumerate(devY) if i not in index])
    #devS = np.asarray([x for i, x in enumerate(devSet[2]) if i not in index])
    #devAlign = np.asarray([x for i, x in enumerate(devSet[2]) if i not in index])
    ## Train the per-model regressor on the features computed above
    currmodel = None
    if isinstance(models[0], md.bow):
        print 'Compiling Keras Logit model...'
        lrmodel = prepare_model(dim= trainF.shape[1])#, ninputs=trainF.shape[0])
        bestlrmodel = train_model(lrmodel, trainF, trainY, devF, devY, devS)
        # expectation over the 5 similarity bins recovers a real-valued score
        r = np.arange(1,6)
        yhat = np.dot(bestlrmodel.predict_proba(devF, verbose=0), r)
        pr = pearsonr(yhat, devS)[0]
        sr = spearmanr(yhat, devS)[0]
        se = mse(yhat, devS)
        currmodel = bestlrmodel
        df['bow'] = np.dot(bestlrmodel.predict_proba(trainF, verbose=0), r)
    if isinstance(models[0], md.featureBased):
        print 'Compiling FB svr model...'
        bestsvrmodel = svm.SVR()
        print(trainF.shape)
        print(trainY.shape)
        bestsvrmodel.fit(trainF, trainSet[2])
        yhat = bestsvrmodel.predict(devF)
        pr = pearsonr(yhat, devS)[0]
        sr = spearmanr(yhat, devS)[0]
        se = mse(yhat, devS)
        currmodel = bestsvrmodel
        df['fb'] = bestsvrmodel.predict(trainF)
    if isinstance(models[0], md.align):
        print 'Compiling word aligner model...'
        alignermodel = svm.SVR()
        print(trainF.shape)
        alignermodel.fit(trainF, trainAlign)
        currmodel = alignermodel
        '''
        bestRmodel = Ridge(alpha=1.0)
        bestsvrmodel.fit(trainF, trainSet[2])
        yhat = alignermodel.predict(devF)
        pr = pearsonr(yhat, devS)[0]
        sr = spearmanr(yhat, devS)[0]
        se = mse(yhat, devS)
        currmodel = alignermodel
        df['aligner'] = alignermodel.predict(trainF)
        df['target'] = trainAlign
        '''
    print("\n************ SUMMARY DEV***********")
    print 'Train data size: ' + str(len(trainY))
    #print 'Dev data size: ' + str(len(devY))
    #print 'Dev Pearson: ' + str(pr)
    #print 'Dev Spearman: ' + str(sr)
    #print 'Dev MSE: ' + str(se)
    print("********************************")
    return currmodel, df
def test(models, classifier, testSet):
    ## Takes a regressor already trained for scoring similarity between two sentences based on the model
    ## Returns a dataframe of sentence pairs with gold scores, predictions and absolute errors
    print 'Computing feature vectors directly through model.pairFeatures() ...'
    testF = np.asarray( pairFeatures(models, process(testSet[0]), process(testSet[1])) )
    # drop pairs whose feature extraction failed (marked with errorFlag)
    index = [i for i, j in enumerate(testF) if j == errorFlag]
    testF = np.asarray([x for i, x in enumerate(testF) if i not in index])
    testS = np.asarray([x for i, x in enumerate(testSet[2]) if i not in index])
    if isinstance(models[0], md.bow):
        # Keras logit outputs a distribution over the 5 similarity bins; its
        # expectation (dot with 1..5) recovers a real-valued score
        r = np.arange(1,6)
        yhat = np.dot(classifier.predict_proba(testF, verbose=0), r)
        pr = pearsonr(yhat, testS)[0]
        sr = spearmanr(yhat, testS)[0]
        se = mse(yhat, testS)
    else:
        yhat = classifier.predict(testF)
        pr = pearsonr(yhat, testS)[0]
        sr = spearmanr(yhat, testS)[0]
        se = mse(yhat, testS)
    print("\n************ SUMMARY TEST***********")
    print 'Test data size: ' + str(len(testS))
    print 'Test Pearson: ' + str(pr)
    print 'Test Spearman: ' + str(sr)
    print 'Test MSE: ' + str(se)
    print("********************************")
    # assemble a per-pair error report, restricted to successfully scored pairs
    sentenceA = np.asarray([x for i, x in enumerate(process(testSet[0])) if i not in index])
    sentenceB = np.asarray([x for i, x in enumerate(process(testSet[1])) if i not in index])
    a = [ (sentenceA[i], sentenceB[i], testS[i], yhat[i], np.abs(testS[i] - yhat[i]) ) for i,s in enumerate(sentenceA) ]
    b = pd.DataFrame(a, columns = ['target','response','score','prediction','error'])
    #print(b.sort(['error', 'score']))
    return b
def prepare_model(dim, nclass=5):
    """
    Set up and compile the model architecture (Logistic regression)

    A single softmax Dense layer mapping a dim-sized feature vector onto
    nclass similarity bins, trained with categorical cross-entropy.
    """
    logit = Sequential()
    # input_dim: twice the sentence-vector size, or the final feature size
    logit.add(Dense(nclass, input_dim=dim))
    logit.add(Activation('softmax'))
    logit.compile(loss='categorical_crossentropy', optimizer='adam')
    return logit
def train_model(lrmodel, X, Y, devX, devY, devscores):
    """
    Train model, using pearsonr on dev for early stopping
    """
    done = False
    best = -1.0
    # bin indices 1..5; expectation over them recovers a real-valued score
    r = np.arange(1,6)
    while not done:
        # Check Pearson on the development set after each fit() call.
        # NOTE(review): the original "Every 100 epochs" comment appears stale;
        # confirm how many epochs each fit() call actually runs.
        lrmodel.fit(X, Y, verbose=0, shuffle=False, validation_data=(devX, devY))
        yhat = np.dot(lrmodel.predict_proba(devX, verbose=0), r)
        score = pearsonr(yhat, devscores)[0]
        if score > best:
            print 'Dev Pearson: = ' + str(score)
            best = score
            ## FA: commented out the following line because of the new keras version problem with deepcopy
            ## FA: note the model scored right after the best model will be returned (not too bad though, usually the difference is so small)
            #bestlrmodel = copy.deepcopy(lrmodel)
        else:
            # dev Pearson stopped improving: early-stop
            done = True
    ## FA: changed here:
    #yhat = np.dot(bestlrmodel.predict_proba(devX, verbose=0), r)
    yhat = np.dot(lrmodel.predict_proba(devX, verbose=0), r)
    score = pearsonr(yhat, devscores)[0]
    print 'Dev Pearson: ' + str(score)
    ## FA: changed here:
    #return bestlrmodel
    return lrmodel
def encode_labels(labels, nclass=5):
    """
    Label encoding from Tree LSTM paper (Tai, Socher, Manning)

    Each real-valued score y in [1, nclass] becomes a sparse probability
    vector whose mass sits on the two integer bins bracketing y, so that
    the expectation over 1-based bin indices recovers y.
    """
    encoded = np.zeros((len(labels), nclass)).astype('float32')
    for row, score in enumerate(labels):
        base = np.floor(score)
        frac = score - base
        for col in range(nclass):
            bin_value = col + 1
            if bin_value == base + 1:
                encoded[row, col] = frac
            if bin_value == base:
                encoded[row, col] = 1 - frac
    return encoded
def load_data_SICK(loc='../data/SICK/'):
    """
    Load the SICK semantic-relatedness dataset

    Reads SICK_train.txt, SICK_trial.txt and SICK_test_annotated.txt from
    *loc*; each tab-separated line carries sentence A (col 1), sentence B
    (col 2) and the gold relatedness score (col 3). The first row of each
    file is skipped via the [1:] slicing below -- it is treated as a header.
    Returns three [sentencesA, sentencesB, scores] triples:
    train, dev (trial) and test.
    """
    trainA, trainB, devA, devB, testA, testB = [],[],[],[],[],[]
    trainS, devS, testS = [],[],[]
    with open(loc + 'SICK_train.txt', 'rb') as f:
        for line in f:
            text = line.strip().split('\t')
            trainA.append(text[1])
            trainB.append(text[2])
            trainS.append(text[3])
    with open(loc + 'SICK_trial.txt', 'rb') as f:
        for line in f:
            text = line.strip().split('\t')
            devA.append(text[1])
            devB.append(text[2])
            devS.append(text[3])
    with open(loc + 'SICK_test_annotated.txt', 'rb') as f:
        for line in f:
            text = line.strip().split('\t')
            testA.append(text[1])
            testB.append(text[2])
            testS.append(text[3])
    # convert gold scores to floats, dropping the header row
    trainS = [float(s) for s in trainS[1:]]
    devS = [float(s) for s in devS[1:]]
    testS = [float(s) for s in testS[1:]]
    return [trainA[1:], trainB[1:], trainS], [devA[1:], devB[1:], devS], [testA[1:], testB[1:], testS]
def load_data_STS(loc='../data/SICK/'):
"""
Load the SICK semantic-relatedness dataset
"""
trainA, trainB, devA, devB, testA, testB = [],[],[],[],[],[]
trainS, devS, testS = [],[],[]
with open(loc + 'ftrain.csv', 'rb') as f:
for line in f:
text = line.strip().split('\t')
trainA.append(text[1])
trainB.append(text[2])
trainS.append(text[3])
with open(loc + 'SICK_trial.txt', 'rb') as f:
for line in f:
text = line.strip().split('\t')
devA.append(text[1])
devB.append(text[2])
devS.append(text[3])
with open(loc + 'tf2017.csv', 'rb') as f:
for line in f:
text = line.strip().split('\t')
testA.append(text[1])
testB.append(text[2])
testS.append(text[3])
trainS = pd.read_csv(loc + 'ftrain.csv', sep='\t').loc[:,'relatedness_score'].tolist()
devS = [float(s) for s in | |
# "x": "NEW", # Current execution type
# "X": "NEW", # Current order status
# "r": "NONE", # Order reject reason; will be an error code.
# "i": 4293153, # Order ID
# "l": "0.00000000", # Last executed quantity
# "z": "0.00000000", # Cumulative filled quantity
# "L": "0.00000000", # Last executed price
# "n": "0", # Commission amount
# "N": null, # Commission asset
# "T": 1499405658657, # Transaction time
# "t": -1, # Trade ID
# "I": 8641984, # Ignore
# "w": True, # Is the order on the book?
# "m": False, # Is self trade the maker side?
# "M": False, # Ignore
# "O": 1499405658657, # Order creation time
# "Z": "0.00000000", # Cumulative quote asset transacted quantity
# "Y": "0.00000000" # Last quote asset transacted quantity(i.e. lastPrice * lastQty),
# "Q": "0.00000000" # Quote Order Qty
# }
#
# future
#
# {
# "s":"BTCUSDT", # Symbol
# "c":"TEST", # Client Order Id
# # special client order id:
# # starts with "autoclose-": liquidation order
# # "adl_autoclose": ADL auto close order
# "S":"SELL", # Side
# "o":"TRAILING_STOP_MARKET", # Order Type
# "f":"GTC", # Time in Force
# "q":"0.001", # Original Quantity
# "p":"0", # Original Price
# "ap":"0", # Average Price
# "sp":"7103.04", # Stop Price. Please ignore with TRAILING_STOP_MARKET order
# "x":"NEW", # Execution Type
# "X":"NEW", # Order Status
# "i":8886774, # Order Id
# "l":"0", # Order Last Filled Quantity
# "z":"0", # Order Filled Accumulated Quantity
# "L":"0", # Last Filled Price
# "N":"USDT", # Commission Asset, will not push if no commission
# "n":"0", # Commission, will not push if no commission
# "T":1568879465651, # Order Trade Time
# "t":0, # Trade Id
# "b":"0", # Bids Notional
# "a":"9.91", # Ask Notional
# "m":false, # Is self trade the maker side?
# "R":false, # Is self reduce only
# "wt":"CONTRACT_PRICE", # Stop Price Working Type
# "ot":"TRAILING_STOP_MARKET", # Original Order Type
# "ps":"LONG", # Position Side
# "cp":false, # If Close-All, pushed with conditional order
# "AP":"7476.89", # Activation Price, only puhed with TRAILING_STOP_MARKET order
# "cr":"5.0", # Callback Rate, only puhed with TRAILING_STOP_MARKET order
# "rp":"0" # Realized Profit of the trade
# }
#
executionType = self.safe_string(order, 'x')
orderId = self.safe_string(order, 'i')
marketId = self.safe_string(order, 's')
symbol = self.safe_symbol(marketId)
timestamp = self.safe_integer(order, 'O')
T = self.safe_integer(order, 'T')
lastTradeTimestamp = None
if executionType == 'NEW':
if timestamp is None:
timestamp = T
elif executionType == 'TRADE':
lastTradeTimestamp = T
fee = None
feeCost = self.safe_float(order, 'n')
if (feeCost is not None) and (feeCost > 0):
feeCurrencyId = self.safe_string(order, 'N')
feeCurrency = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
price = self.safe_float(order, 'p')
amount = self.safe_float(order, 'q')
side = self.safe_string_lower(order, 'S')
type = self.safe_string_lower(order, 'o')
filled = self.safe_float(order, 'z')
cumulativeQuote = self.safe_float(order, 'Z')
remaining = amount
average = self.safe_float(order, 'ap')
cost = cumulativeQuote
if filled is not None:
if cost is None:
if price is not None:
cost = filled * price
if amount is not None:
remaining = max(amount - filled, 0)
if (average is None) and (cumulativeQuote is not None) and (filled > 0):
average = cumulativeQuote / filled
rawStatus = self.safe_string(order, 'X')
status = self.parse_order_status(rawStatus)
trades = None
clientOrderId = self.safe_string(order, 'C')
if (clientOrderId is None) or (len(clientOrderId) == 0):
clientOrderId = self.safe_string(order, 'c')
stopPrice = self.safe_float_2(order, 'P', 'sp')
timeInForce = self.safe_string(order, 'f')
return {
'info': order,
'symbol': symbol,
'id': orderId,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'type': type,
'timeInForce': timeInForce,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': trades,
}
def handle_order_update(self, client, message):
#
# spot
#
# {
# "e": "executionReport", # Event type
# "E": 1499405658658, # Event time
# "s": "ETHBTC", # Symbol
# "c": "mUvoqJxFIILMdfAW5iGSOW", # Client order ID
# "S": "BUY", # Side
# "o": "LIMIT", # Order type
# "f": "GTC", # Time in force
# "q": "1.00000000", # Order quantity
# "p": "0.10264410", # Order price
# "P": "0.00000000", # Stop price
# "F": "0.00000000", # Iceberg quantity
# "g": -1, # OrderListId
# "C": null, # Original client order ID; This is the ID of the order being canceled
# "x": "NEW", # Current execution type
# "X": "NEW", # Current order status
# "r": "NONE", # Order reject reason; will be an error code.
# "i": 4293153, # Order ID
# "l": "0.00000000", # Last executed quantity
# "z": "0.00000000", # Cumulative filled quantity
# "L": "0.00000000", # Last executed price
# "n": "0", # Commission amount
# "N": null, # Commission asset
# "T": 1499405658657, # Transaction time
# "t": -1, # Trade ID
# "I": 8641984, # Ignore
# "w": True, # Is the order on the book?
# "m": False, # Is self trade the maker side?
# "M": False, # Ignore
# "O": 1499405658657, # Order creation time
# "Z": "0.00000000", # Cumulative quote asset transacted quantity
# "Y": "0.00000000" # Last quote asset transacted quantity(i.e. lastPrice * lastQty),
# "Q": "0.00000000" # Quote Order Qty
# }
#
# future
#
# {
# "e":"ORDER_TRADE_UPDATE", # Event Type
# "E":1568879465651, # Event Time
# "T":1568879465650, # Trasaction Time
# "o": {
# "s":"BTCUSDT", # Symbol
# "c":"TEST", # Client Order Id
# # special client order id:
# # starts with "autoclose-": liquidation order
# # "adl_autoclose": ADL auto close order
# "S":"SELL", # Side
# "o":"TRAILING_STOP_MARKET", # Order Type
# "f":"GTC", # Time in Force
# "q":"0.001", # Original Quantity
# "p":"0", # Original Price
# "ap":"0", # Average Price
# "sp":"7103.04", # Stop Price. Please ignore with TRAILING_STOP_MARKET order
# "x":"NEW", # Execution Type
# "X":"NEW", # Order Status
# "i":8886774, # Order Id
# "l":"0", # Order Last Filled Quantity
# "z":"0", # Order Filled Accumulated Quantity
# "L":"0", # Last Filled Price
# "N":"USDT", # Commission Asset, will not push if no commission
# "n":"0", # Commission, will not push if no commission
# "T":1568879465651, # Order Trade Time
# "t":0, # Trade Id
# "b":"0", # Bids Notional
# "a":"9.91", # Ask Notional
# "m":false, # Is self trade the maker side?
# "R":false, # Is self reduce only
# "wt":"CONTRACT_PRICE", # Stop Price Working Type
# "ot":"TRAILING_STOP_MARKET", # Original Order Type
# "ps":"LONG", # Position Side
# "cp":false, # If Close-All, pushed with conditional order
# "AP":"7476.89", # Activation Price, only puhed with TRAILING_STOP_MARKET order
# "cr":"5.0", # Callback Rate, only puhed with TRAILING_STOP_MARKET order
# "rp":"0" # Realized Profit of the trade
# }
# }
#
e = self.safe_string(message, 'e')
if e == 'ORDER_TRADE_UPDATE':
message = self.safe_value(message, 'o', message)
self.handle_my_trade(client, message)
self.handle_order(client, message)
async def watch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
await self.authenticate(params)
defaultType = self.safe_string_2(self.options, 'watchMyTrades', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
url = self.urls['api']['ws'][type] + '/' + self.options[type]['listenKey']
messageHash = 'myTrades'
if symbol is not None:
messageHash += ':' + symbol
client = self.client(url)
self.set_balance_cache(client, type)
message = None
trades = await self.watch(url, messageHash, message, type)
if self.newUpdates:
limit = trades.getLimit(symbol, limit)
return self.filter_by_symbol_since_limit(trades, symbol, since, limit, True)
def handle_my_trade(self, client, message):
messageHash = 'myTrades'
executionType = self.safe_string(message, 'x')
if executionType == 'TRADE':
trade = self.parse_trade(message)
orderId = self.safe_string(trade, 'order')
tradeFee = self.safe_value(trade, 'fee')
symbol = self.safe_string(trade, 'symbol')
if orderId is not None and tradeFee is not None and symbol is not None:
cachedOrders = self.orders
if cachedOrders is not None:
orders = self.safe_value(cachedOrders.hashmap, symbol, {})
order = self.safe_value(orders, orderId)
if order is not None:
# accumulate order fees
fees = self.safe_value(order, 'fees')
fee = self.safe_value(order, 'fee')
if fees is not None:
insertNewFeeCurrency = True
for i in range(0, len(fees)):
orderFee = fees[i]
if orderFee['currency'] | |
"""This module contains functions relevant to the ALARA activation code and the Chebyshev Rational Approximation Method
"""
from __future__ import print_function
from pyne.xs.data_source import SimpleDataSource
from pyne.data import N_A, decay_const, decay_children, branch_ratio
from pyne.nucname import serpent, alara, znum, anum
from pyne import nucname
from pyne.material import Material, from_atom_frac
from pyne.mesh import Mesh, MeshError, HAVE_PYMOAB
import os
import collections
from warnings import warn
from pyne.utils import QAWarning, to_sec
import numpy as np
import tables as tb
# Emit a quality-assurance warning on import: this module has not yet been
# through the PyNE QA process.
warn(__name__ + " is not yet QA compliant.", QAWarning)
# Python 2/3 compatibility: ``basestring`` does not exist on Python 3, so
# alias it to ``str`` there.
try:
    basestring
except NameError:
    basestring = str
# ``mesh_iterate`` is only available when the optional PyMOAB dependency is
# installed; warn rather than fail so non-mesh functionality still works.
if HAVE_PYMOAB:
    from pyne.mesh import mesh_iterate
else:
    warn("The PyMOAB optional dependency could not be imported. "
         "Some aspects of the mesh module may be incomplete.", QAWarning)
def mesh_to_fluxin(flux_mesh, flux_tag, fluxin="fluxin.out",
                   reverse=False, sub_voxel=False, cell_fracs=None,
                   cell_mats=None):
    """Write an ALARA fluxin file from fluxes tagged on a PyNE Mesh.

    Fluxes are written in flux_mesh.__iter__() order.

    Parameters
    ----------
    flux_mesh : PyNE Mesh object
        The mesh with fluxes tagged on each volume element.
    flux_tag : string
        Name of the flux tag; multiple energy groups are assumed to be
        stored as vector tags.
    fluxin : string
        Path of the ALARA fluxin file to write.
    reverse : bool
        If True, print fluxes in the reverse of their order in the tagged
        flux vector.
    sub_voxel : bool, optional
        If True, use the sub-voxel R2S workflow: the flux of a voxel is
        duplicated once per non-void cell in that voxel.
    cell_fracs : structured array, optional
        Output of dagmc.discretize_geom(), sorted by idx then cell, with
        fields idx (int), cell (int), vol_frac (float), rel_error (float).
        Used only when sub_voxel=True.
    cell_mats : dict, optional
        Maps geometry cell numbers to PyNE Material objects.
        Used only when sub_voxel=True.
    """
    flux_data = flux_mesh.get_tag(flux_tag)
    # Determine the number of energy groups from the first volume element.
    first_ve = list(mesh_iterate(flux_mesh.mesh))[0]
    num_e_groups = len(np.atleast_1d(flux_data[first_ve]))
    # Print forwards or backwards through the group structure.
    if reverse:
        start, stop, direction = num_e_groups - 1, -1, -1
    else:
        start, stop, direction = 0, num_e_groups, 1
    text = ""
    if sub_voxel:
        ves = list(flux_mesh.iter_ve())
        for row in cell_fracs:
            # Only cells with a non-empty material composition are printed.
            if len(cell_mats[row['cell']].comp) != 0:
                text = _output_flux(ves[row['idx']], flux_data, text,
                                    start, stop, direction)
    else:
        for _, _, ve in flux_mesh:
            text = _output_flux(ve, flux_data, text, start, stop, direction)
    with open(fluxin, "w") as f:
        f.write(text)
def photon_source_to_hdf5(filename, chunkshape=(10000,)):
    """Converts a plaintext photon source file to an HDF5 version for
    quick later use.

    This function produces a single HDF5 file named <filename>.h5 containing
    the table headings:

    idx : int
        The volume element index assuming the volume elements appear in xyz
        order (z changing fastest) within the photon source file in the case
        of a structured mesh or mesh.mesh_iterate() order for an unstructured
        mesh.
    nuc : str
        The nuclide name as it appears in the photon source file.
    time : str
        The decay time as it appears in the photon source file.
    phtn_src : 1D array of floats
        Contains the photon source density for each energy group.

    Parameters
    ----------
    filename : str
        The path to the file
    chunkshape : tuple of int
        A 1D tuple of the HDF5 chunkshape.
    """
    with open(filename, 'r') as f:
        # The number of energy groups is the number of columns beyond the
        # nuclide and decay-time columns of the first line.
        header = f.readline().strip().split('\t')
        f.seek(0)
        G = len(header) - 2
        dt = np.dtype([
            ('idx', np.int64),
            ('nuc', 'S6'),
            ('time', 'S20'),
            ('phtn_src', np.float64, G),
        ])
        filters = tb.Filters(complevel=1, complib='zlib')
        h5f = tb.open_file(filename + '.h5', 'w', filters=filters)
        try:
            tab = h5f.create_table('/', 'data', dt, chunkshape=chunkshape)
            chunksize = chunkshape[0]
            rows = np.empty(chunksize, dtype=dt)
            idx = 0
            old = ""
            # Initialise loop variables so an empty input file does not
            # raise NameError in the final-flush check below.
            i = 0
            j = -1
            for i, line in enumerate(f, 1):
                ls = line.strip().split('\t')
                # Keep track of the idx by delimiting by the last TOTAL line
                # in a volume element.
                if ls[0] != 'TOTAL' and old == 'TOTAL':
                    idx += 1
                j = (i - 1) % chunksize
                rows[j] = (idx, ls[0].strip(), ls[1].strip(),
                           np.array(ls[2:], dtype=np.float64))
                # Save the nuclide in order to keep track of idx
                old = ls[0]
                # Write out each complete chunk and start a fresh buffer.
                if i % chunksize == 0:
                    tab.append(rows)
                    rows = np.empty(chunksize, dtype=dt)
            # Flush the final, partially filled chunk (if any).
            if i % chunksize != 0:
                tab.append(rows[:j + 1])
        finally:
            # Close the HDF5 file even if parsing fails part-way through.
            h5f.close()
def photon_source_hdf5_to_mesh(mesh, filename, tags, sub_voxel=False,
                               cell_mats=None):
    """This function reads in an hdf5 file produced by photon_source_to_hdf5
    and tags the requested data to the mesh of a PyNE Mesh object. Any
    combinations of nuclides and decay times are allowed. The photon source
    file is assumed to be in mesh.__iter__() order

    Parameters
    ----------
    mesh : PyNE Mesh
        The object containing the PyMOAB instance to be tagged.
    filename : str
        The path of the hdf5 version of the photon source file.
    tags: dict
        A dictionary where the keys are tuples with two values. The first is
        a string denoting an nuclide in any form that is understood by
        pyne.nucname (e.g. '1001', 'U-235', '242Am') or 'TOTAL' for all
        nuclides. The second is a string denoting the decay time as it
        appears in the file (e.g. 'shutdown', '1 h' '3 d'). The values of
        the dictionary are the requested tag names for the combination of
        nuclide and decay time. For example if one wanted tags for the
        photon source densities from U235 at shutdown and from all nuclides
        at 1 hour, the dictionary could be:
        tags = {('U-235', 'shutdown') : 'tag1', ('TOTAL', '1 h') : 'tag2'}
    sub_voxel: bool, optional
        If the sub_voxel is True, then the sub-voxel r2s will be used.
        Then the photon_source will be interpreted as sub-voxel photon
        source.
    cell_mats : dict, optional
        cell_mats is required when sub_voxel is True.
        Maps geometry cell numbers to PyNE Material objects.
    """
    # find number of energy groups from the phtn_src column of the first row
    with tb.open_file(filename) as h5f:
        num_e_groups = len(h5f.root.data[0][3])
    max_num_cells = 1
    ve0 = next(mesh.iter_ve())
    if sub_voxel:
        num_vol_elements = len(mesh)
        subvoxel_array = _get_subvoxel_array(mesh, cell_mats)
        # get max_num_cells: the largest number of cells in any one voxel
        max_num_cells = len(np.atleast_1d(mesh.cell_number[ve0]))
    # create a dict of tag handles for all keys of the tags dict; each tag
    # holds num_e_groups values per cell (one cell per voxel unless
    # sub_voxel is True)
    tag_handles = {}
    tag_size = num_e_groups * max_num_cells
    for tag_name in tags.values():
        mesh.tag(tag_name, np.zeros(tag_size, dtype=float), 'nat_mesh',
                 size=tag_size, dtype=float)
        tag_handles[tag_name] = mesh.get_tag(tag_name)
    # create a list of decay times (strings) in the source file
    phtn_src_dc = []
    with tb.open_file(filename) as h5f:
        for row in h5f.root.data:
            phtn_src_dc.append(row[2])
    phtn_src_dc = list(set(phtn_src_dc))
    # iterate through each requested nuclide/decay time
    for cond in tags.keys():
        with tb.open_file(filename) as h5f:
            # Convert nuclide to the form found in the ALARA phtn_src
            # file, which is similar to the Serpent form. Note this form is
            # different from the ALARA input nuclide form found in nucname.
            if cond[0] != "TOTAL":
                nuc = serpent(cond[0]).lower()
            else:
                nuc = "TOTAL"
            # time match: map the requested decay-time string to the exact
            # string used in the file
            dc = _find_phsrc_dc(cond[1], phtn_src_dc)
            # create an array of rows that match the nuclide/decay criteria
            matched_data = h5f.root.data.read_where(
                "(nuc == '{0}') & (time == '{1}')".format(nuc, dc))
            if not sub_voxel:
                # Rows are assumed to be in mesh.__iter__() order; voxels
                # with no matching row are tagged with zeros.
                idx = 0
                for i, _, ve in mesh:
                    if matched_data[idx][0] == i:
                        tag_handles[tags[cond]][ve] = matched_data[idx][3]
                        idx += 1
                    else:
                        tag_handles[tags[cond]][ve] = [0] * num_e_groups
            else:
                # Scatter sub-voxel rows into a (voxel, cell, group) array,
                # then flatten each voxel's data into its tag.
                temp_mesh_data = np.empty(
                    shape=(num_vol_elements, max_num_cells, num_e_groups),
                    dtype=float)
                temp_mesh_data.fill(0.0)
                for sve, subvoxel in enumerate(subvoxel_array):
                    temp_mesh_data[subvoxel['idx'], subvoxel['scid'], :] = \
                        matched_data[sve][3][:]
                for i, _, ve in mesh:
                    tag_handles[tags[cond]][ve] = \
                        temp_mesh_data[i, :].reshape(max_num_cells * num_e_groups)
def record_to_geom(mesh, cell_fracs, cell_mats, geom_file, matlib_file,
sig_figs=6, sub_voxel=False):
"""This function preforms the same task as alara.mesh_to_geom, except the
geometry is on the basis of the stuctured array output of
dagmc.discretize_geom rather than a PyNE material object with materials.
This allows for more efficient ALARA runs by minimizing the number of
materials in the ALARA matlib. This is done by treating mixtures that are
equal up to <sig_figs> digits to be the same mixture | |
<filename>inselect/gui/main_window.py<gh_stars>100-1000
import sys
from datetime import datetime
from functools import partial
from itertools import count
from pathlib import Path
from PyQt5 import QtWidgets
from PyQt5.QtCore import (Qt, QEvent, QSettings, QItemSelection,
QItemSelectionModel, QStandardPaths)
from PyQt5.QtGui import (QColor, QFont, QIcon, QImageWriter, QKeySequence,
QPixmap)
from PyQt5.QtWidgets import (QAction, QActionGroup, QFileDialog, QLabel,
QMainWindow, QMenu, QMessageBox, QSizePolicy,
QSplitter, QStackedWidget, QVBoxLayout, QWidget)
# This import is to register our icon resources with QT
import inselect.gui.icons # noqa
from inselect.lib.document import InselectDocument
from inselect.lib.document_export import DocumentExport
from inselect.lib.ingest import ingest_image, IMAGE_PATTERNS, IMAGE_SUFFIXES_RE
from inselect.lib.inselect_error import InselectError
from inselect.lib.utils import debug_print, is_writable
from . import prompts
from .about import show_about_box
from .colours import colour_scheme_choice
from .cookie_cutter_choice import cookie_cutter_choice
from .cookie_cutter_widget import CookieCutterWidget
from .format_validation_problems import format_validation_problems
from .info_widget import InfoWidget
from .model import Model
from .navigator import NavigatorView
from .plugins.barcode import BarcodePlugin
from .plugins.segment import SegmentPlugin
from .plugins.subsegment import SubsegmentPlugin
from .recent_documents import RecentDocuments
from .toolbar_ribbon import ToolbarRibbon
from .roles import RotationRole
from .shortcuts_help import show_shortcuts, show_shortcuts_post_startup
from .sidebar import SideBar
from .sort_document_items import sort_items_choice
from .user_template_choice import user_template_choice
from .utils import contiguous, qimage_of_bgr, load_icon
from .views.boxes import BoxesView, GraphicsItemView
from .views.metadata import MetadataView
from .views.object import ObjectView
from .views.selector import SelectorView
from .views.summary import SummaryView
from .worker_thread import WorkerThread
class MainWindow(QMainWindow):
    """The application's main window
    """
    # File-dialog filter offering both Inselect documents and raw images
    DOCUMENT_FILE_FILTER = 'Inselect documents (*{0});;Images ({1})'.format(
        InselectDocument.EXTENSION,
        ' '.join(IMAGE_PATTERNS)
    )
    # File-dialog filter offering images only
    IMAGE_FILE_FILTER = 'Images ({0})'.format(' '.join(IMAGE_PATTERNS))
    def __init__(self, print_time=False):
        """Constructs actions, views, widgets, toolbars and menus and wires
        up signals.

        If print_time is True, will print, when a document is closed, the
        elapsed time for which the document was open.
        """
        super(MainWindow, self).__init__()
        self.print_time = print_time
        # Document state; populated by open_document / cleared by
        # empty_document
        self.document = None
        self.document_path = None
        # Long-running operations are run in their own thread
        self.running_operation = None
        self.time_doc_opened = None
        # self.setWindowFlags(Qt.Window | Qt.FramelessWindowHint)
        # Plugins
        self.plugins = (SegmentPlugin, SubsegmentPlugin, BarcodePlugin)
        # QActions. Populated in self._create_menu_actions()
        self.plugin_actions = len(self.plugins) * [None]
        # QActions. Populated in self._create_menu_actions()
        self.plugin_config_ui_actions = len(self.plugins) * [None]
        self.plugin_image = None
        self.plugin_image_visible = False
        # Colour scheme QActions. Populated in self._create_menu_actions() and
        # self._create_non_menu_actions()
        self.colour_scheme_actions = []
        # Model
        self.model = Model()
        self.model.modified_changed.connect(self.modified_changed)
        self._create_menu_actions()
        self._create_non_menu_actions()
        self._create_views()
        self._create_widgets()
        self._create_toolbars()
        self._create_menus()
        # Connect signals
        self.ribbon.currentChanged.connect(self.current_tab_changed)
        colour_scheme_choice().colour_scheme_changed.connect(
            self.colour_scheme_changed
        )
        self.boxes_view.viewport_changed.connect(
            self.view_navigator.navigator.new_focus_rect
        )
        # TODO LH Syncing the UI every time the boxes view's viewport changes
        # is inefficient. We only need to set the checked states of
        # self.zoom_to_selection_action and
        # self.zoom_home_action.setChecked as the viewport changes.
        self.boxes_view.viewport_changed.connect(
            self.sync_ui
        )
        self.view_object.selectionModel().selectionChanged.connect(
            self.selection_changed
        )
        # Main window layout
        self.setCentralWidget(self.central)
        # Event filters, for handling drag and drop
        self.ribbon.installEventFilter(self)
        self.boxes_view.installEventFilter(self)
        self.view_metadata.installEventFilter(self)
        self.view_object.installEventFilter(self)
        self.view_summary.widget.installEventFilter(self)
        self.view_selector.installEventFilter(self)
        self.view_navigator.widget.installEventFilter(self)
        self.setAcceptDrops(True)
        self.sync_status_message()
        self.empty_document()
def _create_views(self):
"Creates view objects"
# Boxes view
self.view_graphics_item = GraphicsItemView()
# self.boxes_view is a QGraphicsView, not a QAbstractItemView
self.boxes_view = BoxesView(self.view_graphics_item.scene)
# Object, metadata and summary views
self.view_metadata = MetadataView()
self.view_object = ObjectView()
self.view_summary = SummaryView()
self.view_selector = SelectorView()
self.view_navigator = NavigatorView()
# Set model
self.view_graphics_item.setModel(self.model)
self.view_metadata.setModel(self.model)
self.view_object.setModel(self.model)
self.view_summary.setModel(self.model)
self.view_selector.setModel(self.model)
self.view_navigator.setModel(self.model)
# A consistent selection across all views
sm = self.view_object.selectionModel()
self.view_graphics_item.setSelectionModel(sm)
self.view_metadata.setSelectionModel(sm)
self.view_summary.setSelectionModel(sm)
self.view_selector.setSelectionModel(sm)
self.view_navigator.setSelectionModel(sm)
def _create_widgets(self):
"Creates widgets owned by the MainWindow"
# Ribbon of toolbars - populated in self._create_toolbars
# TODO get these colours from stylesheet
self.ribbon = ToolbarRibbon(
QColor(0x4f, 0x4f, 0x4f),
QColor(0xdd, 0xdd, 0xdd)
)
font = self.ribbon.font()
font.setStyleStrategy(QFont.PreferAntialias)
font = self.ribbon.setFont(font)
# Views in a stack
self.views = QStackedWidget()
self.views.addWidget(self.boxes_view)
self.views.addWidget(self.view_object)
# Information about the loaded document
self.info_widget = InfoWidget()
# Side bar containing navigator, metadata and document information
sidebar_layout = QVBoxLayout()
sidebar_layout.addWidget(self.view_navigator.widget)
sidebar_layout.addWidget(self.view_metadata.widget)
sidebar_layout.addWidget(self.info_widget)
sidebar_layout.setSpacing(2)
sidebar_layout.setContentsMargins(0, 0, 0, 0)
# Empty widget with stretch to prevent other widgets from exanding to
# fill
sidebar_layout.addWidget(QWidget(), stretch=1)
sidebar_widget = QWidget()
sidebar_widget.setLayout(sidebar_layout)
sidebar = SideBar()
sidebar.setWidget(sidebar_widget)
# QStatusBar places temporary message at bottom left, which is not
# the behaviour that we require, so create a permanent QLabel to hold
# status messages.
self.status_message = QLabel()
self.status_message.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
self.status_message.setSizePolicy(
QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
)
# Setup status bar
status_bar = self.statusBar()
status_bar.setSizeGripEnabled(False)
status_bar.addPermanentWidget(self.view_summary.widget)
status_bar.addPermanentWidget(self.status_message, stretch=1)
# Stack of views, side bar
self.splitter = QSplitter()
self.splitter.addWidget(self.views)
self.splitter.addWidget(sidebar)
self.splitter.setSizes([600, 200])
# Cookie cutter widget - contained within toolbar
self.cookie_cutter_widget = CookieCutterWidget()
self.cookie_cutter_widget.save_to_new_action.triggered.connect(
self.save_to_cookie_cutter
)
self.cookie_cutter_widget.apply_current_action.triggered.connect(
self.apply_cookie_cutter
)
cookie_cutter_choice().cookie_cutter_changed.connect(
self.new_cookie_cutter
)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.addWidget(self.ribbon)
layout.addWidget(self.splitter)
self.central = QWidget()
self.central.setLayout(layout)
    def modified_changed(self):
        """Slot for the model's modified_changed signal: mirrors the model's
        modified flag in the window's modified indicator.
        """
        debug_print('MainWindow.modified_changed')
        self.setWindowModified(self.model.is_modified)
def eventFilter(self, obj, event):
"Event filter that accepts drag-drop events"
if event.type() in (QEvent.DragEnter, QEvent.Drop):
return True
else:
return super(MainWindow, self).eventFilter(obj, event)
def open_file(self, path=None):
"""Opens path, which can be None, the path to an inselect document or
the path to an image file. If None, the user is prompted to select a
file.
* If a .inselect file, the file is opened
* If an image file for which a .inselect document already exists, the
.inselect file is opened
* If a _thumbnail.jpg file corresponding to an existing .inselect file,
the .inselect file is opened
* If an image file, a new .inselect file is created and opened
"""
debug_print('MainWindow.open_file [{0}]'.format(path))
if not path:
folder = QSettings().value(
'working_directory',
QStandardPaths.writableLocation(QStandardPaths.DocumentsLocation)
)
path, selectedFilter = QFileDialog.getOpenFileName(
self, "Open", folder, self.DOCUMENT_FILE_FILTER
)
# path will be None if user cancelled getOpenFileName
if path:
path = Path(path)
# What type of file did the user select?
document_path = image_path = None
if InselectDocument.EXTENSION == path.suffix:
# An inselect document
document_path = path
elif IMAGE_SUFFIXES_RE.match(path.name):
# Compute the path to the inselect document (which may or
# may not already exist) of the image file
doc_of_image = path.name.replace(InselectDocument.THUMBNAIL_SUFFIX, '')
doc_of_image = path.parent / doc_of_image
doc_of_image = doc_of_image.with_suffix(InselectDocument.EXTENSION)
if doc_of_image.is_file():
# An image file corresponding to an existing .inselect file
document_path = doc_of_image
else:
# An image file
image_path = path
if not self.close_document(document_to_open=document_path):
# User does not want to close the existing document
pass
else:
# Process messages after closing to redraw the UI.
QtWidgets.qApp.processEvents()
if document_path:
# Open the .inselect document
debug_print('Opening inselect document [{0}]'.format(document_path))
self.open_document(path=document_path)
elif image_path:
msg = 'Creating new inselect document for image [{0}]'
debug_print(msg.format(image_path))
self.new_document(image_path)
else:
raise InselectError('Unknown file type [{0}]'.format(path))
def new_document(self, path, default_metadata_items=None):
"""Creates and opens a new inselect document for the scanned image
given in path
"""
debug_print('MainWindow.new_document [{0}]'.format(path))
path = Path(path)
if not path.is_file():
raise InselectError('Image file [{0}] does not exist'.format(path))
else:
# Callable for worker thread
thumbnail_width = user_template_choice().current.thumbnail_width_pixels
class NewDoc(object):
def __init__(self, image, default_metadata_items):
self.image = image
self.default_metadata_items = default_metadata_items
self.document = None
def __call__(self, progress):
progress('Creating thumbnail of scanned image')
doc = ingest_image(self.image, self.image.parent,
thumbnail_width,
self.default_metadata_items,
cookie_cutter_choice().current)
self.document = doc
self.run_in_worker(NewDoc(path, default_metadata_items),
'New document',
self.new_document_finished)
def new_document_finished(self, operation):
"""Called when new_document worker has finished
"""
debug_print('MainWindow.new_document_finished')
document = operation.document
document_path = document.document_path
QSettings().setValue('working_directory', str(document_path.parent))
self.open_document(document=document)
msg = 'New Inselect document [{0}] created in [{1}]'
msg = msg.format(document_path.stem, document_path.parent)
QMessageBox.information(self, "Document created", msg)
def _sync_recent_documents_actions(self):
"Synchronises the 'recent documents' actions"
debug_print('MainWindow._sync_recent_documents_actions')
recent = RecentDocuments().read_paths()
if not recent:
# No recent documents - a single disabled action with placeholder
# text
self.recent_doc_actions[0].setEnabled(False)
self.recent_doc_actions[0].setText('No recent documents')
self.recent_doc_actions[0].setVisible(True)
hide_actions_after = 1
elif len(recent) > len(self.recent_doc_actions):
msg = 'Unexpected number of recent documents [{0}]'
raise ValueError(msg.format(len(recent)))
else:
# Show as many actions as there are recent documents
for index, path, action in zip(count(), recent, self.recent_doc_actions):
action.setEnabled(True)
action.setText(path.stem)
action.setToolTip(str(path))
action.setVisible(True)
hide_actions_after = 1 + index
# Hide all actions after and including 'hide_actions_after'
for action in self.recent_doc_actions[hide_actions_after:]:
action.setVisible(False)
action.setText('')
def open_recent(self, checked=False, index=0):
debug_print('MainWindow._open_recent [{0}]'.format(index))
recent = RecentDocuments().read_paths()
self.open_file(path=recent[index])
def open_document(self, path=None, document=None):
"""Either loads the inselect document from path or uses the existing
InselectDocument given in document.
"""
if path and document:
raise ValueError('Both path and document given')
if path:
path = Path(path)
document = InselectDocument.load(path)
else:
path = document.document_path
debug_print('MainWindow.open_document [{0}]'.format(path))
QSettings().setValue("working_directory", str(path.parent))
self.model.from_document(document)
self.document = document
self.document_path = path
self.time_doc_opened = datetime.utcnow()
self.setWindowTitle('')
self.setWindowFilePath(str(self.document_path))
self.info_widget.set_document(self.document)
RecentDocuments().add_path(path)
self._sync_recent_documents_actions()
self.zoom_home()
self.sync_ui()
if not is_writable(path):
msg = ('The file [{0}] is read-only.\n\n'
'You will not be able to save any changes that you make.')
msg = msg.format(path.name)
QMessageBox.warning(self, "Document is read-only", msg)
    def save_document(self, checked=False):
        """Saves the document and clears the model's modified flag. checked
        exists to match the QAction.triggered signature and is ignored.
        """
        debug_print('MainWindow.save_document')
        self.model.to_document(self.document)
        self.document.save()
        self.model.set_modified(False)
        # Refresh the document information shown in the sidebar
        self.info_widget.set_document(self.document)
def _prompt_validation_problems(self, problems, title, question):
"""Prompts the user with the question and the list of validation
problems. Returns the result of QMessageBox.exec_().
"""
box = QMessageBox(QMessageBox.Question, title, '',
QMessageBox.No | QMessageBox.Yes)
box.setDefaultButton(QMessageBox.No)
show_at_most = | |
= rgb_str(139, 87, 66)
LIGHTSEAGREEN = rgb_str(32, 178, 170)
LIGHTSKYBLUE = rgb_str(135, 206, 250)
LIGHTSKYBLUE1 = rgb_str(176, 226, 255)
LIGHTSKYBLUE2 = rgb_str(164, 211, 238)
LIGHTSKYBLUE3 = rgb_str(141, 182, 205)
LIGHTSKYBLUE4 = rgb_str(96, 123, 139)
LIGHTSLATEBLUE = rgb_str(132, 112, 255)
LIGHTSLATEGRAY = rgb_str(119, 136, 153)
LIGHTSTEELBLUE = rgb_str(176, 196, 222)
LIGHTSTEELBLUE1 = rgb_str(202, 225, 255)
LIGHTSTEELBLUE2 = rgb_str(188, 210, 238)
LIGHTSTEELBLUE3 = rgb_str(162, 181, 205)
LIGHTSTEELBLUE4 = rgb_str(110, 123, 139)
LIGHTYELLOW1 = rgb_str(255, 255, 224)
LIGHTYELLOW2 = rgb_str(238, 238, 209)
LIGHTYELLOW3 = rgb_str(205, 205, 180)
LIGHTYELLOW4 = rgb_str(139, 139, 122)
LIMEGREEN = rgb_str(50, 205, 50)
LINEN = rgb_str(250, 240, 230)
MAGENTA = rgb_str(255, 0, 255)
MAGENTA2 = rgb_str(238, 0, 238)
MAGENTA3 = rgb_str(205, 0, 205)
MAGENTA4 = rgb_str(139, 0, 139)
MANGANESEBLUE = rgb_str(3, 168, 158)
MAROON = rgb_str(128, 0, 0)
MAROON1 = rgb_str(255, 52, 179)
MAROON2 = rgb_str(238, 48, 167)
MAROON3 = rgb_str(205, 41, 144)
MAROON4 = rgb_str(139, 28, 98)
MEDIUMORCHID = rgb_str(186, 85, 211)
MEDIUMORCHID1 = rgb_str(224, 102, 255)
MEDIUMORCHID2 = rgb_str(209, 95, 238)
MEDIUMORCHID3 = rgb_str(180, 82, 205)
MEDIUMORCHID4 = rgb_str(122, 55, 139)
MEDIUMPURPLE = rgb_str(147, 112, 219)
MEDIUMPURPLE1 = rgb_str(171, 130, 255)
MEDIUMPURPLE2 = rgb_str(159, 121, 238)
MEDIUMPURPLE3 = rgb_str(137, 104, 205)
MEDIUMPURPLE4 = rgb_str(93, 71, 139)
MEDIUMSEAGREEN = rgb_str(60, 179, 113)
MEDIUMSLATEBLUE = rgb_str(123, 104, 238)
MEDIUMSPRINGGREEN = rgb_str(0, 250, 154)
MEDIUMTURQUOISE = rgb_str(72, 209, 204)
MEDIUMVIOLETRED = rgb_str(199, 21, 133)
MELON = rgb_str(227, 168, 105)
MIDNIGHTBLUE = rgb_str(25, 25, 112)
MINT = rgb_str(189, 252, 201)
MINTCREAM = rgb_str(245, 255, 250)
MISTYROSE1 = rgb_str(255, 228, 225)
MISTYROSE2 = rgb_str(238, 213, 210)
MISTYROSE3 = rgb_str(205, 183, 181)
MISTYROSE4 = rgb_str(139, 125, 123)
MOCCASIN = rgb_str(255, 228, 181)
NAVAJOWHITE1 = rgb_str(255, 222, 173)
NAVAJOWHITE2 = rgb_str(238, 207, 161)
NAVAJOWHITE3 = rgb_str(205, 179, 139)
NAVAJOWHITE4 = rgb_str(139, 121, 94)
NAVY = rgb_str(0, 0, 128)
OLDLACE = rgb_str(253, 245, 230)
OLIVE = rgb_str(128, 128, 0)
OLIVEDRAB = rgb_str(107, 142, 35)
OLIVEDRAB1 = rgb_str(192, 255, 62)
OLIVEDRAB2 = rgb_str(179, 238, 58)
OLIVEDRAB3 = rgb_str(154, 205, 50)
OLIVEDRAB4 = rgb_str(105, 139, 34)
ORANGE = rgb_str(255, 128, 0)
ORANGE1 = rgb_str(255, 165, 0)
ORANGE2 = rgb_str(238, 154, 0)
ORANGE3 = rgb_str(205, 133, 0)
ORANGE4 = rgb_str(139, 90, 0)
ORANGERED1 = rgb_str(255, 69, 0)
ORANGERED2 = rgb_str(238, 64, 0)
ORANGERED3 = rgb_str(205, 55, 0)
ORANGERED4 = rgb_str(139, 37, 0)
ORCHID = rgb_str(218, 112, 214)
ORCHID1 = rgb_str(255, 131, 250)
ORCHID2 = rgb_str(238, 122, 233)
ORCHID3 = rgb_str(205, 105, 201)
ORCHID4 = rgb_str(139, 71, 137)
PALEGOLDENROD = rgb_str(238, 232, 170)
PALEGREEN = rgb_str(152, 251, 152)
PALEGREEN1 = rgb_str(154, 255, 154)
PALEGREEN2 = rgb_str(144, 238, 144)
PALEGREEN3 = rgb_str(124, 205, 124)
PALEGREEN4 = rgb_str(84, 139, 84)
PALETURQUOISE1 = rgb_str(187, 255, 255)
PALETURQUOISE2 = rgb_str(174, 238, 238)
PALETURQUOISE3 = rgb_str(150, 205, 205)
PALETURQUOISE4 = rgb_str(102, 139, 139)
PALEVIOLETRED = rgb_str(219, 112, 147)
PALEVIOLETRED1 = rgb_str(255, 130, 171)
PALEVIOLETRED2 = rgb_str(238, 121, 159)
PALEVIOLETRED3 = rgb_str(205, 104, 137)
PALEVIOLETRED4 = rgb_str(139, 71, 93)
PAPAYAWHIP = rgb_str(255, 239, 213)
PEACHPUFF1 = rgb_str(255, 218, 185)
PEACHPUFF2 = rgb_str(238, 203, 173)
PEACHPUFF3 = rgb_str(205, 175, 149)
PEACHPUFF4 = rgb_str(139, 119, 101)
PEACOCK = rgb_str(51, 161, 201)
PINK = rgb_str(255, 192, 203)
PINK1 = rgb_str(255, 181, 197)
PINK2 = rgb_str(238, 169, 184)
PINK3 = rgb_str(205, 145, 158)
PINK4 = rgb_str(139, 99, 108)
PLUM = rgb_str(221, 160, 221)
PLUM1 = rgb_str(255, 187, 255)
PLUM2 = rgb_str(238, 174, 238)
PLUM3 = rgb_str(205, 150, 205)
PLUM4 = rgb_str(139, 102, 139)
POWDERBLUE = rgb_str(176, 224, 230)
PURPLE = rgb_str(128, 0, 128)
PURPLE1 = rgb_str(155, 48, 255)
PURPLE2 = rgb_str(145, 44, 238)
PURPLE3 = rgb_str(125, 38, 205)
PURPLE4 = rgb_str(85, 26, 139)
RASPBERRY = rgb_str(135, 38, 87)
RAWSIENNA = rgb_str(199, 97, 20)
RED1 = rgb_str(255, 0, 0)
RED2 = rgb_str(238, 0, 0)
RED3 = rgb_str(205, 0, 0)
RED4 = rgb_str(139, 0, 0)
ROSYBROWN = rgb_str(188, 143, 143)
ROSYBROWN1 = rgb_str(255, 193, 193)
ROSYBROWN2 = rgb_str(238, 180, 180)
ROSYBROWN3 = rgb_str(205, 155, 155)
ROSYBROWN4 = rgb_str(139, 105, 105)
ROYALBLUE = rgb_str(65, 105, 225)
ROYALBLUE1 = rgb_str(72, 118, 255)
ROYALBLUE2 = rgb_str(67, 110, 238)
ROYALBLUE3 = rgb_str(58, 95, 205)
ROYALBLUE4 = rgb_str(39, 64, 139)
SALMON = rgb_str(250, 128, 114)
SALMON1 = rgb_str(255, 140, 105)
SALMON2 = rgb_str(238, 130, 98)
SALMON3 = rgb_str(205, 112, 84)
SALMON4 = rgb_str(139, 76, 57)
SANDYBROWN = rgb_str(244, 164, 96)
SAPGREEN = rgb_str(48, 128, 20)
SEAGREEN1 = rgb_str(84, 255, 159)
SEAGREEN2 = rgb_str(78, 238, 148)
SEAGREEN3 = rgb_str(67, 205, 128)
SEAGREEN4 = rgb_str(46, 139, 87)
SEASHELL1 = rgb_str(255, 245, 238)
SEASHELL2 = rgb_str(238, 229, 222)
SEASHELL3 = rgb_str(205, 197, 191)
SEASHELL4 = rgb_str(139, 134, 130)
SEPIA = rgb_str(94, 38, 18)
SGIBEET = rgb_str(142, 56, 142)
SGIBRIGHTGRAY = rgb_str(197, 193, 170)
SGICHARTREUSE = rgb_str(113, 198, 113)
SGIDARKGRAY = rgb_str(85, 85, 85)
SGIGRAY12 = rgb_str(30, 30, 30)
SGIGRAY16 = rgb_str(40, 40, 40)
SGIGRAY32 = rgb_str(81, 81, 81)
SGIGRAY36 = rgb_str(91, 91, 91)
SGIGRAY52 = rgb_str(132, 132, 132)
SGIGRAY56 = rgb_str(142, 142, 142)
SGIGRAY72 = rgb_str(183, 183, 183)
SGIGRAY76 = rgb_str(193, 193, 193)
SGIGRAY92 = rgb_str(234, 234, 234)
SGIGRAY96 = rgb_str(244, 244, 244)
SGILIGHTBLUE = rgb_str(125, 158, 192)
SGILIGHTGRAY = rgb_str(170, 170, 170)
SGIOLIVEDRAB = rgb_str(142, 142, 56)
SGISALMON = rgb_str(198, 113, 113)
SGISLATEBLUE = rgb_str(113, 113, 198)
SGITEAL = rgb_str(56, 142, 142)
SIENNA = rgb_str(160, 82, 45)
SIENNA1 = rgb_str(255, 130, 71)
SIENNA2 = rgb_str(238, 121, 66)
SIENNA3 = rgb_str(205, 104, 57)
SIENNA4 = rgb_str(139, 71, 38)
SILVER = rgb_str(192, 192, 192)
SKYBLUE = rgb_str(135, 206, 235)
SKYBLUE1 = rgb_str(135, 206, 255)
SKYBLUE2 = rgb_str(126, 192, 238)
SKYBLUE3 = rgb_str(108, 166, 205)
SKYBLUE4 = rgb_str(74, 112, 139)
SLATEBLUE = rgb_str(106, 90, 205)
SLATEBLUE1 = rgb_str(131, 111, 255)
SLATEBLUE2 = rgb_str(122, 103, 238)
SLATEBLUE3 = rgb_str(105, 89, 205)
SLATEBLUE4 = rgb_str(71, 60, 139)
SLATEGRAY = rgb_str(112, 128, 144)
SLATEGRAY1 = rgb_str(198, 226, 255)
SLATEGRAY2 = rgb_str(185, 211, 238)
SLATEGRAY3 = rgb_str(159, 182, 205)
SLATEGRAY4 = rgb_str(108, 123, 139)
SNOW1 = rgb_str(255, 250, 250)
SNOW2 = rgb_str(238, 233, 233)
SNOW3 = rgb_str(205, 201, 201)
SNOW4 = rgb_str(139, 137, 137)
SPRINGGREEN = rgb_str(0, 255, 127)
SPRINGGREEN1 = rgb_str(0, 238, 118)
SPRINGGREEN2 = rgb_str(0, 205, 102)
SPRINGGREEN3 = rgb_str(0, 139, 69)
STEELBLUE = rgb_str(70, 130, 180)
STEELBLUE1 = rgb_str(99, 184, 255)
STEELBLUE2 = rgb_str(92, 172, 238)
STEELBLUE3 = rgb_str(79, 148, 205)
STEELBLUE4 = rgb_str(54, 100, 139)
TAN = rgb_str(210, 180, 140)
TAN1 = rgb_str(255, 165, 79)
TAN2 = rgb_str(238, 154, 73)
TAN3 = rgb_str(205, 133, 63)
TAN4 = rgb_str(139, 90, 43)
TEAL = rgb_str(0, 128, 128)
THISTLE = rgb_str(216, 191, 216)
THISTLE1 = rgb_str(255, 225, 255)
THISTLE2 = rgb_str(238, 210, 238)
THISTLE3 = rgb_str(205, 181, 205)
THISTLE4 = rgb_str(139, 123, 139)
TOMATO1 = rgb_str(255, 99, 71)
TOMATO2 = rgb_str(238, 92, 66)
TOMATO3 = rgb_str(205, 79, 57)
TOMATO4 = rgb_str(139, 54, 38)
TURQUOISE = rgb_str(64, 224, 208)
TURQUOISE1 = rgb_str(0, 245, 255)
TURQUOISE2 = rgb_str(0, 229, 238)
TURQUOISE3 = rgb_str(0, 197, 205)
TURQUOISE4 = rgb_str(0, 134, 139)
TURQUOISEBLUE = rgb_str(0, 199, 140)
VIOLET = rgb_str(238, 130, 238)
VIOLETRED = rgb_str(208, 32, 144)
VIOLETRED1 = rgb_str(255, 62, 150)
VIOLETRED2 = rgb_str(238, 58, 140)
VIOLETRED3 = rgb_str(205, 50, 120)
VIOLETRED4 = rgb_str(139, 34, 82)
WARMGREY = rgb_str(128, 128, 105)
WHEAT = rgb_str(245, 222, 179)
WHEAT1 = rgb_str(255, 231, 186)
WHEAT2 = rgb_str(238, 216, 174)
WHEAT3 = rgb_str(205, 186, 150)
WHEAT4 = rgb_str(139, 126, 102)
WHITE = rgb_str(255, 255, 255)
# Duplicate assignment removed: WHITESMOKE was defined twice with identical values.
WHITESMOKE = rgb_str(245, 245, 245)
YELLOW1 = rgb_str(255, 255, 0)
YELLOW2 = rgb_str(238, 238, 0)
YELLOW3 = rgb_str(205, 205, 0)
YELLOW4 = rgb_str(139, 139, 0)
colors['aliceblue'] = ALICEBLUE
colors['antiquewhite'] = ANTIQUEWHITE
colors['antiquewhite1'] = ANTIQUEWHITE1
colors['antiquewhite2'] = ANTIQUEWHITE2
colors['antiquewhite3'] = ANTIQUEWHITE3
colors['antiquewhite4'] = ANTIQUEWHITE4
colors['aqua'] = AQUA
colors['aquamarine1'] = AQUAMARINE1
colors['aquamarine2'] = AQUAMARINE2
colors['aquamarine3'] = AQUAMARINE3
colors['aquamarine4'] = AQUAMARINE4
colors['azure1'] = AZURE1
colors['azure2'] = AZURE2
colors['azure3'] = AZURE3
colors['azure4'] = AZURE4
colors['banana'] = BANANA
colors['beige'] = BEIGE
colors['bisque1'] = BISQUE1
colors['bisque2'] = BISQUE2
colors['bisque3'] = BISQUE3
colors['bisque4'] = BISQUE4
colors['black'] = BLACK
colors['blanchedalmond'] = BLANCHEDALMOND
colors['blue'] = BLUE
colors['blue2'] = BLUE2
colors['blue3'] = BLUE3
colors['blue4'] = BLUE4
colors['blueviolet'] = BLUEVIOLET
colors['brick'] = BRICK
colors['brown'] = BROWN
colors['brown1'] = BROWN1
colors['brown2'] = BROWN2
colors['brown3'] = BROWN3
colors['brown4'] = BROWN4
colors['burlywood'] = BURLYWOOD
colors['burlywood1'] = BURLYWOOD1
colors['burlywood2'] = BURLYWOOD2
colors['burlywood3'] = BURLYWOOD3
colors['burlywood4'] = BURLYWOOD4
colors['burntsienna'] = BURNTSIENNA
colors['burntumber'] = BURNTUMBER
colors['cadetblue'] = CADETBLUE
colors['cadetblue1'] = CADETBLUE1
colors['cadetblue2'] = CADETBLUE2
colors['cadetblue3'] = CADETBLUE3
colors['cadetblue4'] = CADETBLUE4
colors['cadmiumorange'] = CADMIUMORANGE
colors['cadmiumyellow'] = CADMIUMYELLOW
colors['carrot'] = CARROT
colors['chartreuse1'] = CHARTREUSE1
colors['chartreuse2'] = CHARTREUSE2
colors['chartreuse3'] = CHARTREUSE3
colors['chartreuse4'] = CHARTREUSE4
colors['chocolate'] = CHOCOLATE
colors['chocolate1'] = CHOCOLATE1
colors['chocolate2'] = CHOCOLATE2
colors['chocolate3'] = CHOCOLATE3
colors['chocolate4'] = CHOCOLATE4
colors['cobalt'] = COBALT
colors['cobaltgreen'] = COBALTGREEN
colors['coldgrey'] = COLDGREY
colors['coral'] = CORAL
colors['coral1'] = CORAL1
colors['coral2'] = CORAL2
colors['coral3'] = CORAL3
colors['coral4'] = CORAL4
colors['cornflowerblue'] = CORNFLOWERBLUE
colors['cornsilk1'] = CORNSILK1
colors['cornsilk2'] = CORNSILK2
colors['cornsilk3'] = CORNSILK3
colors['cornsilk4'] = CORNSILK4
colors['crimson'] = CRIMSON
colors['cyan2'] = CYAN2
colors['cyan3'] = CYAN3
colors['cyan4'] = CYAN4
colors['darkgoldenrod'] = DARKGOLDENROD
colors['darkgoldenrod1'] = DARKGOLDENROD1
colors['darkgoldenrod2'] = DARKGOLDENROD2
colors['darkgoldenrod3'] = DARKGOLDENROD3
colors['darkgoldenrod4'] = DARKGOLDENROD4
colors['darkgray'] = DARKGRAY
colors['darkgreen'] = DARKGREEN
colors['darkkhaki'] = DARKKHAKI
colors['darkolivegreen'] = DARKOLIVEGREEN
colors['darkolivegreen1'] = DARKOLIVEGREEN1
colors['darkolivegreen2'] = DARKOLIVEGREEN2
colors['darkolivegreen3'] = DARKOLIVEGREEN3
colors['darkolivegreen4'] = DARKOLIVEGREEN4
colors['darkorange'] = DARKORANGE
colors['darkorange1'] = DARKORANGE1
colors['darkorange2'] = DARKORANGE2
colors['darkorange3'] = DARKORANGE3
colors['darkorange4'] = DARKORANGE4
colors['darkorchid'] = DARKORCHID
colors['darkorchid1'] = DARKORCHID1
colors['darkorchid2'] = DARKORCHID2
colors['darkorchid3'] = DARKORCHID3
colors['darkorchid4'] = DARKORCHID4
colors['darksalmon'] = DARKSALMON
colors['darkseagreen'] = DARKSEAGREEN
colors['darkseagreen1'] = DARKSEAGREEN1
colors['darkseagreen2'] = DARKSEAGREEN2
colors['darkseagreen3'] = DARKSEAGREEN3
colors['darkseagreen4'] = DARKSEAGREEN4
colors['darkslateblue'] = DARKSLATEBLUE
colors['darkslategray'] = DARKSLATEGRAY
colors['darkslategray1'] = DARKSLATEGRAY1
colors['darkslategray2'] = DARKSLATEGRAY2
colors['darkslategray3'] = DARKSLATEGRAY3
colors['darkslategray4'] = DARKSLATEGRAY4
colors['darkturquoise'] = DARKTURQUOISE
colors['darkviolet'] = DARKVIOLET
colors['deeppink1'] = DEEPPINK1
colors['deeppink2'] = DEEPPINK2
colors['deeppink3'] = DEEPPINK3
colors['deeppink4'] = DEEPPINK4
colors['deepskyblue1'] = DEEPSKYBLUE1
colors['deepskyblue2'] = DEEPSKYBLUE2
colors['deepskyblue3'] = DEEPSKYBLUE3
colors['deepskyblue4'] = DEEPSKYBLUE4
# Duplicate assignment removed: colors['dimgray'] was set twice to the same value.
colors['dimgray'] = DIMGRAY
colors['dodgerblue1'] = DODGERBLUE1
colors['dodgerblue2'] = DODGERBLUE2
colors['dodgerblue3'] = DODGERBLUE3
colors['dodgerblue4'] = DODGERBLUE4
colors['eggshell'] = EGGSHELL
colors['emeraldgreen'] = EMERALDGREEN
colors['firebrick'] = FIREBRICK
colors['firebrick1'] = FIREBRICK1
colors['firebrick2'] = FIREBRICK2
colors['firebrick3'] = FIREBRICK3
colors['firebrick4'] = FIREBRICK4
colors['flesh'] = FLESH
colors['floralwhite'] = FLORALWHITE
colors['forestgreen'] = FORESTGREEN
colors['gainsboro'] = GAINSBORO
colors['ghostwhite'] = GHOSTWHITE
colors['gold1'] = GOLD1
colors['gold2'] = GOLD2
colors['gold3'] = GOLD3
colors['gold4'] = GOLD4
colors['goldenrod'] = GOLDENROD
colors['goldenrod1'] = GOLDENROD1
colors['goldenrod2'] = GOLDENROD2
colors['goldenrod3'] = GOLDENROD3
colors['goldenrod4'] = GOLDENROD4
colors['gray'] = GRAY
colors['gray1'] = GRAY1
colors['gray10'] = GRAY10
colors['gray11'] = GRAY11
colors['gray12'] = GRAY12
colors['gray13'] = GRAY13
colors['gray14'] = GRAY14
colors['gray15'] | |
in np.arange(measurement_count[0]):
if row == 0:
if measurement_count[0] == 1: # if final row
# straight line only
x_position_list = np.arange(0, object_size[1])
y_position = np.ceil(image_size[0] * 0.5).astype(int)
for position_x in x_position_list:
raster_point_list.append([y_position, position_x])
else:
# straight line
x_position_list = np.arange(0, object_size[1] - image_size[1])
y_position = np.ceil(image_size[0] * 0.5).astype(int)
for position_x in x_position_list:
raster_point_list.append([y_position, position_x])
# plus corner
for position_x, position_y in corner_gen_fn(image_size, orientation='ru'):
raster_point_list.append([position_y, position_x + object_size[1] - image_size[1]])
elif row % 2: # odd
for position_x, position_y in corner_gen_fn(image_size, orientation='rl'):
raster_point_list.append([position_y + row * image_size[0],
position_x + object_size[1] - image_size[1]])
if measurement_count[0] == row + 1: # final row: straight line
x_position_list = np.arange(0, object_size[1] - image_size[1], -1)
y_position = np.ceil(image_size[0] * 0.5 + row * image_size[0]).astype(int)
for position_x in x_position_list:
raster_point_list.append([y_position, position_x])
else:
# straight portion
x_position_list = np.arange(image_size[1], object_size[1] - image_size[1], -1)
y_position = np.ceil(image_size[0] * 0.5 + row * image_size[0]).astype(int)
for position_x in x_position_list:
raster_point_list.append([y_position, position_x])
# corner
for position_x, position_y in corner_gen_fn(image_size, orientation='lu'):
raster_point_list.append([position_y + row * image_size[0], position_x])
else: # even
for position_x, position_y in corner_gen_fn(image_size, orientation='ll'):
raster_point_list.append([position_y + row * image_size[0], position_x])
if measurement_count[0] == row + 1: # final row: straight line
x_position_list = np.arange(image_size[1], object_size[1])
y_position = np.ceil(image_size[0] * 0.5 + row * image_size[0]).astype(int)
for position_x in x_position_list:
raster_point_list.append([y_position, position_x])
else:
# straight portion
x_position_list = np.arange(image_size[1], object_size[1] - image_size[1])
y_position = np.ceil(image_size[0] * 0.5 + row * image_size[0]).astype(int)
for position_x in x_position_list:
raster_point_list.append([y_position, position_x])
# corner
for position_x, position_y in corner_gen_fn(image_size, orientation='ru'):
raster_point_list.append([position_y + row * image_size[0],
position_x + object_size[1] - image_size[1]])
raster_point_list = np.asarray(raster_point_list)
# Determine number of points per image
points_per_image = np.floor(raster_point_list.shape[0] / np.prod(measurement_count))
measurement_indicies = np.arange(raster_point_list.shape[0])
measurement_indicies = np.floor(measurement_indicies / points_per_image)
# If full_object_multi_pass flag is specified, we want to scan the object backwards
# and forwards multiple times instead of dividing it up into segments.
raster_point_list_segmented = []
for measurement_index in range(np.prod(measurement_count)):
if not full_object_multi_pass:
raster_point_list_segmented.append(raster_point_list[measurement_indicies == measurement_index, :])
elif full_object_multi_pass < 1:
midpoint = int(np.ceil(raster_point_list.shape[0] / 2))
if measurement_index % 2:
if measurement_index % 3:
raster_point_list_segmented.append(raster_point_list[midpoint:, :])
else:
raster_point_list_segmented.append(np.flip(raster_point_list[0:midpoint, :], axis=0))
else:
if measurement_index % 4:
raster_point_list_segmented.append(np.flip(raster_point_list[midpoint:, :], axis=0))
else:
raster_point_list_segmented.append(raster_point_list[0:midpoint, :])
else:
if measurement_index % 2:
raster_point_list_segmented.append(raster_point_list)
else:
raster_point_list_segmented.append(np.flip(raster_point_list, axis=0))
# Transpose points if user desires
if major_axis == 0:
return(np.flip(raster_point_list_segmented, axis=2))
else:
return(raster_point_list_segmented)
def flip_pts(points, image_size, orientations):
    """Apply a sequence of flips to a list of (x, y) points.

    Args:
        points: sequence of (x, y) coordinate pairs
        image_size: (y, x) size used as the mirror extent for 'ud'/'lr'
        orientations: iterable of 'reverse' (reverse point order),
            'ud' (mirror y about image_size[0]) or 'lr' (mirror x about
            image_size[1]); applied in order

    Returns:
        The transformed points (a list of tuples, or an ndarray if the
        last operation applied was 'reverse').
    """
    result = points
    for flip in orientations:
        if flip == 'reverse':
            result = np.flipud(result)
        elif flip == 'ud':
            result = [(p[0], image_size[0] - p[1]) for p in result]
        elif flip == 'lr':
            result = [(image_size[1] - p[0], p[1]) for p in result]
        else:
            assert 0, 'unrecognized orientation'
    return result
# messy separate version for now, eventually merge custom corner logic with everything else to subsume this case
def genLinearRasterMotionPathway(object_size, image_size, full_object_multi_pass=0, measurement_redundancy=1):
    """Generate the points making up a straight-line raster scan of a large
    FOV using a smaller capture FOV.

    Args:
        object_size: Sample size in (y, x) as a numpy array; should be larger than image_size
        image_size: Capture frame size in (y, x) as a numpy array
        full_object_multi_pass: (0) Flag to force kernel generation to scan the full
            object multiple times instead of dividing it into segments. If between
            0 and 1, scans the object in alternating halves.
        measurement_redundancy: redundancy in x; increases the number of
            measurements by this factor

    Returns:
        A list of 2D ndarrays of (y, x) scan points, one per measurement
        segment (axes flipped back if the object's major axis is vertical).
    """
    # Work in a frame where the major (longer) axis is horizontal; flip back at the end
    major_axis = np.argmax(np.asarray(object_size))
    if object_size[0] == object_size[1]:
        major_axis = 1
    if major_axis == 0:
        object_size = np.flip(object_size, 0)
        image_size = np.flip(image_size, 0)
    # NOTE: np.int was removed in NumPy 1.24 — the builtin int is the supported spelling
    measurement_count = np.ceil(object_size / image_size).astype(int)  # two components in x and y
    assert np.any(measurement_count > 1), "image_size must be smaller than object_size!"
    print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
    measurement_count[1] = int(measurement_redundancy * measurement_count[1])
    raster_segments = np.zeros((measurement_count[0] * 2, 2), dtype=int)
    raster_point_list = []
    for row in np.arange(measurement_count[0]):
        # Place the vertical upright (start/end anchors of this row's sweep)
        raster_segments[(2 * row), :] = [np.ceil(image_size[0] * (row + 0.5)).astype(int),
                                         np.ceil(image_size[1] * 0.5).astype(int)]
        raster_segments[(2 * row) + 1, :] = [np.ceil(image_size[0] * (row + 0.5)).astype(int),
                                             np.ceil(object_size[1] - image_size[1] * 0.5).astype(int)]
        # Points for the horizontal scan across this row (one per x position)
        x_position_list = np.arange(0, object_size[1], 1)
        for position_x in x_position_list:
            raster_point_list.append([raster_segments[(2 * row), 0], position_x])
    raster_point_list = np.asarray(raster_point_list)
    # Determine number of points per image
    points_per_image = np.floor(raster_point_list.shape[0] / np.prod(measurement_count))
    measurement_indicies = np.arange(raster_point_list.shape[0])
    measurement_indicies = np.floor(measurement_indicies / points_per_image)
    # If full_object_multi_pass flag is specified, we want to scan the object backwards
    # and forwards multiple times instead of dividing it up into segments.
    raster_point_list_segmented = []
    for measurement_index in range(np.prod(measurement_count)):
        if not full_object_multi_pass:
            # Default: divide the pathway into equal consecutive segments
            raster_point_list_segmented.append(raster_point_list[measurement_indicies == measurement_index, :])
        elif full_object_multi_pass < 1:
            # Scan the object in alternating halves, alternating direction
            midpoint = int(np.ceil(raster_point_list.shape[0] / 2))
            if measurement_index % 2:
                if measurement_index % 3:
                    raster_point_list_segmented.append(raster_point_list[midpoint:, :])
                else:
                    raster_point_list_segmented.append(np.flip(raster_point_list[0:midpoint, :], axis=0))
            else:
                if measurement_index % 4:
                    raster_point_list_segmented.append(np.flip(raster_point_list[midpoint:, :], axis=0))
                else:
                    raster_point_list_segmented.append(raster_point_list[0:midpoint, :])
        else:
            # Full passes over the whole object, alternating direction
            if measurement_index % 2:
                raster_point_list_segmented.append(raster_point_list)
            else:
                raster_point_list_segmented.append(np.flip(raster_point_list, axis=0))
    # Flip axes back if the major axis was vertical
    if major_axis == 0:
        return(np.flip(raster_point_list_segmented, axis=2))
    else:
        return(raster_point_list_segmented)
def genCustomRasterPathway(image_size, object_size, corner_fn, measurement_redundancy=1):
    """Build a hand-coded raster pathway over a 3x3 grid of capture frames.

    Args:
        image_size: capture frame size (numpy array — the assert below divides)
        object_size: sample size; must equal exactly 3 * image_size on both axes
        corner_fn: callable producing corner point paths
            (eg generate_open_corner / generate_diamond_corner)
        measurement_redundancy: if 2, each line is emitted as two half-segments

    Returns:
        list of ndarrays of points, one per segment
    """
    # very rough function, to be improved later
    assert (object_size / image_size == [3, 3]).all(), 'only design for 3x3 grid'
    line_list = []
    # Top row: two straight segments sweeping left to right at mid-frame height
    line = genTwoPointLineBlurposition_list((0, int(image_size[0] / 2)), (image_size[1], int(image_size[0] / 2)))
    line_list.append(line)
    line = genTwoPointLineBlurposition_list(
        (image_size[1], int(image_size[0] / 2)), (2 * image_size[1], int(image_size[0] / 2)))
    line_list.append(line)
    # Right-hand corner pair: turn down into the middle row
    line = corner_fn(image_size, offset=(2 * image_size[1], 0))
    line_list.append(line)
    line = corner_fn(image_size, offset=(2 * image_size[1], image_size[0]), orientations=['ud', 'reverse'])
    line_list.append(line)
    # Middle row: straight segment sweeping right to left
    # NOTE(review): this segment mixes image_size[0] and image_size[1] the
    # opposite way round from the other straight segments — confirm whether
    # that is intentional for non-square frames.
    line = genTwoPointLineBlurposition_list(
        (2 * image_size[0], image_size[1] + int(image_size[1] / 2)), (image_size[0], image_size[1] + int(image_size[1] / 2)))
    line_list.append(line)
    # Left-hand corner pair: turn down into the bottom row
    line = corner_fn(image_size, offset=(0, image_size[0]), orientations=['lr'])
    line_list.append(line)
    line = corner_fn(image_size, offset=(0, 2 * image_size[0]), orientations=['lr', 'ud', 'reverse'])
    line_list.append(line)
    # Bottom row: two straight segments sweeping left to right
    line = genTwoPointLineBlurposition_list(
        (image_size[1], 2 * image_size[0] + int(image_size[0] / 2)), (2 * image_size[1], 2 * image_size[0] + int(image_size[0] / 2)))
    line_list.append(line)
    line = genTwoPointLineBlurposition_list(
        (2 * image_size[1], 2 * image_size[0] + int(image_size[0] / 2)), (3 * image_size[1], 2 * image_size[0] + int(image_size[0] / 2)))
    line_list.append(line)
    point_list_segmented = []
    for line in line_list:
        if measurement_redundancy == 2:
            # Split each line into two half-segments; the final point is
            # dropped (presumably to avoid duplicating the next segment's
            # start — confirm against genTwoPointLineBlurposition_list)
            middle = int(np.floor((len(line) - 1) / 2))
            point_list_segmented.append(np.asarray(line[:middle]))
            point_list_segmented.append(np.asarray(line[middle:-1]))
        else:
            point_list_segmented.append(np.asarray(line[:-1]))
    # for points in point_list_segmented:
    #     points[np.where(points >= 3*image_size)] = 3*image_size[0]-1
    return point_list_segmented
def generate_open_corner(image_size, offset=(0, 0), orientations=()):
    """Generate the point path for an 'open' (two-segment) scan corner.

    Args:
        image_size: capture frame size in (y, x)
        offset: (x, y) offset added to every generated point
        orientations: flips ('ud', 'lr', 'reverse') applied to the template
            points before line generation — see flip_pts.
            NOTE: default changed from the mutable `[]` to an immutable
            tuple; behavior is unchanged (the argument is only iterated).

    Returns:
        ndarray of concatenated line points
    """
    midside = (int(image_size[0] / 2), int(image_size[1] / 2))
    # Template polyline for the corner; coordinates appear to be (x, y)
    # as consumed by flip_pts — TODO confirm axis convention
    diamond_points = [(0, midside[0]), (int(image_size[1] / 3), int(image_size[0] / 6)),
                      (image_size[1], image_size[0])]
    diamond_points = flip_pts(diamond_points, image_size, orientations)
    line_list = []
    # Rasterize each consecutive pair of template points into a line
    for i in range(len(diamond_points) - 1):
        p1 = tuple(p + q for p, q in zip(diamond_points[i], offset))
        p2 = tuple(p + q for p, q in zip(diamond_points[i + 1], offset))
        line = genTwoPointLineBlurposition_list(p1, p2)
        line_list.append(line)
    return np.concatenate(line_list)
def generate_diamond_corner(image_size, offset=(0, 0), orientations=()):
    """Generate the point path for a 'diamond' (three-segment) scan corner.

    Args:
        image_size: capture frame size in (y, x)
        offset: (x, y) offset added to every generated point
        orientations: flips ('ud', 'lr', 'reverse') applied to the template
            points before line generation — see flip_pts.
            NOTE: default changed from the mutable `[]` to an immutable
            tuple; behavior is unchanged (the argument is only iterated).

    Returns:
        ndarray of concatenated line points
    """
    midside = (int(image_size[0] / 2), int(image_size[1] / 2))
    # Template polyline tracing the diamond shape; coordinates appear to be
    # (x, y) as consumed by flip_pts — TODO confirm axis convention
    diamond_points = [(0, midside[0]), (midside[1], int(image_size[0] / 5)), (int(4 * image_size[1] / 5), midside[0]),
                      (midside[1], image_size[0])]
    diamond_points = flip_pts(diamond_points, image_size, orientations)
    line_list = []
    # Rasterize each consecutive pair of template points into a line
    for i in range(len(diamond_points) - 1):
        p1 = tuple(p + q for p, q in zip(diamond_points[i], offset))
        p2 = tuple(p + q for p, q in zip(diamond_points[i + 1], offset))
        line = genTwoPointLineBlurposition_list(p1, p2)
        line_list.append(line)
    return np.concatenate(line_list)
def genRasterMotionPathway_fallback(object_size, image_size, full_object_multi_pass=False):
"""Function which generates a list of points which make up a complete raster scan of a given FOV, given a small capture FOV
Args:
image_size: Capture frame size in (y,x)
object_size: Sample size in (y,x), should be larger than image_size
full_object_multi_pass: (False) Flag to force kernel generation to scan full object multiple times instead of dividing it into segments
Returns:
A 2D ndarray where each value is it's index in the input blur_kernel_map
"""
measurement_count = np.ceil(object_size / image_size).astype(np.int) # two components in x and y
assert np.any(measurement_count > 1), "image_size must be smaller than object_size!"
print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
raster_segments = np.zeros((measurement_count[0] * 2, 2), dtype=np.int)
y_stride = object_size[0] / measurement_count[0]
x_stride = object_size[1] / measurement_count[1]
minor_stride_side = 'r'
raster_point_list = []
for row in np.arange(measurement_count[0]):
# Place the vertical upright
if minor_stride_side == 'r':
raster_segments[(2 * | |
import os
import socket
import typing
from typing import List, Union, AnyStr, Iterable
from collections import OrderedDict
from datetime import datetime, timedelta
import math
import json
import csv
from pathlib import Path
from urllib.parse import urlparse
import base64
from uuid import uuid4
from io import StringIO, BytesIO, BufferedRandom
import backoff
from django.db.utils import IntegrityError
import rows
import paramiko
from paramiko import SSHClient, ssh_exception, RSAKey, AutoAddPolicy
from django.conf import settings
from django.utils import timezone
from django.db import models, transaction
from django.db.models.signals import pre_save, post_save, post_delete
from django.db.models.constraints import UniqueConstraint
from django.db.models import Q
from django.dispatch import receiver
from django.core.handlers.wsgi import WSGIRequest
from django.core.files.storage import get_storage_class, Storage
from django.core.serializers import serialize
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from django.db.models import (
Model,
Manager,
CharField,
URLField,
ForeignKey,
BooleanField,
IntegerField,
DateTimeField,
QuerySet,
)
from django.contrib.postgres.fields import ArrayField
# from django.contrib.auth.models import User
from django.contrib.auth.models import AbstractUser
from guardian.mixins import GuardianUserMixin
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
import reversion
from rest_framework.authtoken.models import Token
from storages.backends.sftpstorage import SFTPStorage, SFTPStorageFile
from .tasks import orchestration
from .util import (
unique,
has_method,
generate_uuid,
generate_secret_key,
find_filename_and_size_from_url,
laxy_sftp_url,
generate_cluster_stack_name,
)
import logging
logger = logging.getLogger(__name__)
if "postgres" not in settings.DATABASES["default"]["ENGINE"]:
from jsonfield import JSONField
else:
from django.contrib.postgres.fields import JSONField
SCHEME_STORAGE_CLASS_MAPPING = {
    "file": "django.core.files.storage.FileSystemStorage",
    "sftp": "storages.backends.sftpstorage.SFTPStorage",
    "laxy+sftp": "storages.backends.sftpstorage.SFTPStorage",
}
"""
Maps URL schemes to Django storage backends that can handle them.
"""

# NOTE(review): the docstring below says "disabled by default" but the value
# here is True — confirm which is the intended default.
CACHE_SFTP_CONNECTIONS = True
"""
If True, use CACHED_SFTP_STORAGE_CLASS_INSTANCES to cache SFTPStorage classes
to allow connection pooling to the same ComputeResource.
Seems buggy, so disabled by default.
"""

CACHED_SFTP_STORAGE_CLASS_INSTANCES = {}
"""
Cached instances on the SFTPStorage class, keyed by ComputeResource.id to allow
connection pooling for SFTP access to the same host.
"""
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """
    post_save hook: create a Django Rest Framework API Token whenever a new
    User record is first created.
    http://www.django-rest-framework.org/api-guide/authentication/#generating-tokens
    """
    if not created:
        return
    Token.objects.create(user=instance)
class URIValidator(URLValidator):
    """
    A validator for generic URIs that also allows additional schemes not
    supported by the default Django URLValidator.
    """

    # Extended scheme list; overrides URLValidator.schemes for the
    # `scheme not in self.schemes` check performed in URLValidator.__call__
    schemes = (
        "http",
        "https",
        "ftp",
        "sftp",
        "laxy+sftp",
        "s3",
        "magnet",
        "file",
    )

    def __call__(self, value):
        """Validate `value`, raising ValidationError on failure."""
        try:
            scheme = urlparse(value).scheme
            # skip additional validation of file, magnet, s3 etc
            # since the regexes used in URLValidator assumes web addresses
            # with hosts/IPs etc
            # NOTE: super().schemes is URLValidator's *default* scheme list,
            # not the extended tuple above — anything outside that default
            # set is accepted after scheme parsing alone.
            if scheme not in super().schemes:
                return
        except ValueError as e:
            # urlparse can raise ValueError on malformed input (eg bad port)
            raise ValidationError(self.message, code=self.code)
        super().__call__(value)
class ExtendedURIField(URLField):
    """URLField variant validated by URIValidator, so it accepts the extra
    schemes (sftp, laxy+sftp, s3, magnet, file, ...)."""
    default_validators = [URIValidator()]
class Timestamped(Model):
    """Abstract mixin adding automatic created/modified timestamps."""

    class Meta:
        abstract = True
        get_latest_by = ["-modified_time"]

    created_time = DateTimeField(auto_now_add=True)  # set once, on creation
    modified_time = DateTimeField(auto_now=True)  # refreshed on every save()
def _job_expiry_datetime():
    """Default job expiry: now plus settings.JOB_EXPIRY_TTL_DEFAULT seconds
    (30 days when the setting is absent)."""
    ttl_seconds = getattr(settings, "JOB_EXPIRY_TTL_DEFAULT", 30 * 24 * 60 * 60)
    return timezone.now() + timedelta(seconds=ttl_seconds)
class Expires(Model):
    """Abstract mixin adding an expiry timestamp and an 'expired' flag."""

    class Meta:
        abstract = True
        get_latest_by = ["-expiry_time"]

    # Defaults to now + settings.JOB_EXPIRY_TTL_DEFAULT (30 days if unset)
    expiry_time = DateTimeField(blank=True, null=True, default=_job_expiry_datetime)
    # Flag recording that expiry has actually been carried out
    # NOTE(review): nothing in this chunk sets it — presumably done by a task elsewhere
    expired = BooleanField(default=False)
class ReadOnlyFlag(Model):
    """
    Mixin that adds a readonly boolean to a model.

    Model will not save/delete when this flag is True (on the instance).
    Setting the readonly flag has the side effect of saving the current instance (but updates ONLY
    the _readonly field on the database, ignoring other modified fields).

    Beware: custom model Managers, raw SQL and bulk operations may not always be prevented from
    modifying the model if they bypass the model instance-level save/delete. The _readonly database
    field can be modified directly - this mixin isn't intended to completely lock a row, just act
    as a convenient pattern to help in typical usage.
    (If making a row readonly really really matters, you might want to do something like:
    https://www.postgresql.org/docs/9.5/ddl-rowsecurity.html).

    eg intended usage:

    >>> class MyReadonlyModel(ReadOnlyFlag, Model):
    >>>     pass

    >>> myreadonlymodel.readonly = False
    >>> myreadonlymodel.save()  # works fine
    >>> myreadonlymodel.readonly = True
    >>> myreadonlymodel.save()
    RuntimeError: Attempting to save readonly model: 1
    """

    class Meta:
        abstract = True

    _readonly = BooleanField(default=False)

    @property
    def readonly(self):
        return self._readonly

    @readonly.setter
    def readonly(self, state):
        # Persist the flag change immediately, touching ONLY the _readonly
        # column so other (possibly unsaved) field edits are not written.
        if state != self._readonly:
            self._readonly = state
            super(ReadOnlyFlag, self).save(update_fields=["_readonly"])

    def save(self, *args, **kwargs):
        # Refuse to persist any change while the instance is readonly.
        if self.readonly:
            raise RuntimeError(f"Attempting to save readonly model: {self.id}")
        super(ReadOnlyFlag, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        # Refuse to delete while the instance is readonly.
        if self.readonly:
            raise RuntimeError(f"Attempting to delete readonly model: {self.id}")
        super(ReadOnlyFlag, self).delete(*args, **kwargs)
class UUIDModel(Model):
    # We don't use the native UUIDField (even though it's more efficient on
    # Postgres) since it makes inspecting the database for the job_id a
    # nuisance.
    # id = UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Alternative: https://github.com/nebstrebor/django-shortuuidfield ?
    # IDEA: If we were to make this NOT abstract and subclass it for various models,
    # we get one big table where we can query UUIDModel.objects.get(id=uuid) to get
    # any of our objects by UUID (Currently won't work due to related_name
    # backreference clashes, but these could be resolved).
    # https://docs.djangoproject.com/en/2.0/topics/db/models/#multi-table-inheritance
    """Abstract base model whose primary key is a short (24-char) UUID-style string."""
    class Meta:
        abstract = True
    id = CharField(
        primary_key=True, editable=False, max_length=24, default=generate_uuid
    )
    def uuid(self):
        """Return the primary key string."""
        return self.id
    def __str__(self):
        # Fix: the original defined only the Python-2-era __unicode__, which
        # Python 3 never calls; __str__ is the Python 3 hook for str(obj).
        return self.id
    # Legacy alias kept for backward compatibility with any explicit callers.
    __unicode__ = __str__
class User(AbstractUser, UUIDModel, GuardianUserMixin):
    """Application user: Django's AbstractUser with a short-UUID primary key
    (via UUIDModel) and django-guardian object-level permission support.
    Adds no fields of its own."""
    pass
class UserProfile(models.Model):
    """Extra per-user data, attached one-to-one to User.

    Shares its primary key with the related User row and is reachable as
    ``user.profile``.
    """
    user = models.OneToOneField(
        User,
        primary_key=True,
        on_delete=models.CASCADE,
        blank=False,
        related_name="profile",
    )
    # NOTE(review): presumably a profile picture / avatar URL — confirm
    # against frontend usage.
    image_url = models.URLField(max_length=2048, blank=True, null=True)
@receiver(post_save, sender=User)
def create_or_update_user_profile(sender, instance, created, **kwargs):
    """post_save signal handler ensuring every User has a UserProfile.

    Creates the profile when the user is first created (or when an existing
    user somehow lacks one), then saves the related profile.
    """
    if created or not UserProfile.objects.filter(user=instance).exists():
        # get_or_create (rather than create) tolerates a concurrent handler
        # having already inserted the profile between the exists() check and
        # the insert, avoiding an IntegrityError on the shared primary key.
        UserProfile.objects.get_or_create(user=instance)
        instance.profile.save()
class JSONSerializable:
    """Mixin giving model instances a ``to_json()`` serialization method."""
    def to_json(self):
        """Return this instance serialized as a JSON string.

        Fix: ``django.core.serializers.serialize`` requires an *iterable* of
        model instances; passing the bare instance raises TypeError, so the
        instance is wrapped in a single-element list.
        """
        return serialize("json", [self])
# class JobParams(JSONSerializable, UUIDModel):
# """
# This class exists as a parent for all pipeline run models (eg JobParams),
# to allow a Job to point to a generic parameter model via a ForeignKey
# (Job->JobParams).
#
# The intention is that as other specific parameter subclasses are created
# (eg ChipSeqPipelineRun, HomologyModellingPipelineRun), we can retrieve them
# via Job.params.
#
# All JobParams subclasses should be JSON serializable (via implementing .to_json())
# such that we can get the JSON representation without knowing the specific type.
#
# We can also retrieve any run parameter set via its UUID without knowing its
# specific type via JobParams.object.get(id=some_params_id).to_json()
#
# https://docs.djangoproject.com/en/2.0/topics/db/models/#multi-table-inheritance
# """
# pass
class EventLog(UUIDModel):
    """Append-only event/audit log entry.

    Records an event name, optional message and JSON payload, the acting
    user (if any), and an optional generic link to any model instance via
    (content_type, object_id).
    """
    class Meta:
        ordering = ["-timestamp"]
        get_latest_by = ["timestamp"]
    user = ForeignKey(
        User,
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name="event_logs",
    )
    timestamp = DateTimeField(default=timezone.now, db_index=True)
    event = CharField(max_length=64)
    message = CharField(max_length=256, blank=True, null=True)
    extra = JSONField(default=OrderedDict)
    content_type = ForeignKey(ContentType, null=True, on_delete=models.SET_NULL)
    # max_length=24 matches the short-UUID primary keys used by UUIDModel.
    object_id = CharField(null=True, max_length=24, db_index=True)
    obj = GenericForeignKey("content_type", "object_id")
    @staticmethod
    def log(event, message="", user=None, extra=None, obj=None, timestamp=None):
        """Create and return an EventLog row.

        :param event: Short event name/code (max 64 chars).
        :param message: Optional human-readable description.
        :param user: Acting User, or None for system events.
        :param extra: Optional JSON-serializable dict of extra data.
        :param obj: Optional model instance to link via the generic FK.
        :param timestamp: Event time; defaults to now.
        :return: The created EventLog instance.
        """
        if extra is None:
            extra = {}
        content_type = None
        object_id = None
        if obj is not None:
            content_type = ContentType.objects.get_for_model(obj)
            object_id = obj.pk
        if timestamp is None:
            timestamp = timezone.now()
        # Use a distinct local name: the original rebound the ``event``
        # parameter here, shadowing the event name argument.
        entry = EventLog.objects.create(
            user=user,
            event=event,
            message=message,
            extra=extra,
            content_type=content_type,
            object_id=object_id,
            timestamp=timestamp,
        )
        return entry
class Pipeline(Timestamped, UUIDModel):
    """
    A record for a pipeline that may be available to run.
    """
    class Meta:
        # Custom object-level permission consumed by allowed_to_run below.
        permissions = (("run_pipeline", "Can run this pipeline"),)
    owner = ForeignKey(
        User, blank=True, null=True, on_delete=models.CASCADE, related_name="pipelines",
    )
    name = CharField(max_length=256)
    # NOTE(review): presumably marks the pipeline as visible/runnable beyond
    # its owner — confirm against the permission-checking views.
    public = BooleanField(default=False)
    # Contains extra pipeline info like:
    # {"versions": ["1.5.3", "1.5.4"]}
    # Ideally we should keep the data here independent
    # of platform specific details (eg shouldn't be specific
    # to a particular ComputeResource).
    metadata = JSONField(default=OrderedDict)
    def allowed_to_run(self, user: User) -> bool:
        """Return True if ``user`` holds the run_pipeline permission on this
        specific pipeline object."""
        return user.has_perm("laxy_backend.run_pipeline", self)
class SystemStatus(Timestamped, UUIDModel):
    """
    A system status message to be displayed to the user, like a MOTD banner
    (eg under conditions where there may be a compute backend outage).
    """
    message = CharField(max_length=256, blank=True, null=True)  # short banner text
    long_message = CharField(max_length=2048, blank=True, null=True)  # expanded detail
    link_url = ExtendedURIField(max_length=2048, blank=True, null=True)  # optional "more info" link
    # NOTE(review): other status values than the "online" default are not
    # visible here — confirm the accepted vocabulary with the frontend.
    status = CharField(max_length=32, default="online")
    active = BooleanField(default=False)  # message is eligible for display
    start_time = DateTimeField(blank=True, null=True)  # becomes shown after this time
    end_time = DateTimeField(blank=True, null=True)  # not shown after this time
    # if two SystemStatus messages are active together, highest priority wins
    priority = IntegerField(default=0)
class ComputeResourceDecommissioned(Exception):
    """Signals that a ComputeResource has been decommissioned
    (permanently unavailable)."""
    pass
@reversion.register()
class ComputeResource(Timestamped, UUIDModel):
# model created, no actual resource yet
STATUS_CREATED = "created"
# actual resource is being created
STATUS_STARTING = "starting"
# resource is online and functioning as expected
STATUS_ONLINE = "online"
# resource isn't functioning correctly. may be temporary.
STATUS_ERROR = "error"
# resource is intentionally offline and shouldn't be used. may be temporary.
STATUS_OFFLINE = "offline"
# resource is being decommissioned and shouldn't be used. permanent.
STATUS_TERMINATING = "terminating"
# resource has been decommissioned and won't be available. permanent.
STATUS_DECOMMISSIONED = "decommissioned"
COMPUTE_STATUS_CHOICES = (
(STATUS_CREATED, "object_created"),
(STATUS_STARTING, "starting"),
(STATUS_ONLINE, "online"),
(STATUS_ERROR, "error"),
(STATUS_OFFLINE, "offline"),
(STATUS_TERMINATING, "terminating"),
(STATUS_DECOMMISSIONED, "decommissioned"),
)
owner = ForeignKey(
User,
blank=True,
null=True,
on_delete=models.CASCADE,
related_name="compute_resources",
)
status = CharField(
max_length=64, choices=COMPUTE_STATUS_CHOICES, default=STATUS_CREATED
)
host = CharField(max_length=255, blank=True, null=True)
gateway_server = CharField(max_length=255, blank=True, null=True)
disposable = BooleanField(default=False)
name = CharField(max_length=128, blank=True, null=True)
priority = IntegerField(default=0)
archive_host = ForeignKey(
"self",
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="source_host",
)
extra = JSONField(default=OrderedDict)
"""
This contains resource type specific data. For a typical SSH-accessible
ComputeResource, it will contain a base_dir, username, private_key, queue_type
and possibly other configuration values.
See views.ComputeResourceCreate.post docs for details.
"""
| |
= smesh.Mesh(mesh)
except:
pass
else:
print "[X] The input object is not a mesh or the Mesh module was not yet loaded."; return
# Get the mesh name
mesh_name = mesh.GetName()
#-
# Renumber elements and nodes
mesh.RenumberNodes()
mesh.RenumberElements()
#-
# Get nodes number and IDs
nb_nodes_in_mesh = mesh.NbNodes()
node_ids_in_mesh = mesh.GetNodesId()
#-
# Get edges IDs
edge_ids_in_mesh = mesh.GetElementsByType(SMESH.EDGE)
nb_edges_in_mesh = mesh.NbEdges()
#-
# Get faces IDs
face_ids_in_mesh = mesh.GetElementsByType(SMESH.FACE)
nb_faces_in_mesh = mesh.NbFaces()
nb_triangles_in_mesh = mesh.NbTriangles()
nb_quadrangles_in_mesh = mesh.NbQuadrangles()
#-
# Get volumes IDs
nb_volumes_in_mesh = mesh.NbVolumes()
nb_tetrahedrons_in_mesh = mesh.NbTetras()
nb_pyramids_in_mesh = mesh.NbPyramids()
nb_prisms_in_mesh = mesh.NbPrisms()
nb_hexahedrons_in_mesh = mesh.NbHexas()
volume_ids_in_mesh = mesh.GetElementsByType(SMESH.VOLUME)
#-
# Get mesh dimension
if nb_volumes_in_mesh != 0:
mesh_dimension = 3
nb_elements_in_domain = nb_volumes_in_mesh
else:
mesh_dimension = 2
nb_elements_in_domain = nb_faces_in_mesh
#-
# Get groups
group_names = mesh.GetGroupNames()
groups = mesh.GetGroups()
#-
# Sort groups
sorted_groups = []
if only != [None]:
for group in groups:
group_name = group.GetName()
if group_name in only:
sorted_groups.append(group)
groups = sorted_groups
sorted_groups = []
if ignore != [None]:
for group in groups:
group_name = group.GetName()
if group_name not in ignore:
sorted_groups.append(group)
groups = sorted_groups
# Get the number of groups
nb_groups = len(groups)
# Get group types
group_types = []
for group in groups:
group_type = str(group.GetType())
group_types.append(group_type)
#-
# Open the amsh file
date = time.asctime(time.localtime())
if file == None:
file = mesh_name
amsh_file = open("%s.amsh"%(file), "w")
amsh_file.write("unstr_grid_data N 0 0 2\n")
amsh_file.write(" title L 1 1 0\n")
amsh_file.write(" '%s exported from Salome on %s'\n"%(mesh_name, date))
#-
# Open the help file
if help == True:
mesh_file = open("%s.help"%(file), "w")
mesh_file.write("%s\n"%(date))
mesh_file.write("'%s' '%s'\n"%(mesh_name, file))
mesh_file.write("NODES EDGES TRIA QUAD TETRA PYRA PRISM HEXA\n")
mesh_file.write("%i %i %i %i %i %i %i %i\n"%(nb_nodes_in_mesh, nb_edges_in_mesh, nb_triangles_in_mesh, nb_quadrangles_in_mesh, nb_tetrahedrons_in_mesh, nb_pyramids_in_mesh, nb_prisms_in_mesh, nb_hexahedrons_in_mesh))
for n in range(nb_groups):
mesh_file.write("'%s' "%(mesh.GetGroupNames()[n]))
mesh_file.write("\n")
mesh_file.write("NODES\nID X Y Z\n")
#-
# Get the region ffa dimension
region_ffa_dimension = 2 + nb_groups
if mesh_dimension == 2:
if nb_triangles_in_mesh > 0:
region_ffa_dimension += 1
if nb_quadrangles_in_mesh > 0:
region_ffa_dimension += 1
elif mesh_dimension == 3:
if nb_tetrahedrons_in_mesh > 0:
region_ffa_dimension += 1
if nb_pyramids_in_mesh > 0:
region_ffa_dimension += 1
if nb_prisms_in_mesh > 0:
region_ffa_dimension += 1
if nb_hexahedrons_in_mesh > 0:
region_ffa_dimension += 1
amsh_file.write(" region N 0 0 %i\n"%(region_ffa_dimension))
amsh_file.write(" region_name L 1 1 0\n")
amsh_file.write(" 'volume_elements'\n")
amsh_file.write(" coordinates DF %i %i 0\n"%(mesh_dimension, nb_nodes_in_mesh))
#-
print "[i] Writing node coordinates... (%s nodes)"%(nb_nodes_in_mesh)
# Get the node coordinates
node_coordinates = []
for n in range(mesh_dimension):
node_coordinates.append([])
#-
# Write the node coordinates
for node_id in node_ids_in_mesh:
if help == True:
mesh_file.write("%i %f %f %f\n"%(node_id, mesh.GetNodeXYZ(node_id)[0], mesh.GetNodeXYZ(node_id)[1], mesh.GetNodeXYZ(node_id)[2]))
for n in range(mesh_dimension):
node_coordinate = mesh.GetNodeXYZ(node_id)[n]
[node_float_coordinate, node_coordinate_power_of_ten] = powerOfTen(node_coordinate)
node_coordinate = "%.16fE%i"%(node_float_coordinate, node_coordinate_power_of_ten)
node_coordinates[n].append(node_coordinate)
figures = []
for n in range(mesh_dimension):
figures += node_coordinates[n]
WriteInColumns(amsh_file, figures, mesh_dimension, 18)
#-
# Get the group element definition
print "[i] Writing definition of group elements... (%s groups)"%(nb_groups)
if help == True:
mesh_file.write("GROUPS\n")
for group in groups:# For each group of the mesh
group_name = group.GetName()
element_ids_in_group = group.GetListOfID()
triangle_ids_in_group = []
quadrangle_ids_in_group = []
edges_ids_in_group = []
for element_id_in_group in element_ids_in_group:
nb_nodes_in_element = mesh.GetElemNbNodes(element_id_in_group)
if mesh_dimension == 3:
if nb_nodes_in_element == 3:
triangle_ids_in_group.append(element_id_in_group)
if nb_nodes_in_element == 4:
quadrangle_ids_in_group.append(element_id_in_group)
elif mesh_dimension == 2:
edges_ids_in_group.append(element_id_in_group)
nb_types_in_group = 0
types_in_groups = 0 #-1 = edges ; + 1 = triangles ; + 2 = quadrangles
nb_triangles_in_group = len(triangle_ids_in_group)
nb_quadrangles_in_group = len(quadrangle_ids_in_group)
nb_edges_in_group = len(edges_ids_in_group)
if nb_triangles_in_group > 0:
types_in_groups += 1
nb_types_in_group += 1
if nb_quadrangles_in_group > 0:
types_in_groups += 2
nb_types_in_group += 1
if nb_edges_in_group > 0:
types_in_groups -= 1
nb_types_in_group += 1
amsh_file.write(" boundary N 0 0 %i\n"%(nb_types_in_group + 1))
amsh_file.write(" boundary_name L 1 1 0\n")
amsh_file.write(" '%s'\n"%(group_name))
if help == True:
mesh_file.write("'%s'\n"%(group_name))
for n in range(nb_types_in_group):
amsh_file.write(" belem_group N 0 0 2\n")
amsh_file.write(" bound_elem_type L 1 1 0\n")
if types_in_groups == -1: # edges
if help == True:
mesh_file.write("EDGES\n")
element_ids_in_group = edges_ids_in_group
nb_elements_in_group = nb_edges_in_group
nb_nodes_in_elements = 2
elements_type = "bar2"
elif types_in_groups == 2: # quadrangles
if help == True:
mesh_file.write("QUAD\n")
element_ids_in_group = quadrangle_ids_in_group
nb_elements_in_group = nb_quadrangles_in_group
nb_nodes_in_elements = 4
elements_type = "quad4"
elif types_in_groups == 1 or types_in_groups == 3: # triangles
if help == True:
mesh_file.write("TRIA\n")
element_ids_in_group = triangle_ids_in_group
nb_elements_in_group = nb_triangles_in_group
nb_nodes_in_elements = 3
types_in_groups -= 1
elements_type = "tria3"
if help == True:
mesh_file.write("N ID NODE1 NODE2 ...\n")
N = 1
amsh_file.write(" '%s'\n"%(elements_type))
amsh_file.write(" bound_elem_nodes IF %i %i 0\n"%(nb_nodes_in_elements, nb_elements_in_group))
node_ids = []
for n in range(nb_nodes_in_elements):
node_ids.append([])
for element_id in element_ids_in_group:
if help == True:
mesh_file.write("%i %i "%(N, element_id))
N += 1
for n in range(nb_nodes_in_elements):
if help == True:
mesh_file.write("%i "%(mesh.GetElemNodes(element_id)[n]))
node_ids[n].append(mesh.GetElemNodes(element_id)[n])
if help == True:
mesh_file.write("\n")
figures = []
for n in range(nb_nodes_in_elements):
figures += node_ids[n]
WriteInColumns(amsh_file, figures, nb_nodes_in_elements, 30)
#-
# Write the domain element definitions
print "[i] Writing definition of domain elements... (%s elements)"%(nb_elements_in_domain)
if help == True:
mesh_file.write("DOMAIN CELLS\n")
triangle_ids_in_domain = []
quadrangle_ids_in_domain = []
tetrahedron_ids_in_domain = []
pyramid_ids_in_domain = []
prism_ids_in_domain = []
hexahedron_ids_in_domain = []
if mesh_dimension == 2:
element_ids_in_domain = face_ids_in_mesh
elif mesh_dimension == 3:
element_ids_in_domain = volume_ids_in_mesh
for element_id_in_domain in element_ids_in_domain:
nb_nodes_in_element = mesh.GetElemNbNodes(element_id_in_domain)
if mesh_dimension == 2:
if nb_nodes_in_element == 3:
triangle_ids_in_domain.append(element_id_in_domain)
if nb_nodes_in_element == 4:
quadrangle_ids_in_domain.append(element_id_in_domain)
elif mesh_dimension == 3:
if nb_nodes_in_element == 4:
tetrahedron_ids_in_domain.append(element_id_in_domain)
if nb_nodes_in_element == 5:
pyramid_ids_in_domain.append(element_id_in_domain)
if nb_nodes_in_element == 6:
prism_ids_in_domain.append(element_id_in_domain)
if nb_nodes_in_element == 8:
hexahedron_ids_in_domain.append(element_id_in_domain)
nb_types_in_domain = 0
types_in_domain = 0 #-2 = quadrangles ; - 1 = triangles ; + 1 = tetrahedrons ; + 2 = pyramids ; + 4 = prisms ; + 8 = hexahedrons
nb_triangles_in_domain = len(triangle_ids_in_domain)
nb_quandrangles_in_domain = len(quadrangle_ids_in_domain)
nb_tetrahedrons_in_domain = len(tetrahedron_ids_in_domain)
nb_pyramids_in_domain = len(pyramid_ids_in_domain)
nb_prisms_in_domain = len(prism_ids_in_domain)
nb_hexahedrons_in_domain = len(hexahedron_ids_in_domain)
if nb_triangles_in_domain > 0:
types_in_domain -= 1
nb_types_in_domain += 1
if nb_quandrangles_in_domain > 0:
types_in_domain -= 2
nb_types_in_domain += 1
if nb_tetrahedrons_in_domain > 0:
types_in_domain += 1
nb_types_in_domain += 1
if nb_pyramids_in_domain > 0:
types_in_domain += 2
nb_types_in_domain += 1
if nb_prisms_in_domain > 0:
types_in_domain += 4
nb_types_in_domain += 1
if nb_hexahedrons_in_domain > 0:
types_in_domain += 8
nb_types_in_domain += 1
types_for_quadrangles = [ - 3, - 2]
types_for_triangles = [ - 3, - 1]
types_for_tetrahedrons = [1, 3, 5, 7, 9, 11, 13, 15]
types_for_pyramids = [2, 3, 6, 7, 10, 11, 14, 15]
types_for_prisms = [4, 5, 6, 7, 12, 13, 14, 15]
types_for_hexahedrons = [8, 9, 10, 11, 12, 13, 14, 15]
for n in range(nb_types_in_domain):
amsh_file.write(" element_group N 0 0 2\n")
amsh_file.write(" element_type L 1 1 0\n")
if types_in_domain in types_for_quadrangles:
if help == True:
mesh_file.write("QUAD\n")
element_ids_in_domain = quadrangle_ids_in_domain
nb_elements_in_domain = nb_quandrangles_in_domain
nb_nodes_in_elements = 4
types_in_domain += 2
elements_type = "quad4"
elif types_in_domain in types_for_triangles:
if help == True:
mesh_file.write("TRIA\n")
element_ids_in_domain = triangle_ids_in_domain
nb_elements_in_domain = nb_triangles_in_domain
nb_nodes_in_elements = 3
types_in_domain += 1
elements_type = "tria3"
elif types_in_domain in types_for_hexahedrons:
if help == True:
mesh_file.write("HEXA\n")
element_ids_in_domain = hexahedron_ids_in_domain
nb_elements_in_domain = nb_hexahedrons_in_domain
nb_nodes_in_elements = 8
types_in_domain -= 8
elements_type = "hexa8"
elif types_in_domain in types_for_prisms:
if help == True:
mesh_file.write("PRISM\n")
element_ids_in_domain = prism_ids_in_domain
nb_elements_in_domain = nb_prisms_in_domain
nb_nodes_in_elements = 6
types_in_domain -= 4
elements_type = "penta6"
elif types_in_domain in types_for_pyramids:
if help == True:
mesh_file.write("PENTA\n")
element_ids_in_domain = pyramid_ids_in_domain
nb_elements_in_domain = nb_pyramids_in_domain
nb_nodes_in_elements = 5
types_in_domain -= 2
elements_type = "penta5"
elif types_in_domain in types_for_tetrahedrons:
if help == True:
mesh_file.write("TETRA\n")
element_ids_in_domain = tetrahedron_ids_in_domain
nb_elements_in_domain = nb_tetrahedrons_in_domain
nb_nodes_in_elements = 4
types_in_domain -= 1
elements_type = "tetra4"
if help == True:
mesh_file.write("N ID NODE1 NODE2 ...\n")
N = 1
amsh_file.write(" '%s'\n"%(elements_type))
amsh_file.write(" element_nodes IF %i %i 0\n"%(nb_nodes_in_elements, nb_elements_in_domain))
node_ids = []
for n in range(nb_nodes_in_elements):
node_ids.append([])
for element_id in element_ids_in_domain:
if help == True:
mesh_file.write("%i %i "%(N, element_id))
N += 1
for n in range(nb_nodes_in_elements):
if help == True:
mesh_file.write("%i "%(mesh.GetElemNodes(element_id)[n]))
node_ids[n].append(mesh.GetElemNodes(element_id)[n])
if help == True:
mesh_file.write("\n")
figures = []
for n in range(nb_nodes_in_elements):
figures += node_ids[n]
if mesh_dimension == 3:
# reorder node IDs
reordered_figures = []
split_figures = []
reordered_split_figures = []
for n in range(nb_nodes_in_elements):
split_figures.append([])
reordered_split_figures.append([])
f = 0
n = 0
for figure in figures:
split_figures[n].append(figure)
f += 1
if f == nb_elements_in_domain:
n += 1
f = 0
if elements_type == "hexa8" or elements_type == "penta6":
for n in range(nb_nodes_in_elements / 2):
reordered_split_figures[n] = split_figures[nb_nodes_in_elements / 2 + n]
reordered_split_figures[nb_nodes_in_elements / 2 + n] = split_figures[n]
for n in range(nb_nodes_in_elements):
reordered_figures += reordered_split_figures[n]
figures = reordered_figures
elif elements_type == "tetra4" or elements_type == "penta5":
for n in range(nb_nodes_in_elements - 1):
reordered_figures += split_figures[nb_nodes_in_elements - 2 - n]
figures = reordered_figures + split_figures[nb_nodes_in_elements - 1]
WriteInColumns(amsh_file, figures, nb_nodes_in_elements, 24)
#-
# Close the files
amsh_file.close()
if help == True:
mesh_file.close()
#-
eaf = ExportAmshFile  # short alias for ExportAmshFile
def ExportSU2File( mesh = None, file = None, only = [None], ignore = [None]):
"""
Description:
Exports a mesh into an .su2 file readable by the CFD solver SU2 4.0.
Arguments:
# mesh
Description: The mesh to export.
Type: Mesh
GUI selection: yes
Selection by name: yes
Recursive: -
Default value: None
# file
Description: The name without extension of the amsh file to write. If equals None, the name of the mesh in the study tree is taken.
Type: String
GUI selection: -
Selection by name: -
Recursive: -
Default value: None
# only
Description: The list of names of groups to export, excluding the others.
Type: List of Strings
GUI selection: -
Selection by name: -
Recursive: -
Default value: [None]
# ignore
Description: The list of names of groups to ignore.
Type: List of Strings
GUI selection: -
Selection by name: -
Recursive: -
Default value: [None]
Returned Values:
"dim" value: -
"single" value: -
Type: -
Number: -
Name: -
Conditions of use:
The mesh has to be computed and to contain groups describing the desired boundary conditions (inlet, outlet, wall, farfield, etc.).
"""
# Get the input shape(s)
mesh = GetGUISelection(mesh, uniq = True)
mesh = GetObject(mesh, "SMESH")
#-
# Check the input shape existence
if "error" in [mesh] or None in [mesh]: return
#-
else:# All checks done
  def FindElementType(mesh_dimension, nb_nodes_in_element, boundary = False):
   """Return the SU2/VTK element type identifier for an element with the
   given node count in the given mesh dimension, or None if unknown.
   When boundary is True the element is a boundary element, whose
   dimension is one less than the mesh dimension."""
   if boundary == True: mesh_dimension -= 1
   # VTK element type identifiers, as used by the SU2 mesh format.
   line_type = 3
   triangle_type = 5
   quadrilateral_type = 9
   tetrahedral_type = 10
   hexahedral_type = 12
   wedge_type = 13
   pyramid_type = 14
   element_type = None
   # Elements are distinguished solely by node count within a dimension.
   if mesh_dimension == 1:
    if nb_nodes_in_element == 2:
     element_type = line_type
   if mesh_dimension == 2:
    if nb_nodes_in_element == 3:
     element_type = triangle_type
    if nb_nodes_in_element == 4:
     element_type = quadrilateral_type
   elif mesh_dimension == 3:
    if nb_nodes_in_element == 4:
     element_type = tetrahedral_type
    if nb_nodes_in_element == 5:
     element_type = pyramid_type
    if nb_nodes_in_element == 6:
     element_type = wedge_type
    if nb_nodes_in_element == 8:
     element_type = hexahedral_type
   return element_type
def powerOfTen(figure):
figure *= 1.0
n = 0
if figure != 0:
if abs(figure) < 1:
while abs(figure) < 1:
figure *= 10
n -= 1
if abs(figure) >= 10:
while abs(figure) >= 10:
figure | |
<filename>tests/sim/test_anneal.py
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains tests for the functions in the ``qubovert.sim._anneal`` file.
"""
from qubovert.sim import (
anneal_qubo, anneal_quso, anneal_pubo, anneal_puso,
AnnealResults, SCHEDULES
)
from qubovert.utils import (
puso_to_pubo, quso_to_qubo, QUBOVertWarning,
QUBOMatrix, QUSOMatrix, PUBOMatrix, PUSOMatrix
)
from qubovert import QUBO, QUSO, PUBO, PUSO, PCBO, PCSO
from numpy.testing import assert_raises, assert_warns
import numpy as np
def test_anneal_puso():
    """Run the anneal_puso test battery against every supported container type."""
    for container_type in (dict, PUSOMatrix, PUSO, PCSO):
        _anneal_puso(container_type)
def _anneal_puso(type_):
    """Exercise anneal_puso with PUSO models stored in container ``type_``:
    argument validation, degenerate models, ground-state recovery on a
    chain model, and result ordering via AnnealResults.sort()."""
    H = type_({(i, i+1, i+2): -1 for i in range(3)})
    # invalid arguments raise / conflicting arguments warn
    with assert_raises(ValueError):
        anneal_puso(H, anneal_duration=-1)
    with assert_raises(ValueError):
        anneal_puso(H, anneal_duration=-2)
    with assert_warns(QUBOVertWarning):
        anneal_puso(H, temperature_range=(1, 2), schedule=[3, 2])
    with assert_warns(QUBOVertWarning):
        # a quadratic model warns that you shouldn't use anneal_puso
        anneal_puso({(0, 1): 1})
    with assert_raises(ValueError):
        anneal_puso(H, temperature_range=(1, 2))
    with assert_raises(ValueError):
        anneal_puso(H, schedule='something')
    # a constant-only model yields 4 identical trivial states (value 2)
    empty_result = AnnealResults()
    for _ in range(4):
        empty_result.add_state({}, 2, True)
    # less than quadratic model so will warn
    with assert_warns(QUBOVertWarning):
        assert anneal_puso({(): 2}, num_anneals=4) == empty_result
    assert anneal_puso(H, num_anneals=0) == AnnealResults()
    assert anneal_puso(H, num_anneals=-1) == AnnealResults()
    # just make sure everything runs
    anneal_puso(H, schedule='linear')
    res = anneal_puso(H, initial_state=[1] * 5)
    for x in res:
        assert all(i in (1, -1) for i in x.state.values())
    # check to see if we find the groundstate of a simple but largeish model.
    H = type_({(i, i+1): -1 for i in range(30)})
    # quadratic model so will warn
    with assert_warns(QUBOVertWarning):
        res = anneal_puso(H, num_anneals=4, seed=0)
    # ferromagnetic chain: ground state is all-up or all-down, energy -30
    assert res.best.state in (
        dict(enumerate([1]*31)), dict(enumerate([-1]*31))
    )
    assert res.best.value == -30
    assert len([x for x in res]) == 4
    # check to see if we find the groundstate of same but out of order
    # quadratic so will warn
    with assert_warns(QUBOVertWarning):
        res = anneal_puso(H, num_anneals=4, in_order=False, seed=0)
    assert res.best.state in (
        dict(enumerate([1]*31)), dict(enumerate([-1]*31))
    )
    assert res.best.value == -30
    assert len([x for x in res]) == 4
    # make sure we run branch where an explicit schedule is given and no
    # temperature range is supplied
    # quadratic so will warn
    with assert_warns(QUBOVertWarning):
        anneal_puso(H, schedule=[3, 2])
    # make sure it works with fields
    res = anneal_puso(
        type_({(0, 1, 2): 1, (1,): -1, (): 2}),
        num_anneals=10
    )
    assert len(res) == 10
    res.sort()
    for i in range(9):
        assert res[i].value <= res[i + 1].value
    # bigish ordering
    res = anneal_puso(
        type_(
            {(i, j, j + 1): 1 for i in range(70) for j in range(i+1, 70)}
        ),
        num_anneals=20
    )
    assert len(res) == 20
    res.sort()
    for i in range(19):
        assert res[i].value <= res[i + 1].value
def test_anneal_quso():
    """Run the anneal_quso test battery against every supported container type."""
    for container_type in (dict, QUSOMatrix, PUSOMatrix, QUSO, PUSO, PCSO):
        _anneal_quso(container_type)
def _anneal_quso(type_):
    """Exercise anneal_quso with QUSO models stored in container ``type_``:
    argument validation, degenerate models, ground-state recovery on a
    chain model, and result ordering via AnnealResults.sort()."""
    L = type_({(i, i+1): -1 for i in range(3)})
    # invalid arguments raise / conflicting arguments warn
    with assert_raises(ValueError):
        anneal_quso(L, anneal_duration=-1)
    with assert_raises(ValueError):
        anneal_quso(L, anneal_duration=-2)
    with assert_warns(QUBOVertWarning):
        anneal_quso(L, temperature_range=(1, 2), schedule=[3, 15])
    with assert_raises(ValueError):
        anneal_quso(L, temperature_range=(1, 2))
    with assert_raises(ValueError):
        anneal_quso(L, schedule='something')
    # a constant-only model yields 4 identical trivial states (value 2)
    empty_result = AnnealResults()
    for _ in range(4):
        empty_result.add_state({}, 2, True)
    assert anneal_quso({(): 2}, num_anneals=4) == empty_result
    assert anneal_quso(L, num_anneals=0) == AnnealResults()
    assert anneal_quso(L, num_anneals=-1) == AnnealResults()
    # just make sure everything runs
    anneal_quso(L, schedule='linear')
    res = anneal_quso(L, initial_state=[1] * 5)
    for x in res:
        assert all(i in (1, -1) for i in x.state.values())
    # check to see if we find the groundstate of a simple but largeish model.
    L = type_({(i, i+1): -1 for i in range(30)})
    res = anneal_quso(L, num_anneals=4, seed=0)
    # ferromagnetic chain: ground state is all-up or all-down, energy -30
    assert res.best.state in (
        dict(enumerate([1]*31)), dict(enumerate([-1]*31))
    )
    assert res.best.value == -30
    assert len([x for x in res]) == 4
    # check to see if we find the groundstate of the same but out of order
    res = anneal_quso(L, num_anneals=4, in_order=False, seed=0)
    assert res.best.state in (
        dict(enumerate([1]*31)), dict(enumerate([-1]*31))
    )
    assert res.best.value == -30
    assert len([x for x in res]) == 4
    # make sure we run branch where an explicit schedule is given and no
    # temperature range is supplied
    anneal_quso(L, schedule=[3] * 10 + [2] * 15)
    # make sure it works with fields
    res = anneal_quso(type_({(0, 1): 1, (1,): -1, (): 2}), num_anneals=10)
    assert len(res) == 10
    res.sort()
    for i in range(9):
        assert res[i].value <= res[i + 1].value
    # big ordering
    res = anneal_quso(
        type_({(i, j): 1 for i in range(70) for j in range(i+1, 70)}),
        num_anneals=20
    )
    assert len(res) == 20
    res.sort()
    for i in range(19):
        assert res[i].value <= res[i + 1].value
def test_anneal_pubo():
    """Run the anneal_pubo test battery against every supported container type."""
    for container_type in (dict, PUBOMatrix, PUBO, PCBO):
        _anneal_pubo(container_type)
def _anneal_pubo(type_):
    """Exercise anneal_pubo with boolean models stored in container ``type_``:
    argument validation, degenerate models, ground-state recovery on a
    (converted) chain model, and result ordering via AnnealResults.sort()."""
    P = type_(puso_to_pubo({(i, i+1, i+2): -1 for i in range(3)}))
    # invalid arguments raise / conflicting arguments warn
    with assert_raises(ValueError):
        anneal_pubo(P, anneal_duration=-1)
    with assert_raises(ValueError):
        anneal_pubo(P, anneal_duration=-2)
    with assert_warns(QUBOVertWarning):
        anneal_pubo(P, temperature_range=(1, 2), schedule=[3, 2])
    with assert_warns(QUBOVertWarning):
        # a quadratic model warns that you shouldn't use anneal_pubo
        anneal_pubo({(0, 1): 1})
    with assert_raises(ValueError):
        anneal_pubo(P, temperature_range=(1, 2))
    with assert_raises(ValueError):
        anneal_pubo(P, schedule='something')
    # a constant-only model yields 4 identical trivial states (value 2)
    empty_result = AnnealResults()
    for _ in range(4):
        empty_result.add_state({}, 2, False)
    # less than quadratic so will warn
    with assert_warns(QUBOVertWarning):
        assert anneal_pubo({(): 2}, num_anneals=4) == empty_result
    assert anneal_pubo(P, num_anneals=0) == AnnealResults()
    assert anneal_pubo(P, num_anneals=-1) == AnnealResults()
    # just make sure everything runs
    anneal_pubo(P, schedule='linear')
    res = anneal_pubo(P, initial_state=[1] * 5)
    for x in res:
        assert all(i in (0, 1) for i in x.state.values())
    # check to see if we find the groundstate of a simple but largeish model.
    P = type_(puso_to_pubo({(i, i+1): -1 for i in range(30)}))
    # quadratic so will warn
    with assert_warns(QUBOVertWarning):
        res = anneal_pubo(P, num_anneals=4, seed=0)
    # converted ferromagnetic chain: ground state is all-0 or all-1, energy -30
    assert res.best.state in (
        dict(enumerate([0]*31)), dict(enumerate([1]*31))
    )
    assert res.best.value == -30
    assert len([x for x in res]) == 4
    # check to see if we find the groundstate of same but out of order
    # quadratic so will warn
    with assert_warns(QUBOVertWarning):
        res = anneal_pubo(P, num_anneals=4, in_order=False, seed=0)
    assert res.best.state in (
        dict(enumerate([0]*31)), dict(enumerate([1]*31))
    )
    assert res.best.value == -30
    assert len([x for x in res]) == 4
    # make sure we run branch where an explicit schedule is given and no
    # temperature range is supplied
    # quadratic so will warn
    with assert_warns(QUBOVertWarning):
        anneal_pubo(P, schedule=[3] * 10 + [2] * 15)
    # make sure it works with fields
    res = anneal_pubo(type_({(0, 1, 2): 1, (1,): -1, (): 2}), num_anneals=10)
    assert len(res) == 10
    res.sort()
    for i in range(9):
        assert res[i].value <= res[i + 1].value
    # bigish ordering
    res = anneal_pubo(
        type_(
            {(i, j, j + 1): 1 for i in range(70) for j in range(i+1, 70)}
        ),
        num_anneals=20
    )
    assert len(res) == 20
    res.sort()
    for i in range(19):
        assert res[i].value <= res[i + 1].value
def test_anneal_qubo():
    """Run the anneal_qubo test battery against every supported container type."""
    for container_type in (dict, QUBOMatrix, PUBOMatrix, QUBO, PUBO, PCBO):
        _anneal_qubo(container_type)
def _anneal_qubo(type_):
Q = type_(quso_to_qubo({(i, i+1): -1 for i in range(3)}))
with assert_raises(ValueError):
anneal_qubo(Q, anneal_duration=-1)
with assert_raises(ValueError):
anneal_qubo(Q, anneal_duration=-2)
with assert_warns(QUBOVertWarning):
anneal_qubo(Q, temperature_range=(1, 2), schedule=[3, 2])
with assert_raises(ValueError):
anneal_qubo(Q, temperature_range=(1, 2))
with assert_raises(ValueError):
anneal_qubo(Q, schedule='something')
empty_result = AnnealResults()
for _ in range(4):
empty_result.add_state({}, 2, False)
assert anneal_qubo({(): 2}, num_anneals=4) == empty_result
assert anneal_qubo(Q, num_anneals=0) == AnnealResults()
assert anneal_qubo(Q, num_anneals=-1) == AnnealResults()
# just make sure everything runs
anneal_qubo(Q, schedule='linear')
res = anneal_qubo(Q, initial_state=[1] * 5)
for x in res:
assert all(i in (0, 1) for i in x.state.values())
# check to see if we find the groundstate of a simple but largeish model.
Q = type_(quso_to_qubo({(i, i+1): -1 for i in range(30)}))
res = anneal_qubo(Q, num_anneals=4, seed=0)
assert res.best.state in (
dict(enumerate([0]*31)), dict(enumerate([1]*31))
)
assert res.best.value == -30
assert len([x for x in res]) == 4
# check to see if we find the groundstate of the same but out of order
res = anneal_qubo(Q, num_anneals=4, in_order=False, seed=0)
assert res.best.state in (
dict(enumerate([0]*31)), dict(enumerate([1]*31))
)
assert res.best.value == -30
assert len([x for x in res]) == 4
# make sure we run branch where an explicit schedule is given and no
# temperature range is supplied
anneal_qubo(Q, schedule=[3] * 10 + [2] * 15)
# make sure it works with fields
res = anneal_qubo(type_({(0, 1): 1, (1,): -1, (): 2}), num_anneals=10)
assert | |
2000-07-23
<Field Y-26:sex> M
>>> show_elements (f_p, "ui_display")
<Entity X-26> <NAME>
<Field X-26:last_name> Tanzer
<Field X-26:first_name> Christian
<Field X-26:middle_name>
<Field X-26:title>
<Field_Composite X-26:lifetime> 1959-09-26
<Field X-26:lifetime.start> 1959-09-26
<Field X-26:lifetime.finish>
<Field X-26:sex>
>>> show_elements (f_p, "essence")
<Entity X-26> ('tanzer', 'christian', '', '')
<Field X-26:last_name> ('tanzer', 'christian', '', '')
<Field X-26:first_name> ('tanzer', 'christian', '', '')
<Field X-26:middle_name> ('tanzer', 'christian', '', '')
<Field X-26:title> ('tanzer', 'christian', '', '')
<Field_Composite X-26:lifetime> ('1959-09-26', )
<Field X-26:lifetime.start> ('1959-09-26', )
<Field X-26:lifetime.finish> ('1959-09-26', )
<Field X-26:sex> ('tanzer', 'christian', '', '')
>>> show_elements (f_p, "Entity.essence")
<Entity X-26> ('tanzer', 'christian', '', '')
<Field X-26:last_name> ('tanzer', 'christian', '', '')
<Field X-26:first_name> ('tanzer', 'christian', '', '')
<Field X-26:middle_name> ('tanzer', 'christian', '', '')
<Field X-26:title> ('tanzer', 'christian', '', '')
<Field_Composite X-26:lifetime> ('tanzer', 'christian', '', '')
<Field X-26:lifetime.start> ('tanzer', 'christian', '', '')
<Field X-26:lifetime.finish> ('tanzer', 'christian', '', '')
<Field X-26:sex> ('tanzer', 'christian', '', '')
>>> show_elements (f_Person, "root")
<Entity X-26> <Entity X-26>
<Field X-26:last_name> <Entity X-26>
<Field X-26:first_name> <Entity X-26>
<Field X-26:middle_name> <Entity X-26>
<Field X-26:title> <Entity X-26>
<Field_Composite X-26:lifetime> <Entity X-26>
<Field X-26:lifetime.start> <Entity X-26>
<Field X-26:lifetime.finish> <Entity X-26>
<Field X-26:sex> <Entity X-26>
>>> show_elements (f_Person, "Entity")
<Entity X-26> <Entity X-26>
<Field X-26:last_name> <Entity X-26>
<Field X-26:first_name> <Entity X-26>
<Field X-26:middle_name> <Entity X-26>
<Field X-26:title> <Entity X-26>
<Field_Composite X-26:lifetime> <Entity X-26>
<Field X-26:lifetime.start> <Entity X-26>
<Field X-26:lifetime.finish> <Entity X-26>
<Field X-26:sex> <Entity X-26>
>>> show_elements (f_Person_z, "Entity")
<Entity Z-26> <Entity Z-26>
<Field Z-26:last_name> <Entity Z-26>
<Field Z-26:first_name> <Entity Z-26>
<Field Z-26:middle_name> <Entity Z-26>
<Field Z-26:title> <Entity Z-26>
<Field_Composite Z-26:lifetime> <Entity Z-26>
<Field Z-26:lifetime.start> <Entity Z-26>
<Field Z-26:lifetime.finish> <Entity Z-26>
<Field Z-26:sex> <Entity Z-26>
<Field_Rev_Ref Z-26:phones> <Entity Z-26>
>>> show_elements (f_Person, "template_macro")
<Entity X-26> Entity_Form
<Field X-26:last_name> Field
<Field X-26:first_name> Field
<Field X-26:middle_name> Field
<Field X-26:title> Field
<Field_Composite X-26:lifetime> Field_Composite
<Field X-26:lifetime.start> Field
<Field X-26:lifetime.finish> Field
<Field X-26:sex> Field
>>> show_elements (f_Person, "template_module")
<Entity X-26> mf3
<Field X-26:last_name> None
<Field X-26:first_name> None
<Field X-26:middle_name> None
<Field X-26:title> None
<Field_Composite X-26:lifetime> mf3_h_cols
<Field X-26:lifetime.start> None
<Field X-26:lifetime.finish> None
<Field X-26:sex> None
>>> show_elements (f_Person_z ["phones"].proto, "parent")
<class Entity_Rev_Ref Z-26:phones> <class Field_Rev_Ref Z-26:phones>
<class Field_Entity Z-26:phones::right> <class Entity_Rev_Ref Z-26:phones>
<class Field Z-26:phones::right.cc> <class Field_Entity Z-26:phones::right>
<class Field Z-26:phones::right.ndc> <class Field_Entity Z-26:phones::right>
<class Field Z-26:phones::right.sn> <class Field_Entity Z-26:phones::right>
<class Field Z-26:phones::extension> <class Entity_Rev_Ref Z-26:phones>
<class Field Z-26:phones::desc> <class Entity_Rev_Ref Z-26:phones>
<class Field_Ref_Hidden Z-26:phones::left> <class Entity_Rev_Ref Z-26:phones>
<class Field Z-26:phones::left.last_name> <class Field_Ref_Hidden Z-26:phones::left>
<class Field Z-26:phones::left.first_name> <class Field_Ref_Hidden Z-26:phones::left>
<class Field Z-26:phones::left.middle_name> <class Field_Ref_Hidden Z-26:phones::left>
<class Field Z-26:phones::left.title> <class Field_Ref_Hidden Z-26:phones::left>
>>> show_elements (f_p_z, "Entity")
<Entity Z-26> <Entity Z-26>
<Field Z-26:last_name> <Entity Z-26>
<Field Z-26:first_name> <Entity Z-26>
<Field Z-26:middle_name> <Entity Z-26>
<Field Z-26:title> <Entity Z-26>
<Field_Composite Z-26:lifetime> <Entity Z-26>
<Field Z-26:lifetime.start> <Entity Z-26>
<Field Z-26:lifetime.finish> <Entity Z-26>
<Field Z-26:sex> <Entity Z-26>
<Field_Rev_Ref Z-26:phones> <Entity Z-26>
<Entity_Rev_Ref Z-26:phones@3> <Entity_Rev_Ref Z-26:phones@3>
<Field_Entity Z-26:phones::right@3> <Entity_Rev_Ref Z-26:phones@3>
<Field Z-26:phones::right.cc@3> <Field_Entity Z-26:phones::right@3>
<Field Z-26:phones::right.ndc@3> <Field_Entity Z-26:phones::right@3>
<Field Z-26:phones::right.sn@3> <Field_Entity Z-26:phones::right@3>
<Field Z-26:phones::extension@3> <Entity_Rev_Ref Z-26:phones@3>
<Field Z-26:phones::desc@3> <Entity_Rev_Ref Z-26:phones@3>
<Field_Ref_Hidden Z-26:phones::left@3> <Entity_Rev_Ref Z-26:phones@3>
>>> show_elements (f_p_z, "essence")
<Entity Z-26> ('tanzer', 'christian', '', '')
<Field Z-26:last_name> ('tanzer', 'christian', '', '')
<Field Z-26:first_name> ('tanzer', 'christian', '', '')
<Field Z-26:middle_name> ('tanzer', 'christian', '', '')
<Field Z-26:title> ('tanzer', 'christian', '', '')
<Field_Composite Z-26:lifetime> ('1959-09-26', )
<Field Z-26:lifetime.start> ('1959-09-26', )
<Field Z-26:lifetime.finish> ('1959-09-26', )
<Field Z-26:sex> ('tanzer', 'christian', '', '')
<Field_Rev_Ref Z-26:phones> ('tanzer', 'christian', '', '')
<Entity_Rev_Ref Z-26:phones@3> (('tanzer', 'christian', '', ''), ('43', '1', '98765432'), '42')
<Field_Entity Z-26:phones::right@3> ('43', '1', '98765432')
<Field Z-26:phones::right.cc@3> ('43', '1', '98765432')
<Field Z-26:phones::right.ndc@3> ('43', '1', '98765432')
<Field Z-26:phones::right.sn@3> ('43', '1', '98765432')
<Field Z-26:phones::extension@3> (('tanzer', 'christian', '', ''), ('43', '1', '98765432'), '42')
<Field Z-26:phones::desc@3> (('tanzer', 'christian', '', ''), ('43', '1', '98765432'), '42')
<Field_Ref_Hidden Z-26:phones::left@3> ('tanzer', 'christian', '', '')
>>> show_elements (f_p_z, "label")
<Entity Z-26> Person
<Field Z-26:last_name> Last name
<Field Z-26:first_name> First name
<Field Z-26:middle_name> Middle name
<Field Z-26:title> Academic title
<Field_Composite Z-26:lifetime> Lifetime
<Field Z-26:lifetime.start> Start
<Field Z-26:lifetime.finish> Finish
<Field Z-26:sex> Sex
<Field_Rev_Ref Z-26:phones> Phones
<Entity_Rev_Ref Z-26:phones@3> Person has Phone
<Field_Entity Z-26:phones::right@3> Phone
<Field Z-26:phones::right.cc@3> Country code
<Field Z-26:phones::right.ndc@3> Network destination code
<Field Z-26:phones::right.sn@3> Subscriber number
<Field Z-26:phones::extension@3> Extension
<Field Z-26:phones::desc@3> Description
<Field_Ref_Hidden Z-26:phones::left@3> Person
>>> show_elements (f_p_z, "_po_index")
<Entity Z-26> None
<Field Z-26:last_name> None
<Field Z-26:first_name> None
<Field Z-26:middle_name> None
<Field Z-26:title> None
<Field_Composite Z-26:lifetime> None
<Field Z-26:lifetime.start> None
<Field Z-26:lifetime.finish> None
<Field Z-26:sex> None
<Field_Rev_Ref Z-26:phones> None
<Entity_Rev_Ref Z-26:phones@3> None
<Field_Entity Z-26:phones::right@3> None
<Field Z-26:phones::right.cc@3> None
<Field Z-26:phones::right.ndc@3> None
<Field Z-26:phones::right.sn@3> None
<Field Z-26:phones::extension@3> None
<Field Z-26:phones::desc@3> None
<Field_Ref_Hidden Z-26:phones::left@3> None
>>> show_elements (f_p_z, "po_index")
<Entity Z-26> 0
<Field Z-26:last_name> 1
<Field Z-26:first_name> 2
<Field Z-26:middle_name> 3
<Field Z-26:title> 4
<Field_Composite Z-26:lifetime> 5
<Field Z-26:lifetime.start> 6
<Field Z-26:lifetime.finish> 7
<Field Z-26:sex> 8
<Field_Rev_Ref Z-26:phones> 9
<Entity_Rev_Ref Z-26:phones@3> 10
<Field_Entity Z-26:phones::right@3> 11
<Field Z-26:phones::right.cc@3> 12
<Field Z-26:phones::right.ndc@3> 13
<Field Z-26:phones::right.sn@3> 14
<Field Z-26:phones::extension@3> 15
<Field Z-26:phones::desc@3> 16
<Field_Ref_Hidden Z-26:phones::left@3> 17
>>> show_elements (f_p_z, "_po_index")
<Entity Z-26> 0
<Field Z-26:last_name> 1
<Field Z-26:first_name> 2
<Field Z-26:middle_name> 3
<Field Z-26:title> 4
<Field_Composite Z-26:lifetime> 5
<Field Z-26:lifetime.start> 6
<Field Z-26:lifetime.finish> 7
<Field Z-26:sex> 8
<Field_Rev_Ref Z-26:phones> 9
<Entity_Rev_Ref Z-26:phones@3> 10
<Field_Entity Z-26:phones::right@3> 11
<Field Z-26:phones::right.cc@3> 12
<Field Z-26:phones::right.ndc@3> 13
<Field Z-26:phones::right.sn@3> 14
<Field Z-26:phones::extension@3> 15
<Field Z-26:phones::desc@3> 16
<Field_Ref_Hidden Z-26:phones::left@3> 17
>>> f_p_z.reset_once_properties ()
>>> print (f_p_z, f_p_z._po_index)
<Entity Z-26> None
>>> f_p_z ["Z-26:phones"]
<Field_Rev_Ref Z-26:phones>
>>> f_p_z ["Z-26:phones@3"]
<Entity_Rev_Ref Z-26:phones@3>
>>> show_elements (f_p_z, "id")
<Entity Z-26> Z-26
<Field Z-26:last_name> Z-26:last_name
<Field Z-26:first_name> Z-26:first_name
<Field Z-26:middle_name> Z-26:middle_name
<Field Z-26:title> Z-26:title
<Field_Composite Z-26:lifetime> Z-26:lifetime
<Field Z-26:lifetime.start> Z-26:lifetime.start
<Field Z-26:lifetime.finish> Z-26:lifetime.finish
<Field Z-26:sex> Z-26:sex
<Field_Rev_Ref Z-26:phones> Z-26:phones
<Entity_Rev_Ref Z-26:phones@3> Z-26:phones@3
<Field_Entity Z-26:phones::right@3> Z-26:phones::right@3
<Field Z-26:phones::right.cc@3> Z-26:phones::right.cc@3
<Field Z-26:phones::right.ndc@3> Z-26:phones::right.ndc@3
<Field Z-26:phones::right.sn@3> Z-26:phones::right.sn@3
<Field Z-26:phones::extension@3> Z-26:phones::extension@3
<Field Z-26:phones::desc@3> Z-26:phones::desc@3
<Field_Ref_Hidden Z-26:phones::left@3> Z-26:phones::left@3
>>> show_elements (f_p_z, "index")
<Entity Z-26>
<Field Z-26:last_name>
<Field Z-26:first_name>
<Field Z-26:middle_name>
<Field Z-26:title>
<Field_Composite Z-26:lifetime>
<Field Z-26:lifetime.start>
<Field Z-26:lifetime.finish>
<Field Z-26:sex>
<Field_Rev_Ref Z-26:phones>
<Entity_Rev_Ref Z-26:phones@3> @3
<Field_Entity Z-26:phones::right@3> @3
<Field Z-26:phones::right.cc@3> @3
<Field Z-26:phones::right.ndc@3> @3
<Field Z-26:phones::right.sn@3> @3
<Field Z-26:phones::extension@3> @3
<Field Z-26:phones::desc@3> @3
<Field_Ref_Hidden Z-26:phones::left@3> @3
>>> show_elements (f_p_z, "parent")
<Entity Z-26> None
<Field Z-26:last_name> <Entity Z-26>
<Field Z-26:first_name> <Entity Z-26>
<Field Z-26:middle_name> <Entity Z-26>
<Field Z-26:title> <Entity Z-26>
<Field_Composite Z-26:lifetime> <Entity Z-26>
<Field Z-26:lifetime.start> <Field_Composite Z-26:lifetime>
<Field Z-26:lifetime.finish> <Field_Composite Z-26:lifetime>
<Field Z-26:sex> <Entity Z-26>
<Field_Rev_Ref Z-26:phones> <Entity Z-26>
<Entity_Rev_Ref Z-26:phones@3> <Field_Rev_Ref Z-26:phones>
<Field_Entity Z-26:phones::right@3> <Entity_Rev_Ref Z-26:phones@3>
<Field Z-26:phones::right.cc@3> <Field_Entity Z-26:phones::right@3>
<Field Z-26:phones::right.ndc@3> <Field_Entity Z-26:phones::right@3>
<Field Z-26:phones::right.sn@3> <Field_Entity Z-26:phones::right@3>
<Field Z-26:phones::extension@3> <Entity_Rev_Ref Z-26:phones@3>
<Field Z-26:phones::desc@3> <Entity_Rev_Ref Z-26:phones@3>
<Field_Ref_Hidden Z-26:phones::left@3> <Entity_Rev_Ref Z-26:phones@3>
>>> show_elements (f_p_z, "q_name")
<Entity Z-26> None
<Field Z-26:last_name> last_name
<Field Z-26:first_name> first_name
<Field Z-26:middle_name> middle_name
<Field Z-26:title> title
<Field_Composite Z-26:lifetime> lifetime
<Field Z-26:lifetime.start> lifetime.start
<Field Z-26:lifetime.finish> lifetime.finish
<Field Z-26:sex> sex
<Field_Rev_Ref Z-26:phones> phones
<Entity_Rev_Ref Z-26:phones@3> phones
<Field_Entity Z-26:phones::right@3> phones.right
<Field Z-26:phones::right.cc@3> phones.right.cc
<Field Z-26:phones::right.ndc@3> phones.right.ndc
<Field Z-26:phones::right.sn@3> phones.right.sn
<Field Z-26:phones::extension@3> phones.extension
<Field Z-26:phones::desc@3> phones.desc
<Field_Ref_Hidden Z-26:phones::left@3> phones.left
>>> show_elements (f_p_z, "r_name")
<Entity Z-26> ---
<Field Z-26:last_name> last_name
<Field Z-26:first_name> first_name
<Field Z-26:middle_name> middle_name
<Field Z-26:title> title
<Field_Composite Z-26:lifetime> lifetime
<Field Z-26:lifetime.start> lifetime.start
<Field Z-26:lifetime.finish> lifetime.finish
<Field Z-26:sex> sex
<Field_Rev_Ref Z-26:phones> phones
<Entity_Rev_Ref Z-26:phones@3> ---
<Field_Entity Z-26:phones::right@3> right
<Field Z-26:phones::right.cc@3> cc
<Field Z-26:phones::right.ndc@3> ndc
<Field Z-26:phones::right.sn@3> sn
<Field Z-26:phones::extension@3> extension
<Field Z-26:phones::desc@3> desc
<Field_Ref_Hidden Z-26:phones::left@3> left
>>> for e in f_p.entity_elements :
... print (e, portable_repr (sorted (getattr (e, "_Element_Map", []))))
<Entity X-26> ['X-26:first_name', 'X-26:last_name', 'X-26:lifetime', 'X-26:lifetime.finish', 'X-26:lifetime.start', 'X-26:middle_name', 'X-26:sex', 'X-26:title', 'first_name', 'last_name', 'lifetime', 'lifetime.finish', 'lifetime.start', 'middle_name', 'sex', 'title']
>>> for e in f_p_z.entity_elements :
... print (e, portable_repr (sorted (getattr (e, "_Element_Map", []))))
<Entity Z-26> ['Z-26:first_name', 'Z-26:last_name', 'Z-26:lifetime', 'Z-26:lifetime.finish', 'Z-26:lifetime.start', 'Z-26:middle_name', 'Z-26:phones', 'Z-26:phones::desc@3', 'Z-26:phones::extension@3', 'Z-26:phones::left.first_name@3', 'Z-26:phones::left.last_name@3', 'Z-26:phones::left.middle_name@3', 'Z-26:phones::left.title@3', 'Z-26:phones::left@3', 'Z-26:phones::right.cc@3', 'Z-26:phones::right.ndc@3', 'Z-26:phones::right.sn@3', 'Z-26:phones::right@3', 'Z-26:phones@3', 'Z-26:sex', 'Z-26:title', 'first_name', 'last_name', 'lifetime', 'lifetime.finish', 'lifetime.start', 'middle_name', 'phones', 'phones.desc', 'phones.extension', 'phones.left', 'phones.left.first_name', 'phones.left.last_name', 'phones.left.middle_name', 'phones.left.title', 'phones.right', 'phones.right.cc', 'phones.right.ndc', 'phones.right.sn', 'sex', 'title']
<Entity_Rev_Ref Z-26:phones@3> ['Z-26:phones::desc@3', 'Z-26:phones::extension@3', 'Z-26:phones::left.first_name@3', 'Z-26:phones::left.last_name@3', 'Z-26:phones::left.middle_name@3', 'Z-26:phones::left.title@3', 'Z-26:phones::left@3', 'Z-26:phones::right.cc@3', 'Z-26:phones::right.ndc@3', 'Z-26:phones::right.sn@3', 'Z-26:phones::right@3', 'desc', 'extension', 'left', 'left.first_name', 'left.last_name', 'left.middle_name', 'left.title', 'phones.desc', 'phones.extension', 'phones.left', 'phones.left.first_name', 'phones.left.last_name', 'phones.left.middle_name', 'phones.left.title', 'phones.right', 'phones.right.cc', 'phones.right.ndc', 'phones.right.sn', 'right', 'right.cc', 'right.ndc', 'right.sn']
<Field_Entity | |
# Source repository: AmirS2/sagemaker-python-sdk
# Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
import json
import logging
import tempfile
from six.moves.urllib.parse import urlparse
from sagemaker.amazon import validation
from sagemaker.amazon.hyperparameter import Hyperparameter as hp # noqa
from sagemaker.amazon.common import write_numpy_to_dense_tensor
from sagemaker.estimator import EstimatorBase, _TrainingJob
from sagemaker.inputs import FileSystemInput
from sagemaker.model import NEO_IMAGE_ACCOUNT
from sagemaker.session import s3_input
from sagemaker.utils import sagemaker_timestamp, get_ecr_image_uri_prefix
from sagemaker.xgboost.estimator import get_xgboost_image_uri
from sagemaker.xgboost.defaults import XGBOOST_LATEST_VERSION
logger = logging.getLogger(__name__)
class AmazonAlgorithmEstimatorBase(EstimatorBase):
    """Base class for Amazon first-party Estimator implementations. This class
    isn't intended to be instantiated directly.
    """
    # ``hp`` descriptors validate assignments (both values must be > 0 ints)
    # and are serialized collectively by ``hyperparameters()``.
    feature_dim = hp("feature_dim", validation.gt(0), data_type=int)
    mini_batch_size = hp("mini_batch_size", validation.gt(0), data_type=int)
    # Overridden by subclasses to identify the algorithm's container image.
    repo_name = None
    repo_version = None
    def __init__(
        self, role, train_instance_count, train_instance_type, data_location=None, **kwargs
    ):
        """Initialize an AmazonAlgorithmEstimatorBase.
        Args:
            role: An AWS IAM role (name or full ARN); forwarded to
                :class:`~sagemaker.estimator.EstimatorBase`.
            train_instance_count: Number of EC2 instances to use for training;
                forwarded to ``EstimatorBase``.
            train_instance_type: Type of EC2 instance to use for training;
                forwarded to ``EstimatorBase``.
            data_location (str or None): The s3 prefix to upload RecordSet
                objects to, expressed as an S3 url. For example
                "s3://example-bucket/some-key-prefix/". Objects will be saved in
                a unique sub-directory of the specified location. If None, a
                default data location will be used.
            **kwargs: Additional keyword arguments forwarded to
                :class:`~sagemaker.estimator.EstimatorBase`.
        """
        super(AmazonAlgorithmEstimatorBase, self).__init__(
            role, train_instance_count, train_instance_type, **kwargs
        )
        # Default to a "sagemaker-record-sets" prefix in the session's
        # default bucket when no explicit location was given.
        data_location = data_location or "s3://{}/sagemaker-record-sets/".format(
            self.sagemaker_session.default_bucket()
        )
        self._data_location = data_location
    def train_image(self):
        """Return the image URI of this algorithm's training container.
        NOTE(review): relies on ``get_image_uri``, which is not imported in
        this file's header — presumably defined elsewhere in this module;
        verify.
        """
        return get_image_uri(
            self.sagemaker_session.boto_region_name, type(self).repo_name, type(self).repo_version
        )
    def hyperparameters(self):
        """Return this estimator's hyperparameters serialized as a dict."""
        return hp.serialize_all(self)
    @property
    def data_location(self):
        """The S3 prefix (str) under which ``RecordSet`` objects are uploaded."""
        return self._data_location
    @data_location.setter
    def data_location(self, data_location):
        """Set the S3 prefix used when uploading ``RecordSet`` objects.
        Args:
            data_location (str): An S3 URL beginning with "s3://"; a trailing
                "/" is appended when missing.
        Raises:
            ValueError: If ``data_location`` does not start with "s3://".
        """
        if not data_location.startswith("s3://"):
            raise ValueError(
                'Expecting an S3 URL beginning with "s3://". Got "{}"'.format(data_location)
            )
        # Normalize to a directory-style prefix.
        if data_location[-1] != "/":
            data_location = data_location + "/"
        self._data_location = data_location
    @classmethod
    def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
        """Convert the job description to init params that can be handled by the
        class constructor.
        Args:
            job_details: the returned job details from a describe_training_job
                API call.
            model_channel_name (str): Name of the channel where pre-trained
                model data will be downloaded.
        Returns:
            dictionary: The transformed init_params
        """
        init_params = super(
            AmazonAlgorithmEstimatorBase, cls
        )._prepare_init_params_from_job_description(job_details, model_channel_name)
        # The hyperparam names may not be the same as the class attribute that holds them,
        # for instance: local_lloyd_init_method is called local_init_method. We need to map these
        # and pass the correct name to the constructor.
        for attribute, value in cls.__dict__.items():
            if isinstance(value, hp):
                if value.name in init_params["hyperparameters"]:
                    init_params[attribute] = init_params["hyperparameters"][value.name]
        # The raw hyperparameter dict and image URI are not constructor
        # arguments, so drop them from the result.
        del init_params["hyperparameters"]
        del init_params["image"]
        return init_params
    def _prepare_for_training(self, records, mini_batch_size=None, job_name=None):
        """Set hyperparameters needed for training.
        Args:
            records (:class:`~RecordSet`): The records to train this ``Estimator`` on.
            mini_batch_size (int or None): The size of each mini-batch to use when
                training. If ``None``, a default value will be used.
            job_name (str): Name of the training job to be created. If not
                specified, one is generated, using the base name given to the
                constructor if applicable.
        Raises:
            ValueError: If ``records`` is a list without a "train" channel.
        """
        super(AmazonAlgorithmEstimatorBase, self)._prepare_for_training(job_name=job_name)
        feature_dim = None
        if isinstance(records, list):
            # With multiple channels, the feature dimension is taken from the
            # mandatory "train" channel.
            for record in records:
                if record.channel == "train":
                    feature_dim = record.feature_dim
                    break
            if feature_dim is None:
                raise ValueError("Must provide train channel.")
        else:
            feature_dim = records.feature_dim
        self.feature_dim = feature_dim
        self.mini_batch_size = mini_batch_size
    def fit(self, records, mini_batch_size=None, wait=True, logs=True, job_name=None):
        """Fit this Estimator on serialized Record objects, stored in S3.
        ``records`` should be an instance of :class:`~RecordSet`. This
        defines a collection of S3 data files to train this ``Estimator`` on.
        Training data is expected to be encoded as dense or sparse vectors in
        the "values" feature on each Record. If the data is labeled, the label
        is expected to be encoded as a list of scalas in the "values" feature of
        the Record label.
        More information on the Amazon Record format is available at:
        https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-training.html
        See :meth:`~AmazonAlgorithmEstimatorBase.record_set` to construct a
        ``RecordSet`` object from :class:`~numpy.ndarray` arrays.
        Args:
            records (:class:`~RecordSet`): The records to train this ``Estimator`` on
            mini_batch_size (int or None): The size of each mini-batch to use
                when training. If ``None``, a default value will be used.
            wait (bool): Whether the call should wait until the job completes
                (default: True).
            logs (bool): Whether to show the logs produced by the job. Only
                meaningful when wait is True (default: True).
            job_name (str): Training job name. If not specified, the estimator
                generates a default job name, based on the training image name
                and current timestamp.
        """
        self._prepare_for_training(records, job_name=job_name, mini_batch_size=mini_batch_size)
        self.latest_training_job = _TrainingJob.start_new(self, records)
        if wait:
            self.latest_training_job.wait(logs=logs)
    def record_set(self, train, labels=None, channel="train", encrypt=False):
        """Build a :class:`~RecordSet` from a numpy :class:`~ndarray` matrix and
        label vector.
        For the 2D ``ndarray`` ``train``, each row is converted to a
        :class:`~Record` object. The vector is stored in the "values" entry of
        the ``features`` property of each Record. If ``labels`` is not None,
        each corresponding label is assigned to the "values" entry of the
        ``labels`` property of each Record.
        The collection of ``Record`` objects are protobuf serialized and
        uploaded to new S3 locations. A manifest file is generated containing
        the list of objects created and also stored in S3.
        The number of S3 objects created is controlled by the
        ``train_instance_count`` property on this Estimator. One S3 object is
        created per training instance.
        Args:
            train (numpy.ndarray): A 2D numpy array of training data.
            labels (numpy.ndarray): A 1D numpy array of labels. Its length must
                be equal to the number of rows in ``train``.
            channel (str): The SageMaker TrainingJob channel this RecordSet
                should be assigned to.
            encrypt (bool): Specifies whether the objects uploaded to S3 are
                encrypted on the server side using AES-256 (default: ``False``).
        Returns:
            RecordSet: A RecordSet referencing the encoded, uploading training
            and label data.
        """
        s3 = self.sagemaker_session.boto_session.resource("s3")
        parsed_s3_url = urlparse(self.data_location)
        bucket, key_prefix = parsed_s3_url.netloc, parsed_s3_url.path
        # Each upload goes into a unique, timestamped sub-directory.
        key_prefix = key_prefix + "{}-{}/".format(type(self).__name__, sagemaker_timestamp())
        key_prefix = key_prefix.lstrip("/")
        logger.debug("Uploading to bucket %s and key_prefix %s", bucket, key_prefix)
        # NOTE(review): ``upload_numpy_to_s3_shards`` is not imported in this
        # file's header — presumably defined elsewhere in this module; verify.
        manifest_s3_file = upload_numpy_to_s3_shards(
            self.train_instance_count, s3, bucket, key_prefix, train, labels, encrypt
        )
        logger.debug("Created manifest file %s", manifest_s3_file)
        return RecordSet(
            manifest_s3_file,
            num_records=train.shape[0],
            feature_dim=train.shape[1],
            channel=channel,
        )
class RecordSet(object):
    """A reference to protobuf-serialized Amazon Record data stored in S3."""
    def __init__(
        self, s3_data, num_records, feature_dim, s3_data_type="ManifestFile", channel="train"
    ):
        """A collection of Amazon :class:~`Record` objects serialized and stored
        in S3.
        Args:
            s3_data (str): The S3 location of the training data
            num_records (int): The number of records in the set.
            feature_dim (int): The dimensionality of "values" arrays in the
                Record features, and label (if each Record is labeled).
            s3_data_type (str): Valid values: 'S3Prefix', 'ManifestFile'. If
                'S3Prefix', ``s3_data`` defines a prefix of s3 objects to train
                on. All objects with s3 keys beginning with ``s3_data`` will be
                used to train. If 'ManifestFile', then ``s3_data`` defines a
                single s3 manifest file, listing each s3 object to train on.
            channel (str): The SageMaker Training Job channel this RecordSet
                should be bound to
        """
        # Assignment order matters: __repr__ reflects __dict__ insertion order.
        self.s3_data = s3_data
        self.feature_dim = feature_dim
        self.num_records = num_records
        self.s3_data_type = s3_data_type
        self.channel = channel
    def __repr__(self):
        """Return an unambiguous representation of this RecordSet."""
        return repr((RecordSet, self.__dict__))
    def data_channel(self):
        """Return a dictionary mapping this RecordSet's channel name to its
        ``s3_input``, suitable for passing to ``fit()``.
        """
        channel_input = self.records_s3_input()
        return {self.channel: channel_input}
    def records_s3_input(self):
        """Return a s3_input describing this RecordSet's training data."""
        return s3_input(
            self.s3_data, distribution="ShardedByS3Key", s3_data_type=self.s3_data_type
        )
class FileSystemRecordSet(object):
"""Amazon SageMaker channel configuration for a file system data source
for Amazon algorithms.
"""
def __init__(
self,
file_system_id,
file_system_type,
directory_path,
num_records,
feature_dim,
file_system_access_mode="ro",
channel="train",
):
"""Initialize a ``FileSystemRecordSet`` object.
Args:
file_system_id (str): An Amazon file system ID starting with 'fs-'.
file_system_type (str): The type of file system used for the input.
Valid values: 'EFS', 'FSxLustre'.
directory_path (str): Absolute or normalized path to the root directory (mount point) in
the file system. Reference:
https://docs.aws.amazon.com/efs/latest/ug/mounting-fs.html and
https://docs.aws.amazon.com/efs/latest/ug/wt1-test.html
num_records (int): The number of records in the set.
feature_dim (int): The dimensionality of "values" arrays in the Record features,
and label (if each Record is labeled).
file_system_access_mode (str): Permissions for read and write.
Valid values: 'ro' or 'rw'. Defaults to 'ro'.
channel (str): The SageMaker Training Job channel this RecordSet should be bound to
| |
"""
This function will calculate the interval, at which an annotation shoud be showed
for example
7456 observations, and the user wants 13 Annotations
7456/13 = 573.56 ~ each 573th observation will receive an annotation
The user will then be able to use modulo (%) to plot an annotation equal amount of times
Parameters
----------
size: int
Size of the dataset (total amnt of obervations)
amount_anno: int
The amount of annotations that the user wants
"""
return int(math.floor(size/amount_anno)) #floor to get rid of decimals
#BBox is for the annotation style
bbox = dict(boxstyle = "round", fc = "0.8") #for annotation
for df in dataframe_list:
#For each dataframe (vessel queried)
df_lenght = len(df) #getting the lenght of the currrent dataframe
#Extracting the values for plotting & in workable format conver to lists
lat = df['lat'].values
long = df['long'].values
sog = df['sog'].values
cog = df['cog'].values
datetime_df = pd.to_datetime(df['datetimestamp'])
#These paramater chages the colour of the vessel limit the amount of colours
df_change_colour = math.floor(df_lenght/150) #there is 150 colours to choose from
colour = 0 #incremental, denotes the startig colour
max_sog = max(sog) #used to scale the marker size of the plot
offset = 60000 #Annotation offset amount
x,y = m(lat,long) #convert long lat to basemap coordinates
if(chk_annotation_status.get() == 1): #if annotations where selected
annotation_amount = annotateInterval(size = df_lenght,
amount_anno =int(grid_Annotation_amt.get()))
annotation_at_zero = 1 #set varaible to 1 if include annotation make 0
if(chk_annotation_speed_status.get() == 1):
annotation_at_zero = 0 #include observations from 0
#plotting observations of each dataframe
for i in range(1, df_lenght):
sog_marker_plot = math.ceil( (sog[i]/max_sog)*3)
#normalizing and scaling the different speeds for different marker sizes
if(sog_marker_plot == 0): #if zero, scale up. cannot have 0 marker size
sog_marker_plot +=1
if(df_change_colour != 0): #Avoid devide by zero if an observation has less than 150 observations
if((i%df_change_colour == 0) and (colour < 149)): #149 amt of shades
colour += 1 #change colour shade
#Adding annotations
if(chk_annotation_status.get() == 1):
if((i%annotation_amount == 0) and sog[i] >= annotation_at_zero): #only annotate if speed is larger than
#^^^^^^^iterator % interval amnt , if == 0, good division, annotate
plt.annotate(
annotateString(sog[i],cog[i],datetime_df[i]),
(x[i],y[i]),
xytext = ((x[i]+int(0.5*offset),y[i]-offset)),
ha = 'center', #centre text
wrap = True, #wrap text
fontsize = 5.5,
arrowprops = dict(arrowstyle = "->",facecolor = 'black'), bbox = bbox) #annotation arrows
#Adding Arrows & stationary points
if(chk_annotation_arrow.get() == 1):
#Scaling the size
sog_scaled = getArrowSize(sog[i])
arrow_radius = (sog_scaled) #speed^4 / 2, to have an exponential scale, and divide by 2 to scale down
arrow_angle = (cog[i]-90)*(-1) #adjusting the scale of the basmap to compas degrees
#cog == course over ground in degrees, needs to be converted because basemap 0° is other way aroun(anti clock wize)
angle_rad = arrow_angle * math.pi/180 #conver to radiants
if(i%10 == 0): #plot every 10th arrow
arrow_head_dimentions = sog_scaled*0.7 #arrow head dimentions, also related to the speed
plt.arrow(x[i],y[i],
(arrow_radius)*math.cos(angle_rad), #drawing the arrow (get correspondin coordinate values)
(arrow_radius)*math.sin(angle_rad),
head_width = arrow_head_dimentions, #width of the arrow head
head_length = 100+arrow_head_dimentions, #lenght of the arrow head
linewidth = 2, #arrow line width
fc= (colour_palettes[col_pal])[colour], #arrow colours
ec =(colour_palettes[col_pal])[colour])
#plot static observations
m.plot([x[i-1],x[i]],[y[i-1],y[i]],"o-", linewidth = 1,
markersize=2, c = (colour_palettes[col_pal])[colour]) #different colours
#Indicate stationary times
if (sog[i] <0.5): #0.5 speed wat which a boat is slow
circle = plt.Circle((x[i],y[i]),1000, color = '#9d00ff', fill = True)
plt.gcf().gca().add_artist(circle)
col_pal +=1 #change colour palet
#Draw basemap settings
m.drawmapboundary(fill_color='#dcf6fa')
m.drawcoastlines()
m.fillcontinents(color = '#a8a8a8', lake_color = '#1d6bb5') #fill continents
m.drawrivers(color = '#1d6bb5') #draw rivers
#Adding a legend to basemap
legend_elements =[]
legend_elements.append(Line2D([0], [0],color='b', lw=5, label=entry_MMSI.get()))
legend_colours = ['b','g','r']
legend_counter = 1 # to add appropriate colour to legend
#if present add vessel 2
if(chk_ship_2_status.get() == 1):
legend_elements.append(Line2D([0], [0],color=legend_colours[legend_counter]
, lw=5, label=grid_Ship_2.get()))
legend_counter += 1 #add if 3 is also chosen
#if present add vessel 3
if(chk_ship_3_status.get() == 1):
legend_elements.append(Line2D([0], [0],color=legend_colours[legend_counter],
lw=5, label=grid_Ship_3.get()))
#Adding long and lat lines intervals to the maps
parallels = np.arange(0.,60.,2.) #increments of the longitude
m.drawparallels(parallels,labels=[False,True,True,False], fontsize = 14)
meridians = np.arange(-10.,40.,2.)
m.drawmeridians(meridians,labels=[True,False,False,True], fontsize = 14)
plt.legend(handles = legend_elements, loc = 'best', fontsize = 14,
title = "MMSI's",title_fontsize = 14) #add legend at best location
plt.tight_layout(pad = 4) #stretch plot
plt.xlabel("Longitude", fontsize = 15, labelpad = 25)
plt.ylabel("Latitude", fontsize = 15, labelpad = 30)
plt.title("Static plot of vessels", pad = 5, size = 18)
plt.get_current_fig_manager().full_screen_toggle()
plt.show() #start plot
def annotationObjectState(status):
    """Toggle the widgets that control track annotations.

    Parameters
    ----------
    status : int
        1 enables the annotation widgets, any other value disables them.
    """
    enabled = (status == 1)
    if enabled:
        # Make the annotation-amount dropdown and speed checkbox usable
        # and preselect a default annotation amount (index 11).
        grid_Annotation_amt['state'] = "enable"
        chk_annotation_speed['state'] = 'active'
        grid_Annotation_amt.current(11)
    else:
        # Grey out both annotation widgets.
        grid_Annotation_amt['state'] = "disabled"
        chk_annotation_speed['state'] = 'disabled'
def objectStatiMMSIState(objState):
    """Apply *objState* to every widget of the static-map MMSI workflow.

    Parameters
    ----------
    objState : string
        Widget state to apply (e.g. 'enable' or 'disable'); the checkbox
        widgets are switched to 'active'/'disabled' accordingly.
    """
    # Comboboxes and the plot button take the state string verbatim.
    for widget in (grid_Ship_2, grid_Ship_3, btn_staticPlot, grid_quality):
        widget["state"] = objState
    if objState == 'enable':
        for checkbox in (chk_ship2, chk_ship3, chk_annotation, chk_arrows):
            checkbox["state"] = "active"
    else:
        # The speed-annotation checkbox is only ever disabled here,
        # never re-enabled (that is handled by annotationObjectState).
        for checkbox in (chk_ship2, chk_ship3, chk_annotation,
                         chk_annotation_speed, chk_arrows):
            checkbox["state"] = "disabled"
# Dataframe of observations for the MMSI the user entered; intended to be
# populated by addGridData() once the MMSI has been validated, so later
# plots can reuse it instead of re-querying the database.
mmsi_main_df = []
def searchMMSI_static():
    """Validate the MMSI the user typed before allowing a static plot.

    Runs a single-row lookup against the database. On success the MMSI
    widgets are enabled and addGridData() is invoked; on a missing MMSI or
    any query failure the widgets are disabled and an error popup shown.
    """
    all_tests_passed = False
    try:
        # SECURITY: entry_MMSI.get() is user input that is concatenated
        # straight into SQL. Coercing through int() rejects non-numeric
        # input (raising ValueError into the handler below) and prevents
        # SQL injection; ideally readDatabase would take bound parameters.
        mmsi = int(entry_MMSI.get())
        db_query = ("SELECT mmsi FROM " + str(database_table) +
                    " WHERE mmsi = " + str(mmsi) + " LIMIT 1;")
        temp = readDatabase(db_query)  # get information from the database
        if len(temp) == 0:  # MMSI not present in the table
            btn_staticPlot['state'] = 'disabled'  # user cannot proceed or plot
            objectStatiMMSIState("disable")
            popMsg("MMSI does not exist / Zero Observations")
        else:
            all_tests_passed = True
            objectStatiMMSIState('enable')
    except Exception as err:
        # Invalid input or database failure: lock the workflow again.
        objectStatiMMSIState("disable")
        print(err)
        popMsg("Please enter a valid MMSI")
    if all_tests_passed:
        addGridData()
def addGridData():
    """Fill the vessel comboboxes once the entered MMSI has been validated.

    Caches the selected vessel's observations in the module-level
    ``mmsi_main_df`` so later plots do not have to query the database
    again, then offers up to 20 nearby vessels in the ship comboboxes.
    """
    # BUGFIX: without this declaration the assignment below created a
    # function-local that shadowed the module-level cache, so the cached
    # dataframe the docstring promises was never actually stored.
    global mmsi_main_df
    mmsi_main_df = readDatabase(
        "SELECT mmsi, ST_X(geom::geometry) as lat, ST_Y(geom::geometry) as long, " +
        " sog, datetimestamp FROM " + database_table + " WHERE (MMSI = " + str(entry_MMSI.get()) +
        ") ORDER BY datetimestamp ASC")
    # Mean position of the vessel, used as the centre of the search area.
    # NOTE(review): ST_X is conventionally longitude in PostGIS, so the
    # "lat"/"long" aliases (and the POINT(lat long) order below) look
    # swapped — confirm against the table's geometry definition.
    lat_mean = np.mean(mmsi_main_df["lat"].values)
    long_mean = np.mean(mmsi_main_df["long"].values)
    # Up to 20 moving vessels (0 < sog < 30) within a 1000 km radius.
    query_near_20 = ("SELECT mmsi FROM " + str(database_table) +
                     " WHERE (sog > 0) and (sog < 30) and ST_DWithin(geom::geography," +
                     "ST_GeogFromText('POINT(" + str(lat_mean) + " " + str(long_mean) +
                     ")'),(1000)*1000, false) LIMIT 20;")
    df_mmsi = readDatabase(query_near_20)
    # Offer the nearby MMSIs in both ship comboboxes with default picks.
    nearby_mmsis = ((df_mmsi["mmsi"].values).astype(int)).tolist()
    grid_Ship_2["values"] = nearby_mmsis
    grid_Ship_2.current(1)
    grid_Ship_3["values"] = nearby_mmsis
    grid_Ship_3.current(10)
####---------------------------END OF STATIC MAP FRAME -------------------------
####--------------------- CLASS: HEATMAP FRAME ---------------------------------
# Module-level handle for the heat-map status label, initialised to 0 as a
# placeholder so the HeatMapFrame can rebind and update the message later.
lbl_Patient = 0
class HeatMapFrame(tk.Frame):
"""
This class is the Spatial Distribution Map frame and the options the user have.
Gridboxes = select the quality of the plot and radius of observations to plot
within the selected point
Buttons = to go back or to start plot
"""
def __init__(self,parent,controller):
#Frame settings
tk.Frame.__init__(self,parent)
self.configure(background = BACKGROUND_COLOUR) #Setting the background colour
global lbl_Patient
#Defining labels for the frame
lbl_heatmap_heading = ttk.Label(self,text = "Spatial Distribution Map",
font = HEADING_FONT, background = BACKGROUND_COLOUR)
lbl_chooseRadius = ttk.Label(self,text = "Radius size:",
font = ENTRY_FONT, background = BACKGROUND_COLOUR)
lbl_dummySpace = ttk.Label(self,text =" ",
font =("Century Gothic",20),background = BACKGROUND_COLOUR)
lbl_SOG_UpLow = ttk.Label(self,text = "SOG:",
font = ENTRY_FONT, background = BACKGROUND_COLOUR)
lbl_sogLower = ttk.Label(self,text = "lower bound",
font = ENTRY_FONT, background = BACKGROUND_COLOUR)
lbl_sogUpper = ttk.Label(self,text = "upper bound",
| |
def models(self, value):
    """Setter for the ``models`` property; ``None`` clears the value."""
    if value is None:
        self._property_models = None
        return
    # Must be a list/tuple, and every element must be a dict.
    self.assert_isinstance(value, "models", (list, tuple))
    self.assert_isinstance(value, "models", (dict,), is_array=True)
    self._property_models = value
class DeleteModelsResponse(Response):
    """
    Response of tasks.delete_models endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    """

    _service = "tasks"
    _action = "delete_models"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, updated=None, **kwargs):
        super(DeleteModelsResponse, self).__init__(**kwargs)
        self.updated = updated

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
        else:
            # Whole-number floats (e.g. decoded from JSON) count as ints.
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
            self._property_updated = value
class DequeueRequest(Request):
    """
    Remove a task from its queue.
    Fails if task status is not queued.

    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    _service = "tasks"
    _action = "dequeue"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(self, task, status_reason=None, status_message=None, **kwargs):
        super(DequeueRequest, self).__init__(**kwargs)
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        # None clears the stored ID; anything else must be a string.
        if value is None:
            self._property_task = None
        else:
            self.assert_isinstance(value, "task", six.string_types)
            self._property_task = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is None:
            self._property_status_reason = None
        else:
            self.assert_isinstance(value, "status_reason", six.string_types)
            self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is None:
            self._property_status_message = None
        else:
            self.assert_isinstance(value, "status_message", six.string_types)
            self._property_status_message = value
class DequeueResponse(Response):
    """
    Response of tasks.dequeue endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    :param dequeued: Number of tasks dequeued (0 or 1)
    :type dequeued: int
    """

    _service = "tasks"
    _action = "dequeue"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "dequeued": {
                "description": "Number of tasks dequeued (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, dequeued=None, **kwargs):
        super(DequeueResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields
        self.dequeued = dequeued

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
        else:
            # Whole-number floats (e.g. decoded from JSON) count as ints.
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
            self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        if value is None:
            self._property_fields = None
        else:
            self.assert_isinstance(value, "fields", (dict,))
            self._property_fields = value

    @schema_property("dequeued")
    def dequeued(self):
        return self._property_dequeued

    @dequeued.setter
    def dequeued(self, value):
        if value is None:
            self._property_dequeued = None
        else:
            # Whole-number floats (e.g. decoded from JSON) count as ints.
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "dequeued", six.integer_types)
            self._property_dequeued = value
class EditRequest(Request):
"""
Edit task's details.
:param task: ID of the task
:type task: str
:param force: If not true, call fails if the task status is not 'created'
:type force: bool
:param name: Task name Unique within the company.
:type name: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param type: Type of task
:type type: TaskTypeEnum
:param comment: Free text comment
:type comment: str
:param parent: Parent task id Must be a completed task.
:type parent: str
:param project: Project ID of the project to which this task is assigned Must
exist[ab]
:type project: str
:param output_dest: Output storage id Must be a reference to an existing
storage.
:type output_dest: str
:param execution: Task execution params
:type execution: Execution
:param script: Script info
:type script: Script
:param hyperparams: Task hyper params per section
:type hyperparams: dict
:param configuration: Task configuration params
:type configuration: dict
:param models: Task models
:type models: TaskModels
:param container: Docker container parameters
:type container: dict
:param runtime: Task runtime mapping
:type runtime: dict
"""
_service = "tasks"
_action = "edit"
_version = "2.13"
_schema = {
"definitions": {
"artifact": {
"properties": {
"content_size": {
"description": "Raw data length in bytes",
"type": "integer",
},
"display_data": {
"description": "User-defined list of key/value pairs, sorted",
"items": {"items": {"type": "string"}, "type": "array"},
"type": "array",
},
"hash": {
"description": "Hash of entire raw data",
"type": "string",
},
"key": {"description": "Entry key", "type": "string"},
"mode": {
"$ref": "#/definitions/artifact_mode_enum",
"description": "System defined input/output indication",
},
"timestamp": {
"description": "Epoch time when artifact was created",
"type": "integer",
},
"type": {
"description": "System defined type",
"type": "string",
},
"type_data": {
"$ref": "#/definitions/artifact_type_data",
"description": "Additional fields defined by the system",
},
"uri": {"description": "Raw data location", "type": "string"},
},
"required": ["key", "type"],
"type": "object",
},
"artifact_mode_enum": {
"default": "output",
"enum": ["input", "output"],
"type": "string",
},
"artifact_type_data": {
"properties": {
"content_type": {
"description": "System defined raw data content type",
"type": ["string", "null"],
},
"data_hash": {
"description": "Hash of raw data, without any headers or descriptive parts",
"type": ["string", "null"],
},
"preview": {
"description": "Description or textual data",
"type": ["string", "null"],
},
},
"type": "object",
},
"configuration_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"execution": {
"properties": {
"artifacts": {
"description": "Task artifacts",
"items": {"$ref": "#/definitions/artifact"},
"type": ["array", "null"],
},
"framework": {
"description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
"type": ["string", "null"],
},
"model_desc": {
"additionalProperties": True,
"description": "Json object representing the Model descriptors",
"type": ["object", "null"],
},
"model_labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
"type": ["object", "null"],
},
"parameters": {
"additionalProperties": True,
"description": "Json object containing the Task parameters",
"type": ["object", "null"],
},
"queue": {
"description": "Queue ID where task was queued.",
"type": ["string", "null"],
},
},
"type": "object",
},
"params_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. The combination of section and name should be unique",
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"script": {
"properties": {
"binary": {
"default": "python",
"description": "Binary to use when running the script",
"type": ["string", "null"],
},
"branch": {
"description": "Repository branch id If not provided and tag not provided, default repository branch is used.",
"type": ["string", "null"],
},
"diff": {
"description": "Uncommitted changes found in the repository when task was run",
"type": ["string", "null"],
},
"entry_point": {
"description": "Path to execute within the repository",
"type": ["string", "null"],
},
"repository": {
"description": "Name of the repository where the script is located",
"type": ["string", "null"],
},
"requirements": {
"description": "A JSON object containing requirements strings by key",
"type": ["object", "null"],
},
"tag": {
"description": "Repository tag",
"type": ["string", "null"],
},
"version_num": {
"description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.",
"type": ["string", "null"],
},
"working_dir": {
"description": "Path to the folder from which to run the script Default - root folder | |
== "Split":
if "split" in attributes and attributes["split"] == (
old_d_embd,
old_d_embd,
old_d_embd,
):
assert len(attributes) == 2
attributes = frozendict(
{
"axis": attributes["axis"],
"split": (
new_d_embd,
new_d_embd,
new_d_embd,
),
}
)
elif op_type == "Constant":
value = attributes["value"]
if value == old_n_head:
value = new_n_head
new_device = new_device if new_device is not None else attributes["device"]
attributes = frozendict({"value": value, "device": new_device})
elif new_device is not None:
attributes = frozendict({"value": value, "device": new_device})
return attributes
# TODO assign device 1 to init_fn inputs here?
def gpt2_dhp_transform(
function,
dp_degree,
hp_degree,
pp_degree,
devices,
num_microbatches,
d_embd,
n_head,
skip_allgathers=False,
debug=False,
):
"""Automatically distributes a GPT-2 function using D/H/P hybrid parallelism."""
if debug:
logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.DEBUG)
# Initialize the transformed function and construct the device tree given the
# specified parallelism dimensions.
fn_name = f"{function.name}_{dp_degree}_{hp_degree}_{pp_degree}_{num_microbatches}"
transformed_function = FunctionMaker(name=fn_name)
device_tree = _get_device_tree(dp_degree, hp_degree, pp_degree, devices)
device_tree_root = tuple(device_tree.keys())[0]
dp_devices = tuple(sorted(device_tree[device_tree_root].keys()))
# A list of lists of horizontal parallel devices that synchronize
# across data parallel partitions.
hp_device_groups = list(
zip(
*[
tuple(sorted(device_tree[device_tree_root][dp_device].keys()))
for dp_device in dp_devices
]
)
)
# Construct pipeline parallel partitions and schedules for each
# horizontal parallel partition.
# A map with the following structure:
# Data parallel partition ID
# |-> Attention block (subfunction)
# |-> Assigned device
partition_maps = defaultdict(dict)
# A list of pipeline parallel schedules, with one schedule
# (represented as a list of dicts) for every horizontal parallel partition.
pp_schedules = defaultdict(list)
op_to_stage_maps = {}
for i, dp_device in enumerate(device_tree[device_tree_root]):
hp_devices = tuple(sorted(device_tree[device_tree_root][dp_device].keys()))
# Construct the pipeline parallel schedules for each horizontal parallel partition.
for j, hp_device in enumerate(hp_devices):
pp_devices = device_tree[device_tree_root][dp_device][hp_device]
partition_maps[i][j] = _pipeline_parallel_partition(
function, pp_degree, pp_devices
)
op_to_stage_maps[i] = _get_op_to_stage_map(partition_maps[i][j].keys())
scheduler = PipeDreamScheduler(num_microbatches)
schedule = scheduler.schedule(function, partition_maps[i][j])
pp_schedules[i].append(schedule)
# An init function that moves weights/inputs to correct devices.
init_function = FunctionMaker(name=fn_name + "_init")
transformed_inputs = {}
for inp in function.inputs:
v = init_function.add_input_value(inp.name, inp.type)
transformed_inputs[inp] = v
# Partition inputs across each parallelism dimension.
dp_inputs = _partition_inputs_dp(init_function, device_tree)
hp_inputs = _partition_inputs_hp(init_function, device_tree, dp_inputs)
pp_inputs = _partition_inputs_pp(
init_function,
device_tree,
dp_inputs,
hp_inputs,
num_microbatches,
function,
transformed_inputs,
partition_maps,
op_to_stage_maps,
)
init_function = init_function.finalize()
# Infer types so that init_function.outputs have correct types
# init_function = infer_types(init_function, init_function.inputs)
# Inputs of transformed_function are outputs of init_function.
for v in init_function.outputs:
transformed_function.inputs.append(v)
function_inputs = set(function.inputs)
dp_outputs = defaultdict(list)
for i, dp_device in enumerate(device_tree[device_tree_root]):
# A map with the following structure:
# original intermediate value
# |-> horizontal parallel partition ID
# |-> pipeline parallel partition ID
# |-> microbatch ID
# |-> transformed intermediate value
intermediate_value_map = defaultdict(
lambda: defaultdict(lambda: defaultdict(dict))
)
# Jointly iterate through all the schedules, timestep by timestep.
# Timesteps will be a tuple of dicts corresponding to the pipeline parallel
# schedules at this timestep (represented as a dict) for each horizontal
# parallel partition. The keys (devices) for each schedule will be different,
# but the values should be the same. This iteration strategy is necessary
# for Megatron-style synchronization.
hp_devices = tuple(sorted(device_tree[device_tree_root][dp_device].keys()))
for timesteps in zip(*pp_schedules[i]):
# For a given set of timesteps, iterate through in order of matching
# horizontal parallel devices.
for devices in zip(*tuple(sorted(ts.keys()) for ts in timesteps)):
# Verify that for this group of horizontal parallel devices the
# corresponding pipeline parallel stage is exactly the same.
assert (
len(set(ts[device] for ts, device in zip(timesteps, devices))) == 1
)
assert len(devices) == hp_degree
stage, microbatch_id = timesteps[0][devices[0]]
logging.debug(
f"Scheduling stage {stage.name}, microbatch {microbatch_id} "
f"on device(s) {devices}"
)
for op in stage.ops:
# Collect inputs for this op.
for j, device in enumerate(devices):
logging.debug(
f"Scheduling op {op} on device {device.device_id}"
)
pp_devices = device_tree[device_tree_root][dp_device][
hp_devices[j]
]
k = pp_devices.index(device)
input_values = []
for inp in op.inputs:
# Retrieve the transformed input value from the appropriate
# data structure depending on whether the original input is
# a function input or an intermediate value.
if inp in function_inputs:
v = transformed_inputs[inp]
dp_v = dp_inputs[v][i]
hp_v = hp_inputs[dp_v][j]
pp_v = pp_inputs[hp_v][k][microbatch_id]
input_values.append(pp_v)
else:
output_value = intermediate_value_map[j][k][
microbatch_id
][inp]
input_values.append(output_value)
# Add the op once for each device to the transformed function.
if (
hp_degree > 1 and op.op_type == "Split"
) or op.op_type == "Constant":
attributes = update_attributes(
op.op_type,
op.attributes,
old_d_embd=d_embd,
new_d_embd=d_embd // hp_degree,
old_n_head=n_head,
new_n_head=n_head // hp_degree,
new_device=device,
)
else:
attributes = op.attributes
transformed_outputs = transformed_function.add_op(
op.op_type,
name=op.name,
inputs=input_values,
attributes=attributes,
output_names=[
(
f"{v.name}_dp_{i}_hp_{j}_pp_{microbatch_id}"
f"_device_{device.device_id}"
)
for v in op.outputs
],
)
if not isinstance(transformed_outputs, tuple):
transformed_outputs = (transformed_outputs,)
for output, transformed_output in zip(
op.outputs, transformed_outputs
):
assert (
output
not in intermediate_value_map[j][k][microbatch_id]
)
intermediate_value_map[j][k][microbatch_id][
output
] = transformed_output
# Reset variables.
j = None
k = None
device = None
# Aggregate horizontal parallel outputs.
if hp_degree > 1:
if op.op_type == "Gemm" and any(
[
"attn.c_proj.weight" in inp.name
or "mlp.c_proj.weight" in inp.name
for inp in op.inputs
]
):
for output in op.outputs:
value_names = tuple(
intermediate_value_map[j][k][microbatch_id][output]
for j in range(len(devices))
for k in intermediate_value_map[j]
if output
in intermediate_value_map[j][k][microbatch_id]
)
logging.debug(
f"Doing horizontal parallel reduction for "
f"microbatch {microbatch_id} for {value_names}"
)
aggregated_hp_outputs = []
for j, device in enumerate(devices):
pp_devices = device_tree[device_tree_root][
dp_device
][hp_devices[j]]
aggregated_hp_outputs.append(
intermediate_value_map[j][
pp_devices.index(device)
][microbatch_id][output]
)
reduced_outputs = _mpi_allreduce_values(
tuple(aggregated_hp_outputs),
transformed_function,
output_names=[
(
f"{output.name}_dp_{i}_hp_all_pp_"
f"{microbatch_id}_device_{device.device_id}"
)
for j, device in enumerate(devices)
],
)
assert len(reduced_outputs) == len(devices)
for j, (device, reduced_output) in enumerate(
zip(devices, reduced_outputs)
):
pp_devices = device_tree[device_tree_root][
dp_device
][hp_devices[j]]
k = pp_devices.index(device)
intermediate_value_map[j][k][microbatch_id][
output
] = reduced_output
# Aggregate pipeline parallel outputs.
for output in op.outputs:
if output in function.outputs:
for j, device in enumerate(devices):
pp_devices = device_tree[device_tree_root][dp_device][
hp_devices[j]
]
k = pp_devices.index(device)
mb_k_output = intermediate_value_map[j][k][
microbatch_id
][output]
match = re.search(r"hp\_(.*)\_pp", mb_k_output.name)
hp_level = match.group(1)
if microbatch_id == 0:
# We clone the output from the first microbatch to create
# the aggregated output.
if num_microbatches > 1:
intermediate_value_map[j][k]["all"][
output
] = _identity(
mb_k_output,
transformed_function,
f"{output.name}_dp_{i}_hp_{hp_level}_pp_all_"
f"device_{device.device_id}",
)
else:
intermediate_value_map[j][k]["all"][
output
] = mb_k_output
else:
# For all subsequent microbatches, we aggregate into the
# specially designated aggregation output. In particular,
# we add weights together and concatenate batch-dependent
# values together.
assert output in intermediate_value_map[j][k]["all"]
mb_all_output = intermediate_value_map[j][k]["all"][
output
]
assert (
re.search(
r"hp\_(.*)\_pp", mb_all_output.name
).group(1)
== hp_level
)
logging.debug(
f"Doing pipeline parallel aggregation for {mb_all_output} "
f"and {mb_k_output} on device {device.device_id}"
)
intermediate_value_map[j][k]["all"][
output
] = _concat_values(
(mb_all_output, mb_k_output),
transformed_function,
dim=0,
output_name=(
f"{output.name}_dp_{i}_hp_{hp_level}_"
f"pp_all_device_{device.device_id}"
),
)
# Forward any timestep outputs to the next pipeline parallel partition.
if pp_degree > 1:
for devices in zip(*tuple(sorted(ts.keys()) for ts in timesteps)):
logging.debug(f"Forwarding outputs for stage {stage.name}...")
stage, microbatch_id = timesteps[0][devices[0]]
for j, device in enumerate(devices):
pp_devices = device_tree[device_tree_root][dp_device][
hp_devices[j]
]
k = pp_devices.index(device)
for output in stage.outputs:
# An output is forwarded when its consumer devices reside
# on a different device than the current stage's device.
transformed_output = intermediate_value_map[j][k][
microbatch_id
][output]
consumer_devices = _get_consumer_devices_for_pp_value(
output,
function,
op_to_stage_maps[i],
pp_devices,
partition_maps[i][j],
)
logging.debug(
f"Consumer devices for output {output.name}, "
f"microbatch {microbatch_id}, "
f"device {device.device_id}: "
f"{[d.device_id for d in consumer_devices]}"
)
for consumer_device in consumer_devices:
if device != consumer_device:
logging.debug(
f"Sending value {output.name} to "
f"device {consumer_device.device_id}"
)
intermediate_value_map[j][
pp_devices.index(consumer_device)
][microbatch_id][output] = _send_value(
transformed_output,
transformed_function,
consumer_device,
output_name=(
f"{output.name}_dp_{i}_hp_{j}_pp_"
f"{microbatch_id}_device_"
f"{consumer_device.device_id}"
),
)
# Collect the pipeline parallel aggregated function outputs
# from horizontal parallel partitions to do data parallel aggregation.
for output in function.outputs:
dp_outputs[output].append(
tuple(
intermediate_value_map[j][k]["all"][output]
for j in intermediate_value_map
for k in intermediate_value_map[j]
if output in intermediate_value_map[j][k]["all"]
)
)
# There should only be as many pipeline parallel aggregated function outputs
# as there are horizontal parallel partitions.
assert len(dp_outputs[output][-1]) == len(hp_devices)
# Aggregate data parallel outputs.
if dp_degree > 1 and not skip_allgathers:
for output in dp_outputs:
logging.debug(f"Doing data parallel reduction for {dp_outputs[output]}")
hp_groups = list(zip(*dp_outputs[output]))
if output.name == "output1":
for i, hp_group in enumerate(hp_groups):
if hp_degree > 1:
hp_device_group_str = ",".join(
[str(d.device_id) for d in hp_device_groups[i]]
)
else:
hp_device_group_str = "all"
_mpi_allgather_values(
hp_group,
transformed_function,
dim=0,
| |
select_params=None,
headers=None
):
"""Select一个文件的内容到本地文件
:param key: OSS文件名
:param filename: 本地文件名。其父亲目录已经存在且有写权限。
:param progress_callback: 调用进度的callback。参考 :ref:`progress_callback`
:param select_params: select参数集合。参见 :ref:`select_params`
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: 如果文件不存在, 抛出 :class:`NoSuchKey <oss2.exceptions.NoSuchKey>`
"""
with open(to_unicode(filename), 'wb') as f:
result = self.select_object(key, sql, progress_callback=progress_callback,
select_params=select_params, headers=headers)
for chunk in result:
f.write(chunk)
return result
def head_object(self, key, headers=None, params=None):
    """Fetch object meta information via an HTTP HEAD request.

    The meta information is carried in the response headers and can be
    read from the ``headers`` member of the returned ``RequestResult``.

    Usage::

        >>> result = bucket.head_object('readme.txt')
        >>> print(result.content_type)
        text/plain

    :param key: object key
    :param headers: HTTP headers; a dict or (preferably) an
        oss2.CaseInsensitiveDict
    :param params: HTTP request params; pass ``versionId`` to address a
        specific object version
    :return: :class:`HeadObjectResult <oss2.models.HeadObjectResult>`
    :raises: :class:`NotFound <oss2.exceptions.NotFound>` if the bucket
        or the object does not exist
    """
    logger.debug("Start to head object, bucket: {0}, key: {1}, headers: {2}".format(
        self.bucket_name, to_string(key), headers))
    resp = self.__do_object('HEAD', key, headers=headers, params=params)
    logger.debug("Head object done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
    return HeadObjectResult(resp)
def create_select_object_meta(self, key, select_meta_params=None, headers=None):
    """Get — or create, if absent — the select meta of a CSV / JSON-LINES object.

    The meta information is carried in the response headers and can be
    read from the ``headers`` member of the returned ``RequestResult``.

    CSV usage::

        >>> select_meta_params = { 'FieldDelimiter': ',',
                                'RecordDelimiter': '\r\n',
                                'QuoteCharacter': '"',
                                'OverwriteIfExists' : 'false'}
        >>> result = bucket.create_select_object_meta('csv.txt', select_meta_params)
        >>> print(result.rows)

    JSON-LINES usage::

        >>> select_meta_params = { 'Json_Type':'LINES', 'OverwriteIfExists':'False'}
        >>> result = bucket.create_select_object_meta('jsonlines.json', select_meta_params)

    :param key: object key
    :param select_meta_params: parameter dict, see :ref:`csv_meta_params`
    :param headers: HTTP headers; a dict or (preferably) an
        oss2.CaseInsensitiveDict
    :return: :class:`GetSelectObjectMetaResult <oss2.models.HeadObjectResult>`.
        Besides the head-object attributes it exposes ``rows`` (total record
        count) and ``splits`` (total split count; each split holds roughly
        the same number of bytes and can be queried independently).
    :raises: :class:`NotFound <oss2.exceptions.NotFound>` if the bucket or
        the object does not exist
    """
    headers = http.CaseInsensitiveDict(headers)
    body = xml_utils.to_get_select_object_meta(select_meta_params)
    # JSON-lines objects use a different x-oss-process action than CSV.
    if select_meta_params is not None and 'Json_Type' in select_meta_params:
        process_action = 'json/meta'
    else:
        process_action = 'csv/meta'
    # NOTE(review): this raises the bucket-wide request timeout as a lasting
    # side effect (meta creation can be slow, but the previous timeout is
    # never restored) — confirm this is intentional.
    self.timeout = 3600
    resp = self.__do_object('POST', key, data=body, headers=headers,
                            params={'x-oss-process': process_action})
    return GetSelectObjectMetaResult(resp)
def get_object_meta(self, key, params=None, headers=None):
    """Fetch basic object meta (ETag, size, last-modified) without the content.

    The information is carried in the response headers and is exposed via
    the ``last_modified``, ``content_length`` and ``etag`` members of the
    returned ``GetObjectMetaResult``.

    :param key: object key
    :param dict params: request parameters
    :param headers: HTTP headers; a dict or (preferably) an
        oss2.CaseInsensitiveDict
    :return: :class:`GetObjectMetaResult <oss2.models.GetObjectMetaResult>`
    :raises: :class:`NoSuchKey <oss2.exceptions.NoSuchKey>` if the object
        does not exist; other exceptions may also be raised
    """
    headers = http.CaseInsensitiveDict(headers)
    logger.debug("Start to get object metadata, bucket: {0}, key: {1}".format(self.bucket_name, to_string(key)))
    # Make sure the objectMeta sub-resource marker is present in params.
    params = dict() if params is None else params
    params.setdefault(Bucket.OBJECTMETA, '')
    resp = self.__do_object('GET', key, params=params, headers=headers)
    logger.debug("Get object metadata done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
    return GetObjectMetaResult(resp)
def object_exists(self, key, headers=None):
    """Return True if the object exists, False otherwise.

    Any error other than NoSuchKey (e.g. the bucket itself is missing)
    propagates to the caller.

    :param key: object name
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict recommended
    :rtype: bool
    """
    # Implementation note: a HEAD request has no response body, so a 404 from
    # head_object cannot distinguish NoSuchBucket from NoSuchKey.  The pre-2.2.0
    # trick (get_object with if-modified-since set 24h in the future) is broken
    # by 404 back-to-origin configurations.  get_object_meta has neither problem,
    # so existence is probed through it.
    logger.debug("Start to check if object exists, bucket: {0}, key: {1}".format(self.bucket_name, to_string(key)))
    try:
        self.get_object_meta(key, headers=headers)
    except exceptions.NoSuchKey:
        return False
    # The original code had an extra `except: raise` clause here; a bare
    # re-raise is a no-op, so it has been removed — other exceptions still
    # propagate unchanged.
    return True
def copy_object(self, source_bucket_name, source_key, target_key, headers=None, params=None):
    """Copy an object into the current bucket.

    :param str source_bucket_name: source bucket name
    :param str source_key: source object name
    :param str target_key: target object name
    :param dict params: request parameters (may carry a versionId for the source)
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict recommended
    :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
    """
    headers = http.CaseInsensitiveDict(headers)
    # Build the x-oss-copy-source header; append the source version when given.
    copy_source = '/' + source_bucket_name + '/' + urlquote(source_key, '')
    if params and Bucket.VERSIONID in params:
        copy_source += '?versionId=' + params[Bucket.VERSIONID]
    headers[OSS_COPY_OBJECT_SOURCE] = copy_source
    logger.debug(
        "Start to copy object, source bucket: {0}, source key: {1}, bucket: {2}, key: {3}, headers: {4}".format(
            source_bucket_name, to_string(source_key), self.bucket_name, to_string(target_key), headers))
    resp = self.__do_object('PUT', target_key, headers=headers)
    logger.debug("Copy object done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
    return PutObjectResult(resp)
def update_object_meta(self, key, headers):
    """Replace object metadata: standard HTTP headers such as Content-Type as
    well as custom x-oss-meta-* headers.  Use :func:`head_object` to read the
    metadata back.

    :param str key: object name
    :param headers: HTTP headers carrying the new metadata; dict or
        oss2.CaseInsensitiveDict recommended
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    # Copy into a CaseInsensitiveDict so the caller's dict is not mutated, and
    # unconditionally send the REPLACE directive.  Previously a None `headers`
    # argument silently skipped the directive, so the copy kept the old
    # metadata instead of replacing it.
    headers = http.CaseInsensitiveDict(headers)
    headers[OSS_METADATA_DIRECTIVE] = 'REPLACE'
    logger.debug("Start to update object metadata, bucket: {0}, key: {1}".format(self.bucket_name, to_string(key)))
    return self.copy_object(self.bucket_name, key, key, headers=headers)
def delete_object(self, key, params=None, headers=None):
    """Delete a single object.

    :param str key: object name
    :param params: request parameters
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict recommended
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    logger.info("Start to delete object, bucket: {0}, key: {1}".format(self.bucket_name, to_string(key)))
    resp = self.__do_object('DELETE', key, params=params,
                            headers=http.CaseInsensitiveDict(headers))
    logger.debug("Delete object done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
    return RequestResult(resp)
def restore_object(self, key, params=None, headers=None, input=None):
    """Restore (unfreeze) an archived object.

    Server-side semantics (see API docs):
      * the first call for an object returns RequestResult.status == 202;
      * calling again while the restore is still in progress raises
        RestoreAlreadyInProgress (status=409);
      * calling again after the restore completed returns status 200 and
        extends the restored copy's availability by one day, up to 7 days;
      * a missing object raises NoSuchKey (status=404);
      * restoring a non-Archive object raises OperationNotSupported (status=400).

    The restore state can also be inspected via :func:`head_object` by checking
    the 'x-oss-restore' response header.

    :param str key: object name
    :param params: request parameters
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict recommended
    :param input: restore configuration
    :type input: class:`RestoreConfiguration <oss2.models.RestoreConfiguration>`
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    headers = http.CaseInsensitiveDict(headers)
    logger.debug("Start to restore object, bucket: {0}, key: {1}".format(self.bucket_name, to_string(key)))
    if params is None:
        params = dict()
    # Make sure the restore sub-resource marker is present on the request.
    params.setdefault(Bucket.RESTORE, '')
    data = self.__convert_data(RestoreConfiguration, xml_utils.to_put_restore_config, input)
    resp = self.__do_object('POST', key, params=params, headers=headers, data=data)
    logger.debug("Restore object done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
    return RequestResult(resp)
def put_object_acl(self, key, permission, params=None, headers=None):
    """Set the ACL of an object.

    :param str key: object name
    :param str permission: one of oss2.OBJECT_ACL_DEFAULT, oss2.OBJECT_ACL_PRIVATE,
        oss2.OBJECT_ACL_PUBLIC_READ or oss2.OBJECT_ACL_PUBLIC_READ_WRITE
    :param dict params: request parameters
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict recommended
    :return: :class:`RequestResult <oss2.models.RequestResult>`
    """
    logger.debug("Start to put object acl, bucket: {0}, key: {1}, acl: {2}".format(
        self.bucket_name, to_string(key), permission))
    acl_headers = http.CaseInsensitiveDict(headers)
    acl_headers[OSS_OBJECT_ACL] = permission
    if params is None:
        params = dict()
    # Make sure the acl sub-resource marker is present on the request.
    params.setdefault(Bucket.ACL, '')
    resp = self.__do_object('PUT', key, params=params, headers=acl_headers)
    logger.debug("Put object acl done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
    return RequestResult(resp)
def get_object_acl(self, key, params=None, headers=None):
    """Get the ACL of an object.

    :param key: object name
    :param params: request parameters
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict recommended
    :return: :class:`GetObjectAclResult <oss2.models.GetObjectAclResult>`
    """
    logger.debug("Start to get object acl, bucket: {0}, key: {1}".format(self.bucket_name, to_string(key)))
    headers = http.CaseInsensitiveDict(headers)
    if params is None:
        params = dict()
    # Make sure the acl sub-resource marker is present on the request.
    params.setdefault(Bucket.ACL, '')
    resp = self.__do_object('GET', key, params=params, headers=headers)
    logger.debug("Get object acl done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
    return self._parse_result(resp, xml_utils.parse_get_object_acl, GetObjectAclResult)
def batch_delete_objects(self, key_list, headers=None):
    """Delete multiple objects in a single request.

    :param key_list: object names to delete; must be non-empty
    :type key_list: list of str
    :param headers: HTTP headers
    :return: :class:`BatchDeleteObjectsResult <oss2.models.BatchDeleteObjectsResult>`
    :raises ClientError: if key_list is empty
    """
    if not key_list:
        raise ClientError('key_list should not be empty')
    logger.debug("Start to delete objects, bucket: {0}, keys: {1}".format(self.bucket_name, key_list))
    request_body = xml_utils.to_batch_delete_objects_request(key_list, False)
    request_headers = http.CaseInsensitiveDict(headers)
    # Content-MD5 is required by the batch delete API for payload integrity.
    request_headers['Content-MD5'] = utils.content_md5(request_body)
    resp = self.__do_object('POST', '', data=request_body,
                            params={'delete': '', 'encoding-type': 'url'},
                            headers=request_headers)
    logger.debug("Delete objects done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
    return self._parse_result(resp, xml_utils.parse_batch_delete_objects, BatchDeleteObjectsResult)
def delete_object_versions(self, keylist_versions, headers=None):
    """Delete multiple versioned objects in a single request.

    :param keylist_versions: versioned object entries to delete; must be non-empty
    :type keylist_versions: list of BatchDeleteObjectsList
    :param headers: HTTP headers
    :return: :class:`BatchDeleteObjectsResult <oss2.models.BatchDeleteObjectsResult>`
    :raises ClientError: if keylist_versions is empty
    """
    if not keylist_versions:
        raise ClientError('keylist_versions should not be empty')
    logger.debug("Start to delete object versions, bucket: {0}".format(self.bucket_name))
    request_body = xml_utils.to_batch_delete_objects_version_request(keylist_versions, False)
    request_headers = http.CaseInsensitiveDict(headers)
    # Content-MD5 is required by the batch delete API for payload integrity.
    request_headers['Content-MD5'] = utils.content_md5(request_body)
    resp = self.__do_object('POST', '', data=request_body,
                            params={'delete': '', 'encoding-type': 'url'},
                            headers=request_headers)
    logger.debug("Delete object versions done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
    return self._parse_result(resp, xml_utils.parse_batch_delete_objects, BatchDeleteObjectsResult)
def init_multipart_upload(self, key, headers=None, params=None):
    """Initiate a multipart upload.

    The returned `upload_id`, together with the bucket and object names,
    uniquely identifies this multipart upload.

    :param str key: object name to upload
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict recommended
    :param params: request parameters
    :return: :class:`InitMultipartUploadResult <oss2.models.InitMultipartUploadResult>`
    """
    headers = utils.set_content_type(http.CaseInsensitiveDict(headers), key)
    # Work on a copy so the caller's params dict is left untouched.
    tmp_params = dict() if params is None else params.copy()
    tmp_params['uploads'] = ''
    logger.debug("Start to init multipart upload, bucket: {0}, keys: {1}, headers: {2}, params: {3}".format(
        self.bucket_name, to_string(key), headers, tmp_params))
    resp = self.__do_object('POST', key, params=tmp_params, headers=headers)
    logger.debug("Init multipart upload done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
    return self._parse_result(resp, xml_utils.parse_init_multipart_upload, InitMultipartUploadResult)
def upload_part(self, key, upload_id, part_number, data, progress_callback=None, headers=None):
    """Upload one part of a multipart upload.

    :param str key: object name; must match the one given to :func:`init_multipart_upload`
    :param str upload_id: multipart upload ID
    :param int part_number: part number, starting at 1
    :param data: payload of this part
    :param progress_callback: optional progress callback, see :ref:`progress_callback`
    :param headers: HTTP headers (e.g. Content-MD5); dict or oss2.CaseInsensitiveDict
    :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
    """
    headers = http.CaseInsensitiveDict(headers)
    # Wrap the payload: progress reporting first, then CRC accumulation, so the
    # crc adapter observes exactly the bytes that go on the wire.
    if progress_callback:
        data = utils.make_progress_adapter(data, progress_callback)
    if self.enable_crc:
        data = utils.make_crc_adapter(data)
    logger.debug(
        "Start to upload multipart, bucket: {0}, key: {1}, upload_id: {2}, part_number: {3}, headers: {4}".format(
            self.bucket_name, to_string(key), upload_id, part_number, headers))
    upload_params = {'uploadId': upload_id, 'partNumber': str(part_number)}
    resp = self.__do_object('PUT', key, params=upload_params, headers=headers, data=data)
    logger.debug("Upload multipart done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
    result = PutObjectResult(resp)
    if self.enable_crc and result.crc is not None:
        # Compare the locally accumulated CRC with the server-reported one.
        utils.check_crc('upload part', data.crc, result.crc, result.request_id)
    return result
def complete_multipart_upload(self, key, upload_id, parts, headers=None):
    """Complete a multipart upload and materialize the object.

    :param str key: object name; must match the one given to :func:`init_multipart_upload`
    :param str upload_id: multipart upload ID
    :param parts: list of PartInfo; part_number and etag are required (etag is
        obtained from the :func:`upload_part` result)
    :type parts: list of `PartInfo <oss2.models.PartInfo>`
    :param headers: HTTP headers; dict or oss2.CaseInsensitiveDict recommended
    :return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
    """
    headers = http.CaseInsensitiveDict(headers)
    # The API requires parts listed in ascending part-number order.
    ordered_parts = sorted(parts, key=lambda p: p.part_number)
    data = xml_utils.to_complete_upload_request(ordered_parts)
    logger.debug("Start to complete multipart upload, bucket: {0}, key: {1}, upload_id: {2}, parts: {3}".format(
        self.bucket_name, to_string(key), upload_id, data))
    resp = self.__do_object('POST', key,
                            params={'uploadId': upload_id},
                            data=data,
                            headers=headers)
    logger.debug(
        "Complete multipart upload done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
    result = PutObjectResult(resp)
    if self.enable_crc:
        # Combine per-part CRCs locally and compare with the server-side value.
        object_crc = utils.calc_obj_crc_from_parts(ordered_parts)
        utils.check_crc('multipart upload', object_crc, result.crc, result.request_id)
    return result
def abort_multipart_upload(self, key, upload_id, headers=None):
"""取消分片上传。
:param str key: 待上传的文件名,这个文件名要和 :func:`init_multipart_upload` 的文件名一致。
:param | |
from elasticsearch.client.utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
class MlClient(NamespacedClient):
@query_params('from_', 'size')
def get_filters(self, filter_id=None, params=None):
    """Retrieve ML filters, optionally restricted to a single filter ID.

    :arg filter_id: The ID of the filter to fetch
    :arg from_: skips a number of filters
    :arg size: specifies a max number of filters to get
    """
    path = _make_path('_xpack', 'ml', 'filters', filter_id)
    return self.transport.perform_request('GET', path, params=params)
@query_params()
def get_datafeeds(self, datafeed_id=None, params=None):
    """Fetch configuration for one or all datafeeds.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html>`_

    :arg datafeed_id: The ID of the datafeeds to fetch
    """
    path = _make_path('_xpack', 'ml', 'datafeeds', datafeed_id)
    return self.transport.perform_request('GET', path, params=params)
@query_params()
def get_datafeed_stats(self, datafeed_id=None, params=None):
    """Fetch usage statistics for one or all datafeeds.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html>`_

    :arg datafeed_id: The ID of the datafeeds stats to fetch
    """
    path = _make_path('_xpack', 'ml', 'datafeeds', datafeed_id, '_stats')
    return self.transport.perform_request('GET', path, params=params)
@query_params('anomaly_score', 'desc', 'end', 'exclude_interim', 'expand',
              'from_', 'size', 'sort', 'start')
def get_buckets(self, job_id, timestamp=None, body=None, params=None):
    """Retrieve anomaly bucket results for a job.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html>`_

    :arg job_id: ID of the job to get bucket results from
    :arg timestamp: The timestamp of the desired single bucket result
    :arg body: Bucket selection details if not provided in URI
    :arg anomaly_score: Filter for the most anomalous buckets
    :arg desc: Set the sort direction
    :arg end: End time filter for buckets
    :arg exclude_interim: Exclude interim results
    :arg expand: Include anomaly records
    :arg from_: skips a number of buckets
    :arg size: specifies a max number of buckets to get
    :arg sort: Sort buckets by a particular field
    :arg start: Start time filter for buckets
    """
    if job_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'job_id'.")
    path = _make_path('_xpack', 'ml', 'anomaly_detectors', job_id,
                      'results', 'buckets', timestamp)
    return self.transport.perform_request('GET', path, params=params, body=body)
@query_params('reset_end', 'reset_start')
def post_data(self, job_id, body, params=None):
    """Send data chunks to an anomaly detection job for processing.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html>`_

    :arg job_id: The name of the job receiving the data
    :arg body: The data to process
    :arg reset_end: Optional parameter to specify the end of the bucket
        resetting range
    :arg reset_start: Optional parameter to specify the start of the bucket
        resetting range
    """
    if any(arg in SKIP_IN_PATH for arg in (job_id, body)):
        raise ValueError("Empty value passed for a required argument.")
    path = _make_path('_xpack', 'ml', 'anomaly_detectors', job_id, '_data')
    return self.transport.perform_request('POST', path, params=params,
                                          body=self._bulk_body(body))
@query_params('force', 'timeout')
def stop_datafeed(self, datafeed_id, params=None):
    """Stop a running datafeed.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html>`_

    :arg datafeed_id: The ID of the datafeed to stop
    :arg force: True if the datafeed should be forcefully stopped.
    :arg timeout: Controls the time to wait until a datafeed has stopped.
        Default to 20 seconds
    """
    if datafeed_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
    path = _make_path('_xpack', 'ml', 'datafeeds', datafeed_id, '_stop')
    return self.transport.perform_request('POST', path, params=params)
@query_params()
def get_jobs(self, job_id=None, params=None):
    """Fetch configuration for one or all anomaly detection jobs.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html>`_

    :arg job_id: The ID of the jobs to fetch
    """
    path = _make_path('_xpack', 'ml', 'anomaly_detectors', job_id)
    return self.transport.perform_request('GET', path, params=params)
@query_params()
def delete_expired_data(self, params=None):
    """Invoke the ML ``_delete_expired_data`` endpoint."""
    return self.transport.perform_request(
        'DELETE', '/_xpack/ml/_delete_expired_data', params=params)
@query_params()
def put_job(self, job_id, body, params=None):
    """Create a new anomaly detection job.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html>`_

    :arg job_id: The ID of the job to create
    :arg body: The job
    """
    if any(arg in SKIP_IN_PATH for arg in (job_id, body)):
        raise ValueError("Empty value passed for a required argument.")
    path = _make_path('_xpack', 'ml', 'anomaly_detectors', job_id)
    return self.transport.perform_request('PUT', path, params=params, body=body)
@query_params()
def validate_detector(self, body, params=None):
    """Validate a detector configuration.

    :arg body: The detector
    """
    if body in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'body'.")
    return self.transport.perform_request(
        'POST', '/_xpack/ml/anomaly_detectors/_validate/detector',
        params=params, body=body)
@query_params('end', 'start', 'timeout')
def start_datafeed(self, datafeed_id, body=None, params=None):
    """Start an existing datafeed.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html>`_

    :arg datafeed_id: The ID of the datafeed to start
    :arg body: The start datafeed parameters
    :arg end: The end time when the datafeed should stop. When not set, the
        datafeed continues in real time
    :arg start: The start time from where the datafeed should begin
    :arg timeout: Controls the time to wait until a datafeed has started.
        Default to 20 seconds
    """
    if datafeed_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
    path = _make_path('_xpack', 'ml', 'datafeeds', datafeed_id, '_start')
    return self.transport.perform_request('POST', path, params=params, body=body)
@query_params('desc', 'end', 'exclude_interim', 'from_', 'record_score',
              'size', 'sort', 'start')
def get_records(self, job_id, body=None, params=None):
    """Retrieve anomaly record results for a job.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html>`_

    :arg job_id: ID of the job to get record results from
    :arg body: Record selection criteria
    :arg desc: Set the sort direction
    :arg end: End time filter for records
    :arg exclude_interim: Exclude interim results
    :arg from_: skips a number of records
    :arg record_score: record score filter (see API docs)
    :arg size: specifies a max number of records to get
    :arg sort: Sort records by a particular field
    :arg start: Start time filter for records
    """
    if job_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'job_id'.")
    path = _make_path('_xpack', 'ml', 'anomaly_detectors', job_id,
                      'results', 'records')
    return self.transport.perform_request('GET', path, params=params, body=body)
@query_params()
def update_job(self, job_id, body, params=None):
    """Update selected properties of an anomaly detection job.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html>`_

    :arg job_id: The ID of the job to update
    :arg body: The job update settings
    """
    if any(arg in SKIP_IN_PATH for arg in (job_id, body)):
        raise ValueError("Empty value passed for a required argument.")
    path = _make_path('_xpack', 'ml', 'anomaly_detectors', job_id, '_update')
    return self.transport.perform_request('POST', path, params=params, body=body)
@query_params()
def put_filter(self, filter_id, body, params=None):
    """Create an ML filter.

    :arg filter_id: The ID of the filter to create
    :arg body: The filter details
    """
    if any(arg in SKIP_IN_PATH for arg in (filter_id, body)):
        raise ValueError("Empty value passed for a required argument.")
    path = _make_path('_xpack', 'ml', 'filters', filter_id)
    return self.transport.perform_request('PUT', path, params=params, body=body)
@query_params()
def update_datafeed(self, datafeed_id, body, params=None):
    """Update selected settings of a datafeed.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html>`_

    :arg datafeed_id: The ID of the datafeed to update
    :arg body: The datafeed update settings
    """
    if any(arg in SKIP_IN_PATH for arg in (datafeed_id, body)):
        raise ValueError("Empty value passed for a required argument.")
    path = _make_path('_xpack', 'ml', 'datafeeds', datafeed_id, '_update')
    return self.transport.perform_request('POST', path, params=params, body=body)
@query_params()
def preview_datafeed(self, datafeed_id, params=None):
    """Preview the data a datafeed would fetch, without starting it.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html>`_

    :arg datafeed_id: The ID of the datafeed to preview
    """
    if datafeed_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'datafeed_id'.")
    path = _make_path('_xpack', 'ml', 'datafeeds', datafeed_id, '_preview')
    return self.transport.perform_request('GET', path, params=params)
@query_params('advance_time', 'calc_interim', 'end', 'skip_time', 'start')
def flush_job(self, job_id, body=None, params=None):
    """Flush a job, forcing any buffered data to be processed.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html>`_

    :arg job_id: The name of the job to flush
    :arg body: Flush parameters
    :arg advance_time: Advances time to the given value generating results
        and updating the model for the advanced interval
    :arg calc_interim: Calculates interim results for the most recent bucket
        or all buckets within the latency period
    :arg end: When used in conjunction with calc_interim, specifies the
        range of buckets on which to calculate interim results
    :arg skip_time: Skips time to the given value without generating results
        or updating the model for the skipped interval
    :arg start: When used in conjunction with calc_interim, specifies the
        range of buckets on which to calculate interim results
    """
    if job_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'job_id'.")
    path = _make_path('_xpack', 'ml', 'anomaly_detectors', job_id, '_flush')
    return self.transport.perform_request('POST', path, params=params, body=body)
@query_params('force', 'timeout')
def close_job(self, job_id, params=None):
    """Close an anomaly detection job.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html>`_

    :arg job_id: The name of the job to close
    :arg force: True if the job should be forcefully closed
    :arg timeout: Controls the time to wait until a job has closed. Default
        to 30 minutes
    """
    if job_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'job_id'.")
    path = _make_path('_xpack', 'ml', 'anomaly_detectors', job_id, '_close')
    return self.transport.perform_request('POST', path, params=params)
@query_params()
def open_job(self, job_id, params=None):
    """Open an anomaly detection job so it can receive data.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html>`_

    :arg job_id: The ID of the job to open
    """
    if job_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'job_id'.")
    path = _make_path('_xpack', 'ml', 'anomaly_detectors', job_id, '_open')
    return self.transport.perform_request('POST', path, params=params)
@query_params('force')
def delete_job(self, job_id, params=None):
    """Delete an anomaly detection job.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html>`_

    :arg job_id: The ID of the job to delete
    :arg force: True if the job should be forcefully deleted
    """
    if job_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'job_id'.")
    path = _make_path('_xpack', 'ml', 'anomaly_detectors', job_id)
    return self.transport.perform_request('DELETE', path, params=params)
@query_params()
def update_model_snapshot(self, job_id, snapshot_id, body, params=None):
    """Update selected properties of a model snapshot.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html>`_

    :arg job_id: The ID of the job the snapshot belongs to
    :arg snapshot_id: The ID of the snapshot to update
    :arg body: The model snapshot properties to update
    """
    if any(arg in SKIP_IN_PATH for arg in (job_id, snapshot_id, body)):
        raise ValueError("Empty value passed for a required argument.")
    path = _make_path('_xpack', 'ml', 'anomaly_detectors', job_id,
                      'model_snapshots', snapshot_id, '_update')
    return self.transport.perform_request('POST', path, params=params, body=body)
@query_params()
def delete_filter(self, filter_id, params=None):
    """Delete an ML filter.

    :arg filter_id: The ID of the filter to delete
    """
    if filter_id in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'filter_id'.")
    path = _make_path('_xpack', 'ml', 'filters', filter_id)
    return self.transport.perform_request('DELETE', path, params=params)
@query_params()
def validate(self, body, params=None):
"""
:arg body: The job config
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required | |
returned for the compute record
if active_compute_record.session_state and active_compute_record.session_state.session_desc:
most_recent_session_desc: ComputeSessionDesc = active_compute_record.session_state.session_desc
else:
most_recent_session_desc: ComputeSessionDesc = state.session_desc
job_name = most_recent_session_desc.resource_desc.resource_name
job_run_id = most_recent_session_desc.session_id
response = exponential_retry(
self._glue.batch_stop_job_run, self.CLIENT_RETRYABLE_EXCEPTION_LIST, JobName=job_name, JobRunIds=[job_run_id]
)
if "Errors" in response and response["Errors"]:
raise RuntimeError(
f"An error occurred while trying to stop AWS Glue job run! " f"Error: {response['Errors']!r}"
) # Errors will contain job name and run id.
# overrides
def get_max_wait_time_for_next_retry_in_secs(self) -> int:
    """Override the maximum interval used by the default retry strategy in
    BatchCompute::can_retry.

    Two hours is intended to be enough to get out of Glue's 'resource
    unavailable' cycle; retries fire with increasing probability as the wait
    time approaches this cap.
    """
    two_hours_in_secs = 2 * 60 * 60
    return two_hours_in_secs
def dev_init(self, platform: "DevelopmentPlatform") -> None:
    """Development-time initialization.

    Builds the per-(language, glue-version) map of Glue job names/ARNs and the
    S3 bucket name that hosts scripts and the Python bundle, validating AWS
    naming length limits up front so activation fails fast.

    :param platform: the development platform this driver is attached to
    :raises ValueError: if the platform's context_id produces a Glue job name
        longer than 255 chars or an S3 bucket name longer than MAX_BUCKET_LEN
    """
    super().dev_init(platform)
    # TODO introduce ABI as a sub-key to lang in this dict
    # we currently support compute_defs.ABI.GLUE_EMBEDDED
    # "job_name"/"job_arn" start empty and are filled in by the loop below.
    self._glue_job_lang_map: Dict[GlueJobLanguage, Dict[str, Dict[str, Any]]] = {
        GlueJobLanguage.PYTHON: {
            "1.0": {"job_name": "", "job_arn": "", "boilerplate": GlueDefaultABIPython, "suffix": "", "ext": "py"},
            "2.0": {"job_name": "", "job_arn": "", "boilerplate": GlueDefaultABIPython, "suffix": "v2_0", "ext": "py"},
            "3.0": {"job_name": "", "job_arn": "", "boilerplate": GlueDefaultABIPython, "suffix": "v3_0", "ext": "py"},
        },
        GlueJobLanguage.SCALA: {
            "1.0": {"job_name": "", "job_arn": "", "boilerplate": GlueAllABIScala, "suffix": "", "ext": "scala"},
            "2.0": {"job_name": "", "job_arn": "", "boilerplate": GlueAllABIScala, "suffix": "v2_0", "ext": "scala"},
            "3.0": {"job_name": "", "job_arn": "", "boilerplate": GlueAllABIScala, "suffix": "v3_0", "ext": "scala"},
        },
    }
    # prepare job-names
    for lang, lang_spec in self._glue_job_lang_map.items():
        for version, version_spec in lang_spec.items():
            boilerplate_module = version_spec["boilerplate"]
            version_suffix = version_spec["suffix"]
            job_name: str = self.GLUE_JOB_NAME_FORMAT.format(
                self.__class__.__name__, boilerplate_module.__name__, self._dev_platform.context_id + version_suffix, self._region
            )
            # AWS Glue job names must stay under 255 characters.
            if len(job_name) > 255:
                raise ValueError(
                    f"Cannot dev_init {self.__class__.__name__} due to very long"
                    f" AWS Glue Job Name {job_name} (limit < 255),"
                    f" as a result of very long context_id '{self._dev_platform.context_id}'."
                )
            self._glue_job_lang_map[lang][version].update({"job_name": job_name})
            self._glue_job_lang_map[lang][version].update({"job_arn": f"arn:aws:glue:{self._region}:{self._account_id}:job/{job_name}"})
    # S3 key of the Python dependency bundle ("working set") used by batch jobs.
    self._intelliflow_python_workingset_key = build_object_key(["batch"], "bundle.zip")
    self._bucket_name: str = self.SCRIPTS_ROOT_FORMAT.format(
        "awsglue".lower(), self._dev_platform.context_id.lower(), self._account_id, self._region
    )
    # S3 bucket names have a hard length limit; fail fast with guidance.
    bucket_len_diff = len(self._bucket_name) - MAX_BUCKET_LEN
    if bucket_len_diff > 0:
        msg = (
            f"Platform context_id '{self._dev_platform.context_id}' is too long (by {bucket_len_diff}!"
            f" {self.__class__.__name__} needs to use it create {self._bucket_name} bucket in S3."
            f" Please refer https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html"
            f" to align your naming accordingly in order to be able to use this driver."
        )
        module_logger.error(msg)
        raise ValueError(msg)
def runtime_init(self, platform: "RuntimePlatform", context_owner: "BaseConstruct") -> None:
    """Whole platform got bootstrapped at runtime. For other runtime services, this
    construct should be initialized (ex: context_owner: Lambda, etc).

    Creates the runtime boto3 Glue client and the S3 handle for the driver's
    scripts bucket (bucket name was computed in dev_init).
    """
    AWSConstructMixin.runtime_init(self, platform, context_owner)
    self._glue = boto3.client("glue", region_name=self._region)
    # TODO comment the following, probably won't need at runtime
    self._s3 = boto3.resource("s3")
    self._bucket = get_bucket(self._s3, self._bucket_name)
def provide_runtime_trusted_entities(self) -> List[str]:
return ["glue.amazonaws.com"]
def provide_runtime_default_policies(self) -> List[str]:
# arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole
return ["service-role/AWSGlueServiceRole"]
def provide_runtime_permissions(self) -> List[ConstructPermission]:
    """IAM permissions the runtime exec role needs for this Glue driver.

    Aggregates: full access to the driver's S3 scripts bucket, invocation of
    the managed Glue jobs, CloudWatch Logs access, iam:PassRole on the exec
    role itself, plus per-route extras: read access to external library
    objects on S3, user-declared slot permissions, and read/KMS access for
    external S3 signals.
    """
    # allow exec-role (post-activation, cumulative list of all trusted entities [AWS services]) to do the following;
    permissions = [
        ConstructPermission([f"arn:aws:s3:::{self._bucket_name}", f"arn:aws:s3:::{self._bucket_name}/*"], ["s3:*"]),
        # TODO be more picky.
        # allow other service assuming our role to call the jobs here
        ConstructPermission(
            [
                f"arn:aws:glue:{self._region}:{self._account_id}:job/{self._glue_job_lang_map[lang][version]['job_name']}"
                for lang in self._glue_job_lang_map.keys()
                for version in self._glue_job_lang_map[lang].keys()
            ],
            ["glue:*"],
        ),
        # CW Logs (might look redundant, but please forget about other drivers while declaring these),
        # deduping is handled automatically.
        ConstructPermission([f"arn:aws:logs:{self._region}:{self._account_id}:*"], ["logs:*"]),
        # must add a policy to allow your users the iam:PassRole permission for IAM roles to match your naming convention
        ConstructPermission([self._params[AWSCommonParams.IF_EXE_ROLE]], ["iam:PassRole"]),
        # TODO post-MVP evaluate External output support.
        # Currently we dont support it. So no write related permission should be granted for external signals.
        # And, for internals, it is obvious that Storage impl should have already granted our role with the
        # necessary permissions.
    ]
    # Collect the exact S3 ARNs of external libraries (JARs/zips) referenced by
    # slots across all pending internal routes, so the role can read them.
    external_library_resource_arns = set()
    for route in self._pending_internal_routes:
        for slot in route.slots:
            if slot.code_metadata.external_library_paths:
                for s3_path in slot.code_metadata.external_library_paths:
                    try:
                        s3_spec = S3SignalSourceAccessSpec.from_url(account_id=None, url=s3_path)
                    except Exception:
                        # Non-S3 (or malformed) library paths are unsupported here.
                        module_logger.error(
                            f"External library path {s3_path} attached to route {route.route_id!r} "
                            f" via slot: {(slot.type, slot.code_lang)!r} is not supported by "
                            f" BatchCompute driver {self.__class__.__name__!r}. "
                        )
                        raise
                    # exact resource (JARs, zips)
                    external_library_resource_arns.add(f"arn:aws:s3:::{s3_spec.bucket}/{s3_path[len(f's3://{s3_spec.bucket}/'):]}")
            # TODO Move into <BatchCompute>
            # TODO evalute moving is_batch_compute check even before the external library paths extraction.
            if slot.type.is_batch_compute() and slot.permissions:
                for compute_perm in slot.permissions:
                    # TODO check compute_perm feasibility in AWS Glue (check ARN, resource type, etc)
                    if compute_perm.context != PermissionContext.DEVTIME:
                        permissions.append(ConstructPermission(compute_perm.resource, compute_perm.action))
    if external_library_resource_arns:
        permissions.append(
            ConstructPermission(list(external_library_resource_arns), ["s3:GetObject", "s3:GetObjectVersion", "s3:ListBucket"])
        )
    # might look familiar (from Processor impl maybe), but please forget about other drivers while declaring these),
    # deduping is handled automatically.
    ext_s3_signals = [
        ext_signal for ext_signal in self._pending_external_signals if ext_signal.resource_access_spec.source == SignalSourceType.S3
    ]
    if ext_s3_signals:
        # External S3 access: both the objects under each signal's folder and
        # the folder resource itself.
        permissions.append(
            ConstructPermission(
                [
                    f"arn:aws:s3:::{ext_signal.resource_access_spec.bucket}{'/' + ext_signal.resource_access_spec.folder if ext_signal.resource_access_spec.folder else ''}/*"
                    for ext_signal in ext_s3_signals
                ]
                + [
                    f"arn:aws:s3:::{ext_signal.resource_access_spec.bucket}/{ext_signal.resource_access_spec.folder if ext_signal.resource_access_spec.folder else ''}"
                    for ext_signal in ext_s3_signals
                ],
                ["s3:GetObject", "s3:GetObjectVersion", "s3:ListBucket"],
            )
        )
        # KMS access for any encrypted external S3 signals.
        encryption_key_list: Set[str] = {
            ext_signal.resource_access_spec.encryption_key
            for ext_signal in ext_s3_signals
            if ext_signal.resource_access_spec.encryption_key
        }
        if encryption_key_list:
            permissions.append(
                ConstructPermission(
                    list(encryption_key_list),
                    [
                        "kms:Decrypt",
                        "kms:DescribeKey",
                        "kms:GenerateDataKey",
                        "kms:DescribeCustomKeyStores",
                        "kms:ListKeys",
                        "kms:ListAliases",
                    ],
                )
            )
    return permissions
@classmethod
def provide_devtime_permissions(cls, params: ConstructParamsDict) -> List[ConstructPermission]:
# dev-role permissions (things this construct would do during development)
# dev-role should be able to do the following.
bucket_name_format: str = cls.SCRIPTS_ROOT_FORMAT.format(
"awsglue".lower(), "*", params[AWSCommonParams.ACCOUNT_ID], params[AWSCommonParams.REGION]
)
return [
ConstructPermission([f"arn:aws:s3:::{bucket_name_format}", f"arn:aws:s3:::{bucket_name_format}/*"], ["s3:*"]),
# ConstructPermission(["*"], ["glue:*"]),
ConstructPermission(
[
f"arn:aws:glue:{params[AWSCommonParams.REGION]}:{params[AWSCommonParams.ACCOUNT_ID]}:catalog",
f"arn:aws:glue:{params[AWSCommonParams.REGION]}:{params[AWSCommonParams.ACCOUNT_ID]}:database/*",
],
["glue:GetDatabases"],
),
# Read-access into everything else in the same catalog
# Refer
# https://docs.aws.amazon.com/glue/latest/dg/glue-specifying-resource-arns.html
ConstructPermission(
[
f"arn:aws:glue:{params[AWSCommonParams.REGION]}:{params[AWSCommonParams.ACCOUNT_ID]}:catalog",
f"arn:aws:glue:{params[AWSCommonParams.REGION]}:{params[AWSCommonParams.ACCOUNT_ID]}:database/default",
f"arn:aws:glue:{params[AWSCommonParams.REGION]}:{params[AWSCommonParams.ACCOUNT_ID]}:database/*",
f"arn:aws:glue:{params[AWSCommonParams.REGION]}:{params[AWSCommonParams.ACCOUNT_ID]}:table/*/*",
# f"arn:aws:glue:{params[AWSCommonParams.REGION]}:{params[AWSCommonParams.ACCOUNT_ID]}:userDefinedFunction/*/*",
# f"arn:aws:glue:{params[AWSCommonParams.REGION]}:{params[AWSCommonParams.ACCOUNT_ID]}:connection/*",
],
[
"glue:GetDatabase",
"glue:GetTable",
"glue:GetTables",
"glue:GetPartition",
"glue:GetPartitions",
"glue:BatchGetPartition",
"glue:Get*",
"glue:BatchGet*",
],
),
# More permissive read access on other non-catalog entities
ConstructPermission(
["*"],
[
"glue:ListCrawlers",
"glue:BatchGetCrawlers",
"glue:ListDevEndpoints",
"glue:BatchGetDevEndpoints",
"glue:GetJob",
"glue:GetJobs",
"glue:ListJobs",
"glue:BatchGetJobs",
"glue:GetJobRun",
"glue:GetJobRuns",
"glue:GetJobBookmark",
"glue:GetJobBookmarks",
"glue:GetTrigger",
"glue:GetTriggers",
"glue:ListTriggers",
"glue:BatchGetTriggers",
],
),
# and finally: full-authorization on activation and (local) compute time permissions (on its own resources)
ConstructPermission(
[
f"arn:aws:glue:{params[AWSCommonParams.REGION]}:{params[AWSCommonParams.ACCOUNT_ID]}:job/{cls.GLUE_JOB_NAME_FORMAT.format(cls.__name__, '*', '*', params[AWSCommonParams.REGION])}"
],
["glue:*"],
),
# TODO dev-role should have the right to do BucketNotification on external signals
# this would require post-MVP design change on how dev-role is used and when it is updated.
# probably during the activation again (switching to the admin credentails if authorization is given).
]
def _provide_route_metrics(self, route: Route) -> List[ConstructInternalMetricDesc]:
# TODO
return []
def _provide_internal_metrics(self) -> List[ConstructInternalMetricDesc]:
"""Provide internal metrics (of type INTERNAL_METRIC) that should be managed by RheocerOS and emitted by this
driver via Diagnostics::emit.
These metrics are logical metrics generated by the driver (with no assumption on other drivers and other details
about the underlying platform). So as a driver impl, you want Diagnostics driver to manage those metrics and
bind them to alarms, etc. Example: Routing metrics.
"""
return []
def _provide_internal_alarms(self) -> List[Signal]:
"""Provide internal alarms (of type INTERNAL_ALARM OR INTERNAL_COMPOSITE_ALARM) managed/emitted
by this driver impl"""
return []
# overrides
def _provide_system_metrics(self) -> List[Signal]:
"""Expose system generated metrics to the rest of the platform in a consolidated, filtered and
well-defined RheocerOS metric signal format.
"""
# where dimension is Type='count'
job_level_COUNT_metrics = [
"glue.driver.aggregate.bytesRead",
"glue.driver.aggregate.elapsedTime",
"glue.driver.aggregate.numCompletedStages",
"glue.driver.aggregate.numCompletedTasks",
"glue.driver.aggregate.numFailedTask",
"glue.driver.aggregate.numKilledTasks",
"glue.driver.aggregate.recordsRead",
"glue.driver.aggregate.shuffleBytesWritten",
"glue.driver.aggregate.shuffleLocalBytesRead",
]
job_level_GAUGE_metrics = [
"glue.driver.BlockManager.disk.diskSpaceUsed_MB",
"glue.driver.jvm.heap.usage",
"glue.driver.jvm.heap.used",
"glue.driver.s3.filesystem.read_bytes",
"glue.driver.s3.filesystem.write_bytes",
"glue.driver.system.cpuSystemLoad",
"glue.ALL.jvm.heap.usage",
"glue.ALL.jvm.heap.used",
"glue.ALL.s3.filesystem.read_bytes",
"glue.ALL.s3.filesystem.write_bytes",
"glue.ALL.system.cpuSystemLoad",
]
return [
Signal(
SignalType.CW_METRIC_DATA_CREATION,
CWMetricSignalSourceAccessSpec(
"Glue",
{"JobName": self._glue_job_lang_map[lang][version]["job_name"], "Type": "count", "JobRunId": "ALL"},
# metadata (should be visible in front-end as well)
**{"Notes": "Supports 1 min period"},
),
SignalDomainSpec(
dimension_filter_spec=DimensionFilter.load_raw(
{
metric_name: { # Name (overwrite in filter spec to make them visible to the user. otherwise user should specify the name,
# in that case platform abstraction is broken since user should have a very clear idea about what is
# providing these metrics).
"*": { # Statistic
"*": { # Period (AWS emits with 1 min by default), let user decide.
"*": {} # Time always leave it 'any' if not experimenting.
}
}
}
for metric_name in job_level_COUNT_metrics
}
),
dimension_spec=None,
integrity_check_protocol=None,
),
# make sure that default metric alias/ID complies with CW expectation (first letter lower case).
f"batchCompute.typeCount.{lang.value}.{version}",
)
for lang in self._glue_job_lang_map.keys()
for version in self._glue_job_lang_map[lang].keys()
] + [
Signal(
SignalType.CW_METRIC_DATA_CREATION,
CWMetricSignalSourceAccessSpec(
"Glue", {"JobName": self._glue_job_lang_map[lang][version]["job_name"], "Type": "gauge", "JobRunId": "ALL"}
),
SignalDomainSpec(
dimension_filter_spec=DimensionFilter.load_raw(
{metric_name: {"*": {"*": {"*": {}}}} for metric_name in | |
self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Retrieves the list of ForwardingRule resources available to the specified project.
Args:
request: (ComputeGlobalForwardingRulesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ForwardingRuleList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def SetTarget(self, request, global_params=None):
"""Changes target url for forwarding rule.
Args:
request: (ComputeGlobalForwardingRulesSetTargetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('SetTarget')
return self._RunMethod(
config, request, global_params=global_params)
  class GlobalOperationsService(base_api.BaseApiService):
    """Service class for the globalOperations resource."""
    # NOTE: generated client code — do not edit by hand.

    _NAME = u'globalOperations'

    def __init__(self, client):
      super(ComputeAlpha.GlobalOperationsService, self).__init__(client)
      # Declarative wire-level description of each API method; consumed by
      # base_api.BaseApiService._RunMethod when building HTTP requests.
      self._method_configs = {
          'AggregatedList': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.globalOperations.aggregatedList',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[u'filter', u'maxResults', u'pageToken'],
              relative_path=u'projects/{project}/aggregated/operations',
              request_field='',
              request_type_name=u'ComputeGlobalOperationsAggregatedListRequest',
              response_type_name=u'OperationAggregatedList',
              supports_download=False,
          ),
          'Delete': base_api.ApiMethodInfo(
              http_method=u'DELETE',
              method_id=u'compute.globalOperations.delete',
              ordered_params=[u'project', u'operation'],
              path_params=[u'operation', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/operations/{operation}',
              request_field='',
              request_type_name=u'ComputeGlobalOperationsDeleteRequest',
              response_type_name=u'ComputeGlobalOperationsDeleteResponse',
              supports_download=False,
          ),
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.globalOperations.get',
              ordered_params=[u'project', u'operation'],
              path_params=[u'operation', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/operations/{operation}',
              request_field='',
              request_type_name=u'ComputeGlobalOperationsGetRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.globalOperations.list',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[u'filter', u'maxResults', u'pageToken'],
              relative_path=u'projects/{project}/global/operations',
              request_field='',
              request_type_name=u'ComputeGlobalOperationsListRequest',
              response_type_name=u'OperationList',
              supports_download=False,
          ),
          }
      # This service exposes no media-upload methods.
      self._upload_configs = {
          }

    def AggregatedList(self, request, global_params=None):
      """Retrieves the list of all operations grouped by scope.

      Args:
        request: (ComputeGlobalOperationsAggregatedListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (OperationAggregatedList) The response message.
      """
      config = self.GetMethodConfig('AggregatedList')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Delete(self, request, global_params=None):
      """Deletes the specified operation resource.

      Args:
        request: (ComputeGlobalOperationsDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ComputeGlobalOperationsDeleteResponse) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Get(self, request, global_params=None):
      """Retrieves the specified operation resource.

      Args:
        request: (ComputeGlobalOperationsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    def List(self, request, global_params=None):
      """Retrieves the list of operation resources contained within the specified project.

      Args:
        request: (ComputeGlobalOperationsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (OperationList) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
  class HttpHealthChecksService(base_api.BaseApiService):
    """Service class for the httpHealthChecks resource."""
    # NOTE: generated client code — do not edit by hand.

    _NAME = u'httpHealthChecks'

    def __init__(self, client):
      super(ComputeAlpha.HttpHealthChecksService, self).__init__(client)
      # Declarative wire-level description of each API method; consumed by
      # base_api.BaseApiService._RunMethod when building HTTP requests.
      self._method_configs = {
          'Delete': base_api.ApiMethodInfo(
              http_method=u'DELETE',
              method_id=u'compute.httpHealthChecks.delete',
              ordered_params=[u'project', u'httpHealthCheck'],
              path_params=[u'httpHealthCheck', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/httpHealthChecks/{httpHealthCheck}',
              request_field='',
              request_type_name=u'ComputeHttpHealthChecksDeleteRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.httpHealthChecks.get',
              ordered_params=[u'project', u'httpHealthCheck'],
              path_params=[u'httpHealthCheck', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/httpHealthChecks/{httpHealthCheck}',
              request_field='',
              request_type_name=u'ComputeHttpHealthChecksGetRequest',
              response_type_name=u'HttpHealthCheck',
              supports_download=False,
          ),
          'Insert': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'compute.httpHealthChecks.insert',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/httpHealthChecks',
              request_field=u'httpHealthCheck',
              request_type_name=u'ComputeHttpHealthChecksInsertRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.httpHealthChecks.list',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[u'filter', u'maxResults', u'pageToken'],
              relative_path=u'projects/{project}/global/httpHealthChecks',
              request_field='',
              request_type_name=u'ComputeHttpHealthChecksListRequest',
              response_type_name=u'HttpHealthCheckList',
              supports_download=False,
          ),
          'Patch': base_api.ApiMethodInfo(
              http_method=u'PATCH',
              method_id=u'compute.httpHealthChecks.patch',
              ordered_params=[u'project', u'httpHealthCheck'],
              path_params=[u'httpHealthCheck', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/httpHealthChecks/{httpHealthCheck}',
              request_field=u'httpHealthCheckResource',
              request_type_name=u'ComputeHttpHealthChecksPatchRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'Update': base_api.ApiMethodInfo(
              http_method=u'PUT',
              method_id=u'compute.httpHealthChecks.update',
              ordered_params=[u'project', u'httpHealthCheck'],
              path_params=[u'httpHealthCheck', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/httpHealthChecks/{httpHealthCheck}',
              request_field=u'httpHealthCheckResource',
              request_type_name=u'ComputeHttpHealthChecksUpdateRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          }
      # This service exposes no media-upload methods.
      self._upload_configs = {
          }

    def Delete(self, request, global_params=None):
      """Deletes the specified HttpHealthCheck resource.

      Args:
        request: (ComputeHttpHealthChecksDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Get(self, request, global_params=None):
      """Returns the specified HttpHealthCheck resource.

      Args:
        request: (ComputeHttpHealthChecksGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (HttpHealthCheck) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Insert(self, request, global_params=None):
      """Creates a HttpHealthCheck resource in the specified project using the data included in the request.

      Args:
        request: (ComputeHttpHealthChecksInsertRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Insert')
      return self._RunMethod(
          config, request, global_params=global_params)

    def List(self, request, global_params=None):
      """Retrieves the list of HttpHealthCheck resources available to the specified project.

      Args:
        request: (ComputeHttpHealthChecksListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (HttpHealthCheckList) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Patch(self, request, global_params=None):
      """Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.

      Args:
        request: (ComputeHttpHealthChecksPatchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Patch')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Update(self, request, global_params=None):
      """Updates a HttpHealthCheck resource in the specified project using the data included in the request.

      Args:
        request: (ComputeHttpHealthChecksUpdateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Update')
      return self._RunMethod(
          config, request, global_params=global_params)
  class HttpsHealthChecksService(base_api.BaseApiService):
    """Service class for the httpsHealthChecks resource."""
    # NOTE: generated client code — do not edit by hand.

    _NAME = u'httpsHealthChecks'

    def __init__(self, client):
      super(ComputeAlpha.HttpsHealthChecksService, self).__init__(client)
      # Declarative wire-level description of each API method; consumed by
      # base_api.BaseApiService._RunMethod when building HTTP requests.
      self._method_configs = {
          'Delete': base_api.ApiMethodInfo(
              http_method=u'DELETE',
              method_id=u'compute.httpsHealthChecks.delete',
              ordered_params=[u'project', u'httpsHealthCheck'],
              path_params=[u'httpsHealthCheck', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}',
              request_field='',
              request_type_name=u'ComputeHttpsHealthChecksDeleteRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.httpsHealthChecks.get',
              ordered_params=[u'project', u'httpsHealthCheck'],
              path_params=[u'httpsHealthCheck', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}',
              request_field='',
              request_type_name=u'ComputeHttpsHealthChecksGetRequest',
              response_type_name=u'HttpsHealthCheck',
              supports_download=False,
          ),
          'Insert': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'compute.httpsHealthChecks.insert',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/httpsHealthChecks',
              request_field=u'httpsHealthCheck',
              request_type_name=u'ComputeHttpsHealthChecksInsertRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.httpsHealthChecks.list',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[u'filter', u'maxResults', u'pageToken'],
              relative_path=u'projects/{project}/global/httpsHealthChecks',
              request_field='',
              request_type_name=u'ComputeHttpsHealthChecksListRequest',
              response_type_name=u'HttpsHealthCheckList',
              supports_download=False,
          ),
          'Patch': base_api.ApiMethodInfo(
              http_method=u'PATCH',
              method_id=u'compute.httpsHealthChecks.patch',
              ordered_params=[u'project', u'httpsHealthCheck'],
              path_params=[u'httpsHealthCheck', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}',
              request_field=u'httpsHealthCheckResource',
              request_type_name=u'ComputeHttpsHealthChecksPatchRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'Update': base_api.ApiMethodInfo(
              http_method=u'PUT',
              method_id=u'compute.httpsHealthChecks.update',
              ordered_params=[u'project', u'httpsHealthCheck'],
              path_params=[u'httpsHealthCheck', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}',
              request_field=u'httpsHealthCheckResource',
              request_type_name=u'ComputeHttpsHealthChecksUpdateRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          }
      # This service exposes no media-upload methods.
      self._upload_configs = {
          }

    def Delete(self, request, global_params=None):
      """Deletes the specified HttpsHealthCheck resource.

      Args:
        request: (ComputeHttpsHealthChecksDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Get(self, request, global_params=None):
      """Returns the specified HttpsHealthCheck resource.

      Args:
        request: (ComputeHttpsHealthChecksGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (HttpsHealthCheck) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Insert(self, request, global_params=None):
      """Creates a HttpsHealthCheck resource in the specified project using the data included in the request.

      Args:
        request: (ComputeHttpsHealthChecksInsertRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Insert')
      return self._RunMethod(
          config, request, global_params=global_params)

    def List(self, request, global_params=None):
      """Retrieves the list of HttpsHealthCheck resources available to the specified project.

      Args:
        request: (ComputeHttpsHealthChecksListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (HttpsHealthCheckList) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Patch(self, request, global_params=None):
      """Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.

      Args:
        request: (ComputeHttpsHealthChecksPatchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Patch')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Update(self, request, global_params=None):
      """Updates a HttpsHealthCheck resource in the specified project using the data included in the request.

      Args:
        request: (ComputeHttpsHealthChecksUpdateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Update')
      return self._RunMethod(
          config, request, global_params=global_params)
class ImagesService(base_api.BaseApiService):
"""Service class for the images resource."""
_NAME = u'images'
    def __init__(self, client):
      super(ComputeAlpha.ImagesService, self).__init__(client)
      # Declarative wire-level description of each API method; consumed by
      # base_api.BaseApiService._RunMethod when building HTTP requests.
      self._method_configs = {
          'Delete': base_api.ApiMethodInfo(
              http_method=u'DELETE',
              method_id=u'compute.images.delete',
              ordered_params=[u'project', u'image'],
              path_params=[u'image', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/images/{image}',
              request_field='',
              request_type_name=u'ComputeImagesDeleteRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'Deprecate': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'compute.images.deprecate',
              ordered_params=[u'project', u'image'],
              path_params=[u'image', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/images/{image}/deprecate',
              request_field=u'deprecationStatus',
              request_type_name=u'ComputeImagesDeprecateRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.images.get',
              ordered_params=[u'project', u'image'],
              path_params=[u'image', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/images/{image}',
              request_field='',
              request_type_name=u'ComputeImagesGetRequest',
              response_type_name=u'Image',
              supports_download=False,
          ),
          'Insert': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'compute.images.insert',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/images',
              request_field=u'image',
              request_type_name=u'ComputeImagesInsertRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.images.list',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[u'filter', u'maxResults', u'pageToken'],
              relative_path=u'projects/{project}/global/images',
              request_field='',
              request_type_name=u'ComputeImagesListRequest',
              response_type_name=u'ImageList',
              supports_download=False,
          ),
          }
      # This service exposes no media-upload methods.
      self._upload_configs = {
          }
def Delete(self, request, global_params=None):
"""Deletes the specified image resource.
Args:
request: (ComputeImagesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Deprecate(self, request, global_params=None):
"""Sets the deprecation status of an image.
If an empty request body is given, clears the deprecation status instead.
Args:
request: (ComputeImagesDeprecateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Deprecate')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns the specified image resource.
Args:
request: (ComputeImagesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Image) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates an image resource in the specified project using the data included in the request.
Args:
request: (ComputeImagesInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, | |
getattr(self, "tauBand", None) is None:
self.windowRange.tauBand = 0
else:
if not isinstance(self.tauBand, int):
logging.warn(
"Casting non-integer tauBand={} to int...".format(
self.tauBand
)
)
self.tauBand = int(self.tauBand)
self.windowRange.tauBand = self.tauBand
if self.dtau:
self.windowRange.dtau = self.dtau
if self.tauMin is None:
self.windowRange.tau = int(2 * self.Tsft)
else:
if not isinstance(self.tauMin, int):
logging.warn(
"Casting non-integer tauMin={} to int...".format(
self.tauMin
)
)
self.tauMin = int(self.tauMin)
self.windowRange.tau = self.tauMin
logging.info("Initialising transient FstatMap features...")
(
self.tCWFstatMapFeatures,
self.gpu_context,
) = tcw.init_transient_fstat_map_features(
self.tCWFstatMapVersion == "pycuda", self.cudaDeviceName
)
if self.BSGL:
self.twoFXatMaxTwoF = np.zeros(lalpulsar.PULSAR_MAX_DETECTORS)
    def _set_min_max_cover_freqs(self):
        """Decide on and validate the covering band [minCoverFreq, maxCoverFreq].

        Resolution order:
        * both values given as positive numbers by the user: used as-is;
        * both None with search_ranges set: estimated via
          estimate_min_max_CoverFreq();
        * negative values (requires sftfilepattern): interpreted as offsets
          from the min/max frequencies found in the SFTs.

        Raises IOError/ValueError-style errors: ValueError if only one of the
        two values is given, if neither values nor search_ranges are available,
        if negative offsets are requested without sftfilepattern, or if the
        final range is not covered by the SFT content.
        """
        # decide on which minCoverFreq and maxCoverFreq to use:
        # either from direct user input, estimate_min_max_CoverFreq(), or SFTs
        if self.sftfilepattern is not None:
            minFreq_SFTs, maxFreq_SFTs = self._get_min_max_freq_from_SFTCatalog()
        if (self.minCoverFreq is None) != (self.maxCoverFreq is None):
            raise ValueError(
                "Please use either both or none of [minCoverFreq,maxCoverFreq]."
            )
        elif (
            self.minCoverFreq is None
            and self.maxCoverFreq is None
            and self.search_ranges is None
        ):
            raise ValueError(
                "Please use either search_ranges or both of [minCoverFreq,maxCoverFreq]."
            )
        elif self.minCoverFreq is None or self.maxCoverFreq is None:
            # both are None here (the first branch caught the mixed case)
            logging.info(
                "[minCoverFreq,maxCoverFreq] not provided, trying to estimate"
                " from search ranges."
            )
            self.estimate_min_max_CoverFreq()
        elif (self.minCoverFreq < 0.0) or (self.maxCoverFreq < 0.0):
            if self.sftfilepattern is None:
                raise ValueError(
                    "If sftfilepattern==None, cannot use negative values for"
                    " minCoverFreq or maxCoverFreq (interpreted as offsets from"
                    " min/max SFT frequency)."
                    " Please use actual frequency values for both,"
                    " or set both to None (automated estimation)."
                )
            if self.minCoverFreq < 0.0:
                logging.info(
                    "minCoverFreq={:f} provided, using as offset from min(SFTs).".format(
                        self.minCoverFreq
                    )
                )
                # to set *above* min, since minCoverFreq is negative: subtract it
                self.minCoverFreq = minFreq_SFTs - self.minCoverFreq
            if self.maxCoverFreq < 0.0:
                logging.info(
                    "maxCoverFreq={:f} provided, using as offset from max(SFTs).".format(
                        self.maxCoverFreq
                    )
                )
                # to set *below* max, since maxCoverFreq is negative: add it
                self.maxCoverFreq = maxFreq_SFTs + self.maxCoverFreq
        # final sanity check against the actual SFT content, if available
        if (self.sftfilepattern is not None) and (
            (self.minCoverFreq < minFreq_SFTs) or (self.maxCoverFreq > maxFreq_SFTs)
        ):
            raise ValueError(
                "[minCoverFreq,maxCoverFreq]=[{:f},{:f}] Hz incompatible with"
                " SFT files content [{:f},{:f}] Hz".format(
                    self.minCoverFreq, self.maxCoverFreq, minFreq_SFTs, maxFreq_SFTs
                )
            )
        logging.info(
            "Using minCoverFreq={} and maxCoverFreq={}.".format(
                self.minCoverFreq, self.maxCoverFreq
            )
        )
def _get_min_max_freq_from_SFTCatalog(self):
fAs = [d.header.f0 for d in self.SFTCatalog.data]
minFreq_SFTs = np.min(fAs)
fBs = [
d.header.f0 + (d.numBins - 1) * d.header.deltaF
for d in self.SFTCatalog.data
]
maxFreq_SFTs = np.max(fBs)
return minFreq_SFTs, maxFreq_SFTs
def estimate_min_max_CoverFreq(self):
"""Extract spanned spin-range at reference -time from the template bank.
To use this method, self.search_ranges must be a dictionary of lists per search parameter
which can be either [single_value], [min,max] or [min,max,step].
"""
if type(self.search_ranges) is not dict:
raise ValueError("Need a dictionary for search_ranges!")
range_keys = list(self.search_ranges.keys())
required_keys = ["Alpha", "Delta", "F0"]
if len(np.setdiff1d(required_keys, range_keys)) > 0:
raise ValueError(
"Required keys not found in search_ranges: {}".format(
np.setdiff1d(required_keys, range_keys)
)
)
for key in range_keys:
if (
type(self.search_ranges[key]) is not list
or len(self.search_ranges[key]) == 0
or len(self.search_ranges[key]) > 3
):
raise ValueError(
"search_ranges entry for {:s}"
" is not a list of a known format"
" (either [single_value], [min,max]"
" or [min,max,step]): {}".format(key, self.search_ranges[key])
)
# start by constructing a DopplerRegion structure
# which will be needed to conservatively account for sky-position dependent
# Doppler shifts of the frequency range to be covered
searchRegion = lalpulsar.DopplerRegion()
# sky region
Alpha = self.search_ranges["Alpha"][0]
AlphaBand = (
self.search_ranges["Alpha"][1] - Alpha
if len(self.search_ranges["Alpha"]) >= 2
else 0.0
)
Delta = self.search_ranges["Delta"][0]
DeltaBand = (
self.search_ranges["Delta"][1] - Delta
if len(self.search_ranges["Delta"]) >= 2
else 0.0
)
searchRegion.skyRegionString = lalpulsar.SkySquare2String(
Alpha,
Delta,
AlphaBand,
DeltaBand,
)
searchRegion.refTime = self.tref
# frequency and spindowns
searchRegion.fkdot = np.zeros(lalpulsar.PULSAR_MAX_SPINS)
searchRegion.fkdotBand = np.zeros(lalpulsar.PULSAR_MAX_SPINS)
for k in range(3):
Fk = "F{:d}".format(k)
if Fk in range_keys:
searchRegion.fkdot[k] = self.search_ranges[Fk][0]
searchRegion.fkdotBand[k] = (
self.search_ranges[Fk][1] - self.search_ranges[Fk][0]
if len(self.search_ranges[Fk]) >= 2
else 0.0
)
# now construct DopplerFullScan from searchRegion
scanInit = lalpulsar.DopplerFullScanInit()
scanInit.searchRegion = searchRegion
scanInit.stepSizes = lalpulsar.PulsarDopplerParams()
scanInit.stepSizes.refTime = self.tref
scanInit.stepSizes.Alpha = (
self.search_ranges["Alpha"][-1]
if len(self.search_ranges["Alpha"]) == 3
else 0.001 # fallback, irrelevant for band estimate but must be > 0
)
scanInit.stepSizes.Delta = (
self.search_ranges["Delta"][-1]
if len(self.search_ranges["Delta"]) == 3
else 0.001 # fallback, irrelevant for band estimate but must be > 0
)
scanInit.stepSizes.fkdot = np.zeros(lalpulsar.PULSAR_MAX_SPINS)
for k in range(3):
if Fk in range_keys:
Fk = "F{:d}".format(k)
scanInit.stepSizes.fkdot[k] = (
self.search_ranges[Fk][-1]
if len(self.search_ranges[Fk]) == 3
else 0.0
)
scanInit.startTime = self.minStartTime
scanInit.Tspan = float(self.maxStartTime - self.minStartTime)
scanState = lalpulsar.InitDopplerFullScan(scanInit)
# now obtain the PulsarSpinRange extended over all relevant Doppler shifts
spinRangeRef = lalpulsar.PulsarSpinRange()
lalpulsar.GetDopplerSpinRange(spinRangeRef, scanState)
# optional: binary parameters
if "asini" in range_keys:
if len(self.search_ranges["asini"]) >= 2:
maxOrbitAsini = self.search_ranges["asini"][1]
else:
maxOrbitAsini = self.search_ranges["asini"][0]
else:
maxOrbitAsini = 0.0
if "period" in range_keys:
minOrbitPeriod = self.search_ranges["period"][0]
else:
minOrbitPeriod = 0.0
if "ecc" in range_keys:
if len(self.search_ranges["ecc"]) >= 2:
maxOrbitEcc = self.search_ranges["ecc"][1]
else:
maxOrbitEcc = self.search_ranges["ecc"][0]
else:
maxOrbitEcc = 0.0
# finally call the wrapped lalpulsar estimation function with the
# extended PulsarSpinRange and optional binary parameters
self.minCoverFreq, self.maxCoverFreq = helper_functions.get_covering_band(
tref=self.tref,
tstart=self.minStartTime,
tend=self.maxStartTime,
F0=spinRangeRef.fkdot[0],
F1=spinRangeRef.fkdot[1],
F2=spinRangeRef.fkdot[2],
F0band=spinRangeRef.fkdotBand[0],
F1band=spinRangeRef.fkdotBand[1],
F2band=spinRangeRef.fkdotBand[2],
maxOrbitAsini=maxOrbitAsini,
minOrbitPeriod=minOrbitPeriod,
maxOrbitEcc=maxOrbitEcc,
)
def get_fullycoherent_detstat(
    self,
    F0,
    F1,
    F2,
    Alpha,
    Delta,
    asini=None,
    period=None,
    ecc=None,
    tp=None,
    argp=None,
    tstart=None,
    tend=None,
):
    """Compute the fully-coherent detection statistic at a single point.

    Returns either twoF or log10BSGL depending on `self.BSGL`; the values
    are also stored on `self.twoF` / `self.log10BSGL`. As the basic
    statistic of this class, `self.twoF` is computed in every case.
    With `self.BSGL` enabled, the single-detector 2F-stat values are
    additionally saved in `self.twoFX`.
    If transient parameters are enabled (`self.transientWindowType` is set),
    the full transient-F-stat map is also computed here, but stored in
    `self.FstatMap` rather than returned.

    Parameters
    ----------
    F0, F1, F2, Alpha, Delta: float
        Parameters at which to compute the statistic.
    asini, period, ecc, tp, argp: float, optional
        Optional: Binary parameters at which to compute the statistic.
    tstart, tend: int or None
        GPS times to restrict the range of data used.
        If None: falls back to self.minStartTime and self.maxStartTime.
        Only forwarded to the transient computation, i.e. only used
        when `self.transientWindowType` is set.

    Returns
    -------
    stat: float
        A single value of the detection statistic (twoF or log10BSGL)
        at the input parameter values.
        Also stored as `self.twoF` or `self.log10BSGL`.
    """
    self.get_fullycoherent_twoF(
        F0, F1, F2, Alpha, Delta, asini, period, ecc, tp, argp
    )
    if self.transientWindowType:
        # Transient case: maximize 2F over the transient window first.
        self.get_transient_maxTwoFstat(tstart, tend)
        if self.BSGL is False:
            return self.maxTwoF
        return self.get_transient_log10BSGL()
    # Plain fully-coherent case.
    if self.BSGL is False:
        return self.twoF
    self.get_fullycoherent_single_IFO_twoFs()
    self.get_fullycoherent_log10BSGL()
    return self.log10BSGL
def get_fullycoherent_twoF(
    self,
    F0,
    F1,
    F2,
    Alpha,
    Delta,
    asini=None,
    period=None,
    ecc=None,
    tp=None,
    argp=None,
):
    """Computes the fully-coherent 2F statistic at a single point.

    NOTE: This always uses the full data set as defined when initialising
    the search object.
    If you want to restrict the range of data used for a single 2F
    computation, you need to set a `self.transientWindowType` and then call
    `self.get_fullycoherent_detstat()` with `tstart` and `tend` options
    instead of this function.

    Parameters
    ----------
    F0, F1, F2, Alpha, Delta: float
        Parameters at which to compute the statistic.
    asini, period, ecc, tp, argp: float, optional
        Optional: Binary parameters at which to compute the statistic.
        Only read if `self.binary` is set.

    Returns
    -------
    twoF: float
        A single value of the fully-coherent 2F statistic
        at the input parameter values.
        Also stored as `self.twoF`.
    """
    self.PulsarDopplerParams.fkdot = np.zeros(lalpulsar.PULSAR_MAX_SPINS)
    self.PulsarDopplerParams.fkdot[:3] = [F0, F1, F2]
    self.PulsarDopplerParams.Alpha = float(Alpha)
    self.PulsarDopplerParams.Delta = float(Delta)
    if self.binary:
        self.PulsarDopplerParams.asini = float(asini)
        self.PulsarDopplerParams.period = float(period)
        self.PulsarDopplerParams.ecc = float(ecc)
        self.PulsarDopplerParams.tp = float(tp)
        self.PulsarDopplerParams.argp = float(argp)
    lalpulsar.ComputeFstat(
        Fstats=self.FstatResults,
        input=self.FstatInput,
        doppler=self.PulsarDopplerParams,
        numFreqBins=1,
        whatToCompute=self.whatToCompute,
    )
    # We operate on a single frequency bin, so we grab the 0 component
    # of what is internally a twoF array.
    # FIX: use the builtin float() here; the `np.float` alias was
    # deprecated in NumPy 1.20 and removed in NumPy 1.24, so the old
    # `np.float(...)` call raises AttributeError on modern NumPy.
    self.twoF = float(self.FstatResults.twoF[0])
    return self.twoF
def get_fullycoherent_single_IFO_twoFs(self):
    """Extract the per-detector 2F values from the last F-stat call.

    This requires `self.get_fullycoherent_twoF()` to be run first.

    Returns
    -------
    twoFX: list
        A list of the single-detector detection statistics twoF.
        Also stored as `self.twoFX`.
    """
    num_detectors = self.FstatResults.numDetectors
    per_detector = [
        self.FstatResults.twoFPerDet(det) for det in range(num_detectors)
    ]
    self.twoFX[:num_detectors] = per_detector
    return self.twoFX
def get_fullycoherent_log10BSGL(self):
"""Computes the line-robust statistic log10BSGL at a single point.
This requires `self.get_fullycoherent_twoF()`
and `self.get_fullycoherent_single_IFO_twoFs()`
to be run first.
Returns
-------
log10BSGL: float
A single value of the detection statistic log10BSGL
at the input parameter values.
Also stored as | |
i1))
dims = (i2, fi, i3, gi, i1)
self.h = aobj.hfarray(np.zeros((1, 2, 1, 3, 1)), dims=dims)
dims = (i4, i5)
class _Test_hfarray(MakeData, TestCase):
    # Concrete base combining the MakeData array fixtures with TestCase;
    # the Test_hfarray_* suites below inherit the prepared arrays from it.
    pass
class Test_hfarray_checkinstance(TestCase):
    """check_instance must defer to arrays with a higher __array_priority__."""

    def setUp(self):
        class VArray(aobj._hfarray):
            __array_priority__ = 10

        self.VArray = VArray

    def test_1(self):
        prioritized = self.VArray([1])
        plain = aobj.hfarray([2])
        # None stands in for a function that must never be invoked.
        wrapped = aobj.check_instance(None)
        self.assertEqual(wrapped(plain, prioritized), NotImplemented)
class Test_hfarray_1(_Test_hfarray):
    """Core hfarray behaviour: copying, construction, indexing,
    dims bookkeeping, deprecation warnings and output formatting."""

    # --- copy semantics -------------------------------------------------
    def test_init_copy_1(self):
        # Default construction copies the data: mutating b leaves a intact.
        b = aobj.hfarray(self.a)
        b[0] = 1
        self.assertAllclose(self.a, 0)
        self.assertAllclose(b, 1)

    def test_init_copy_2(self):
        # copy=False shares the underlying buffer with the source array.
        b = aobj.hfarray(self.a, copy=False)
        b[0] = 1
        self.assertAllclose(self.a, 1)
        self.assertAllclose(b, 1)

    def test_copy_1(self):
        # .copy() also yields an independent array.
        b = self.a.copy()
        b[0] = 1
        self.assertAllclose(self.a, 0)
        self.assertAllclose(b, 1)

    # --- construction from plain ndarrays and dim objects ---------------
    def test_init_1(self):
        # 1-D input gets a default "freq" sweep dimension.
        a = np.array([1, 2, 3])
        A = aobj.hfarray(a)
        self.assertEqual(A.dims, (ds.DimSweep("freq", 3),))
        self.assertAllclose(a, A)

    def test_init_2(self):
        # 2-D input defaults to (freq sweep, rep) dimensions.
        a = np.array([[1, 2, 3]] * 4)
        A = aobj.hfarray(a)
        self.assertEqual(A.dims, (ds.DimSweep("freq", 4), ds.DimRep("rep", 3)))
        self.assertAllclose(a, A)

    def test_init_3(self):
        # outputformat override is honoured; unit/dims come from the source.
        a = aobj.hfarray(self.a, outputformat="%.3f")
        self.assertEqual(a.unit, self.a.unit)
        self.assertEqual(a.dims, self.a.dims)
        self.assertEqual(a.outputformat, "%.3f")
        self.assertAllclose(a, self.a)

    def test_init_4(self):
        # Building from a dim object inherits its unit and outputformat.
        fi = aobj.DimSweep("f", [12, 13, 14], unit="Hz", outputformat="%.3f")
        a = aobj.hfarray(fi)
        self.assertEqual(a.outputformat, "%.3f")
        self.assertEqual(a.unit, "Hz")
        self.assertEqual(a.dims, (fi,))
        self.assertAllclose(a, [12, 13, 14])

    def test_init_5(self):
        # An explicit unit= argument overrides the dim's unit.
        fi = aobj.DimSweep("f", [12, 13, 14], unit="m", outputformat="%.3f")
        a = aobj.hfarray(fi, unit="Hz")
        self.assertEqual(a.unit, "Hz")
        self.assertEqual(a.outputformat, "%.3f")
        self.assertEqual(a.dims, (fi,))
        self.assertAllclose(a, [12, 13, 14])

    def test_init_6(self):
        # Both unit= and outputformat= overrides at once.
        fi = aobj.DimSweep("f", [12, 13, 14], unit="m", outputformat="%.3f")
        a = aobj.hfarray(fi, unit="Hz", outputformat="%.5f")
        self.assertEqual(a.unit, "Hz")
        self.assertEqual(a.dims, (fi,))
        self.assertEqual(a.outputformat, "%.5f")
        self.assertAllclose(a, [12, 13, 14])

    def test_init_error_1(self):
        # 3-D input has no default dims guess, so construction must raise.
        a = np.array([[[1, 2, 3]] * 3] * 2)
        self.assertRaises(aobj.DimensionMismatchError, aobj.hfarray, a)

    # --- basic indexing -------------------------------------------------
    def test_indexing_1(self):
        self.assertAllclose(self.b[0], np.array([[1, 2 + 0j], [3, 4]]))

    def test_indexing_2(self):
        facit = (np.array([[[1, 2 + 0j], [3, 4]]]) +
                 np.arange(2)[:, newaxis, newaxis])
        self.assertAllclose(self.b[:2], facit)

    def test_indexing_3(self):
        # Strided slicing along the first (sweep) axis.
        facit = (np.array([[[1, 2 + 0j], [3, 4]]]) +
                 np.arange(0, 10, 2)[:, newaxis, newaxis])
        self.assertAllclose(self.b[::2], facit)

    # --- dims/shape consistency checks ----------------------------------
    def test_verify_dimension_1(self):
        self.assertIsNone(self.a.verify_dimension())

    def test_verify_dimension_2(self):
        self.assertIsNone(self.b.verify_dimension())

    def test_verify_dimension_error_1(self):
        # Dropping a dim makes dims/shape inconsistent.
        self.a.dims = self.a.dims[:-1]
        self.assertRaises(aobj.HFArrayShapeDimsMismatchError,
                          self.a.verify_dimension)

    # --- deprecated .info API -------------------------------------------
    def test_info_deprecation_1(self):
        # Reading .info warns; suppressing the warning still allows access.
        a = aobj.hfarray(1)
        reset_hftools_warnings()
        self.assertHFToolsDeprecationWarning(lambda x: x.info, a)
        with warnings.catch_warnings():
            warnings.resetwarnings()
            warnings.simplefilter("ignore", HFToolsDeprecationWarning)
            a.info

    def test_set_info_deprecation_1(self):
        # Writing .info also warns.
        a = aobj.hfarray(1)
        reset_hftools_warnings()
        self.assertHFToolsDeprecationWarning(setattr, a, "info", ())
        with warnings.catch_warnings():
            warnings.resetwarnings()
            warnings.simplefilter("ignore", HFToolsDeprecationWarning)
            a.info = ()

    def test_info_deprecation_2(self):
        # Passing info= to the constructor warns as well.
        reset_hftools_warnings()
        self.assertHFToolsDeprecationWarning(aobj.hfarray, 1, info=())
        with warnings.catch_warnings():
            warnings.resetwarnings()
            warnings.simplefilter("ignore", HFToolsDeprecationWarning)
            aobj.hfarray(1, info=())

    def test_info_deprecation_3(self):
        # info= and dims= are mutually exclusive even with warnings ignored.
        reset_hftools_warnings()
        with warnings.catch_warnings():
            warnings.resetwarnings()
            warnings.simplefilter("ignore", HFToolsDeprecationWarning)
            self.assertRaises(ValueError, aobj.hfarray, 1, info=(), dims=())

    # --- dims_index lookups ---------------------------------------------
    def test_dims_index_1(self):
        self.assertEqual(self.a.dims_index("f"), 0)
        self.assertEqual(self.a.dims_index("i"), 1)
        self.assertEqual(self.a.dims_index("j"), 2)
        self.assertEqual(self.a.dims_index("f", aobj.DimSweep), 0)
        self.assertEqual(self.a.dims_index("i", aobj.DimMatrix_i), 1)
        self.assertEqual(self.a.dims_index("j", aobj.DimMatrix_j), 2)

    def test_dims_index_2(self):
        self.assertEqual(self.b.dims_index("f"), 0)
        self.assertEqual(self.b.dims_index("i"), 1)
        self.assertEqual(self.b.dims_index("j"), 2)
        self.assertEqual(self.a.dims_index("f", aobj.DimSweep), 0)
        self.assertEqual(self.a.dims_index("i", aobj.DimMatrix_i), 1)
        self.assertEqual(self.a.dims_index("j", aobj.DimMatrix_j), 2)

    def test_dims_index_error_1(self):
        # Unknown names, or a name/class mismatch, raise IndexError.
        self.assertRaises(IndexError, self.a.dims_index, "Q")
        self.assertRaises(IndexError, self.a.dims_index, "f", aobj.DimMatrix_i)
        self.assertRaises(IndexError, self.a.dims_index, "i", aobj.DimSweep)
        self.assertRaises(IndexError, self.a.dims_index, "j", aobj.DimSweep)

    def test_dims_index_error_2(self):
        self.assertRaises(IndexError, self.b.dims_index, "X")
        self.assertRaises(IndexError, self.b.dims_index, "f", aobj.DimMatrix_i)
        self.assertRaises(IndexError, self.b.dims_index, "i", aobj.DimSweep)
        self.assertRaises(IndexError, self.b.dims_index, "j", aobj.DimSweep)

    def test_info_index_deprecated(self):
        # info_index is the deprecated alias of dims_index.
        reset_hftools_warnings()
        x = aobj.hfarray([1,2], dims=(aobj.DimSweep("a", 2),))
        self.assertHFToolsDeprecationWarning(x.info_index, "a", aobj.DimSweep)
        with warnings.catch_warnings():
            warnings.resetwarnings()
            warnings.simplefilter("ignore", HFToolsDeprecationWarning)
            x.info_index("a", aobj.DimSweep)

    # --- smoke tests: help() must not raise ------------------------------
    def test_help_1(self):
        self.a.help()

    def test_help_2(self):
        self.b.help()

    # --- dimension reordering and squeezing ------------------------------
    def test_reorder_1(self):
        # Named dims move to the front, remaining dims keep their order.
        dims = self.c.dims
        a = self.c.reorder_dimensions(*dims[-1:])
        self.assertEqual(a.dims, dims[-1:] + dims[:-1])

    def test_reorder_2(self):
        dims = self.c.dims
        a = self.c.reorder_dimensions(*dims[-2:])
        self.assertEqual(a.dims, dims[-2:] + dims[:-2])

    def test_reorder_3(self):
        dims = self.c.dims
        a = self.c.reorder_dimensions(*dims[::-1])
        self.assertEqual(a.dims, dims[::-1])

    def test_squeeze_1(self):
        # squeeze() drops all length-1 dims regardless of their position.
        a = self.e.squeeze()
        self.assertEqual(a.dims, (self.fi, self.gi))

    def test_squeeze_2(self):
        a = self.f.squeeze()
        self.assertEqual(a.dims, (self.fi, self.gi))

    def test_squeeze_3(self):
        a = self.g.squeeze()
        self.assertEqual(a.dims, (self.fi, self.gi))

    def test_squeeze_4(self):
        a = self.h.squeeze()
        self.assertEqual(a.dims, (self.fi, self.gi))

    # --- default outputformat per dtype ----------------------------------
    def test_outpformat_at_init_int(self):
        a = aobj.hfarray(1)
        self.assertEqual(a.outputformat, "%d")

    def test_outpformat_at_init_float(self):
        a = aobj.hfarray(1.2)
        self.assertEqual(a.outputformat, "%.16e")

    def test_outpformat_at_init_datetime(self):
        a = aobj.hfarray("2012-02-02 08:30", dtype="datetime64[us]")
        self.assertEqual(a.outputformat, "%s")

    def test_outpformat_at_init_other(self):
        a = aobj.hfarray(bool)
        self.assertEqual(a.outputformat, "%s")
class Test_hfarray_getitem(_Test_hfarray):
    """__getitem__ with Ellipsis and with boolean-array masks."""

    def test_getitem_1(self):
        # Ellipsis indexing must hand back a view of the same buffer.
        view = self.a[...]
        self.assertEqual(view.__array_interface__, self.a.__array_interface__)

    def test_get_bool_1(self):
        full_dim = aobj.DimSweep("a", [1, 2, 3])
        data = aobj.hfarray([10, 20, 30], dims=(full_dim, ))
        trimmed_dim = aobj.DimSweep("a", [2, 3])
        expected = aobj.hfarray([20, 30], dims=(trimmed_dim, ))
        mask = data > 10
        self.assertAllclose(data[mask], expected)

    def test_get_bool_2(self):
        dim_a = aobj.DimSweep("a", [1, 2])
        dim_b = aobj.DimSweep("b", [1, 2, 3])
        dim_c = aobj.DimSweep("c", [1, 2, 3, 4])
        A = aobj.hfarray([1, 2], dims=(dim_a, ))
        B = aobj.hfarray([10, 20, 30], dims=(dim_b, ))
        C = aobj.hfarray([100, 200, 300, 400], dims=(dim_c, ))
        total = A + B + C
        # Masking along one dim leaves the other dims intact.
        self.assertAllclose(total[A == 1].squeeze(), 1 + B + C)
        self.assertAllclose(total[B == 20].squeeze(), A + 20 + C)
        self.assertAllclose(total[C == 300].squeeze(), A + B + 300)
        # A mask built on an unrelated dim is rejected.
        self.assertRaises(ValueError, C.__getitem__, A == 1)
class Test_hfarray_take(_Test_hfarray):
    """take() flattens and returns a result on an anonymous dimension."""

    def test_take(self):
        dim_a = aobj.DimSweep("a", [1, 2])
        dim_b = aobj.DimSweep("b", [1, 2, 3])
        dim_c = aobj.DimSweep("c", [1, 2, 3, 4])
        A = aobj.hfarray([1, 2], dims=(dim_a, ))
        B = aobj.hfarray([10, 20, 30], dims=(dim_b, ))
        C = aobj.hfarray([100, 200, 300, 400], dims=(dim_c, ))
        combined = A + B + C
        flat_dim = aobj.DimAnonymous("anon1", [0, 1, 2])
        expected = aobj.hfarray([111, 211, 311], dims=(flat_dim, ))
        self.assertAllclose(combined.take([0, 1, 2]), expected)
class Test_hfarray_matrix(_Test_hfarray):
    """The .t (transpose) property on matrix-dimensioned arrays."""

    def test_t_1(self):
        freq = aobj.DimSweep("freq", [1, 2, 3])
        i = aobj.DimMatrix_i("i", 2)
        j = aobj.DimMatrix_j("j", 2)
        M = aobj.hfarray(np.zeros((3, 2, 2), dtype=np.complex128),
                         dims=(freq, i, j))
        M[:, 0, 0] = [11, 110, 1100]
        M[:, 0, 1] = [12, 120, 1200]
        M[:, 1, 0] = [21, 210, 2100]
        M[:, 1, 1] = [22, 220, 2200]
        transposed = M.t
        # Off-diagonal entries swap; diagonal entries stay put.
        self.assertAllclose(M[:, 0, 0], transposed[:, 0, 0])
        self.assertAllclose(M[:, 0, 1], transposed[:, 1, 0])
        self.assertAllclose(M[:, 1, 0], transposed[:, 0, 1])
        self.assertAllclose(M[:, 1, 1], transposed[:, 1, 1])

    def test_t_2(self):
        freq = aobj.DimSweep("freq", [1, 2, 3])
        i = aobj.DimMatrix_i("i", 2)
        j = aobj.DimMatrix_j("j", 2)
        # Only the i matrix dim present: transpose is element-wise identity.
        M = aobj.hfarray(np.zeros((3, 2,), dtype=np.complex128),
                         dims=(freq, i,))
        M[:, 0] = [11, 110, 1100]
        M[:, 1] = [22, 220, 2200]
        transposed = M.t
        self.assertAllclose(M, transposed)

    def test_t_3(self):
        freq = aobj.DimSweep("freq", [1, 2, 3])
        i = aobj.DimMatrix_i("i", 2)
        j = aobj.DimMatrix_j("j", 2)
        # Only the j matrix dim present: same element-wise identity.
        M = aobj.hfarray(np.zeros((3, 2,), dtype=np.complex128),
                         dims=(freq, j,))
        M[:, 0] = [11, 110, 1100]
        M[:, 1] = [22, 220, 2200]
        transposed = M.t
        self.assertAllclose(M, transposed)
class Test_rss_method(_Test_hfarray):
    """rss() must equal sqrt(sum(|x|**2)) over the requested axis."""

    def _check_rss(self, *axes):
        # Shared driver: compare rss(*axes) against the numpy reference.
        v, = aobj.make_same_dims_list([random_value_array(4, 5)])
        raw = np.array(v)
        expected = np.sqrt((abs(raw) ** 2).sum(*axes))
        self.assertAllclose(v.rss(*axes), expected)

    def test_1(self):
        self._check_rss()

    def test_2(self):
        self._check_rss(0)

    def test_3(self):
        self._check_rss(1)
class Test_cumsum(_Test_hfarray):
    """Axis-wise cumulative methods must mirror the plain-ndarray result.

    Subclasses override `methodname` (and optionally `kw`) to reuse the
    same checks for other methods.
    """
    methodname = "cumsum"
    kw = {}

    def test_1(self):
        hfa = random_value_array(4, 5, minsize=2)
        plain = np.array(hfa)
        extra = self.kw.copy()
        for axis in range(hfa.ndim):
            got = getattr(hfa, self.methodname)(axis=axis, **extra)
            want = getattr(plain, self.methodname)(axis=axis, **extra)
            got.verify_dimension()
            self.assertTrue(np.allclose(got, want))
            self.assertIsInstance(got, hfa.__class__)

    def test_index_error(self):
        hfa = random_value_array(4, 5, minsize=2)
        bound = getattr(hfa, self.methodname)
        self.assertRaises(IndexError, bound, "NONEXISTING")
class Test_mean(Test_cumsum):
    # Base suite for the reducing methods; Test_std/var/max/min/sum below
    # rerun all of these tests by overriding `methodname`.
    methodname = "mean"

    def test_axis_none(self):
        # axis=None reduces over all axes, matching plain numpy.
        v = random_value_array(4, 5)
        a = np.array(v)
        args = self.kw.copy()
        args.update(axis=None)
        r1 = getattr(v, self.methodname)(**args)
        r2 = getattr(a, self.methodname)(**args)
        r1.verify_dimension()
        self.assertTrue(np.allclose(r1, r2))
        self.assertIsInstance(r1, v.__class__)

    def test_axis_dimsweep_class(self):
        # Axis given as a dim *class*; compared against numpy axis 0.
        v = self.b
        a = np.array(v)
        args = self.kw.copy()
        r1 = getattr(v, self.methodname)(axis=aobj.DimSweep, **args)
        r2 = getattr(a, self.methodname)(axis=0, **args)
        r1.verify_dimension()
        self.assertTrue(np.allclose(r1, r2))
        self.assertIsInstance(r1, v.__class__)

    def test_defaults(self):
        # No arguments at all: default axis behaviour must match numpy's.
        v = random_value_array(4, 5)
        a = np.array(v)
        r1 = getattr(v, self.methodname)()
        r2 = getattr(a, self.methodname)()
        r1.verify_dimension()
        self.assertTrue(np.allclose(r1, r2))
        self.assertIsInstance(r1, v.__class__)

    def test_axis_specified_1(self):
        # Axis given as a dim instance.
        v = self.b
        a = np.array(v)
        args = self.kw.copy()
        r1 = getattr(v, self.methodname)(axis=self.bdims[0], **args)
        r2 = getattr(a, self.methodname)(axis=0, **args)
        r1.verify_dimension()
        self.assertTrue(np.allclose(r1, r2))
        self.assertIsInstance(r1, v.__class__)

    def test_axis_specified_2(self):
        # Axis given as a tuple of dims.
        v = self.b
        a = np.array(v)
        args = self.kw.copy()
        r1 = getattr(v, self.methodname)(axis=self.bdims[:1], **args)
        r2 = getattr(a, self.methodname)(axis=0, **args)
        r1.verify_dimension()
        self.assertTrue(np.allclose(r1, r2))
        self.assertIsInstance(r1, v.__class__)

    def test_axis_specified_not_in_dims(self):
        # A dim not present in the array raises IndexError by default.
        self.assertRaises(IndexError, getattr(self.a, self.methodname),
                          aobj.DimRep("rep", 4))

    def test_axis_specified_not_in_dims_2(self):
        # dimerror=False turns the missing-dim error into a no-op.
        # NOTE: intentionally calls .mean directly, even in subclasses.
        res = self.a.mean(axis=aobj.DimRep("rep", 4), dimerror=False)
        self.assertAllclose(res, self.a)

    def test_index_error_dont_raise(self):
        # Same no-op behaviour for an unknown dim *name*.
        v = random_value_array(4, 5, minsize=2)
        method = getattr(v, self.methodname)
        q = method("NONEXISTING", dimerror=False)
        self.assertAllclose(q, v)

    def test_keepdims(self):
        # keepdims=True must be forwarded and match numpy per axis.
        v = random_value_array(4, 5, minsize=2)
        a = np.array(v)
        args = self.kw.copy()
        args["keepdims"] = True
        for i in range(v.ndim):
            r1 = getattr(v, self.methodname)(axis=i, **args)
            r2 = getattr(a, self.methodname)(axis=i, **args)
            r1.verify_dimension()
            self.assertTrue(np.allclose(r1, r2))
            self.assertIsInstance(r1, v.__class__)
# The following subclasses re-run the entire Test_mean suite against the
# other reducing array methods; only the method under test differs.
class Test_std(Test_mean):
    methodname = "std"


class Test_var(Test_mean):
    methodname = "var"


class Test_max(Test_mean):
    methodname = "max"


class Test_min(Test_mean):
    methodname = "min"


class Test_sum(Test_mean):
    methodname = "sum"
class Test_cumsum2(_Test_hfarray):
    """Error handling of the cumulative methods for bad axis arguments."""
    methodname = "cumsum"

    def test_error_1(self):
        # axis=None is not a valid axis for hfarray cumulative methods.
        bound = getattr(random_value_array(4, 5), self.methodname)
        self.assertRaises(aobj.HFArrayError, bound, None)

    def test_error_2(self):
        # A tuple of axes is rejected with IndexError.
        bound = getattr(random_value_array(4, 5), self.methodname)
        self.assertRaises(IndexError, bound, (0, 1))
# Re-run both cumulative-method suites with cumprod instead of cumsum.
class Test_cumprod(Test_cumsum):
    methodname = "cumprod"


class Test_cumprod2(Test_cumsum2):
    methodname = "cumprod"
class Test_make_matrix(TestCase):
    """make_matrix() tags the two trailing axes as matrix i/j dims."""

    def test_1(self):
        src = random_value_array(3, 5)
        mat = aobj.make_matrix(np.array(src), src.dims[:-2])
        self.assertAllclose(mat, src)
        expected_tail = (ds.DimMatrix_i("i", src.shape[-2]),
                         ds.DimMatrix_j("j", src.shape[-1]))
        self.assertEqual(mat.dims[-2:], expected_tail)

    def test_2(self):
        # Wrong number of leading dims supplied -> shape/dims mismatch.
        src = random_value_array(3, 5)
        self.assertRaises(aobj.HFArrayShapeDimsMismatchError,
                          aobj.make_matrix, np.array(src), src.dims[:2])

    def test_3(self):
        src = random_value_array(3, 5)
        self.assertRaises(aobj.HFArrayShapeDimsMismatchError,
                          aobj.make_matrix, np.array(src), src.dims[:3])
class Test_make_vector(TestCase):
def test_1(self):
a | |
<filename>python-lophi/lophi/sensors/memory/physical.py
"""
Class for interacting with our physical memory sensor (FPGA)
(c) 2015 Massachusetts Institute of Technology
"""
# Native
import socket
import time
import logging
logger = logging.getLogger(__name__)
import multiprocessing
# LO-PHI
import lophi.globals as G
from lophi.sensors.memory import MemorySensor
from lophi.data import MemoryRapidPacket
from lophi.network import PacketReaderUDP
network_lock = multiprocessing.Lock()
MAX_THREAD_READ = 7680*10 # Unimplemented... but should it be?
READ_CHUNK = 7680
class MemorySensorPhysical(MemorySensor):
"""
This is our interface to both our NetFPGA and ML507 boards using Josh's
code.
"""
def __init__(self, sensor_ip=G.SENSOR_MEMORY.DEFAULT_IP,
             sensor_port=G.SENSOR_MEMORY.DEFAULT_PORT,
             cache_timeout=0,
             name=None,
             use_threading=False,
             timeout=1,
             retries=5):
    """
    Initialize our memory sensor. Just saving values at this point;
    no network activity happens until the first command is sent.

    @param sensor_ip: IP address of the memory sensor board.
    @param sensor_port: UDP port the sensor listens on.
    @param cache_timeout: How long to keep data in the cache (seconds)
    @param name: Human name of the sensor
    @param use_threading: This will spawn a new process to read replies
        from the sensor. Enables much faster reads, but will eventually
        blow the UDP stack in the FPGA.
    @param timeout: Socket timeout in seconds (only applied when not
        using the threaded reader).
    @param retries: Number of attempts before a connect/read is
        considered failed.
    """
    # Sensor info
    self.sensor_ip = sensor_ip
    self.sensor_port = sensor_port
    # Socket variables (socket is created lazily in _connect)
    self._sock = None
    self.TIMEOUT = timeout
    self.RETRIES = retries
    self.TIMED_OUT = False
    self.connect_count = 0
    # Are we reading a separate process?
    self.use_threading = use_threading
    self.read_queue = None
    self.packet_reader = None
    # Cache
    self.cache = {}
    self.cache_timeouts = {}
    self.CACHE_TIMEOUT = cache_timeout # seconds
    # Keep track of our transaction
    self.transaction_no = 1
    if name is not None:
        self.name = name
    # Bad read regions (On XPSP3x86)
    # Ref: http://wiki.osdev.org/Memory_Map_%28x86%29
    self.BAD_MEM_REGIONS = [(0xA0000, 0x100000), # VGA and PCI
                            (0x7fe00000,0x80000000), # No Clue
                            (0xdbf00000,0x100000000), # High Mem PCI devices
                            (0x3ff00000,0x40000000) # No clue (End of memory?
                            ]
    # Initialize our superclass as well
    MemorySensor.__init__(self)
def __del__(self):
    """
    Tear down the sensor connection when this object is reclaimed.
    """
    self._disconnect()
def _connect(self):
    """
    Ensure a UDP socket to the sensor exists, opening one if needed.

    @return: True when a socket is available (already open, or opened
        successfully here); False when this attempt failed but retries
        remain.
    @raise socket.error: after more than RETRIES consecutive failures.

    FIX: the original fell off the end of the function (returning None,
    i.e. falsy) when the socket was already open, so callers that loop
    on ``while not self._connect()`` could spin forever; we now return
    True explicitly for that case.
    """
    if self._sock is not None:
        # Already connected: reset the failure counter and report success.
        self.connect_count = 0
        return True
    # Open our socket
    try:
        logger.debug("Connecting to memory sensor. ")
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 100000)
        s.connect((self.sensor_ip, self.sensor_port))
        if self.use_threading:
            logger.debug("Starting listener thread.")
            # Start a process that will handle all of our reads
            self.read_queue = multiprocessing.Queue()
            self.packet_reader = PacketReaderUDP(s, self.read_queue)
            self.packet_reader.start()
        elif self.TIMEOUT is not None and self.TIMEOUT > 0:
            s.settimeout(self.TIMEOUT)
    except Exception:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed here.
        logger.error("Could not connect to memory sensor. (%s,%d)"%(self.sensor_ip,self.sensor_port))
        self.connect_count += 1
        if self.connect_count > self.RETRIES:
            raise socket.error("Could not connect to memory sensor.")
        return False
    # Save our socket and reset the failure counter.
    self._sock = s
    self.connect_count = 0
    return True
def _disconnect(self):
    """
    Close the socket and shut down the optional reader process/queue.
    """
    # Each resource is (attribute name, shutdown method name); processed
    # in the same order as before: socket, queue, reader process.
    for attr_name, shutdown in (("_sock", "close"),
                                ("read_queue", "close"),
                                ("packet_reader", "terminate")):
        resource = getattr(self, attr_name)
        if resource is not None:
            getattr(resource, shutdown)()
            setattr(self, attr_name, None)
def _send_command(self, command, address, length, data=None):
    """
    Build a MemoryRapidPacket for the given command and send it to the
    card, (re)connecting first if necessary.

    @param command: LO-PHI command to send
    @param address: Address on the SUT (Not address on the FPGA)
    @param length: Length for the command (Not the length of the packet)
    @param data: Any data to append to the packet
    """
    logger.debug("Sending command (0x%x, 0x%x, %d)"%(command,address,length))
    # Build our payload
    packet = MemoryRapidPacket()
    # Constants
    packet.MAGIC_LOPHI = G.SENSOR_MEMORY.MAGIC_LOPHI
    packet.flags = G.SENSOR_MEMORY.DEFAULT_FLAGS
    # Command
    packet.operation = command
    # Split our 64-bit address into two 32-bit halves
    lowaddress = (address) & 0xFFFFFFFF
    highaddress = address >> 32
    packet.address_high = highaddress
    packet.address_low = lowaddress
    # Data
    packet.length = length
    packet.data = data
    # Transaction number (16-bit, wraps around)
    packet.transaction_no = self.transaction_no
    self.transaction_no = (self.transaction_no+1)%0x0000ffff
    # Keep trying to reconnect
    while not self._connect():
        time.sleep(1)
    # Send payload to our sensor.
    # FIX: replaced the Python-2-only backquote syntax (`packet`) with
    # the equivalent repr() call; the packet presumably serializes
    # itself via __repr__ (len(packet) below matches the wire size).
    sent = self._sock.send(repr(packet))
    if sent != len(packet):
        # Short send is only logged; the read path handles missing data.
        # (FIX: removed an unused `dead = True` local that followed.)
        logger.error("Only sent {0} out of {1}".format(sent, len(packet)))
def _read_raw_packet(self, size=G.MAX_PACKET_SIZE):
    """
    Pull one raw datagram from the sensor.

    @param size: maximum number of bytes to accept from the socket
    @return: (data, address)
    """
    # A previous timeout means the sensor is presumed dead.
    if self.TIMED_OUT:
        raise socket.timeout
    # Block until a socket is available.
    while not self._connect():
        time.sleep(1)
    # Prefer the reader-process queue when threading is enabled,
    # otherwise read straight off the socket.
    if self.read_queue is not None:
        payload, sender = self.read_queue.get()
    else:
        payload, sender = self._sock.recvfrom(size)
    logger.debug("Read %d bytes."%len(payload))
    return payload, sender
def _get_read_response(self, length, read_multiple=False):
    """
    Collect read-reply packets until `length` bytes have been gathered.

    @param length: total number of bytes expected
    @param read_multiple: if True, expect a fresh reply sequence starting
        at transaction 0 (the bulk/threaded read path); otherwise expect
        replies matching the last command sent.
    @return: exactly `length` bytes of data, or None on a protocol
        mismatch (bad magic number or unexpected transaction number).
    """
    data = ""
    if read_multiple:
        transaction_no = 0
    else:
        # Replies echo the transaction number of the command we just
        # sent; _send_command already incremented self.transaction_no.
        transaction_no = (self.transaction_no - 1) % 0x0000ffff
    while len(data) < length:
        # Read a LO-PHI packet
        rapid_packet = self.get_rapid_packet()
        if rapid_packet.MAGIC_LOPHI != G.SENSOR_MEMORY.MAGIC_LOPHI:
            logger.error("Magic number mismatch. (%x)"%(rapid_packet.MAGIC_LOPHI))
            return None
        # Same transaction?
        if rapid_packet.transaction_no != transaction_no:
            logger.error("different transaction! %x instead of %x"%(rapid_packet.transaction_no,
                                                                    transaction_no))
            return None
        # Is this the correct response?
        if rapid_packet.operation != G.SENSOR_MEMORY.COMMAND.READ + 0x2: # RAPID reply is 0x2
            logger.error("not a read?! {0}".format(rapid_packet.operation))
            logger.error(rapid_packet)
            continue
        # NOTE(review): if rapid_packet.data is None, the len() in this
        # log line itself raises TypeError -- behavior preserved from
        # the original code.
        if rapid_packet.data is None or len(rapid_packet.data) != rapid_packet.length:
            logger.error("DATA LENGTHS DON'T MATCH! (Expected: %d, Got: %d bytes)"%(rapid_packet.length,
                                                                                    len(rapid_packet.data)))
        # Append our data
        data += rapid_packet.data
        # look for next transaction
        transaction_no = (transaction_no + 1) % 0x0000ffff
    # Just in case we read more than we wanted, truncate off the end bytes.
    # FIX: removed an unreachable trailing `return None` that followed
    # this return in the original.
    return data[:length]
def _read_from_sensor(self,address,length):
"""
This is the lowest level read command and the only read command that
will acctually perform a memory read from the sensor.
@param address: Physical memory address to read
@param length: Length of memory to read starting at @address
@TODO: Remove our horrible hack once the hardware is up-to-date
"""
with network_lock:
# This is a HACK to work around a bug in the PCI sensor that has trouble
# when not reading on word boundaries?
adjust_addr = 0
# Is our start address word aligned?
if address%4 != 0:
adjust_addr = address%4
address -= adjust_addr
length += adjust_addr
# Only read in words.
adjust_len = 0
if length%4 != 0:
adjust_len = 4-length%4
length += adjust_len
self.transaction_no = 0
rtn_data = ""
remaining_length = length
offset = 0
if not self.use_threading:
while remaining_length > 0:
# Calculate how much to read?
req_len = min(READ_CHUNK,remaining_length)
# Send our read command
# Try to read RETRIES times
attempt = 0
while attempt < self.RETRIES:
try:
# Send read command
self._send_command(G.SENSOR_MEMORY.COMMAND.READ,
address+offset,
req_len)
# get data off the wire
tmp = self._get_read_response(req_len)
# Something bad happen in the read, let's try to re-open the socket
if tmp is None:
logger.error("Didn't get a response from sensor. Trying again.")
self._disconnect()
self._connect()
continue
break
except socket.timeout:
logger.error("Memory sensor timeout (%d/%d)"%(attempt,
self.RETRIES))
pass
attempt += 1
# if we hit our retries, the card has timed out.
if attempt == self.RETRIES:
logger.error("Memory sensor timed out! (0x%16X, %d)"%
(address+offset,
req_len))
raise socket.timeout
# If nothing came back, keep trying!
if tmp is None:
# continue
rtn_data = None
break
rtn_data += tmp
# Calculate how much more we have to read
remaining_length -= req_len
offset += req_len
else:
# Try to read RETRIES times
attempt = 0
while attempt < self.RETRIES:
try:
# Send all of our read commands
while remaining_length > 0:
# Calculate how much to read?
req_len = min(READ_CHUNK,remaining_length)
# Send our read command
self._send_command(G.SENSOR_MEMORY.COMMAND.READ, address+offset, req_len)
# Calculate how much more we have to read
remaining_length -= req_len
offset += req_len
# Read all of the data back at once.
rtn_data = self._get_read_response(length, True)
# Something bad happen in the read, let's try to re-open the socket
if rtn_data is None:
logger.error("Didn't get a response from sensor. (%d/%d)"%(attempt,
self.RETRIES))
time.sleep(1)
self._disconnect()
self._connect()
remaining_length = length
else:
break
except socket.timeout:
logger.error("Memory sensor timeout (%d/%d)"%(attempt,
self.RETRIES))
pass
attempt += 1
if attempt == self.RETRIES:
logger.error("Memory sensor timed out! (0x%16X, %d)"%
(address,
length))
raise socket.timeout
# return the read data
# HACK: start from our offset and truncate extra data appended to | |
data was seriously messed up. The following
notes refer to those times (in case they come back). There is a lot
of code to catch if the problem comes back and provide appropriate
data for debugging.
There were some serious issues with unknown causes here:
1. Lightning data is crazy because lightning bins rarely add to
128. Either I misunderstand something, have missed something obvious,
or the FIS-B code isn't generating correct data, or data consistent
with the standard. Trying to get to the bottom of this is why there
is so much debugging code in this routine.
2. There is an undocumented case where ``F8`` is used to denote
32 bins (as opposed to the normal 16 bin max). Apparently, if the
bin count is ``1111`` (16 bins), polarity is ``1``, and strike count is
``000``, this will count as 32 bins. It has only been seen in the wild
as ``0xf8f8f8f8``. It is not known if the general case where bin count
can be any allowed value with a polarity of one and zero strike count
occurs in the wild. As of this time, only F8 is allowed and counted as
32 bins. Other cases will cause an exception by having bin counts that
don't total to 128. Testing has not shown any consistant way to handle
cases other than ``0xF8F8F8F8``.
Args:
ba (byte array): Byte array with ``ba[0]`` pointing to the first byte of the
block reference indicator.
Returns:
str: 128 byte string with one byte for each bin. The MSB is
the polarity, and the 3-LSBs are the strike count.
Raises:
ApduLightningBinsException: If the bins don't add to 128, or there are
128 bins, but there is space left in the frame.
"""
ros = 3
binTotal = 0
binstr = ''
# Remember the length of the array for flagging errors if we have not
# reached 128 bins by the time we have reached the end of the array.
baLen = len(ba)
errStr = '\nbytes to decode: {}\n{}\n'.format(baLen - 3, ba[3:].hex())
errStr += 'idx total-bins byte bins pol strikes spcl\n' +\
'--- ---------- ---- ---------- --- ---------- ----\n'
count = 1
# Uses a single byte for each run.
while (True):
specialFlag = ' '
# If here, the bins didn't total to 128 and we are out of array.
if ros == baLen:
errStr = '\n**** less than 128 bins\n' + errStr
raise ex.ApduLightningBinsException(errStr)
val = ba[ros]
binValue = chr(val & 0x0F)
strikes = val & 0x07
polarity = (val & 0x08) >> 3
bins = (val & 0xF0) >> 4
binsToAdd = 0
# It is really unknown what it means if the strike count
# is zero and the polarity is 1. The case of 0xF8 is well
# known... F8F8F8F8 is often sent to represent 128 bins.
# But there are many cases where the bin counts don't equal
# 128. For now, we handle the F8 case, and treat the other
# cases as non-special.
if (strikes == 0) and (polarity == 1):
specialFlag = '*'
# Handle non standard case where F8 means 32 bins
if val == 0xf8:
binsToAdd += bins + 17
else:
binsToAdd += bins + 1
else:
binsToAdd += bins + 1
binTotal += binsToAdd
# Handle zero strikes with negative polarity case
if binValue == '8':
binValue = '0'
binstr += binValue * binsToAdd
errStr += '{:03} {:03} {:02x} {:02} -> {:02} {:1} {:1} {} {}\n'.\
format(count, binTotal, ba[ros], bins, binsToAdd, polarity,\
strikes, strikeDict[strikes], specialFlag)
count += 1
ros += 1
if (binTotal == 128):
if (count - 1) != baLen -3:
errStr = '\n**** 128 bins but not all of the array used\n' + errStr
raise ex.ApduLightningBinsException(errStr)
return binstr
if (binTotal > 128):
errStr = '\n**** more than 128 bins\n' + errStr
raise ex.ApduLightningBinsException(errStr)
def icingRL(ba):
    """De-run-length the icing product run lengths.

    Each run occupies exactly two bytes: the first byte is the run
    length minus one, the second byte is the display value repeated for
    every bin of the run.  Runs are consumed until exactly 128 bins have
    been produced.

    Icing packs three fields into each display byte, ``ddsssppp``:
    SLD probability ``dd`` (0-3), icing severity ``sss`` (0-7), and
    icing probability ``ppp`` (0-7).  Note the severity codes are not in
    intensity order: DO-358A/B assigns 4 to 'severe' and 5 to 'heavy',
    while the FAA (5/7/2003 Federal Register, page 24542) rates severe
    icing as more intense than heavy.

    To convert 3-bit altitude values to actual altitude, see
    ``turbRL()``-- they use identical altitudes.

    Args:
        ba (byte array): Byte array with ``ba[0]`` pointing to the first
            byte of the block reference indicator.

    Returns:
        str: 128 byte string with one byte for each bin (``ddsssppp``
        encoding as described above).

    Raises:
        ApduTooManyBinsException: If the runs total more than 128 bins.
    """
    # Runs start right after the 3-byte block reference indicator.
    offset = 3
    total_bins = 0
    decoded = ''
    while True:
        run_length = ba[offset] + 1
        display_value = chr(ba[offset + 1] & 0xFF)
        decoded += display_value * run_length
        total_bins += run_length
        offset += 2
        if total_bins == 128:
            return decoded
        if total_bins > 128:
            raise ex.ApduTooManyBinsException('Found too many bins (>128) in icingRL')
def turbRL(ba):
"""Return decoded run length for turbulence and cloud top blocks.
To decode 3-bit altitude (all values in MSL): ::
Low Level = (byte + 1) * 2000
High Level = 18000 + (byte * 2000) [only byte values 0-3 allowed]
The data is encoded for a range of Eddy Dissipation Rates (EDRs). A value
of 0 is less than 7 EDRs, and a value of 14 is >= 98 EDRs. 15 means no
data. For values 1 to 13, the low value is <= to byte * 7, and the
high value is < (byte + 1) * 7.
``ba`` is a byte array with ``ba[0]`` at the top of the block reference
indicator.
Args:
ba (byte array): Byte array with ``ba[0]`` pointing to the first byte of the
block reference indicator.
Returns:
str: 128 character string with one character for each bin.
Raises:
ApduTooManyBinsException: If too many bins found.
"""
ros = 3
binCount = 0
binTotal = 0
bins = ''
# Uses 1 or two byte for the run. If the 4 MSB bits is 0xE0, the
# next byte contains the number of bins. Else, the 4 MSB bits are
# the number of bins - 1.
while (True):
byte1 = (ba[ros] & 0xF0) >> 4
binValue = chr(ba[ros] & 0x0F)
if byte1 == 0x0E:
binCount = ba[ros + 1] + 1
bins += binValue * binCount
binTotal += binCount
ros += 2
else:
# single byte
binCount = byte1 + 1
bins += binValue * binCount
binTotal += binCount
ros += 1
if (binTotal == 128):
return | |
<filename>mlflow/gluon/__init__.py<gh_stars>1-10
from packaging.version import Version
import os
import numpy as np
import pandas as pd
import yaml
import mlflow
from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.signature import ModelSignature
from mlflow.models.utils import ModelInputExample, _save_example
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import (
_mlflow_conda_env,
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
)
from mlflow.utils.requirements_utils import _get_pinned_requirement
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.utils.file_utils import write_to
from mlflow.utils.autologging_utils import (
autologging_integration,
safe_patch,
batch_metrics_logger,
)
FLAVOR_NAME = "gluon"
_MODEL_SAVE_PATH = "net"
def load_model(model_uri, ctx, dst_path=None):
    """
    Load a Gluon model from a local file or a run.

    :param model_uri: The location, in URI format, of the MLflow model. For example:
                      ``/Users/me/path/to/local/model``, ``relative/path/to/local/model``,
                      ``s3://my_bucket/path/to/model``,
                      ``runs:/<mlflow_run_id>/run-relative/path/to/model``,
                      ``models:/<model_name>/<model_version>`` or
                      ``models:/<model_name>/<stage>``.

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param ctx: Either CPU or GPU.
    :param dst_path: The local filesystem path to which to download the model artifact.
                     This directory must already exist. If unspecified, a local output
                     path will be created.
    :return: A Gluon model instance.

    .. code-block:: python
        :caption: Example

        # Load persisted model as a Gluon model, make inferences against an NDArray
        model = mlflow.gluon.load_model("runs:/" + gluon_random_data_run.info.run_id + "/model")
        model(nd.array(np.random.rand(1000, 1, 32)))
    """
    import mxnet as mx
    from mxnet import gluon
    from mxnet import sym

    local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
    # The network was exported under <model>/data/net-symbol.json + net-0000.params.
    base = os.path.join(local_model_path, "data", _MODEL_SAVE_PATH)
    arch_file = base + "-symbol.json"
    params_file = base + "-0000.params"
    if Version(mx.__version__) < Version("2.0.0"):
        # Legacy MXNet 1.x loading path: rebuild the SymbolBlock by hand.
        symbol = sym.load(arch_file)
        net = gluon.SymbolBlock(symbol, sym.var("data", dtype="float32"))
        net.collect_params().load(params_file, ctx)
        return net
    # MXNet 2.x provides a one-call import for exported symbol blocks.
    return gluon.SymbolBlock.imports(
        arch_file, input_names=["data"], param_file=params_file, ctx=ctx
    )
class _GluonModelWrapper:
    """Adapter exposing a Gluon model through the pyfunc ``predict`` interface."""

    def __init__(self, gluon_model):
        # The wrapped (already loaded) Gluon network.
        self.gluon_model = gluon_model

    def predict(self, data):
        """
        :param data: Either a pandas DataFrame or a numpy array containing input array values.
                     If the input is a DataFrame, it will be converted to an array first by a
                     `ndarray = df.values`.
        :return: Model predictions. If the input is a pandas.DataFrame, the predictions are
                 returned in a pandas.DataFrame. If the input is a numpy array, the predictions
                 are returned as either a numpy.ndarray or a plain list for hybrid models.
        """
        import mxnet as mx

        if isinstance(data, pd.DataFrame):
            raw = self.gluon_model(mx.nd.array(data.values))
            if isinstance(raw, mx.ndarray.ndarray.NDArray):
                raw = raw.asnumpy()
            # Mirror the input container type: DataFrame in -> DataFrame out.
            return pd.DataFrame(raw)
        if isinstance(data, np.ndarray):
            raw = self.gluon_model(mx.nd.array(data))
            if isinstance(raw, mx.ndarray.ndarray.NDArray):
                raw = raw.asnumpy()
            return raw
        raise TypeError("Input data should be pandas.DataFrame or numpy.ndarray")
def _load_pyfunc(path):
    """
    Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.

    :param path: Local filesystem path to the MLflow Model with the ``gluon`` flavor.
    :return: A :class:`_GluonModelWrapper` around the loaded Gluon network.
    """
    import mxnet as mx

    # Load on the currently active MXNet context (CPU or GPU).
    return _GluonModelWrapper(load_model(path, mx.current_context()))
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="mxnet"))
def save_model(
    gluon_model,
    path,
    mlflow_model=None,
    conda_env=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """
    Save a Gluon model to a path on the local file system.

    :param gluon_model: Gluon model to be saved. Must be already hybridized.
    :param path: Local path where the model is to be saved.
    :param mlflow_model: MLflow model config this flavor is being added to.
    :param conda_env: {{ conda_env }}
    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
                      from datasets with valid model input (e.g. the training dataset with target
                      column omitted) and valid model output (e.g. model predictions generated on
                      the training dataset), for example:

                      .. code-block:: python

                        from mlflow.models.signature import infer_signature
                        train = df.drop_column("target_label")
                        predictions = ...  # compute model predictions
                        signature = infer_signature(train, predictions)
    :param input_example: Input example provides one or several instances of valid
                          model input. The example can be used as a hint of what data to feed the
                          model. The given example can be a Pandas DataFrame where the given
                          example will be serialized to json using the Pandas split-oriented
                          format, or a numpy array where the example will be serialized to json
                          by converting it to a list. Bytes are base64-encoded.
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}

    :raises MlflowException: If ``path`` already exists.

    .. code-block:: python
        :caption: Example

        from mxnet.gluon import Trainer
        from mxnet.gluon.contrib import estimator
        from mxnet.gluon.loss import SoftmaxCrossEntropyLoss
        from mxnet.gluon.nn import HybridSequential
        from mxnet.metric import Accuracy
        import mlflow
        # Build, compile, and train your model
        gluon_model_path = ...
        net = HybridSequential()
        with net.name_scope():
            ...
        net.hybridize()
        net.collect_params().initialize()
        softmax_loss = SoftmaxCrossEntropyLoss()
        trainer = Trainer(net.collect_params())
        est = estimator.Estimator(net=net, loss=softmax_loss, metrics=Accuracy(), trainer=trainer)
        est.fit(train_data=train_data, epochs=100, val_data=validation_data)
        # Save the model as an MLflow Model
        mlflow.gluon.save_model(net, gluon_model_path)
    """
    import mxnet as mx
    # Reject mutually exclusive environment specifications up front.
    _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
    path = os.path.abspath(path)
    if os.path.exists(path):
        raise MlflowException("Path '{}' already exists".format(path))
    # Serialized network files live under "<path>/data/".
    data_subpath = "data"
    data_path = os.path.join(path, data_subpath)
    os.makedirs(data_path)
    if mlflow_model is None:
        mlflow_model = Model()
    if signature is not None:
        mlflow_model.signature = signature
    if input_example is not None:
        _save_example(mlflow_model, input_example, path)
    # The epoch argument of the export method does not play any role in selecting
    # a specific epoch's parameters, and is there only for display purposes.
    gluon_model.export(os.path.join(data_path, _MODEL_SAVE_PATH))
    # Register both the generic pyfunc flavor and the native gluon flavor,
    # then write the MLmodel config file.
    pyfunc.add_to_model(mlflow_model, loader_module="mlflow.gluon", env=_CONDA_ENV_FILE_NAME)
    mlflow_model.add_flavor(FLAVOR_NAME, mxnet_version=mx.__version__)
    mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
    # Resolve the environment: either derive a conda env from pip requirements
    # (inferring them from the saved model when none were given), or normalize
    # the user-supplied conda env.
    if conda_env is None:
        if pip_requirements is None:
            default_reqs = get_default_pip_requirements()
            inferred_reqs = mlflow.models.infer_pip_requirements(
                path,
                FLAVOR_NAME,
                fallback=default_reqs,
            )
            default_reqs = sorted(set(inferred_reqs).union(default_reqs))
        else:
            default_reqs = None
        conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
            default_reqs,
            pip_requirements,
            extra_pip_requirements,
        )
    else:
        conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
    with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
    # Save `constraints.txt` if necessary
    if pip_constraints:
        write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
    # Save `requirements.txt`
    write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
def get_default_pip_requirements():
    """
    :return: A list of default pip requirements for MLflow Models produced by this flavor.
             Calls to :func:`save_model()` and :func:`log_model()` produce a pip environment
             that, at minimum, contains these requirements.
    """
    # The gluon flavor only requires mxnet, pinned to the installed version.
    requirements = [_get_pinned_requirement("mxnet")]
    return requirements
def get_default_conda_env():
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    # Build the conda env from the flavor's default pip requirements.
    pip_deps = get_default_pip_requirements()
    return _mlflow_conda_env(additional_pip_deps=pip_deps)
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="mxnet"))
def log_model(
    gluon_model,
    artifact_path,
    conda_env=None,
    registered_model_name=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """
    Log a Gluon model as an MLflow artifact for the current run.

    :param gluon_model: Gluon model to be saved. Must be already hybridized.
    :param artifact_path: Run-relative artifact path.
    :param conda_env: {{ conda_env }}
    :param registered_model_name: If given, create a model version under
                                  ``registered_model_name``, also creating a registered model if one
                                  with the given name does not exist.
    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
                      from datasets with valid model input (e.g. the training dataset with target
                      column omitted) and valid model output (e.g. model predictions generated on
                      the training dataset), for example:

                      .. code-block:: python

                        from mlflow.models.signature import infer_signature
                        train = df.drop_column("target_label")
                        predictions = ...  # compute model predictions
                        signature = infer_signature(train, predictions)
    :param input_example: Input example provides one or several instances of valid
                          model input. The example can be used as a hint of what data to feed the
                          model. The given example can be a Pandas DataFrame where the given
                          example will be serialized to json using the Pandas split-oriented
                          format, or a numpy array where the example will be serialized to json
                          by converting it to a list. Bytes are base64-encoded.
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}
    :return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
             metadata of the logged model.

    .. code-block:: python
        :caption: Example

        from mxnet.gluon import Trainer
        from mxnet.gluon.contrib import estimator
        from mxnet.gluon.loss import SoftmaxCrossEntropyLoss
        from mxnet.gluon.nn import HybridSequential
        from mxnet.metric import Accuracy
        import mlflow
        # Build, compile, and train your model
        net = HybridSequential()
        with net.name_scope():
            ...
        net.hybridize()
        net.collect_params().initialize()
        softmax_loss = SoftmaxCrossEntropyLoss()
        trainer = Trainer(net.collect_params())
        est = estimator.Estimator(net=net, loss=softmax_loss, metrics=Accuracy(), trainer=trainer)
        # Log metrics and log the model
        with mlflow.start_run():
            est.fit(train_data=train_data, epochs=100, val_data=validation_data)
            mlflow.gluon.log_model(net, "model")
    """
    # Delegate to Model.log, which calls back into mlflow.gluon.save_model
    # with the flavor-specific kwargs below.
    return Model.log(
        artifact_path=artifact_path,
        flavor=mlflow.gluon,
        gluon_model=gluon_model,
        conda_env=conda_env,
        registered_model_name=registered_model_name,
        signature=signature,
        input_example=input_example,
        pip_requirements=pip_requirements,
        extra_pip_requirements=extra_pip_requirements,
    )
@autologging_integration(FLAVOR_NAME)
def autolog(
log_models=True,
disable=False,
exclusive=False,
disable_for_unsupported_versions=False,
silent=False,
): # pylint: disable=unused-argument
"""
Enables (or disables) and configures autologging from Gluon to MLflow.
Logs loss and any other metrics specified in the fit
function, and optimizer data as parameters. Model checkpoints
are logged as artifacts to a 'models' directory.
:param log_models: If ``True``, trained models are logged as MLflow model artifacts.
If ``False``, trained models are not logged.
:param disable: If ``True``, disables the MXNet Gluon autologging | |
import numpy as np
import pandas as pd
import seaborn as sns
import os
from pathlib import Path
import seaborn as sns
import matplotlib.pyplot as plt
from multiprocessing import Pool
import torch
from src.data.data_utils import load_train_test_ims, load_train_test_femto
from src.models.utils import (
test_metrics_to_results_df,
)
from src.models.loss import RMSELoss, RMSLELoss
import h5py
from pathlib import Path
import os
from shutil import copyfile
import sys
from scipy.stats import pointbiserialr
import argparse
"""
Gather all the result csv's, combine them together, and then append the test scores.
Filter out poorly performing models and save the final result csv's in the models/final folder.
Save the top performing models also in the models/final folder.
"""
#######################################################
# Set Arguments
#######################################################
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--data_set",
dest="data_set",
type=str,
default="ims",
help="The data set use (either 'ims' or 'femto')",
)
# parser.add_argument(
# "-d",
# "--path_data",
# dest="path_data",
# type=str,
# help="Path to processed data"
# )
# parser.add_argument(
# "-p",
# "--proj_dir",
# dest="proj_dir",
# type=str,
# help="Location of project folder",
# )
# parser.add_argument(
# "--random_search_iter",
# dest="random_search_iter",
# type=int,
# default=3000,
# help="Number of random searches to iterate over",
# )
# parser.add_argument(
# "--epochs",
# dest="epochs",
# type=int,
# default=2000,
# help="Number of epochs to train each model",
# )
# parser.add_argument(
# "--patience",
# dest="patience",
# type=int,
# default=50,
# help="Number of epochs without change before quiting training",
# )
args = parser.parse_args()
# General Parameters
SAVE_ENTIRE_CSV = True # if you want to save the entire CSV, before filtering
ADD_TEST_RESULTS = True # if you want to append the test results
TOP_MODEL_COUNT = 2 # the number of models to save in models/final/top_models directory
# e.g. save top 10 models
# Filter parameters
R2_BOUND = 0.2 # greater than
RMSE_BOUND = 0.35 # less than
SORT_BY = "r2_test" # metric used to evaluate results
# options include: 'loss_rmse_test', 'r2_val'
# 'r2_test_avg', etc.
DATASET_TYPE = args.data_set # 'ims' or 'femto'
#####
# use multi-processing to load all the CSVs into one file
# https://stackoverflow.com/a/36590187
# wrap your csv importer in a function that can be mapped
def read_csv(filename):
    """Load one CSV file (path or file-like object) into a pandas DataFrame."""
    return pd.read_csv(filename)
def set_directories():
    """Resolve the folders used for data, results, checkpoints, and curves.

    If a ``scratch`` folder exists in the user's home directory we assume we
    are running on the HPC cluster and keep interim artifacts under
    ``~/scratch/weibull_results``; otherwise everything lives under the
    project root in ``models/interim``.

    Returns:
        tuple: ``(folder_results, folder_checkpoints, folder_learning_curves,
        folder_data, root_dir)``
    """
    scratch_path = Path.home() / "scratch"
    root_dir = Path.cwd()
    if scratch_path.exists():
        print("Assume on HPC")
        print("#### Root dir:", root_dir)
        if DATASET_TYPE == "ims":
            folder_data = Path.cwd() / "data/processed/IMS/"
        else:
            folder_data = Path.cwd() / "data/processed/FEMTO/"
        # Interim artifacts go to scratch storage on the cluster.
        scratch_base = scratch_path / "weibull_results"
        folder_results = Path(scratch_base / f"results_csv_{DATASET_TYPE}")
        folder_checkpoints = Path(scratch_base / f"checkpoints_{DATASET_TYPE}")
        folder_learning_curves = Path(scratch_base / f"learning_curves_{DATASET_TYPE}")
    else:
        print("Assume on local compute")
        print("#### Root dir:", root_dir)
        if DATASET_TYPE == "ims":
            folder_data = root_dir / "data/processed/IMS/"
            print("load IMS data", folder_data)
        else:
            folder_data = root_dir / "data/processed/FEMTO/"
            print("load FEMTO data", folder_data)
        interim = root_dir / "models/interim"
        folder_results = interim / f"results_csv_{DATASET_TYPE}"
        folder_checkpoints = interim / f"checkpoints_{DATASET_TYPE}"
        folder_learning_curves = interim / f"learning_curves_{DATASET_TYPE}"
    return (
        folder_results,
        folder_checkpoints,
        folder_learning_curves,
        folder_data,
        root_dir,
    )
def main(folder_results):
    """Read every ``.csv`` in ``folder_results`` and concatenate into one DataFrame."""
    csv_paths = [
        folder_results / name
        for name in os.listdir(folder_results)
        if name.endswith(".csv")
    ]
    # Load the files in parallel, then stack them into a single frame.
    with Pool(processes=7) as pool:
        frames = pool.map(read_csv, csv_paths)
    return pd.concat(frames, ignore_index=True)
if __name__ == "__main__":
(
folder_results,
folder_checkpoints,
folder_learning_curves,
folder_data,
root_dir,
) = set_directories()
df = main(folder_results)
# drop first column
try:
df = df.drop(columns="Unnamed: 0")
except:
pass
# add a unique identifier for each model architecture
df["date_time_seed"] = (
df["date_time"].astype(str) + "_" + df["rnd_seed_input"].astype(str)
)
# get name that model checkpoint was saved under
df["model_checkpoint_name"] = (
df["date_time"].astype(str)
+ "_"
+ df["loss_func"]
+ "_"
+ df["rnd_seed_input"].astype(str)
+ ".pt"
)
if SAVE_ENTIRE_CSV:
df.to_csv(
root_dir / "models/final" / f"{DATASET_TYPE}_results_summary_all.csv.gz",
index=False, compression="gzip",
)
#### append test results to df ####
if ADD_TEST_RESULTS:
if DATASET_TYPE == "ims":
(
x_train,
y_train,
x_val,
y_val,
x_test,
y_test,
x_train_2,
y_train_2,
x_train_3,
y_train_3,
) = load_train_test_ims(folder_data)
else:
(
x_train,
y_train,
x_val,
y_val,
x_test,
y_test,
x_train1_1,
y_train1_1,
x_train2_1,
y_train2_1,
x_train3_1,
y_train3_1,
x_val1_2,
y_val1_2,
x_val2_2,
y_val2_2,
x_val3_2,
y_val3_2,
x_test1_3,
y_test1_3,
x_test2_3,
y_test2_3,
x_test3_3,
y_test3_3,
) = load_train_test_femto(folder_data)
# load beta, eta for Weibull CDF
with h5py.File(folder_data / "eta_beta_r.hdf5", "r") as f:
eta_beta_r = f["eta_beta_r"][:]
ETA = eta_beta_r[0]
BETA = eta_beta_r[1]
y_train_days = torch.reshape(y_train[:, 0], (-1, 1))
y_val_days = torch.reshape(y_val[:, 0], (-1, 1))
y_test_days = torch.reshape(y_test[:, 0], (-1, 1))
y_train = torch.reshape(y_train[:, 1], (-1, 1))
y_val = torch.reshape(y_val[:, 1], (-1, 1))
y_test = torch.reshape(y_test[:, 1], (-1, 1))
if DATASET_TYPE == "ims":
y_train_days_2 = torch.reshape(y_train_2[:, 0], (-1, 1))
y_train_days_3 = torch.reshape(y_train_3[:, 0], (-1, 1))
y_train_2 = torch.reshape(y_train_2[:, 1], (-1, 1))
y_train_3 = torch.reshape(y_train_3[:, 1], (-1, 1))
# append test results onto results dataframe
df = test_metrics_to_results_df(folder_checkpoints, df, x_test, y_test)
standard_losses = ["mse", "rmse", "rmsle"]
# apply 0 or 1 for weibull, and for each unique loss func
for index, value in df["loss_func"].items():
if value in standard_losses:
df.loc[index, "weibull_loss"] = 0
else:
df.loc[index, "weibull_loss"] = 1
# convert to 'weibull_loss' column to integer
df["weibull_loss"] = df["weibull_loss"].astype(int)
# 0 of no dropping is used, otherwise 1
for index, value in df["prob_drop"].items():
if value > 0:
df.loc[index, "prob_drop_true"] = 1
else:
df.loc[index, "prob_drop_true"] = 0
df["prob_drop_true"] = df["prob_drop_true"].astype(int)
loss_func_list = df["loss_func"].unique()
for index, value in df["loss_func"].items():
for loss_func in loss_func_list:
df.loc[index, value] = 1
df[loss_func_list] = df[loss_func_list].fillna(0, downcast="infer")
if SAVE_ENTIRE_CSV:
df.to_csv(
root_dir / "models/final" / f"{DATASET_TYPE}_results_summary_all.csv.gz",
index=False, compression="gzip",
)
# how many unique model architectures?
print("No. unique model architectures:", len(df["date_time_seed"].unique()))
print(
"No. unique models (includes unique loss functions):", len(df["date_time_seed"])
)
    ##### Filter results and select top models #####
loss_func_list = df["loss_func"].unique()
sort_by = SORT_BY
dfr = df[
(df["r2_test"] > R2_BOUND)
& (df["loss_rmse_test"] < RMSE_BOUND)
& (df["r2_train"] > R2_BOUND)
& (df["loss_rmse_train"] < RMSE_BOUND)
& (df["r2_val"] > R2_BOUND)
& (df["loss_rmse_val"] < RMSE_BOUND)
& (df["beta"] == 2.0)
][:]
dfr = (
dfr.groupby(["date_time_seed"])
.apply(lambda x: x.sort_values([sort_by], ascending=False))
.reset_index(drop=True)
)
dfr = (
dfr.groupby(["date_time_seed"]).head(1).sort_values(by=sort_by, ascending=False)
)
# save filtered results csv
dfr.to_csv(
root_dir / "models/final" / f"{DATASET_TYPE}_results_filtered.csv", index=False
)
# create and save early stopping summary statistics
df0 = dfr[dfr["weibull_loss"] == 0][["epoch_stopped_on"]].describe()
df0 = df0.append(
pd.DataFrame(
[dfr[dfr["weibull_loss"] == 0][["epoch_stopped_on"]].median()],
index=["median"],
)
)
df0.columns = ["trad_loss_func"]
df1 = dfr[dfr["weibull_loss"] == 1][["epoch_stopped_on"]].describe()
df1 = df1.append(
pd.DataFrame(
[dfr[dfr["weibull_loss"] == 1][["epoch_stopped_on"]].median()],
index=["median"],
)
)
df1.columns = ["weibull_loss_func"]
df_summary = df0.merge(df1, left_index=True, right_index=True)
df_summary.to_csv(
root_dir / "models/final" / f"{DATASET_TYPE}_early_stop_summary_stats.csv",
index=True,
)
# select top N models and save in models/final/top_models directory
top_models = dfr["model_checkpoint_name"][:TOP_MODEL_COUNT]
Path(root_dir / f"models/final/top_models_{DATASET_TYPE}").mkdir(
parents=True, exist_ok=True
)
top_model_folder = root_dir / f"models/final/top_models_{DATASET_TYPE}"
for model_name in top_models:
copyfile(
folder_checkpoints / f"{model_name}", top_model_folder / f"{model_name}"
)
learning_curve = model_name.split(".")[0]
copyfile(
folder_learning_curves / f"{learning_curve}.png",
top_model_folder / f"{learning_curve}.png",
)
# copy model.py in src/models/ to the models/final/top_models directory so that we can
# easily load the saved checkpoints for later
copyfile(root_dir / "src/models/model.py", top_model_folder / "model.py")
# count up how often each loss functions type appears as a top performer
def change_loss_func_name(cols):
loss_func = cols[0]
if loss_func == "mse":
return "MSE"
elif loss_func == "rmse":
return "RMSE"
elif loss_func == "rmsle":
return "RMSLE"
elif loss_func == "weibull_mse":
return "Weibull-MSE\nCombined"
elif loss_func == "weibull_rmse":
return "Weibull-RMSE\nCombined"
elif loss_func == "weibull_rmsle":
return "Weibull-RMSLE\nCombined"
elif loss_func == "weibull_only_mse":
return "Weibull Only MSE"
elif loss_func == "weibull_only_rmse":
return "Weibull Only RMSE"
else:
return "Weibull Only RMLSE"
df_count = (
dfr.groupby(["loss_func"], as_index=False)
.count()[["loss_func", "date_time"]]
.rename(columns={"date_time": "count"})
.sort_values(by="count", ascending=False)
)
df_count["loss_func2"] = df_count[["loss_func"]].apply(
change_loss_func_name, axis=1
)
df_count = df_count.drop("loss_func", axis=1)
df_count = df_count.rename(columns={"loss_func2": "loss_func"})
df_count["count"] = df_count["count"].astype(float)
df_count["percent"] = 100 * df_count["count"] / df_count["count"].sum()
# save csv so we can use it later to create charts with
df_count.to_csv(
root_dir / "models/final" / f"{DATASET_TYPE}_count_results.csv", index=False
)
# perform correlation analysis over the various loss functions
dfr = df[
(df["r2_test"] > R2_BOUND)
& (df["loss_rmse_test"] < RMSE_BOUND)
& (df["r2_train"] > R2_BOUND)
& (df["loss_rmse_train"] < RMSE_BOUND)
& (df["r2_val"] > R2_BOUND)
& (df["loss_rmse_val"] < RMSE_BOUND)
& (df["beta"] == 2.0)
][:]
def change_loss_func_name_corr(cols):
loss_func = cols[0]
if loss_func == "mse":
return "MSE"
elif loss_func == "rmse":
return "RMSE"
elif loss_func == "rmsle":
return "RMSLE"
elif loss_func == "weibull_mse":
return "Weibull-MSE\nCombined"
elif loss_func | |
<reponame>amitschang/SciScript-Python
import json
import time
import sys
from io import StringIO, BytesIO
import requests as requests
import pandas
from SciServer import Authentication, Config
class Task:
    """
    Stores the name of the task that executes the API call.
    """
    # Name of the task currently associated with the API call (set by callers).
    name = None

# Module-level singleton used to track the current task.
task = Task()
def getSchemaName():
    """
    Returns the WebServiceID that identifies the schema for a user in MyScratch database with CasJobs.

    :return: WebServiceID of the user (string).
    :raises: Throws an exception if the user is not logged into SciServer (use Authentication.login for that purpose). Throws an exception if the HTTP request to the CasJobs API returns an error.
    :example: wsid = CasJobs.getSchemaName()

    .. seealso:: CasJobs.getTables.
    """
    token = Authentication.getToken()
    if token is None or token == "":
        raise Exception("User token is not defined. First log into SciServer.")
    keystoneUserId = Authentication.getKeystoneUserWithToken(token).id
    # Task name differs depending on whether we run inside SciServer Compute.
    if Config.isSciServerComputeEnvironment():
        taskName = "Compute.SciScript-Python.CasJobs.getSchemaName"
    else:
        taskName = "SciScript-Python.CasJobs.getSchemaName"
    usersUrl = Config.CasJobsRESTUri + "/users/" + keystoneUserId + "?TaskName=" + taskName
    headers = {'X-Auth-Token': token, 'Content-Type': 'application/json'}
    getResponse = requests.get(usersUrl, headers=headers)
    if getResponse.status_code != 200:
        raise Exception("Error when getting schema name. Http Response from CasJobs API returned status code " + str(getResponse.status_code) + ":\n" + getResponse.content.decode())
    jsonResponse = json.loads(getResponse.content.decode())
    return "wsid_" + str(jsonResponse["WebServicesId"])
def getTables(context="MyDB"):
"""
Gets the names, size and creation date of all tables in a database context that the user has access to.
:param context: database context (string)
:return: The result is a json object with format [{"Date":seconds,"Name":"TableName","Rows":int,"Size",int},..]
:raises: Throws an exception if the user is not logged into SciServer (use Authentication.login for that purpose). Throws an exception if the HTTP request to the CasJobs API returns an error.
:example: tables = CasJobs.getTables("MyDB")
.. seealso:: CasJobs.getSchemaName
"""
token = Authentication.getToken()
if token is not None and token != "":
taskName = "";
if Config.isSciServerComputeEnvironment():
taskName = "Compute.SciScript-Python.CasJobs.getTables"
else:
taskName = "SciScript-Python.CasJobs.getTables"
TablesUrl = Config.CasJobsRESTUri + "/contexts/" + context + "/Tables" + "?TaskName=" + taskName
headers={'X-Auth-Token': token,'Content-Type': 'application/json'}
getResponse = requests.get(TablesUrl,headers=headers)
if getResponse.status_code != 200:
raise Exception("Error when getting table description from database context " + str(context) + ".\nHttp Response from CasJobs API returned status code " + str(getResponse.status_code) + ":\n" + getResponse.content.decode());
jsonResponse = json.loads(getResponse.content.decode())
return jsonResponse
else:
raise Exception("User token is not defined. First log into SciServer.")
def executeQuery(sql, context="MyDB", format="pandas"):
    """
    Executes a synchronous SQL query in a CasJobs database context.

    :param sql: sql query (string)
    :param context: database context (string)
    :param format: parameter (string) that specifies the return type:
        'pandas': pandas.DataFrame.
        'json': a JSON string containing the query results.
        'dict': a dictionary created from the JSON string containing the query results.
        'csv': a csv string.
        'readable': an object of type io.StringIO, which has the .read() method and wraps a csv string that can be passed into pandas.read_csv for example.
        'StringIO': same as 'readable'.
        'fits': an object of type io.BytesIO, which has the .read() method and wraps the result in fits format.
        'BytesIO': same as 'fits'.
    :return: the query result table, in a format defined by the 'format' input parameter.
    :raises: Throws an exception if the HTTP request to the CasJobs API returns an error, or if parameter 'format' is not correctly specified.
    :example: table = CasJobs.executeQuery(sql="select 1 as foo, 2 as bar",format="pandas", context="MyDB")

    .. seealso:: CasJobs.submitJob, CasJobs.getTables, SkyServer.sqlSearch
    """
    # Resolve the HTTP Accept header from the requested output format.
    # NOTE: 'format' shadows the builtin; the name is part of the public API.
    if format in ("pandas", "json", "dict"):
        acceptHeader = "application/json+array"
    elif format in ("csv", "readable", "StringIO"):
        acceptHeader = "text/plain"
    elif format in ("fits", "BytesIO"):
        acceptHeader = "application/fits"
    else:
        raise Exception("Error when executing query. Illegal format parameter specification: " + str(format))

    # A task name stored on the module-level 'task' object takes precedence
    # and is consumed (reset to None) once used.
    if task.name is not None:
        taskName = task.name
        task.name = None
    elif Config.isSciServerComputeEnvironment():
        taskName = "Compute.SciScript-Python.CasJobs.executeQuery"
    else:
        taskName = "SciScript-Python.CasJobs.executeQuery"

    queryUrl = Config.CasJobsRESTUri + "/contexts/" + context + "/query" + "?TaskName=" + taskName
    payload = json.dumps({"Query": sql, "TaskName": taskName}).encode()
    requestHeaders = {'Content-Type': 'application/json', 'Accept': acceptHeader}
    token = Authentication.getToken()
    if token is not None and token != "":
        # The auth token is attached only when available; the request is
        # still sent without one.
        requestHeaders['X-Auth-Token'] = token

    response = requests.post(queryUrl, data=payload, headers=requestHeaders, stream=True)
    if response.status_code != 200:
        raise Exception("Error when executing query. Http Response from CasJobs API returned status code " + str(response.status_code) + ":\n" + response.content.decode())

    if format in ("readable", "StringIO"):
        return StringIO(response.content.decode())
    if format == "pandas":
        parsed = json.loads(response.content.decode())
        frames = [pandas.DataFrame(result['Data'], columns=result['Columns'])
                  for result in parsed['Result']]
        # More than one result set yields a list of DataFrames; a single
        # result set yields just the DataFrame.
        return frames if len(frames) > 1 else frames[0]
    if format == "dict":
        return json.loads(response.content.decode())
    if format in ("csv", "json"):
        return response.content.decode()
    if format in ("fits", "BytesIO"):
        return BytesIO(response.content)
    # Unreachable: every accepted format was handled above.
    raise Exception("Error when executing query. Illegal format parameter specification: " + str(format))
def submitJob(sql, context="MyDB"):
    """
    Submits an asynchronous SQL query to the CasJobs queue.

    :param sql: sql query (string)
    :param context: database context (string)
    :return: Returns the CasJobs jobID (integer).
    :raises: Throws an exception if the user is not logged into SciServer (use Authentication.login for that purpose). Throws an exception if the HTTP request to the CasJobs API returns an error.
    :example: jobid = CasJobs.submitJob("select 1 as foo","MyDB")

    .. seealso:: CasJobs.executeQuery, CasJobs.getJobStatus, CasJobs.waitForJob, CasJobs.cancelJob.
    """
    token = Authentication.getToken()
    if token is None or token == "":
        raise Exception("User token is not defined. First log into SciServer.")

    if Config.isSciServerComputeEnvironment():
        taskName = "Compute.SciScript-Python.CasJobs.submitJob"
    else:
        taskName = "SciScript-Python.CasJobs.submitJob"

    jobsUrl = Config.CasJobsRESTUri + "/contexts/" + context + "/jobs" + "?TaskName=" + taskName
    payload = json.dumps({"Query": sql, "TaskName": taskName}).encode()
    requestHeaders = {'Content-Type': 'application/json', 'Accept': "text/plain", 'X-Auth-Token': token}
    response = requests.put(jobsUrl, data=payload, headers=requestHeaders)
    if response.status_code != 200:
        raise Exception("Error when submitting a job. Http Response from CasJobs API returned status code " + str(response.status_code) + ":\n" + response.content.decode())
    # The API answers with the numeric job id as plain text.
    return int(response.content.decode())
def getJobStatus(jobId):
    """
    Shows the status of a job submitted to CasJobs.

    :param jobId: id of job (integer)
    :return: Returns a dictionary object containing the job status and related metadata. The "Status" field can be equal to 0 (Ready), 1 (Started), 2 (Canceling), 3 (Canceled), 4 (Failed) or 5 (Finished). If jobId is the empty string, then returns a list with the statuses of all previous jobs.
    :raises: Throws an exception if the user is not logged into SciServer (use Authentication.login for that purpose). Throws an exception if the HTTP request to the CasJobs API returns an error.
    :example: status = CasJobs.getJobStatus(CasJobs.submitJob("select 1"))

    .. seealso:: CasJobs.submitJob, CasJobs.waitForJob, CasJobs.cancelJob.
    """
    token = Authentication.getToken()
    if token is None or token == "":
        raise Exception("User token is not defined. First log into SciServer.")

    if Config.isSciServerComputeEnvironment():
        taskName = "Compute.SciScript-Python.CasJobs.getJobStatus"
    else:
        taskName = "SciScript-Python.CasJobs.getJobStatus"

    statusUrl = Config.CasJobsRESTUri + "/jobs/" + str(jobId) + "?TaskName=" + taskName
    requestHeaders = {'X-Auth-Token': token, 'Content-Type': 'application/json'}
    response = requests.get(statusUrl, headers=requestHeaders)
    if response.status_code != 200:
        raise Exception("Error when getting the status of job " + str(jobId) + ".\nHttp Response from CasJobs API returned status code " + str(response.status_code) + ":\n" + response.content.decode())
    return json.loads(response.content.decode())
def cancelJob(jobId):
"""
Cancels a job already submitted.
:param jobId: id of job (integer)
:return: Returns True if the job was canceled successfully.
:raises: Throws an exception if the user is not logged into SciServer (use Authentication.login for that purpose). Throws an exception if the HTTP request to the CasJobs API returns an error.
:example: response = CasJobs.cancelJob(CasJobs.submitJob("select 1"))
.. seealso:: CasJobs.submitJob, CasJobs.waitForJob.
"""
token = Authentication.getToken()
if token is not None and token != "":
taskName = "";
if Config.isSciServerComputeEnvironment():
taskName = "Compute.SciScript-Python.CasJobs.cancelJob"
else:
taskName = "SciScript-Python.CasJobs.cancelJob"
QueryUrl = Config.CasJobsRESTUri + "/jobs/" + str(jobId) + "?TaskName=" + taskName
headers={'X-Auth-Token': token,'Content-Type': 'application/json'}
response =requests.delete(QueryUrl,headers=headers)
if response.status_code != 200:
raise Exception("Error when canceling job " + str(jobId) + ".\nHttp Response from CasJobs | |
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines an interface for Ito processes.
Ito processes underlie most quantitative finance models. This module defines
a framework for describing Ito processes. An Ito process is usually defined
via an Ito SDE:
```
dX = a(t, X_t) dt + b(t, X_t) dW_t
```
where `a(t, x)` is a function taking values in `R^n`, `b(t, X_t)` is a function
taking values in `n x n` matrices. For a complete mathematical definition,
including the regularity conditions that must be imposed on the coefficients
`a(t, X)` and `b(t, X)`, see Ref [1].
#### References:
  [1]: Bernt Øksendal. Stochastic Differential Equations: An Introduction with
    Applications. Springer. 2010.
"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class ItoProcess(object):
"""Interface for specifying Ito processes.
Interface for defining stochastic process defined by the Ito SDE:
```None
dX_i = a_i(t, X) dt + Sum(S_{ij}(t, X) dW_j for 1 <= j <= n), 1 <= i <= n
```
The vector coefficient `a_i` is referred to as the drift of the process and
the matrix `S_{ij}` as the volatility of the process. For the process to be
well defined, these coefficients need to satisfy certain technical conditions
which may be found in Ref. [1]. The vector `dW_j` represents independent
Brownian increments.
For a simple and instructive example of the implementation of this interface,
see `models.GenericItoProcess`.
#### References
  [1]: Bernt Øksendal. Stochastic Differential Equations: An Introduction with
    Applications. Springer. 2010.
"""
  @abc.abstractmethod
  def name(self):
    """The name to give to ops created by this class.

    Returns:
      The name used for ops created by implementations of this interface.
      NOTE(review): presumably a Python string -- confirm against concrete
      implementations such as `models.GenericItoProcess`.
    """
    pass
  @abc.abstractmethod
  def dim(self):
    """The dimension of the process. A positive python integer.

    Returns:
      A positive Python integer: the dimension `n` of the process `X`.
    """
    pass
  @abc.abstractmethod
  def dtype(self):
    """The data type of process realizations.

    Returns:
      The dtype of the samples produced by `sample_paths`.
      NOTE(review): presumably a TensorFlow dtype object -- confirm against
      concrete implementations.
    """
    pass
  @abc.abstractmethod
  def drift_fn(self):
    """Python callable calculating instantaneous drift.

    The callable should accept two real `Tensor` arguments of the same dtype.
    The first argument is the scalar time t, the second argument is the value of
    Ito process X as a tensor of shape `batch_shape + [dim]`. The result is
    value of drift a(t, X). The return value of the callable is a real `Tensor`
    of the same dtype as the input arguments and of shape `batch_shape + [dim]`.

    Returns:
      A Python callable `a(t, x)` mapping a scalar time `Tensor` and a state
      `Tensor` of shape `batch_shape + [dim]` to the drift, a `Tensor` of the
      same dtype and shape `batch_shape + [dim]`.
    """
    pass
  @abc.abstractmethod
  def volatility_fn(self):
    """Python callable calculating the instantaneous volatility matrix.

    The callable should accept two real `Tensor` arguments of the same dtype.
    The first argument is the scalar time t and the second argument is the value
    of Ito process X as a tensor of shape `batch_shape + [dim]`. The result is
    the instantaneous volatility matrix at time t and location X: S(t, X). The
    return value of the callable is a real `Tensor` of the same dtype as the
    input arguments and of shape `batch_shape + [dim, dim]`.

    Returns:
      A Python callable `S(t, x)` mapping a scalar time `Tensor` and a state
      `Tensor` of shape `batch_shape + [dim]` to the volatility matrix, a
      `Tensor` of the same dtype and shape `batch_shape + [dim, dim]`.
    """
    pass
  @abc.abstractmethod
  def sample_paths(self,
                   times,
                   num_samples=1,
                   initial_state=None,
                   random_type=None,
                   seed=None,
                   **kwargs):
    """Returns a sample of paths from the process.

    Args:
      times: Rank 1 `Tensor` of increasing positive real values. The times at
        which the path points are to be evaluated.
      num_samples: Positive scalar `int`. The number of paths to draw.
      initial_state: `Tensor` of shape `[dim]`. The initial state of the
        process.
        Default value: None which maps to a zero initial state.
      random_type: Enum value of `RandomType`. The type of (quasi)-random
        number generator to use to generate the paths.
        Default value: None which maps to the standard pseudo-random numbers.
      seed: Seed for the random number generator. The seed is
        only relevant if `random_type` is one of
        `[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
        STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
        `HALTON_RANDOMIZED` the seed should be a Python integer. For
        `STATELESS` and `STATELESS_ANTITHETIC` it must be supplied as an
        integer `Tensor` of shape `[2]`.
        Default value: `None` which means no seed is set.
      **kwargs: Any other keyword args needed by an implementation.

    Returns:
      A real `Tensor` of shape [num_samples, k, n] where `k` is the size of the
      `times`, `n` is the dimension of the process.
    """
    pass
@abc.abstractmethod
def fd_solver_backward(self,
start_time,
end_time,
coord_grid,
values_grid,
discounting=None,
one_step_fn=None,
boundary_conditions=None,
start_step_count=0,
num_steps=None,
time_step=None,
values_transform_fn=None,
dtype=None,
**kwargs):
"""Returns a solver for Feynman-Kac PDE associated to the process.
This method applies a finite difference method to solve the final value
problem as it appears in the Feynman-Kac formula associated to this Ito
process. The Feynman-Kac PDE is closely related to the backward Kolomogorov
equation associated to the stochastic process and allows for the inclusion
of a discounting function.
For more details of the Feynman-Kac theorem see [1]. The PDE solved by this
method is:
```None
V_t + Sum[mu_i(t, x) V_i, 1<=i<=n] +
(1/2) Sum[ D_{ij} V_{ij}, 1 <= i,j <= n] - r(t, x) V = 0
```
In the above, `V_t` is the derivative of `V` with respect to `t`,
`V_i` is the partial derivative with respect to `x_i` and `V_{ij}` the
(mixed) partial derivative with respect to `x_i` and `x_j`. `mu_i` is the
drift of this process and `D_{ij}` are the components of the diffusion
tensor:
```None
D_{ij}(t,x) = (Sigma(t,x) . Transpose[Sigma(t,x)])_{ij}
```
This method evolves a spatially discretized solution of the above PDE from
time `t0` to time `t1 < t0` (i.e. backwards in time).
The solution `V(t,x)` is assumed to be discretized on an `n`-dimensional
rectangular grid. A rectangular grid, G, in n-dimensions may be described
by specifying the coordinates of the points along each axis. For example,
a 2 x 4 grid in two dimensions can be specified by taking the cartesian
product of [1, 3] and [5, 6, 7, 8] to yield the grid points with
coordinates: `[(1, 5), (1, 6), (1, 7), (1, 8), (3, 5) ... (3, 8)]`.
This method allows batching of solutions. In this context, batching means
the ability to represent and evolve multiple independent functions `V`
(e.g. V1, V2 ...) simultaneously. A single discretized solution is specified
by stating its values at each grid point. This can be represented as a
`Tensor` of shape [d1, d2, ... dn] where di is the grid size along the `i`th
axis. A batch of such solutions is represented by a `Tensor` of shape:
[K, d1, d2, ... dn] where `K` is the batch size. This method only requires
that the input parameter `values_grid` be broadcastable with shape
[K, d1, ... dn].
The evolution of the solution from `t0` to `t1` is often done by
discretizing the differential equation to a difference equation along
the spatial and temporal axes. The temporal discretization is given by a
(sequence of) time steps [dt_1, dt_2, ... dt_k] such that the sum of the
time steps is equal to the total time step `t0 - t1`. If a uniform time
step is used, it may equivalently be specified by stating the number of
steps (n_steps) to take. This method provides both options via the
`time_step` and `num_steps` parameters. However, not all methods need
discretization along time direction (e.g. method of lines) so this argument
may not be applicable to some implementations.
The workhorse of this method is the `one_step_fn`. For the commonly used
methods, see functions in `math.pde.steppers` module.
The mapping between the arguments of this method and the above
equation are described in the Args section below.
For a simple instructive example of implementation of this method, see
`models.GenericItoProcess.fd_solver_backward`.
TODO(b/142309558): Complete documentation.
Args:
start_time: Real positive scalar `Tensor`. The start time of the grid.
Corresponds to time `t0` above.
end_time: Real scalar `Tensor` smaller than the `start_time` and greater
than zero. The time to step back to. Corresponds to time `t1` above.
coord_grid: List of | |
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ListFirmwaresRequest(AbstractModel):
    """ListFirmwares request structure.

    """

    def __init__(self):
        r"""
        :param PageNum: Page number to fetch
        :type PageNum: int
        :param PageSize: Page size
        :type PageSize: int
        :param ProductID: Product ID
        :type ProductID: str
        :param Filters: Search filter conditions
        :type Filters: list of SearchKeyword
        """
        self.PageNum = None
        self.PageSize = None
        self.ProductID = None
        self.Filters = None

    def _deserialize(self, params):
        self.PageNum = params.get("PageNum")
        self.PageSize = params.get("PageSize")
        self.ProductID = params.get("ProductID")
        raw_filters = params.get("Filters")
        if raw_filters is not None:
            self.Filters = []
            for entry in raw_filters:
                keyword = SearchKeyword()
                keyword._deserialize(entry)
                self.Filters.append(keyword)
        # Warn about incoming keys that do not map to any known attribute.
        # NOTE: message text (including the 'fileds' typo) is kept identical
        # to the rest of this SDK.
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class ListFirmwaresResponse(AbstractModel):
    """ListFirmwares response structure.

    """

    def __init__(self):
        r"""
        :param TotalCount: Total number of firmwares
        :type TotalCount: int
        :param Firmwares: Firmware list
        :type Firmwares: list of FirmwareInfo
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
        """
        self.TotalCount = None
        self.Firmwares = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        raw_firmwares = params.get("Firmwares")
        if raw_firmwares is not None:
            self.Firmwares = []
            for entry in raw_firmwares:
                info = FirmwareInfo()
                info._deserialize(entry)
                self.Firmwares.append(info)
        self.RequestId = params.get("RequestId")
class ModifyDataForwardRequest(AbstractModel):
    """ModifyDataForward request structure.

    """

    def __init__(self):
        r"""
        :param ProductId: Product ID.
        :type ProductId: str
        :param ForwardAddr: Forwarding address. If an authentication token is used, pass it in yourself, e.g. [{\"forward\":{\"api\":\"http://172.16.31.10:1080/sub.php\",\"token\":\"testtoken\"}}]
        :type ForwardAddr: str
        :param DataChose: 1: forward data messages; 2: forward device online/offline status; 3: forward both data messages and online/offline status
        :type DataChose: int
        """
        self.ProductId = None
        self.ForwardAddr = None
        self.DataChose = None

    def _deserialize(self, params):
        self.ProductId = params.get("ProductId")
        self.ForwardAddr = params.get("ForwardAddr")
        self.DataChose = params.get("DataChose")
        # Warn about incoming keys that do not map to any known attribute.
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class ModifyDataForwardResponse(AbstractModel):
    """ModifyDataForward response structure.

    """

    def __init__(self):
        r"""
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyDataForwardStatusRequest(AbstractModel):
    """ModifyDataForwardStatus request structure.

    """

    def __init__(self):
        r"""
        :param ProductId: Product ID.
        :type ProductId: str
        :param Status: Forwarding status: 1 enabled, 0 disabled.
        :type Status: int
        """
        self.ProductId = None
        self.Status = None

    def _deserialize(self, params):
        self.ProductId = params.get("ProductId")
        self.Status = params.get("Status")
        # Warn about incoming keys that do not map to any known attribute.
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class ModifyDataForwardStatusResponse(AbstractModel):
    """ModifyDataForwardStatus response structure.

    """

    def __init__(self):
        r"""
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyDeviceLogLevelRequest(AbstractModel):
    """ModifyDeviceLogLevel request structure.

    """

    def __init__(self):
        r"""
        :param ProductId: Product ID
        :type ProductId: str
        :param DeviceName: Device name
        :type DeviceName: str
        :param LogLevel: Log level. 0: off; 1: error; 2: warning; 3: info; 4: debug
        :type LogLevel: int
        """
        self.ProductId = None
        self.DeviceName = None
        self.LogLevel = None

    def _deserialize(self, params):
        self.ProductId = params.get("ProductId")
        self.DeviceName = params.get("DeviceName")
        self.LogLevel = params.get("LogLevel")
        # Warn about incoming keys that do not map to any known attribute.
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class ModifyDeviceLogLevelResponse(AbstractModel):
    """ModifyDeviceLogLevel response structure.

    """

    def __init__(self):
        r"""
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyDeviceRequest(AbstractModel):
    """ModifyDevice request structure.

    """

    def __init__(self):
        r"""
        :param ProductId: ID of the product the device belongs to
        :type ProductId: str
        :param DeviceName: Device name
        :type DeviceName: str
        :param EnableState: Device state to set: 1 enabled, 0 disabled
        :type EnableState: int
        """
        self.ProductId = None
        self.DeviceName = None
        self.EnableState = None

    def _deserialize(self, params):
        self.ProductId = params.get("ProductId")
        self.DeviceName = params.get("DeviceName")
        self.EnableState = params.get("EnableState")
        # Warn about incoming keys that do not map to any known attribute.
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class ModifyDeviceResponse(AbstractModel):
    """ModifyDevice response structure.

    """

    def __init__(self):
        r"""
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyForwardRuleRequest(AbstractModel):
    """ModifyForwardRule request structure.

    """

    def __init__(self):
        r"""
        :param ProductID: Product ID
        :type ProductID: str
        :param MsgType: Message type
        :type MsgType: int
        :param Skey: Console Skey
        :type Skey: str
        :param QueueRegion: Queue region
        :type QueueRegion: str
        :param QueueType: Queue type. 0: CMQ; 1: CKafka
        :type QueueType: int
        :param Consecretid: Temporary secret
        :type Consecretid: str
        :param InstanceId: Instance ID
        :type InstanceId: str
        :param InstanceName: Instance name
        :type InstanceName: str
        :param QueueID: Queue or topic ID
        :type QueueID: str
        :param QueueName: Queue or topic name
        :type QueueName: str
        """
        self.ProductID = None
        self.MsgType = None
        self.Skey = None
        self.QueueRegion = None
        self.QueueType = None
        self.Consecretid = None
        self.InstanceId = None
        self.InstanceName = None
        self.QueueID = None
        self.QueueName = None

    def _deserialize(self, params):
        self.ProductID = params.get("ProductID")
        self.MsgType = params.get("MsgType")
        self.Skey = params.get("Skey")
        self.QueueRegion = params.get("QueueRegion")
        self.QueueType = params.get("QueueType")
        self.Consecretid = params.get("Consecretid")
        self.InstanceId = params.get("InstanceId")
        self.InstanceName = params.get("InstanceName")
        self.QueueID = params.get("QueueID")
        self.QueueName = params.get("QueueName")
        # Warn about incoming keys that do not map to any known attribute.
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class ModifyForwardRuleResponse(AbstractModel):
    """ModifyForwardRule response structure.

    """

    def __init__(self):
        r"""
        :param Endpoint: Tencent Cloud account
        :type Endpoint: str
        :param ProductID: Product ID
        :type ProductID: str
        :param Result: Result
        :type Result: int
        :param ErrMsg: Error message
        :type ErrMsg: str
        :param QueueType: Queue type. 0: CMQ; 1: CKafka
        :type QueueType: int
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
        """
        self.Endpoint = None
        self.ProductID = None
        self.Result = None
        self.ErrMsg = None
        self.QueueType = None
        self.RequestId = None

    def _deserialize(self, params):
        self.Endpoint = params.get("Endpoint")
        self.ProductID = params.get("ProductID")
        self.Result = params.get("Result")
        self.ErrMsg = params.get("ErrMsg")
        self.QueueType = params.get("QueueType")
        self.RequestId = params.get("RequestId")
class ModifyModelDefinitionRequest(AbstractModel):
    """ModifyModelDefinition request structure.

    """

    def __init__(self):
        r"""
        :param ProductId: Product ID
        :type ProductId: str
        :param ModelSchema: Data template definition
        :type ModelSchema: str
        """
        self.ProductId = None
        self.ModelSchema = None

    def _deserialize(self, params):
        self.ProductId = params.get("ProductId")
        self.ModelSchema = params.get("ModelSchema")
        # Warn about incoming keys that do not map to any known attribute.
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class ModifyModelDefinitionResponse(AbstractModel):
    """ModifyModelDefinition response structure.

    """

    def __init__(self):
        r"""
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ModifyProductDynamicRegisterRequest(AbstractModel):
    """ModifyProductDynamicRegister request structure.

    """

    def __init__(self):
        r"""
        :param ProductId: Product ID
        :type ProductId: str
        :param RegisterType: Dynamic registration type. 0: disabled; 1: pre-create devices; 2: auto-create devices
        :type RegisterType: int
        :param RegisterLimit: Maximum number of dynamically registered devices
        :type RegisterLimit: int
        """
        self.ProductId = None
        self.RegisterType = None
        self.RegisterLimit = None

    def _deserialize(self, params):
        self.ProductId = params.get("ProductId")
        self.RegisterType = params.get("RegisterType")
        self.RegisterLimit = params.get("RegisterLimit")
        # Warn about incoming keys that do not map to any known attribute.
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class ModifyProductDynamicRegisterResponse(AbstractModel):
    """ModifyProductDynamicRegister response structure.

    """

    def __init__(self):
        r"""
        :param RegisterType: Dynamic registration type. 0: disabled; 1: pre-create devices; 2: auto-create devices
        :type RegisterType: int
        :param ProductSecret: Product secret for dynamic registration
        :type ProductSecret: str
        :param RegisterLimit: Maximum number of dynamically registered devices
        :type RegisterLimit: int
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
        """
        self.RegisterType = None
        self.ProductSecret = None
        self.RegisterLimit = None
        self.RequestId = None

    def _deserialize(self, params):
        self.RegisterType = params.get("RegisterType")
        self.ProductSecret = params.get("ProductSecret")
        self.RegisterLimit = params.get("RegisterLimit")
        self.RequestId = params.get("RequestId")
class ModifyProductRequest(AbstractModel):
    """ModifyProduct request structure.

    """

    def __init__(self):
        r"""
        :param ProductId: Product ID
        :type ProductId: str
        :param ProductName: New product name (a combination of Chinese characters, letters, digits and underscores; at most 20 characters)
        :type ProductName: str
        :param ProductDescription: New product description (at most 128 characters)
        :type ProductDescription: str
        """
        self.ProductId = None
        self.ProductName = None
        self.ProductDescription = None

    def _deserialize(self, params):
        self.ProductId = params.get("ProductId")
        self.ProductName = params.get("ProductName")
        self.ProductDescription = params.get("ProductDescription")
        # Warn about incoming keys that do not map to any known attribute.
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class ModifyProductResponse(AbstractModel):
    """ModifyProduct response structure.

    """

    def __init__(self):
        r"""
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ProductModelDefinition(AbstractModel):
    """Product model definition.

    """

    def __init__(self):
        r"""
        :param ProductId: Product ID
        :type ProductId: str
        :param ModelDefine: Model definition
        :type ModelDefine: str
        :param UpdateTime: Update time, epoch seconds
        :type UpdateTime: int
        :param CreateTime: Creation time, epoch seconds
        :type CreateTime: int
        :param CategoryModel: Snapshot of the model of the product's category (taken when the product was created)
        Note: this field may return null, indicating that no valid value was obtained.
        :type CategoryModel: str
        :param NetTypeModel: Model of the product's connection type
        Note: this field may return null, indicating that no valid value was obtained.
        :type NetTypeModel: str
        """
        self.ProductId = None
        self.ModelDefine = None
        self.UpdateTime = None
        self.CreateTime = None
        self.CategoryModel = None
        self.NetTypeModel = None

    def _deserialize(self, params):
        self.ProductId = params.get("ProductId")
        self.ModelDefine = params.get("ModelDefine")
        self.UpdateTime = params.get("UpdateTime")
        self.CreateTime = params.get("CreateTime")
        self.CategoryModel = params.get("CategoryModel")
        self.NetTypeModel = params.get("NetTypeModel")
        # Warn about incoming keys that do not map to any known attribute.
        unknown_keys = set(params.keys()) - set(vars(self))
        if unknown_keys:
            warnings.warn("%s fileds are useless." % ",".join(unknown_keys))
class ProductTemplate(AbstractModel):
"""产品分类实体
"""
def __init__(self):
r"""
:param Id: 实体ID
:type Id: int
:param CategoryKey: 分类字段
:type CategoryKey: str
:param CategoryName: 分类名称
:type CategoryName: str
:param ParentId: 上层实体ID
:type ParentId: int
:param ModelTemplate: 物模型
:type ModelTemplate: str
:param ListOrder: 排列顺序
注意:此字段可能返回 null,表示取不到有效值。
:type ListOrder: int
:param IconUrl: 分类图标地址
注意:此字段可能返回 null,表示取不到有效值。
:type IconUrl: str
:param IconUrlGrid: 九宫格图片地址
注意:此字段可能返回 null,表示取不到有效值。
:type IconUrlGrid: str
"""
self.Id = None
self.CategoryKey = None
self.CategoryName = None
self.ParentId = None
self.ModelTemplate | |
# examples/inspection/plot_linear_model_coefficient_interpretation.py
"""
======================================================================
Common pitfalls in the interpretation of coefficients of linear models
======================================================================
In linear models, the target value is modeled as
a linear combination of the features (see the :ref:`linear_model` User Guide
section for a description of a set of linear models available in
scikit-learn).
Coefficients in multiple linear models represent the relationship between the
given feature, :math:`X_i` and the target, :math:`y`, assuming that all the
other features remain constant (`conditional dependence
<https://en.wikipedia.org/wiki/Conditional_dependence>`_).
This is different from plotting :math:`X_i` versus :math:`y` and fitting a
linear relationship: in that case all possible values of the other features are
taken into account in the estimation (marginal dependence).
This example will provide some hints in interpreting coefficient in linear
models, pointing at problems that arise when either the linear model is not
appropriate to describe the dataset, or when features are correlated.
We will use data from the `"Current Population Survey"
<https://www.openml.org/d/534>`_ from 1985 to predict
wage as a function of various features such as experience, age, or education.
.. contents::
:local:
:depth: 1
"""
print(__doc__)
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %%
# The dataset: wages
# ------------------
#
# We fetch the data from `OpenML <http://openml.org/>`_.
# Note that setting the parameter `as_frame` to True will retrieve the data
# as a pandas dataframe.
from sklearn.datasets import fetch_openml
# data_id=534 is the "Current Population Survey" (CPS-1985) wage dataset.
survey = fetch_openml(data_id=534, as_frame=True)
# %%
# Then, we identify features `X` and targets `y`: the column WAGE is our
# target variable (i.e., the variable which we want to predict).
#
X = survey.data[survey.feature_names]
X.describe(include="all")
# %%
# Note that the dataset contains categorical and numerical variables.
# We will need to take this into account when preprocessing the dataset
# thereafter.
X.head()
# %%
# Our target for prediction: the wage.
# Wages are described as floating-point number in dollars per hour.
# ravel() flattens the single-column target into the 1-D array expected by
# scikit-learn estimators.
y = survey.target.values.ravel()
survey.target.head()
# %%
# We split the sample into a train and a test dataset.
# Only the train dataset will be used in the following exploratory analysis.
# This is a way to emulate a real situation where predictions are performed on
# an unknown target, and we don't want our analysis and decisions to be biased
# by our knowledge of the test data.
from sklearn.model_selection import train_test_split
# A fixed random_state makes the split (and the whole example) reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# %%
# First, let's get some insights by looking at the variable distributions and
# at the pairwise relationships between them. Only numerical
# variables will be used. In the following plot, each dot represents a sample.
#
# .. _marginal_dependencies:
train_dataset = X_train.copy()
# Insert WAGE first so it appears in the first row/column of the pairplot.
train_dataset.insert(0, "WAGE", y_train)
_ = sns.pairplot(train_dataset, kind="reg", diag_kind="kde")
# %%
# Looking closely at the WAGE distribution reveals that it has a
# long tail. For this reason, we should take its logarithm
# to turn it approximately into a normal distribution (linear models such
# as ridge or lasso work best for a normal distribution of error).
#
# The WAGE is increasing when EDUCATION is increasing.
# Note that the dependence between WAGE and EDUCATION
# represented here is a marginal dependence, i.e., it describes the behavior
# of a specific variable without keeping the others fixed.
#
# Also, the EXPERIENCE and AGE are strongly linearly correlated.
#
# .. _the-pipeline:
#
# The machine-learning pipeline
# -----------------------------
#
# To design our machine-learning pipeline, we first manually
# check the type of data that we are dealing with:
survey.data.info()
# %%
# As seen previously, the dataset contains columns with different data types
# and we need to apply specific preprocessing for each data type.
# In particular categorical variables cannot be included in a linear model if
# not coded as integers first. In addition, to avoid categorical features
# being treated as ordered values, we need to one-hot-encode them.
# Our pre-processor will
#
# - one-hot encode (i.e., generate a column by category) the categorical
#   columns;
# - as a first approach (we will see after how the normalisation of numerical
#   values will affect our discussion), keep numerical values as they are.
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder
categorical_columns = ["RACE", "OCCUPATION", "SECTOR", "MARR", "UNION", "SEX", "SOUTH"]
numerical_columns = ["EDUCATION", "EXPERIENCE", "AGE"]
preprocessor = make_column_transformer(
    # drop="if_binary" keeps a single column for two-category features,
    # avoiding redundant, perfectly-collinear one-hot columns.
    (OneHotEncoder(drop="if_binary"), categorical_columns),
    remainder="passthrough",
    verbose_feature_names_out=False,
)
# %%
# To describe the dataset as a linear model we use a ridge regressor
# with a very small regularization and to model the logarithm of the WAGE.
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.compose import TransformedTargetRegressor
model = make_pipeline(
    preprocessor,
    TransformedTargetRegressor(
        # alpha=1e-10 makes the ridge essentially an ordinary least-squares
        # fit; the target is modelled on a log10 scale and predictions are
        # mapped back to dollars/hour with exp10.
        regressor=Ridge(alpha=1e-10), func=np.log10, inverse_func=sp.special.exp10
    ),
)
# %%
# Processing the dataset
# ----------------------
#
# First, we fit the model.
_ = model.fit(X_train, y_train)
# %%
# Then we check the performance of the computed model plotting its predictions
# on the test set and computing,
# for example, the median absolute error of the model.
from sklearn.metrics import median_absolute_error
# Score on the training set first...
y_pred = model.predict(X_train)
mae = median_absolute_error(y_train, y_pred)
string_score = f"MAE on training set: {mae:.2f} $/hour"
# ...then on the held-out test set.
y_pred = model.predict(X_test)
mae = median_absolute_error(y_test, y_pred)
string_score += f"\nMAE on testing set: {mae:.2f} $/hour"
fig, ax = plt.subplots(figsize=(5, 5))
plt.scatter(y_test, y_pred)
# Identity line in axes coordinates, so it spans the full plot diagonal.
ax.plot([0, 1], [0, 1], transform=ax.transAxes, ls="--", c="red")
plt.text(3, 20, string_score)
plt.title("Ridge model, small regularization")
plt.ylabel("Model predictions")
plt.xlabel("Truths")
plt.xlim([0, 27])
_ = plt.ylim([0, 27])
# %%
# The model learnt is far from being a good model making accurate predictions:
# this is obvious when looking at the plot above, where good predictions
# should lie on the red line.
#
# In the following section, we will interpret the coefficients of the model.
# While we do so, we should keep in mind that any conclusion we draw is
# about the model that we build, rather than about the true (real-world)
# generative process of the data.
#
# Interpreting coefficients: scale matters
# ---------------------------------------------
#
# First of all, we can take a look to the values of the coefficients of the
# regressor we have fitted.
# model[:-1] is the pipeline without its final step, i.e. the fitted
# preprocessor, which knows the expanded (one-hot) feature names.
feature_names = model[:-1].get_feature_names_out()
coefs = pd.DataFrame(
    model.named_steps["transformedtargetregressor"].regressor_.coef_,
    columns=["Coefficients"],
    index=feature_names,
)
coefs
# %%
# The AGE coefficient is expressed in "dollars/hour per living years" while the
# EDUCATION one is expressed in "dollars/hour per years of education". This
# representation of the coefficients has the benefit of making clear the
# practical predictions of the model: an increase of :math:`1` year in AGE
# means a decrease of :math:`0.030867` dollars/hour, while an increase of
# :math:`1` year in EDUCATION means an increase of :math:`0.054699`
# dollars/hour. On the other hand, categorical variables (as UNION or SEX) are
# adimensional numbers taking either the value 0 or 1. Their coefficients
# are expressed in dollars/hour. Then, we cannot compare the magnitude of
# different coefficients since the features have different natural scales, and
# hence value ranges, because of their different unit of measure. This is more
# visible if we plot the coefficients.
coefs.plot(kind="barh", figsize=(9, 7))
plt.title("Ridge model, small regularization")
plt.axvline(x=0, color=".5")
plt.subplots_adjust(left=0.3)
# %%
# Indeed, from the plot above the most important factor in determining WAGE
# appears to be the
# variable UNION, even if our intuition might tell us that variables
# like EXPERIENCE should have more impact.
#
# Looking at the coefficient plot to gauge feature importance can be
# misleading as some of them vary on a small scale, while others, like AGE,
# varies a lot more, several decades.
#
# This is visible if we compare the standard deviations of different
# features.
# Apply the fitted preprocessor to get the one-hot expanded design matrix,
# labelled with the expanded feature names.
X_train_preprocessed = pd.DataFrame(
    model.named_steps["columntransformer"].transform(X_train), columns=feature_names
)
X_train_preprocessed.std(axis=0).plot(kind="barh", figsize=(9, 7))
plt.title("Features std. dev.")
plt.subplots_adjust(left=0.3)
# %%
# Multiplying the coefficients by the standard deviation of the related
# feature would reduce all the coefficients to the same unit of measure.
# As we will see :ref:`after<scaling_num>` this is equivalent to normalizing
# numerical variables to their standard deviation,
# as :math:`y = \sum{coef_i \times X_i} =
# \sum{(coef_i \times std_i) \times (X_i / std_i)}`.
#
# In that way, we emphasize that the
# greater the variance of a feature, the larger the weight of the corresponding
# coefficient on the output, all else being equal.
coefs = pd.DataFrame(
    model.named_steps["transformedtargetregressor"].regressor_.coef_
    * X_train_preprocessed.std(axis=0),
    columns=["Coefficient importance"],
    index=feature_names,
)
coefs.plot(kind="barh", figsize=(9, 7))
plt.title("Ridge model, small regularization")
plt.axvline(x=0, color=".5")
plt.subplots_adjust(left=0.3)
# %%
# Now that the coefficients have been scaled, we can safely compare them.
#
# .. warning::
#
# Why does the plot above suggest that an increase in age leads to a
# decrease in wage? Why the :ref:`initial pairplot
# <marginal_dependencies>` is telling the opposite?
#
# The plot above tells us about dependencies between a specific feature and
# the target when all other features remain constant, i.e., **conditional
# dependencies**. An increase of the AGE will induce a decrease
# of the WAGE when all other features remain constant. On the contrary, an
# increase of the EXPERIENCE will induce an increase of the WAGE when all
# other features remain constant.
# Also, AGE, EXPERIENCE and EDUCATION | |
E501
return data
    def generate_packing_slip_specific_dc_with_http_info(self, distribution_center_code, order_id, **kwargs):  # noqa: E501
        """Generate a packing slip for this order for the given distribution center.  # noqa: E501
        The packing slip PDF that is returned is base 64 encoded  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.generate_packing_slip_specific_dc_with_http_info(distribution_center_code, order_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str distribution_center_code: Distribution center code (required)
        :param str order_id: Order ID (required)
        :return: OrdersResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # Whitelist of accepted keyword arguments; anything outside this list
        # is treated as a caller typo and rejected below.
        all_params = ['distribution_center_code', 'order_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() captures self, the positional params and kwargs; the loop
        # then flattens validated kwargs into the same dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method generate_packing_slip_specific_dc" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'distribution_center_code' is set
        if ('distribution_center_code' not in params or
                params['distribution_center_code'] is None):
            raise ValueError("Missing the required parameter `distribution_center_code` when calling `generate_packing_slip_specific_dc`")  # noqa: E501
        # verify the required parameter 'order_id' is set
        if ('order_id' not in params or
                params['order_id'] is None):
            raise ValueError("Missing the required parameter `order_id` when calling `generate_packing_slip_specific_dc`")  # noqa: E501
        collection_formats = {}
        # Both values are substituted into the URL path template passed to
        # call_api below.
        path_params = {}
        if 'distribution_center_code' in params:
            path_params['distribution_center_code'] = params['distribution_center_code']  # noqa: E501
        if 'order_id' in params:
            path_params['order_id'] = params['order_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey']  # noqa: E501
        return self.api_client.call_api(
            '/order/orders/{order_id}/packing_slip/{distribution_center_code}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='OrdersResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_accounts_receivable_retry_config(self, **kwargs): # noqa: E501
"""Retrieve A/R Retry Configuration # noqa: E501
Retrieve A/R Retry Configuration. This is primarily an internal API call. It is doubtful you would ever need to use it. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_accounts_receivable_retry_config(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: AccountsReceivableRetryConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_accounts_receivable_retry_config_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_accounts_receivable_retry_config_with_http_info(**kwargs) # noqa: E501
return data
    def get_accounts_receivable_retry_config_with_http_info(self, **kwargs):  # noqa: E501
        """Retrieve A/R Retry Configuration  # noqa: E501
        Retrieve A/R Retry Configuration. This is primarily an internal API call. It is doubtful you would ever need to use it.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_accounts_receivable_retry_config_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :return: AccountsReceivableRetryConfigResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # This endpoint takes no API-specific parameters, only the generic
        # client controls.
        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Flatten validated kwargs into the locals() dict; unknown keyword
        # arguments are rejected as caller typos.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_accounts_receivable_retry_config" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey']  # noqa: E501
        return self.api_client.call_api(
            '/order/accountsReceivableRetryConfig', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AccountsReceivableRetryConfigResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_accounts_receivable_retry_stats(self, **kwargs): # noqa: E501
"""Retrieve A/R Retry Statistics # noqa: E501
Retrieve A/R Retry Statistics. This is primarily an internal API call. It is doubtful you would ever need to use it. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_accounts_receivable_retry_stats(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _from:
:param str to:
:return: AccountsReceivableRetryStatsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_accounts_receivable_retry_stats_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_accounts_receivable_retry_stats_with_http_info(**kwargs) # noqa: E501
return data
    def get_accounts_receivable_retry_stats_with_http_info(self, **kwargs):  # noqa: E501
        """Retrieve A/R Retry Statistics  # noqa: E501
        Retrieve A/R Retry Statistics. This is primarily an internal API call. It is doubtful you would ever need to use it.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_accounts_receivable_retry_stats_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str _from:
        :param str to:
        :return: AccountsReceivableRetryStatsResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # '_from' is the Python-safe spelling of the reserved word 'from';
        # it is mapped back to 'from' in the query string below.
        all_params = ['_from', 'to']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Flatten validated kwargs into the locals() dict; unknown keyword
        # arguments are rejected as caller typos.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_accounts_receivable_retry_stats" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        if '_from' in params:
            query_params.append(('from', params['_from']))  # noqa: E501
        if 'to' in params:
            query_params.append(('to', params['to']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey']  # noqa: E501
        return self.api_client.call_api(
            '/order/accountsReceivableRetryConfig/stats', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AccountsReceivableRetryStatsResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_order(self, order_id, **kwargs): # noqa: E501
"""Retrieve an order # noqa: E501
Retrieves a single order using the specified order id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_order(order_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str order_id: The order id to retrieve. (required)
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: OrderResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_order_with_http_info(order_id, **kwargs) # noqa: E501
else:
(data) = self.get_order_with_http_info(order_id, **kwargs) # noqa: E501
return data
def get_order_with_http_info(self, order_id, **kwargs): # noqa: E501
"""Retrieve an order # noqa: E501
Retrieves a single order using the specified order id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_order_with_http_info(order_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str order_id: The order id to retrieve. (required)
:param str expand: The object expansion to perform on the result. See documentation for examples
:return: OrderResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['order_id', 'expand'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_order" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'order_id' is set
if ('order_id' not in params or
params['order_id'] is None):
raise ValueError("Missing the required parameter `order_id` when calling `get_order`") # noqa: E501
collection_formats = {}
path_params = {}
if 'order_id' in params:
path_params['order_id'] = params['order_id'] # noqa: E501
query_params = []
if 'expand' in params:
query_params.append(('_expand', params['expand'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/order/orders/{order_id}', 'GET',
path_params,
query_params,
| |
<reponame>lush-tech-warriors/saleor
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
import graphene
from django.contrib.auth import models as auth_models
from django.core.exceptions import ValidationError
from django.db import transaction
from ....account.error_codes import PermissionGroupErrorCode
from ....core.permissions import AccountPermissions, get_permissions
from ...account.utils import (
can_user_manage_group,
get_not_manageable_permissions_after_group_deleting,
get_not_manageable_permissions_after_removing_perms_from_group,
get_not_manageable_permissions_after_removing_users_from_group,
get_out_of_scope_permissions,
get_out_of_scope_users,
)
from ...core.enums import PermissionEnum
from ...core.mutations import ModelDeleteMutation, ModelMutation
from ...core.types.common import PermissionGroupError
from ...core.utils import get_duplicates_ids
from ..types import Group
if TYPE_CHECKING:
from ....account.models import User
# Shared input fields for the create/update permission-group mutations.
# NOTE(review): intentionally documented with ``#`` comments — a class
# docstring here would presumably surface as the GraphQL type description;
# confirm before adding one.
class PermissionGroupInput(graphene.InputObjectType):
    # Permission code names (PermissionEnum values) to grant to the group.
    add_permissions = graphene.List(
        graphene.NonNull(PermissionEnum),
        description="List of permission code names to assign to this group.",
        required=False,
    )
    # IDs of users to add to the group (must be staff — see clean_users).
    add_users = graphene.List(
        graphene.NonNull(graphene.ID),
        description="List of users to assign to this group.",
        required=False,
    )
# Input for PermissionGroupCreate: same fields as the base input, but the
# group name is required when creating.
class PermissionGroupCreateInput(PermissionGroupInput):
    name = graphene.String(description="Group name.", required=True)
class PermissionGroupCreate(ModelMutation):
    group = graphene.Field(Group, description="The newly created group.")

    class Arguments:
        input = PermissionGroupCreateInput(
            description="Input fields to create permission group.", required=True
        )

    class Meta:
        description = "Create new permission group."
        model = auth_models.Group
        permissions = (AccountPermissions.MANAGE_STAFF,)
        error_type_class = PermissionGroupError
        error_type_field = "permission_group_errors"

    @classmethod
    @transaction.atomic
    def _save_m2m(cls, info, instance, cleaned_data):
        """Persist the m2m relations (permissions and users) atomically."""
        add_permissions = cleaned_data.get("add_permissions")
        if add_permissions:
            instance.permissions.add(*add_permissions)
        users = cleaned_data.get("add_users")
        if users:
            instance.user_set.add(*users)

    @classmethod
    def clean_input(
        cls, info, instance, data,
    ):
        """Validate input, accumulating per-field errors before raising."""
        cleaned_input = super().clean_input(info, instance, data)
        requestor = info.context.user
        # Collect errors for all fields so the client sees them in one pass.
        errors = defaultdict(list)
        cls.clean_permissions(requestor, instance, errors, cleaned_input)
        cls.clean_users(requestor, errors, cleaned_input, instance)
        if errors:
            raise ValidationError(errors)
        return cleaned_input

    @classmethod
    def clean_permissions(
        cls,
        requestor: "User",
        group: auth_models.Group,
        errors: Dict[Optional[str], List[ValidationError]],
        cleaned_input: dict,
    ):
        """Resolve permission code names and check the requestor's scope.

        Superusers may assign any permission; other requestors may only
        assign permissions they hold themselves.
        """
        field = "add_permissions"
        permission_items = cleaned_input.get(field)
        if permission_items:
            # Replace enum code names with actual Permission instances.
            cleaned_input[field] = get_permissions(permission_items)
            if not requestor.is_superuser:
                cls.ensure_can_manage_permissions(
                    requestor, errors, field, permission_items
                )

    @classmethod
    def ensure_can_manage_permissions(
        cls,
        requestor: "User",
        errors: Dict[Optional[str], List[ValidationError]],
        field: str,
        permission_items: List[str],
    ):
        """Check if requestor can manage permissions from input.

        Requestor cannot manage permissions which he doesn't have.
        """
        missing_permissions = get_out_of_scope_permissions(requestor, permission_items)
        if missing_permissions:
            # add error
            error_msg = "You can't add permission that you don't have."
            code = PermissionGroupErrorCode.OUT_OF_SCOPE_PERMISSION.value
            params = {"permissions": missing_permissions}
            cls.update_errors(errors, error_msg, field, code, params)

    @classmethod
    def clean_users(
        cls,
        requestor: "User",
        errors: dict,
        cleaned_input: dict,
        group: auth_models.Group,
    ):
        """Validate users from input.

        ``group`` is unused here but kept in the signature for the
        subclass override (PermissionGroupUpdate), which needs it.
        """
        user_items = cleaned_input.get("add_users")
        if user_items:
            cls.ensure_users_are_staff(errors, "add_users", cleaned_input)

    @classmethod
    def ensure_users_are_staff(
        cls,
        errors: Dict[Optional[str], List[ValidationError]],
        field: str,
        cleaned_input: dict,
    ):
        """Ensure all of the users are staff members, raise error if not."""
        users = cleaned_input[field]
        non_staff_users = [user.pk for user in users if not user.is_staff]
        if non_staff_users:
            # add error
            ids = [graphene.Node.to_global_id("User", pk) for pk in non_staff_users]
            error_msg = "User must be staff member."
            code = PermissionGroupErrorCode.ASSIGN_NON_STAFF_MEMBER.value
            params = {"users": ids}
            cls.update_errors(errors, error_msg, field, code, params)

    @classmethod
    def update_errors(
        cls,
        errors: Dict[Optional[str], List[ValidationError]],
        msg: str,
        field: Optional[str],
        code: str,
        params: dict,
    ):
        """Create ValidationError and add it to error list."""
        error = ValidationError(message=msg, code=code, params=params)
        errors[field].append(error)
# Input for PermissionGroupUpdate: the base "add" fields plus optional name
# change and "remove" counterparts.
class PermissionGroupUpdateInput(PermissionGroupInput):
    # Name is optional on update (required on create).
    name = graphene.String(description="Group name.", required=False)
    remove_permissions = graphene.List(
        graphene.NonNull(PermissionEnum),
        description="List of permission code names to unassign from this group.",
        required=False,
    )
    remove_users = graphene.List(
        graphene.NonNull(graphene.ID),
        description="List of users to unassign from this group.",
        required=False,
    )
class PermissionGroupUpdate(PermissionGroupCreate):
    group = graphene.Field(Group, description="Group which was edited.")

    class Arguments:
        id = graphene.ID(description="ID of the group to update.", required=True)
        input = PermissionGroupUpdateInput(
            description="Input fields to create permission group.", required=True
        )

    class Meta:
        description = "Update permission group."
        model = auth_models.Group
        permissions = (AccountPermissions.MANAGE_STAFF,)
        error_type_class = PermissionGroupError
        error_type_field = "permission_group_errors"

    @classmethod
    @transaction.atomic
    def _save_m2m(cls, info, instance, cleaned_data):
        """Persist m2m changes: additions (via base class), then removals."""
        super()._save_m2m(info, instance, cleaned_data)
        remove_users = cleaned_data.get("remove_users")
        if remove_users:
            instance.user_set.remove(*remove_users)
        remove_permissions = cleaned_data.get("remove_permissions")
        if remove_permissions:
            instance.permissions.remove(*remove_permissions)

    @classmethod
    def clean_input(
        cls, info, instance, data,
    ):
        """Validate input data.

        Fail fast when the requestor cannot manage this group or when an
        item appears in both the "add" and "remove" lists, then run the
        base-class validation.
        """
        requestor = info.context.user
        cls.ensure_requestor_can_manage_group(requestor, instance)
        errors = defaultdict(list)
        permission_fields = ("add_permissions", "remove_permissions", "permissions")
        user_fields = ("add_users", "remove_users", "users")
        cls.check_for_duplicates(errors, data, permission_fields)
        cls.check_for_duplicates(errors, data, user_fields)
        if errors:
            raise ValidationError(errors)
        cleaned_input = super().clean_input(info, instance, data)
        return cleaned_input

    @classmethod
    def ensure_requestor_can_manage_group(
        cls, requestor: "User", group: auth_models.Group
    ):
        """Check if requestor can manage group.

        Requestor cannot manage group with wider scope of permissions.
        """
        if not requestor.is_superuser and not can_user_manage_group(requestor, group):
            error_msg = "You can't manage group with permissions out of your scope."
            code = PermissionGroupErrorCode.OUT_OF_SCOPE_PERMISSION.value
            raise ValidationError(error_msg, code)

    @classmethod
    def clean_permissions(
        cls,
        requestor: "User",
        group: auth_models.Group,
        errors: Dict[Optional[str], List[ValidationError]],
        cleaned_input: dict,
    ):
        """Validate added permissions (base class) and removed permissions."""
        super().clean_permissions(requestor, group, errors, cleaned_input)
        field = "remove_permissions"
        permission_items = cleaned_input.get(field)
        if permission_items:
            # Replace enum code names with actual Permission instances.
            cleaned_input[field] = get_permissions(permission_items)
            if not requestor.is_superuser:
                cls.ensure_can_manage_permissions(
                    requestor, errors, field, permission_items
                )
            cls.ensure_permissions_can_be_removed(errors, group, permission_items)

    @classmethod
    def ensure_permissions_can_be_removed(
        cls, errors: dict, group: auth_models.Group, permissions: List[str],
    ):
        """Collect an error if removal would leave permissions unmanageable."""
        missing_perms = get_not_manageable_permissions_after_removing_perms_from_group(
            group, permissions
        )
        if missing_perms:
            # add error
            permission_codes = [PermissionEnum.get(code) for code in permissions]
            msg = (
                "Permissions cannot be removed, "
                "some of permissions will not be manageable."
            )
            code = PermissionGroupErrorCode.LEFT_NOT_MANAGEABLE_PERMISSION.value
            params = {"permissions": permission_codes}
            cls.update_errors(errors, msg, "remove_permissions", code, params)

    @classmethod
    def clean_users(
        cls,
        requestor: "User",
        errors: dict,
        cleaned_input: dict,
        group: auth_models.Group,
    ):
        """Validate added users (base class) and removed users."""
        super().clean_users(requestor, errors, cleaned_input, group)
        remove_users = cleaned_input.get("remove_users")
        if remove_users:
            cls.ensure_can_manage_users(
                requestor, errors, "remove_users", cleaned_input
            )
            cls.clean_remove_users(requestor, errors, cleaned_input, group)

    @classmethod
    def ensure_can_manage_users(
        cls,
        requestor: "User",
        errors: Dict[Optional[str], List[ValidationError]],
        field: str,
        cleaned_input: dict,
    ):
        """Check if requestor can manage users from input.

        Requestor cannot manage users with wider scope of permissions.
        """
        if requestor.is_superuser:
            return
        users = cleaned_input[field]
        out_of_scope_users = get_out_of_scope_users(requestor, users)
        if out_of_scope_users:
            # add error
            ids = [
                graphene.Node.to_global_id("User", user_instance.pk)
                for user_instance in out_of_scope_users
            ]
            error_msg = "You can't manage these users."
            code = PermissionGroupErrorCode.OUT_OF_SCOPE_USER.value
            params = {"users": ids}
            cls.update_errors(errors, error_msg, field, code, params)

    @classmethod
    def clean_remove_users(
        cls,
        requestor: "User",
        errors: dict,
        cleaned_input: dict,
        group: auth_models.Group,
    ):
        """Run the removal-specific safety checks."""
        cls.check_if_removing_user_last_group(requestor, errors, cleaned_input)
        cls.check_if_users_can_be_removed(requestor, errors, cleaned_input, group)

    @classmethod
    def check_if_removing_user_last_group(
        cls, requestor: "User", errors: dict, cleaned_input: dict
    ):
        """Ensure user doesn't remove user's last group."""
        remove_users = cleaned_input["remove_users"]
        if requestor in remove_users and requestor.groups.count() == 1:
            # add error
            error_msg = "You cannot remove yourself from your last group."
            code = PermissionGroupErrorCode.CANNOT_REMOVE_FROM_LAST_GROUP.value
            params = {"users": [graphene.Node.to_global_id("User", requestor.pk)]}
            cls.update_errors(errors, error_msg, "remove_users", code, params)

    @classmethod
    def check_if_users_can_be_removed(
        cls,
        requestor: "User",
        errors: dict,
        cleaned_input: dict,
        group: auth_models.Group,
    ):
        """Check if after removing users from group all permissions will be manageable.

        After removing users from group, for each permission, there should be
        at least one staff member who can manage it (has both “manage staff”
        and this permission).
        """
        if requestor.is_superuser:
            return
        remove_users = cleaned_input["remove_users"]
        add_users = cleaned_input.get("add_users")
        manage_staff_permission = AccountPermissions.MANAGE_STAFF.value
        # check if user with manage staff will be added to the group
        if add_users:
            if any(user.has_perm(manage_staff_permission) for user in add_users):
                return True
        permissions = get_not_manageable_permissions_after_removing_users_from_group(
            group, remove_users
        )
        if permissions:
            # add error
            permission_codes = [PermissionEnum.get(code) for code in permissions]
            msg = "Users cannot be removed, some of permissions will not be manageable."
            code = PermissionGroupErrorCode.LEFT_NOT_MANAGEABLE_PERMISSION.value
            params = {"permissions": permission_codes}
            cls.update_errors(errors, msg, "remove_users", code, params)

    @classmethod
    def check_for_duplicates(
        cls, errors: dict, input_data: dict, fields: Tuple[str, str, str],
    ):
        """Check if any items are on both input field.

        Raise error if some of items are duplicated.
        """
        add_field, remove_field, error_class_field = fields
        duplicated_ids = get_duplicates_ids(
            input_data.get(add_field), input_data.get(remove_field)
        )
        if duplicated_ids:
            # add error
            # BUGFIX: the two literals previously concatenated without a
            # separating space ("...both listfor adding...").
            error_msg = (
                "The same object cannot be in both lists "
                "for adding and removing items."
            )
            code = PermissionGroupErrorCode.DUPLICATED_INPUT_ITEM.value
            params = {error_class_field: list(duplicated_ids)}
            cls.update_errors(errors, error_msg, None, code, params)
class PermissionGroupDelete(ModelDeleteMutation):
    class Arguments:
        # Mutation input: the global ID of the permission group to delete.
        id = graphene.ID(description="ID of the group to delete.", required=True)
    class Meta:
        # Mutation configuration: operates on Django auth Group instances
        # and requires the MANAGE_STAFF permission.
        description = "Delete permission group."
        model = auth_models.Group
        permissions = (AccountPermissions.MANAGE_STAFF,)
        error_type_class = PermissionGroupError
        error_type_field = "permission_group_errors"
@classmethod
def clean_instance(cls, info, instance):
requestor = info.context.user
if requestor.is_superuser:
return
if not can_user_manage_group(requestor, instance):
error_msg = "You can't manage group with permissions out of your scope."
code = PermissionGroupErrorCode.OUT_OF_SCOPE_PERMISSION.value
raise ValidationError(error_msg, code)
cls.check_if_group_can_be_removed(requestor, instance)
    @classmethod
    def check_if_group_can_be_removed(cls, requestor, group):
        # Both checks raise ValidationError on failure.
        cls.ensure_deleting_not_left_not_manageable_permissions(group)
        cls.ensure_not_removing_requestor_last_group(group, requestor)
@classmethod
def ensure_deleting_not_left_not_manageable_permissions(cls, group):
"""Return true if management of all permissions is provided by other groups.
After removing group, for each permission, there should be at least one staff
member who can manage it (has both “manage staff” and this permission).
"""
permissions = get_not_manageable_permissions_after_group_deleting(group)
if permissions:
permission_codes = [PermissionEnum.get(code) for code in permissions]
msg = "Group cannot be removed, some of permissions will not be manageable."
code = PermissionGroupErrorCode.LEFT_NOT_MANAGEABLE_PERMISSION.value
params = {"permissions": permission_codes}
raise ValidationError(
{"id": ValidationError(message=msg, code=code, params=params)}
)
@classmethod
def ensure_not_removing_requestor_last_group(cls, group, requestor):
"""Ensure user doesn't remove user's last group."""
if requestor in group.user_set.all() and requestor.groups.count() == 1:
msg = "You cannot delete your last group."
code = PermissionGroupErrorCode.CANNOT_REMOVE_FROM_LAST_GROUP.value
| |
<reponame>pyarnold/Mailpile
import copy
import email.header
import email.parser
import email.utils
import errno
import mailbox
import mimetypes
import os
import re
import StringIO
import threading
import traceback
from gettext import gettext as _
from email import encoders
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from lxml.html.clean import Cleaner
from mailpile.util import *
from platform import system
from urllib import quote, unquote
from mailpile.crypto.gpgi import GnuPG
from mailpile.crypto.gpgi import OpenPGPMimeSigningWrapper
from mailpile.crypto.gpgi import OpenPGPMimeEncryptingWrapper
from mailpile.crypto.mime import UnwrapMimeCrypto
from mailpile.crypto.state import EncryptionInfo, SignatureInfo
from mailpile.mail_generator import Generator
MBX_ID_LEN = 4 # 4x36 == 1.6 million mailboxes
class NotEditableError(ValueError):
    """Raised when attempting to edit a message that is not editable."""
    pass
class NoFromAddressError(ValueError):
    """Raised when an outgoing message has no From address."""
    pass
class NoRecipientError(ValueError):
    """Raised when an outgoing message has no recipients."""
    pass
class InsecureSmtpError(ValueError):
    """Raised when an SMTP connection cannot be adequately secured."""
    pass
class NoSuchMailboxError(OSError):
    """Raised when a referenced mailbox does not exist."""
    pass
def ParseMessage(fd, pgpmime=True):
    """Parse a message from fd, optionally unwrapping PGP/MIME layers."""
    msg = email.parser.Parser().parse(fd)
    if pgpmime and GnuPG:
        UnwrapMimeCrypto(msg, protocols={'openpgp': GnuPG})
    else:
        # No crypto processing: annotate every part with empty crypto state
        # so downstream code can rely on the attributes existing.
        for part in msg.walk():
            part.signature_info = SignatureInfo()
            part.encryption_info = EncryptionInfo()
    return msg
def ExtractEmails(string, strip_keys=True):
    """Extract e-mail addresses from a free-form header value or string."""
    leading_junk = re.compile(r'^[\'"<(]')
    trailing_junk = re.compile(r'[\'">);]$')
    # Make sure '<' and '(' start new words so splitting separates them.
    padded = string.replace('<', ' <').replace('(', ' (')
    found = []
    for word in (sw.strip() for sw in re.compile(r'[,\s]+').split(padded)):
        at = word.find('@')
        if at < 0:
            continue
        while leading_junk.search(word):
            word = word[1:]
        while trailing_junk.search(word):
            word = word[:-1]
        if strip_keys and '#' in word[at:]:
            word = word[:at] + word[at:].split('#', 1)[0]
        # E-mail addresses are only allowed to contain ASCII
        # characters, so we just strip everything else away.
        found.append(CleanText(word,
                               banned=CleanText.WHITESPACE,
                               replace='_').clean)
    return found
def ExtractEmailAndName(string):
    """Split a header value into (address, display-name)."""
    address = (ExtractEmails(string) or [''])[0]
    # The display name is whatever remains once the address and the usual
    # punctuation are stripped away; fall back to the address itself.
    name = string.replace(address, '')
    for junk in ('<>', '"', '(', ')'):
        name = name.replace(junk, '')
    name = name.strip()
    return address, (name or address)
def MessageAsString(part, unixfrom=False):
    """Flatten a message (or part) to a CRLF-terminated string."""
    output = StringIO.StringIO()
    Generator(output).flatten(part, unixfrom=unixfrom, linesep='\r\n')
    return output.getvalue()
def CleanMessage(config, msg):
    """Remove internal/private headers from an outgoing message, in place."""
    replacements = []
    for key, value in msg.items():
        lkey = key.lower()
        if lkey.startswith('x-mp-internal-') or lkey in ('bcc', 'encryption'):
            # Never expose internal bookkeeping headers or Bcc lists.
            replacements.append((key, None))
        elif lkey in ('to', 'from', 'cc') and '#' in value:
            # Strip the #key part off any e-mail addresses.
            cleaned = re.sub(r'(@[^<>\s#]+)#[a-fxA-F0-9]+([>,\s]|$)',
                             r'\1\2', value)
            replacements.append((key, cleaned))
    for key, _ in replacements:
        del msg[key]
    for key, value in replacements:
        if value:
            msg[key] = value
    return msg
def PrepareMessage(config, msg, sender=None, rcpts=None, events=None):
    """Prepare a message for sending: resolve sender/recipients, apply crypto.

    Returns a tuple (sender, rcpts, msg, events).  The message is deep-copied
    first, so the caller's object is never modified.  Note: rcpts is a list
    on the short-circuit path but a set on the full path.
    """
    msg = copy.deepcopy(msg)
    # Short circuit if this message has already been prepared.
    if 'x-mp-internal-sender' in msg and 'x-mp-internal-rcpts' in msg:
        return (sender or msg['x-mp-internal-sender'],
                rcpts or [r.strip()
                          for r in msg['x-mp-internal-rcpts'].split(',')],
                msg,
                events)
    crypto_policy = config.prefs.crypto_policy.lower()
    rcpts = rcpts or []
    # Iterate through headers to figure out what we want to do...
    # Explicit rcpts/sender arguments take precedence over the headers.
    need_rcpts = not rcpts
    for hdr, val in msg.items():
        lhdr = hdr.lower()
        if lhdr == 'from':
            sender = sender or val
        elif lhdr == 'encryption':
            # Per-message override of the global crypto policy.
            crypto_policy = val.lower()
        elif need_rcpts and lhdr in ('to', 'cc', 'bcc'):
            rcpts += ExtractEmails(val, strip_keys=False)
    # Are we sane?
    if not sender:
        raise NoFromAddressError()
    if not rcpts:
        raise NoRecipientError()
    # Are we encrypting? Signing?
    if crypto_policy == 'default':
        crypto_policy = config.prefs.crypto_policy
    # This is the BCC hack that Brennan hates!
    # (The sender is always added as a recipient of their own mail.)
    rcpts += [sender]
    sender = ExtractEmails(sender, strip_keys=False)[0]
    sender_keyid = None
    if config.prefs.openpgp_header:
        # Best-effort lookup of the sender's secret key fingerprint; any
        # failure simply leaves sender_keyid as None (header omitted below).
        try:
            gnupg = GnuPG()
            seckeys = dict([(x["email"], y["fingerprint"])
                            for y in gnupg.list_secret_keys().values()
                            for x in y["uids"]])
            sender_keyid = seckeys[sender]
        except:
            pass
    # Deduplicate recipients, keeping the sender first.
    rcpts, rr = [sender], rcpts
    for r in rr:
        for e in ExtractEmails(r, strip_keys=False):
            if e not in rcpts:
                rcpts.append(e)
    # Add headers we require
    if 'date' not in msg:
        msg['Date'] = email.utils.formatdate()
    if sender_keyid and config.prefs.openpgp_header:
        msg["OpenPGP"] = "id=%s; preference=%s" % (sender_keyid,
                                                   config.prefs.openpgp_header)
    if 'openpgp' in crypto_policy:
        # FIXME: Make a more efficient sign+encrypt wrapper
        cleaner = lambda m: CleanMessage(config, m)
        if 'sign' in crypto_policy:
            msg = OpenPGPMimeSigningWrapper(config,
                                            sender=sender,
                                            cleaner=cleaner,
                                            recipients=rcpts).wrap(msg)
        if 'encrypt' in crypto_policy:
            msg = OpenPGPMimeEncryptingWrapper(config,
                                               sender=sender,
                                               cleaner=cleaner,
                                               recipients=rcpts).wrap(msg)
    # Drop any #keyid suffixes; note this also turns rcpts into a set.
    rcpts = set([r.rsplit('#', 1)[0] for r in rcpts])
    msg['x-mp-internal-readonly'] = str(int(time.time()))
    msg['x-mp-internal-sender'] = sender
    msg['x-mp-internal-rcpts'] = ', '.join(rcpts)
    return (sender, rcpts, msg, events)
MUA_HEADERS = ('date', 'from', 'to', 'cc', 'subject', 'message-id', 'reply-to',
               'mime-version', 'content-disposition', 'content-type',
               'user-agent', 'list-id', 'list-subscribe', 'list-unsubscribe',
               'x-ms-tnef-correlator', 'x-ms-has-attach')
DULL_HEADERS = ('in-reply-to', 'references')


def HeaderPrintHeaders(message):
    """Extract message headers which identify the MUA."""
    names = [key for key, _ in message.items()]
    # MTAs generally prepend or append headers rather than inserting them
    # in the middle, so trim from both ends until we hit a header we are
    # pretty sure comes from the MUA itself.
    while names and names[0].lower() not in MUA_HEADERS:
        del names[0]
    while names and names[-1].lower() not in MUA_HEADERS:
        del names[-1]
    # Drop the "dull" headers that vary from message to message; what is
    # left should identify this particular mailer fairly uniquely.
    return [name for name in names if name.lower() not in DULL_HEADERS]
def HeaderPrint(message):
    """Generate a fingerprint from message headers which identifies the MUA."""
    # b64w/sha1b64 come from mailpile.util (star import above); the result
    # is lower-cased so the fingerprint compares case-insensitively.
    return b64w(sha1b64('\n'.join(HeaderPrintHeaders(message)))).lower()
class Email(object):
"""This is a lazy-loading object representing a single email."""
    def __init__(self, idx, msg_idx_pos,
                 msg_parsed=None, msg_info=None, ephemeral_mid=None):
        """Bind this Email to a search index and a message index position.

        Parsing and metadata loading are lazy; msg_parsed/msg_info may be
        supplied up front to pre-populate the caches.
        """
        self.index = idx                    # the search index
        self.config = idx.config            # shortcut to global configuration
        self.msg_idx_pos = msg_idx_pos      # position in the metadata index
        self.ephemeral_mid = ephemeral_mid  # set only for unsaved messages
        self.msg_info = msg_info            # cached metadata record, if any
        self.msg_parsed = msg_parsed        # cached parsed message, if any
    def msg_mid(self):
        # Ephemeral (unsaved) messages carry a preassigned MID; saved ones
        # use the base-36 encoding of their index position.
        return self.ephemeral_mid or b36(self.msg_idx_pos)
@classmethod
def encoded_hdr(self, msg, hdr, value=None):
hdr_value = value or msg[hdr]
try:
hdr_value.encode('us-ascii')
except:
if hdr.lower() in ('from', 'to', 'cc', 'bcc'):
addrs = []
for addr in [a.strip() for a in hdr_value.split(',')]:
name, part = [], []
words = addr.split()
for w in words:
if w[0] == '<' or '@' in w:
part.append((w, 'us-ascii'))
else:
name.append(w)
if name:
name = ' '.join(name)
try:
part[0:0] = [(name.encode('us-ascii'), 'us-ascii')]
except:
part[0:0] = [(name, 'utf-8')]
addrs.append(email.header.make_header(part).encode())
hdr_value = ', '.join(addrs)
else:
parts = [(hdr_value, 'utf-8')]
hdr_value = email.header.make_header(parts).encode()
return hdr_value
    @classmethod
    def Create(cls, idx, mbox_id, mbx,
               msg_to=None, msg_cc=None, msg_bcc=None, msg_from=None,
               msg_subject=None, msg_text=None, msg_references=None,
               save=True, ephemeral_mid='not:saved'):
        """Create a new multipart message, optionally saving it to a mailbox.

        When save is True the message is added to ``mbx`` and registered in
        the index, and a saved Email is returned.  Otherwise an ephemeral
        Email (identified by ``ephemeral_mid``) is returned without touching
        the mailbox.
        """
        msg = MIMEMultipart()
        msg.signature_info = SignatureInfo()
        msg.encryption_info = EncryptionInfo()
        msg_ts = int(time.time())
        if not msg_from:
            # Fall back to the configured profile for sender identity.
            msg_from = idx.config.get_profile().get('email', None)
            from_name = idx.config.get_profile().get('name', None)
            if msg_from and from_name:
                msg_from = '%s <%s>' % (from_name, msg_from)
        if not msg_from:
            raise NoFromAddressError()
        msg['From'] = cls.encoded_hdr(None, 'from', value=msg_from)
        msg['Date'] = email.utils.formatdate(msg_ts)
        msg['Message-Id'] = email.utils.make_msgid('mailpile')
        msg_subj = (msg_subject or 'New message')
        msg['Subject'] = cls.encoded_hdr(None, 'subject', value=msg_subj)
        # set() deduplicates recipients; note it does not preserve order.
        if msg_to:
            msg['To'] = cls.encoded_hdr(None, 'to',
                                        value=', '.join(set(msg_to)))
        if msg_cc:
            msg['Cc'] = cls.encoded_hdr(None, 'cc',
                                        value=', '.join(set(msg_cc)))
        if msg_bcc:
            msg['Bcc'] = cls.encoded_hdr(None, 'bcc',
                                         value=', '.join(set(msg_bcc)))
        if msg_references:
            msg['In-Reply-To'] = msg_references[-1]
            msg['References'] = ', '.join(msg_references)
        if msg_text:
            # Prefer us-ascii when possible, fall back to utf-8.
            try:
                msg_text.encode('us-ascii')
                charset = 'us-ascii'
            except UnicodeEncodeError:
                charset = 'utf-8'
            textpart = MIMEText(msg_text, _subtype='plain', _charset=charset)
            textpart.signature_info = SignatureInfo()
            textpart.encryption_info = EncryptionInfo()
            msg.attach(textpart)
            del textpart['MIME-Version']
        if save:
            msg_key = mbx.add(msg)
            msg_to = msg_cc = []
            msg_ptr = mbx.get_msg_ptr(mbox_id, msg_key)
            msg_id = idx.get_msg_id(msg, msg_ptr)
            msg_idx, msg_info = idx.add_new_msg(msg_ptr, msg_id, msg_ts,
                                                msg_from, msg_to, msg_cc, 0,
                                                msg_subj, '', [])
            idx.set_conversation_ids(msg_info[idx.MSG_MID], msg,
                                     subject_threading=False)
            return cls(idx, msg_idx)
        else:
            # Ephemeral message: fabricate metadata, use -1 as index position.
            msg_info = idx.edit_msg_info(idx.BOGUS_METADATA[:],
                                         msg_mid=ephemeral_mid or '',
                                         msg_id=msg['Message-ID'],
                                         msg_ts=msg_ts,
                                         msg_subject=msg_subj,
                                         msg_from=msg_from,
                                         msg_to=msg_to,
                                         msg_cc=msg_cc)
            return cls(idx, -1,
                       msg_parsed=msg, msg_info=msg_info,
                       ephemeral_mid=ephemeral_mid)
    def is_editable(self):
        # Ephemeral (unsaved) messages are always editable; otherwise defer
        # to the configuration.
        return (self.ephemeral_mid or
                self.config.is_editable_message(self.get_msg_info()))
MIME_HEADERS = ('mime-version', 'content-type', 'content-disposition',
'content-transfer-encoding')
UNEDITABLE_HEADERS = ('message-id', ) + MIME_HEADERS
MANDATORY_HEADERS = ('From', 'To', 'Cc', 'Bcc', 'Subject', 'Encryption')
HEADER_ORDER = {
'in-reply-to': -2,
'references': -1,
'date': 1,
'from': 2,
'subject': 3,
'to': 4,
'cc': 5,
'bcc': 6,
'encryption': 99,
}
def get_editing_strings(self, tree=None):
tree = tree or self.get_message_tree()
strings = {
'from': '', 'to': '', 'cc': '', 'bcc': '', 'subject': '',
'encryption': '', 'attachments': {}
}
header_lines = []
body_lines = []
# We care about header order and such things...
hdrs = dict([(h.lower(), h) for h in tree['headers'].keys()
if h.lower() not in self.UNEDITABLE_HEADERS])
for mandate in self.MANDATORY_HEADERS:
hdrs[mandate.lower()] = hdrs.get(mandate.lower(), mandate)
keys = hdrs.keys()
keys.sort(key=lambda k: (self.HEADER_ORDER.get(k, 99), k))
lowman = [m.lower() for m in self.MANDATORY_HEADERS]
for hdr in [hdrs[k] for k in keys]:
data = tree['headers'].get(hdr, '')
if hdr.lower() in lowman:
strings[hdr.lower()] = unicode(data)
else:
header_lines.append(unicode('%s: %s' % (hdr, data)))
for att in tree['attachments']:
strings['attachments'][att['count']] = (att['filename']
or '(unnamed)')
if not strings['encryption']:
strings['encryption'] = unicode(self.config.prefs.crypto_policy)
def _fixup(t):
try:
return unicode(t)
except UnicodeDecodeError:
return t.decode('utf-8')
strings['headers'] = '\n'.join(header_lines).replace('\r\n', '\n')
strings['body'] = unicode(''.join([_fixup(t['data'])
for t in tree['text_parts']])
).replace('\r\n', '\n')
return strings
def get_editing_string(self, tree=None):
tree = tree or self.get_message_tree()
estrings = self.get_editing_strings(tree)
bits = [estrings['headers']]
for mh in self.MANDATORY_HEADERS:
bits.append('%s: %s' % (mh, estrings[mh.lower()]))
bits.append('')
bits.append(estrings['body'])
return '\n'.join(bits)
def make_attachment(self, fn, filedata=None):
if filedata and fn in filedata:
data = filedata[fn]
else:
data = open(fn, 'rb').read()
ctype, encoding = mimetypes.guess_type(fn)
maintype, subtype = (ctype | |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 <NAME> <<EMAIL>>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTI-
# TUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUP-
# TION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY,OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from threading import RLock
from luxon import exceptions
from luxon.core.logger import GetLogger
from luxon.core.db.base.cursor import Cursor
from luxon.core.db.base.exceptions import Exceptions as BaseExeptions
# LOCALIZE Exceptions to Module as pep-0249
from luxon.core.db.base.exceptions import (Error, Warning,
InterfaceError,
DatabaseError,
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError)
# Globals as per pep-0249
#########################
# String constant stating the supported DB API level.
# Currently only the strings "1.0" and "2.0" are allowed.
# If not given, a DB-API 1.0 level interface should be assumed.
apilevel = "2.0"
#
# threadsafety
# 0     Threads may not share the module.
# 1     Threads may share the module, but not connections.
# 2     Threads may share the module and connections.
# 3     Threads may share the module, connections and cursors.
threadsafety = 1
# Sharing in the above context means that two threads may use a resource
# without wrapping it using a mutex semaphore to implement resource locking.
# Note that you cannot always make external resources thread safe by managing
# access using a mutex: the resource may rely on global variables or other
# external sources that are beyond your control.
#
# paramstyle
paramstyle = "qmark"
# paramstyle    Meaning
# qmark         Question mark style, e.g. ...WHERE name=?
# numeric       Numeric, positional style, e.g. ...WHERE name=:1
# named         Named style, e.g. ...WHERE name=:name
# format        ANSI C printf format codes, e.g. ...WHERE name=%s
# pyformat      Python extended format codes, e.g. ...WHERE name=%(name)s
log = GetLogger(__name__)
# Empty default tables exposed by Connection below as ERROR_MAP / CAST_MAP;
# presumably replaced by concrete driver modules -- TODO confirm.
error_map = (
)
cast_map = (
)
class Connection(BaseExeptions):
DB_API = None
CHARSET = 'utf-8'
DEST_FORMAT = None
ERROR_MAP = error_map
CAST_MAP = cast_map
_crsr_cls_args = []
THREADSAFETY = threadsafety
_instances = {}
    def __new__(cls, *args, **kwargs):
        # THREADSAFETY == 0 means threads may not share the module, so a
        # single instance per class is kept and guarded with an RLock.
        if cls.THREADSAFETY == 0:
            if cls not in Connection._instances:
                Connection._instances[cls] = object.__new__(cls)
                Connection._instances[cls]._lock = RLock()
                Connection._instances[cls]._lock.acquire()
            # NOTE(review): the lock is acquired only when the singleton is
            # first created; subsequent calls return the instance without
            # re-acquiring.  Confirm this matches close(), which releases it.
            return Connection._instances[cls]
        else:
            return object.__new__(cls)
    def __init__(self, *args, **kwargs):
        """Open the underlying DB-API connection.

        All arguments are forwarded verbatim to ``DB_API.connect``.
        Driver exceptions are routed through ``_error_handler`` with
        ``ERROR_MAP``.
        """
        try:
            self._conn = self.DB_API.connect(*args, **kwargs)
            self._cached_crsr = None  # lazily-created default cursor
            self._crsr_cls = None
            self._cursors = []        # every cursor handed out, for cleanup
        except Exception as e:
            self._error_handler(self, e, self.ERROR_MAP)
    def __repr__(self):
        # NOTE(review): relies on a __str__ being defined elsewhere (subclass
        # or mixin); with only object.__str__ this would recurse infinitely.
        return str(self)
def cursor(self):
"""Return a new Cursor Object using the connection.
If the database does not provide a direct cursor concept, the module
will have to emulate cursors using other means to the extent needed by
this specification.
Reference PEP-0249
"""
crsr = Cursor(self)
self._cursors.append(crsr)
return crsr
@property
def _crsr(self):
if self._cached_crsr is None:
self._cached_crsr = self.cursor()
return self._cached_crsr
@property
def messages(self):
"""Cursor.messages except that the list are connection oriented.
The list is cleared automatically by all standard connection methods
calls (prior to executing the call) to avoid excessive memory usage
and can also be cleared by executing del connection.messages[:].
Warning Message: "DB-API extension connection.messages used"
Reference PEP-0249
"""
raise NotImplementedError()
def execute(self, *args, **kwargs):
"""Prepare and execute a database operation (query or command).
This method is for conveniance and non-standard.
Parameters may be provided as sequence or mapping and will be bound to
variables in the operation. Variables are specified in a
database-specific notation (see the module's paramstyle attribute for
details).
A reference to the operation will be retained by the cursor. If the
same operation object is passed in again, then the cursor can optimize
its behavior. This is most effective for algorithms where the same
operation is used, but different parameters are bound to it (many
times).
For maximum efficiency when reusing an operation, it is best to use the
.setinputsizes() method to specify the parameter types and sizes ahead
of time. It is legal for a parameter to not match the predefined
information; the implementation should compensate, possibly with a loss
of efficiency.
The parameters may also be specified as list of tuples to e.g. insert
multiple rows in a single operation, but this kind of usage is
deprecated: .executemany() should be used instead.
Return values are not defined in the standard. However goal is to
always return a list of rows being dictionary of column/key values in
this "IMPLEMENTATION".
Reference PEP-0249
"""
return self._crsr.execute(*args, **kwargs)
    def has_table(self, table):
        """Return True if ``table`` exists (probed with a zero-row SELECT).

        NOTE(review): ``table`` is interpolated directly into SQL;
        identifiers cannot be bound as parameters, so callers must never
        pass untrusted input here.
        """
        try:
            query = 'SELECT * FROM %s limit 0' % table
            self.execute(query)
            return True
        except exceptions.SQLOperationalError:
            # THIS ONE MATCHES FOR SQLLITE3? Kinda wrong.
            return False
        except exceptions.SQLProgrammingError:
            # MYSQL USES THIS ONE
            return False
    def has_field(self, table, field):
        """Return True if ``table`` has a column ``field`` (zero-row SELECT).

        NOTE(review): both identifiers are interpolated directly into SQL;
        never pass untrusted input here.
        """
        try:
            query = 'SELECT %s FROM %s LIMIT 0' % (field,
                                                   table)
            self.execute(query)
            return True
        except exceptions.SQLOperationalError:
            # THIS ONE MATCHES FOR SQLLITE3? Kinda wrong.
            return False
        except exceptions.SQLDatabaseError:
            # MYSQL USES THIS ONE?
            return False
        except exceptions.SQLProgrammingError:
            # MYSQL USES THIS ONE?
            return False
    def insert(self, table, data):
        """Insert data into table.

        Delegates to the default cursor's insert implementation.

        Args:
            table (str): Table name.
            data (list): List of rows containing values.
        """
        self._crsr.insert(table, data)
def clean_up(self):
"""Cleanup server Session.
Auto rollback and Auto commit neccessary for next request to start
new transactions. If not applied select queries will return cached
results.
Pool runs this method to ensure new requests start up in clean state.
"""
self._cached_crsr = None
for crsr in self._cursors[:]:
self._cursors.remove(crsr)
crsr.clean_up()
    def close(self):
        """Close the connection

        Close the connection now (rather than whenever .__del__() is called).
        The connection will be unusable from this point forward; an Error (or
        subclass) exception will be raised if any operation is attempted with
        the connection. The same applies to all cursor objects trying to use
        the connection. Note that closing a connection without committing the
        changes first will cause an implicit rollback to be performed.

        Reference PEP-0249
        """
        self.clean_up()
        # NOTE(review): clean_up() empties self._cursors, so this loop never
        # runs anything -- confirm whether cursors should be closed before
        # clean_up, or whether this is intentionally dead.
        for crsr in self._cursors:
            crsr.close()
        try:
            # Singleton (THREADSAFETY == 0) connections only release the
            # lock and keep the underlying driver connection for reuse.
            self._lock.release()
            self._cached_crsr = None
        except AttributeError:
            # NOTE(cfrademan) Its not got locking so close it..
            # locking objects are singleton object.
            self._conn.close()
def commit(self):
"""Commit Transactionl Queries.
Commit any pending transaction to the database.
Note that if the database supports an auto-commit feature, this
must be initially off. An interface method may be provided to
turn it back on.
Database modules that do not support transactions should implement
this method with void functionality.
Reference PEP-0249
"""
self._crsr.commit()
def rollback(self):
"""Rollback current transaction.
This method is optional since not all databases provide transaction
support.
In case a database does provide transactions this method causes the
database to roll back to the start of any pending transaction. Closing
a connection without committing the changes first will cause an
implicit rollback to be performed.
Reference PEP-0249
"""
self._crsr.rollback()
def last_row_id(self):
"""Return last row id.
This method returns the value generated for an AUTO_INCREMENT
column by the previous INSERT or UPDATE statement or None
is no column available or rather AUTO_INCREMENT not used.
"""
return self._crsr.lastrowid
def last_row_count(self):
"""Return last row count.
This method returns the number of rows returned for SELECT statements,
or the number of rows affected by DML statements such as INSERT
or UPDATE.
"""
return self._crsr.rowcount
    def __enter__(self):
        # Context-manager support: `with SomeConnection(...) as conn:`.
        return self
def __exit__(self, *args, | |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to create a Markov Chain Monte Carlo Metropolis step.
@@evolve
@@uniform_random_proposal
@@normal_random_proposal
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
# Public API of this module (mirrors the @@-exports in the module docstring).
__all__ = [
    'evolve',
    'uniform_random_proposal',
    'normal_random_proposal',
]
def _single_iteration(current_state, current_log_density,
                      log_unnormalized_prob_fn, proposal_fn, seed=None,
                      name=None):
  """Performs a single Metropolis-Hastings step.

  Fixes vs. previous revision: the `name` default was the *string* `'None'`
  (a typo), which made every unnamed call create a scope literally called
  "None" instead of the default 'single_iteration'; the docstring referred
  to a constant `A` where `Z` was defined.

  Args:
    current_state: Float-like `Tensor` (i.e., `dtype` is either
      `tf.float16`, `tf.float32` or `tf.float64`) of any shape that can
      be consumed by the `log_unnormalized_prob_fn` and `proposal_fn`
      callables.
    current_log_density: Float-like `Tensor` with `dtype` and shape equivalent
      to `log_unnormalized_prob_fn(current_state)`, i.e., matching the result
      of `log_unnormalized_prob_fn` invoked at `current_state`.
    log_unnormalized_prob_fn: A Python callable evaluated at
      `current_state` and returning a float-like `Tensor` of log target-density
      up to a normalizing constant. In other words,
      `log_unnormalized_prob_fn(x) = log(g(x))`, where
      `target_density = g(x)/Z` for some constant `Z`. The shape of the input
      tensor is the same as the shape of the `current_state`. The shape of the
      output tensor is either
        (a). Same as the input shape if the density being sampled is one
          dimensional, or
        (b). If the density is defined for `events` of shape
          `event_shape = [E1, E2, ... Ee]`, then the input tensor should be of
          shape `batch_shape + event_shape`, where `batch_shape = [B1, ..., Bb]`
          and the result must be of shape [B1, ..., Bb].
    proposal_fn: A callable accepting a real valued `Tensor` of current sample
      points and returning a tuple of two `Tensors`. The first element of the
      pair is a `Tensor` containing the proposal state and should have
      the same shape as the input `Tensor`. The second element of the pair
      gives the log of the ratio of the probability of transitioning from the
      proposal points to the input points and the probability of transitioning
      from the input points to the proposal points. If the proposal is
      symmetric, the second value should be set to `None` instead of
      explicitly supplying a tensor of zeros (this also leads to a more
      efficient graph).
    seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
      applied.
    name: Python `str` name prefix for ops managed by this function, or
      `None` to use the default scope 'single_iteration'.

  Returns:
    next_state: `Tensor` with `dtype` and shape matching `current_state`.
      Created by propagating the chain by one step, starting from
      `current_state`.
    next_log_density: `Tensor` with `dtype` and shape matching
      `current_log_density`, which is equal to the value of the unnormalized
      `log_unnormalized_prob_fn` computed at `next_state`.
    log_accept_ratio: `Tensor` with `dtype` and shape matching
      `current_log_density`. Stands for the log of Metropolis-Hastings
      acceptance ratio used in generating the `next_state`.
  """
  with ops.name_scope(name, 'single_iteration', [current_state]):
    # The proposed state and the log of the corresponding Hastings ratio.
    proposal_state, log_transit_ratio = proposal_fn(current_state)
    # If the log ratio is None, assume that the transitions are symmetric,
    # i.e., Prob(Current -> Proposed) = Prob(Proposed -> Current).
    if log_transit_ratio is None:
      log_transit_ratio = 0.
    # Log-density of the proposal state.
    proposal_log_density = log_unnormalized_prob_fn(proposal_state)
    # Ops to compute the log of the acceptance ratio. Recall that the
    # acceptance ratio is: [Prob(Proposed) / Prob(Current)] *
    # [Prob(Proposed -> Current) / Prob(Current -> Proposed)]. The log of the
    # second term is the log_transit_ratio.
    with ops.name_scope('accept_reject'):
      # The log of the acceptance ratio.
      log_accept_ratio = (proposal_log_density - current_log_density
                          + log_transit_ratio)
      # A proposal with acceptance ratio >= 1 is always accepted; otherwise
      # it is accepted with probability equal to the ratio. Working in log
      # space prevents over/underflows. The log_draws must share the
      # accept-ratio dtype so the comparison below is well defined.
      log_draws = math_ops.log(random_ops.random_uniform(
          array_ops.shape(current_log_density), seed=seed,
          dtype=log_accept_ratio.dtype))
      is_proposal_accepted = log_draws < log_accept_ratio
    # The acceptance state decides which elements of the current state are to
    # be replaced with the corresponding elements in the proposal state.
    with ops.name_scope(name, 'metropolis_single_step',
                        [current_state, current_log_density]):
      next_log_density = array_ops.where(is_proposal_accepted,
                                         proposal_log_density,
                                         current_log_density)
      next_state = array_ops.where(is_proposal_accepted, proposal_state,
                                   current_state)
    return next_state, next_log_density, log_accept_ratio
def evolve(initial_sample,
initial_log_density,
initial_log_accept_ratio,
log_unnormalized_prob_fn,
proposal_fn,
n_steps=1,
seed=None,
name=None):
"""Performs `n_steps` of the Metropolis-Hastings update.
Given a probability density function, `f(x)` and a proposal scheme which
generates new points from old, this `Op` returns a tensor
which may be used to generate approximate samples from the target distribution
using the Metropolis-Hastings algorithm. These samples are from a Markov chain
whose equilibrium distribution matches the target distribution.
The probability distribution may have an unknown normalization constan.
We parameterize the probability density as follows:
```
f(x) = exp(L(x) + constant)
```
Here `L(x)` is any continuous function with an (possibly unknown but finite)
upper bound, i.e. there exists a number beta such that
`L(x)< beta < infinity` for all x. The constant is the normalization needed
to make `f(x)` a probability density (as opposed to just a finite measure).
Although `initial_sample` can be arbitrary, a poor choice may result in a
slow-to-mix chain. In many cases the best choice is the one that maximizes
the target density, i.e., choose `initial_sample` such that
`f(initial_sample) >= f(x)` for all `x`.
If the support of the distribution is a strict subset of R^n (but of non zero
measure), then the unnormalized log-density `L(x)` should return `-infinity`
outside the support domain. This effectively forces the sampler to only
explore points in the regions of finite support.
Usage:
This function is meant to be wrapped up with some of the common proposal
schemes (e.g. random walk, Langevin diffusion etc) to produce a more user
friendly interface. However, it may also be used to create bespoke samplers.
The following example, demonstrates the use to generate a 1000 uniform random
walk Metropolis samplers run in parallel for the normal target distribution.
```python
n = 3 # dimension of the problem
# Generate 1000 initial values randomly. Each of these would be an
# independent starting point for a Markov chain.
state = tf.get_variable(
'state',initializer=tf.random_normal([1000, n], mean=3.0,
dtype=tf.float64, seed=42))
# Computes the log(p(x)) for the unit normal density and ignores the
# normalization constant.
def log_density(x):
return - tf.reduce_sum(x * x, reduction_indices=-1) / 2.0
# Initial log-density value
state_log_density = tf.get_variable(
'state_log_density', initializer=log_density(state.initialized_value()))
# A variable to store the log_acceptance_ratio:
log_acceptance_ratio = tf.get_variable(
'log_acceptance_ratio', initializer=tf.zeros([1000], dtype=tf.float64))
# Generates random proposals by moving each coordinate uniformly and
# independently in a box of size 2 centered around the current value.
# Returns the new point and also the log of the Hastings ratio (the
# ratio of the probability of going from the proposal to origin and the
# probability of the reverse transition). When this | |
\"[[HEADER_RAND]]\"
#include \"[[SOURCE_RAND]]\"
#else
#define gnu_rand rand
#define gnu_srand srand
#endif
#endif
#endif
""")
# Template emitting the SDL include directive into generated C source.
g_template_include_sdl = Template("""
#include \"SDL.h\"
""")
# Template emitting the libsndfile include directive into generated C source.
g_template_include_sndfile = Template("""
#include \"sndfile.h\"
""")
# Template for UND symbols that some platforms require to exist at link time.
g_template_und_symbols = Template("""
#if defined(__FreeBSD__)
/** Symbol required by libc. */
void *environ SHRINKY_VISIBILITY;
/** Symbol required by libc. */
void *__progname SHRINKY_VISIBILITY;
#endif
""")
########################################
# Functions ############################
########################################
def collect_libraries(libraries, symbols, compilation_mode):
    """Collect libraries to link against from symbols given.

    libraries: Library names given explicitly (may be empty for autodetect).
    symbols: Symbol objects actually referenced by the program.
    compilation_mode: Compilation mode string, e.g. 'dlfcn' or 'maximum'.
    Returns the ordered list of library names to link against.
    Raises RuntimeError when autodetection is impossible for the mode.
    """
    # Libraries implied by the symbols that are actually referenced.
    detected_libraries = set([ii.get_library().get_name() for ii in symbols])
    if not libraries:
        if "dlfcn" == compilation_mode:
            raise RuntimeError("cannot autodetect libraries for compilation mode '%s'" % compilation_mode)
        libraries = list(detected_libraries)
        output_message = "Autodetected libraries to link against: "
    else:
        # Warn if libraries seem to be missing something.
        missing_libraries = detected_libraries.difference(set(libraries))
        if missing_libraries:
            print("WARNING: found symbols suggest libraries: %s" % (str(list(missing_libraries))))
        output_message = "Linking against libraries: "
    # BUG FIX: operate on a copy so the caller's list is never mutated.
    remaining = list(libraries)
    # Reorder libraries to ensure there is no problems with library scouring and UND symbols.
    problematic_libraries = ["gcc", "c", "m", "bcm_host"]  # Order is important.
    front = []
    for ii in problematic_libraries:
        if ii in remaining:
            remaining.remove(ii)
            front += [ii]
    # Only use renamed library names if constructing the header manually.
    if "maximum" == compilation_mode:
        ret = [collect_libraries_rename(x) for x in front + sorted(remaining)]
    else:
        ret = front + sorted(remaining)
    if is_verbose():
        print("%s%s" % (output_message, str(ret)))
    return ret
def collect_libraries_rename(op):
    """Find replacement name for a library if it's problematic."""
    # TODO: Remove when FreeBSD/Linux handles libGL.so correctly.
    needs_versioned_gl = g_osname in ("FreeBSD", "Linux")
    if needs_versioned_gl and ("GL" == op):
        return "libGL.so.1"
    return op
def compress_file(compression, pretty, src, dst):
    """Compress a file to be a self-extracting file-dumping executable.

    compression: Compression format, one of 'lzma', 'raw' or 'xz'.
    pretty: Keep the unpacked temporary file after running.
    src: Input binary filename.
    dst: Output filename for the self-extracting shell script.
    Raises RuntimeError on unknown compression format.
    """
    str_tail = "sed 1,2d"
    # Many compos require that the temporary file is removed after running
    str_cleanup = ";rm ~;exit"
    if pretty:
        str_tail = "tail -n+3"
    # The #!/bin/sh shebang is needed when running zsh
    if "lzma" == compression:
        command = ["xz", "--format=lzma", "--lzma1=preset=9,lc=1,lp=0,nice=273,pb=0", "--stdout"]
        header = "#!/bin/sh\nHOME=/tmp/i;%s $0|lzcat>~;chmod +x ~;~%s" % (str_tail, str_cleanup)
    elif "raw" == compression:
        command = ["xz", "-9", "--extreme", "--format=raw", "--stdout"]
        header = "#!/bin/sh\nHOME=/tmp/i;%s $0|xzcat -F raw>~;chmod +x ~;~%s" % (str_tail, str_cleanup)
    elif "xz" == compression:
        command = ["xz", "--format=xz", "--lzma2=preset=9,lc=1,nice=273,pb=0", "--stdout"]
        header = "#!/bin/sh\nHOME=/tmp/i;%s $0|xzcat>~;chmod +x ~;~%s" % (str_tail, str_cleanup)
    else:
        raise RuntimeError("unknown compression format '%s'" % compression)
    (compressed, se) = run_command(command + [src], False)
    # FIX: context manager guarantees the handle is closed even if a write fails.
    with open(dst, "wb") as wfd:
        wfd.write((header + "\n").encode())
        wfd.write(compressed)
    make_executable(dst)
    print("Wrote '%s': %i bytes" % (dst, os.path.getsize(dst)))
def extract_symbol_names(source, prefix):
    """Analyze given preprocessed C source for symbol names.

    source: Preprocessed C source text.
    prefix: Symbol prefix to strip, e.g. 'dnload_'.
    Returns the set of symbol names (without the prefix) found.
    """
    symbolre = re.compile(r"[\s:;&\|\<\>\=\^\+\-\*/\(\)\?]" + prefix + r"([a-zA-Z0-9_]+)(?=[\s\(])")
    # BUG FIX: findall() takes no flags argument; the second positional
    # parameter is 'pos', so passing re.MULTILINE (== 8) silently skipped the
    # first 8 characters of the source. The pattern has no ^/$ anchors, so no
    # flag is needed at all.
    return set(symbolre.findall(source))
def find_library_definition(op):
    """Find library definition with name; None when not known."""
    candidates = (lib for lib in g_library_definitions if lib.get_name() == op)
    return next(candidates, None)
def find_symbol(op):
    """Find single symbol with name; raises if it is not known."""
    for library in g_library_definitions:
        symbol = library.find_symbol(op)
        if symbol:
            return symbol
    raise RuntimeError("symbol '%s' not known, please add it to the script" % (op))
def find_symbols(lst):
    """Find symbol object(s) corresponding to symbol string(s)."""
    return [find_symbol(name) for name in lst]
def generate_binary_minimal(source_file, compiler, assembler, linker, objcopy, elfling, libraries, output_file,
                            additional_sources=None):
    """Generate a binary using all possible tricks. Return whether or not reprocess is necessary.

    source_file: Main source file to compile into assembler (may be None).
    compiler / assembler / linker / objcopy: Toolchain wrapper objects.
    elfling: Elfling compressor instance or None to disable it.
    libraries: Library names to emit DT_NEEDED entries for.
    output_file: Base name for all intermediate and final files.
    additional_sources: Optional list of extra assembler source filenames.
    """
    # BUG FIX: mutable default argument ([]) replaced with a None sentinel.
    if additional_sources is None:
        additional_sources = []
    if source_file:
        compiler.compile_asm(source_file, output_file + ".S", True)
    segment_ehdr = AssemblerSegment(g_assembler_ehdr)
    # Program header layouts differ between 32- and 64-bit targets.
    if osarch_is_32_bit():
        segment_phdr_dynamic = AssemblerSegment(g_assembler_phdr32_dynamic)
        segment_phdr_interp = AssemblerSegment(g_assembler_phdr32_interp)
    elif osarch_is_64_bit():
        segment_phdr_dynamic = AssemblerSegment(g_assembler_phdr64_dynamic)
        segment_phdr_interp = AssemblerSegment(g_assembler_phdr64_interp)
    else:
        raise_unknown_address_size()
    segment_dynamic = AssemblerSegment(g_assembler_dynamic)
    segment_hash = AssemblerSegment(g_assembler_hash)
    segment_interp = AssemblerSegment(g_assembler_interp)
    segment_strtab = AssemblerSegment(g_assembler_strtab)
    segment_symtab = AssemblerSegment(g_assembler_symtab)
    # There may be symbols necessary for addition.
    und_symbols = get_platform_und_symbols()
    if is_listing(und_symbols):
        segment_symtab.add_symbol_empty()
        for ii in und_symbols:
            segment_symtab.add_symbol_und(ii)
        for ii in reversed(und_symbols):
            segment_strtab.add_strtab(ii)
        segment_dynamic.add_dt_symtab("symtab")
        segment_dynamic.add_dt_hash("hash")
        segment_hash.add_hash(und_symbols)
    else:
        segment_dynamic.add_dt_symtab(0)
    # Add libraries.
    for ii in reversed(libraries):
        library_name = linker.get_library_name(ii)
        segment_dynamic.add_dt_needed(library_name)
        segment_strtab.add_strtab(library_name)
    # Assembler file generation is more complex when elfling is enabled.
    if elfling:
        # NOTE(review): 'definition_ld' is neither a parameter nor a local of
        # this function; this relies on a module-level name of that exact name
        # existing - confirm, otherwise this branch raises NameError.
        asm = generate_elfling(output_file, compiler, elfling, definition_ld)
    else:
        asm = AssemblerFile(output_file + ".S")
    # Additional sources may have been specified, add them.
    for fname in additional_sources:
        additional_asm = AssemblerFile(fname)
        asm.incorporate(additional_asm)
    # Assemble content without headers to check for missing symbols.
    if asm.write(output_file + ".S", assembler):
        assembler.assemble(output_file + ".S", output_file + ".o")
        extra_symbols = readelf_list_und_symbols(output_file + ".o")
        additional_file = g_symbol_sources.compile_asm(compiler, assembler, extra_symbols, output_file + ".extra")
        # If additional code was needed, add it to our asm source.
        if additional_file:
            additional_asm = AssemblerFile(additional_file)
            asm.incorporate(additional_asm, re.sub(r'[\/\.]', '_', output_file + "_extra"))
    # Sort sections after generation.
    asm.sort_sections(assembler)
    # May be necessary to have two PT_LOAD headers as opposed to one.
    bss_section = asm.generate_fake_bss(assembler, und_symbols, elfling)
    if 0 < bss_section.get_alignment():
        replace_platform_variable("phdr_count", 4)
        if osarch_is_32_bit():
            segment_phdr_load_double = AssemblerSegment(g_assembler_phdr32_load_double)
            segment_phdr_load_bss = AssemblerSegment(g_assembler_phdr32_load_bss)
        elif osarch_is_64_bit():
            segment_phdr_load_double = AssemblerSegment(g_assembler_phdr64_load_double)
            segment_phdr_load_bss = AssemblerSegment(g_assembler_phdr64_load_bss)
        else:
            raise_unknown_address_size()
        load_segments = [segment_phdr_load_double, segment_phdr_load_bss]
    else:
        if osarch_is_32_bit():
            segment_phdr_load_single = AssemblerSegment(g_assembler_phdr32_load_single)
        elif osarch_is_64_bit():
            segment_phdr_load_single = AssemblerSegment(g_assembler_phdr64_load_single)
        else:
            raise_unknown_address_size()
        load_segments = [segment_phdr_load_single]
    # Collapse headers.
    segments_head = [segment_ehdr, segment_phdr_interp]
    segments_tail = [segment_phdr_dynamic]
    if is_listing(und_symbols):
        segments_tail += [segment_hash]
    segments_tail += [segment_dynamic]
    if is_listing(und_symbols):
        segments_tail += [segment_symtab]
    segments_tail += [segment_interp, segment_strtab]
    segments = merge_segments(segments_head) + load_segments + merge_segments(segments_tail)
    # Create content of earlier sections and write source when done.
    if asm.hasSectionAlignment():
        asm.getSectionAlignment().create_content(assembler)
    bss_section.create_content(assembler, "end")
    # Write headers out first.
    fname = output_file + ".final"
    # FIX: context manager guarantees the source file is closed.
    with open(fname + ".S", "w") as fd:
        header_sizes = 0
        for ii in segments:
            ii.write(fd, assembler)
            header_sizes += ii.size()
        if is_verbose():
            print("Size of headers: %i bytes" % (header_sizes))
        # Write content after headers.
        asm.write(fd, assembler)
    if is_verbose():
        print("Wrote assembler source: '%s'" % (fname + ".S"))
    # Assemble headers
    assembler.assemble(fname + ".S", fname + ".o")
    link_files = [fname + ".o"]
    # Link all generated files.
    linker.generate_linker_script(output_file + ".ld", True)
    linker.set_linker_script(output_file + ".ld")
    linker.link_binary(link_files, output_file + ".bin")
    run_command([objcopy, "--output-target=binary", output_file + ".bin", output_file + ".unprocessed"])
    if bss_section.get_alignment():
        readelf_zero(output_file + ".unprocessed", output_file + ".stripped")
    else:
        readelf_truncate(output_file + ".unprocessed", output_file + ".stripped")
def generate_elfling(output_file, compiler, elfling, definition_ld):
    """Generate elfling stub.

    Compiles the elfling unpacker C++ source into assembler and merges the
    existing program assembler file into it as the payload to be uncompressed.
    Returns the combined AssemblerFile.
    """
    elfling.write_c_source(output_file + ".elfling.cpp", definition_ld)
    compiler.compile_asm(output_file + ".elfling.cpp", output_file + ".elfling.S")
    asm = AssemblerFile(output_file + ".elfling.S")
    additional_asm = AssemblerFile(output_file + ".S")
    # Entry point is used as compression start information.
    elfling_align = int(PlatformVar("memory_page"))
    if elfling.has_data():
        # Compressed data already exists: align it and start at '_start'.
        alignment_section = AssemblerSectionAlignment(elfling_align, ELFLING_PADDING, ELFLING_OUTPUT, "end")
        set_program_start("_start")
    else:
        # No data yet: program start is the elfling output label itself.
        alignment_section = AssemblerSectionAlignment(elfling_align, ELFLING_PADDING, ELFLING_OUTPUT)
        set_program_start(ELFLING_OUTPUT)
    asm.add_sections(alignment_section)
    asm.incorporate(additional_asm, "_incorporated", ELFLING_UNCOMPRESSED)
    return asm
def generate_glsl(filenames, preprocessor, definition_ld, mode, inlines, renames, simplifys):
    """Generate GLSL, processing given GLSL source files."""
    glsl_db = Glsl()
    for entry in filenames:
        if is_listing(entry):
            # A listing is ordered: filename, [variable name,] output name.
            if len(entry) == 3:
                glsl_db.read(preprocessor, definition_ld, entry[0], entry[1], entry[2])
            elif len(entry) == 2:
                varname = re.sub(r'\.', r'_', os.path.basename(entry[0]))
                glsl_db.read(preprocessor, definition_ld, entry[0], varname, entry[1])
            else:
                raise RuntimeError("invalid glsl file listing input: '%s'" % (str(entry)))
        else:
            # Plain filename: derive the variable name from the basename.
            varname = re.sub(r'\.', r'_', os.path.basename(entry))
            glsl_db.read(preprocessor, definition_ld, entry, varname)
    glsl_db.parse()
    glsl_db.crunch(mode, inlines, renames, simplifys)
    return glsl_db
def generate_glsl_extract(fname, preprocessor, definition_ld, mode, inlines, renames, simplifys):
    """Generate GLSL, extracting from source file.

    Scans 'fname' for '#include "something.glsl.h" // varname' directives,
    locates each referenced GLSL source and feeds them to generate_glsl().
    Raises RuntimeError when a referenced GLSL source cannot be located.
    """
    src_path, src_basename = os.path.split(fname)
    if src_path:
        src_path += "/"
    # FIX: context manager guarantees the source file is closed.
    with open(fname, "r") as fd:
        lines = fd.readlines()
    filenames = []
    glslre = re.compile(r'#\s*include [\<\"](.*\.glsl)\.(h|hh|hpp|hxx)[\>\"]\s*(\/\*|\/\/)\s*([^\*\/\s]+)', re.I)
    for ii in lines:
        match = glslre.match(ii)
        if match:
            glsl_path, glsl_base_filename = os.path.split(match.group(1))
            # Try with base path of source file first to limit location.
            glsl_filename = locate(src_path + glsl_path, glsl_base_filename)
            # BUG FIX: was 'src_base_path', an undefined name that raised
            # NameError whenever the first locate() attempt failed.
            if not glsl_filename and src_path:
                glsl_filename = locate(glsl_path, glsl_base_filename)
            if not glsl_filename:
                raise RuntimeError("could not locate GLSL source '%s'" % (glsl_base_filename))
            glsl_varname = match.group(4)
            glsl_output_name = glsl_filename + "." + match.group(2)
            filenames += [[glsl_filename, glsl_varname, glsl_output_name]]
    if filenames:
        glsl_db = generate_glsl(filenames, preprocessor, definition_ld, mode, inlines, renames, simplifys)
        glsl_db.write()
def generate_include_rand(implementation_rand, target_search_path, definition_ld):
    """Generates the rand()/srand() include.

    implementation_rand: Implementation name, e.g. 'bsd' or 'gnu'.
    target_search_path: Paths to search for the implementation files.
    definition_ld: Loader definition string substituted into the template.
    Returns the formatted include template string.
    Raises RuntimeError when the implementation files cannot be found.
    """
    regex_rand_header = re.compile(r'%s[-_\s]+rand\.h(h|pp|xx)?' % (implementation_rand))
    regex_rand_source = re.compile(r'%s[-_\s]+rand\.c(c|pp|xx)?' % (implementation_rand))
    header_rand = locate(target_search_path, regex_rand_header)
    source_rand = locate(target_search_path, regex_rand_source)
    if (not header_rand) or (not source_rand):
        raise RuntimeError("could not find rand implementation for '%s'" % (implementation_rand))
    header_rand_path, header_rand = os.path.split(header_rand)
    source_rand_path, source_rand = os.path.split(source_rand)
    # BUG FIX: previously tested the function object 'is_verbose' (always
    # truthy) instead of calling it, so the message was printed unconditionally.
    if is_verbose():
        print("Using rand() implementation: '%s'" % (header_rand))
    replace_platform_variable("function_rand", "%s_rand" % (implementation_rand))
    replace_platform_variable("function_srand", "%s_srand" % (implementation_rand))
    rand_type_bsd = str(int(implementation_rand == "bsd"))
    rand_type_gnu = str(int(implementation_rand == "gnu"))
    return g_template_include_rand.format({"DEFINITION_LD": definition_ld,
                                           "RAND_TYPE_BSD": rand_type_bsd, "RAND_TYPE_GNU": rand_type_gnu,
                                           "HEADER_RAND": header_rand, "SOURCE_RAND": source_rand})
def get_platform_und_symbols():
"""Get the UND symbols required for this platform."""
ret = None
if osname_is_freebsd():
ret | |
<filename>ags-ds2.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""ags-ds2.py: A German Spy's Devil Summoner 2 ellipses challenge."""
__author__ = "TetrisFinalBoss"
__version__ = "0.4.3"
import sys
import cv2
import numpy
import pafy
import re
import os.path
import getopt
# YouTube playlist ID the videos are fetched from (passed to pafy elsewhere).
AGS_DS2_PLAYLIST = 'PL_ftpUY_ldBTtHOUQLt5irghX1XfIzoy-'
def getFile(media):
for stream in media.streams:
if stream.dimensions[1] == 360 and stream.extension=='mp4':
m = re.search('\[Part\s?([\d]+)(\s-\sFinal)?\]',media.title)
fname = "%s - %s.%s"%(m.group(1),media.videoid,stream.extension)
if not os.path.isfile(fname):
print 'Downloading video %s from playlist'%(m.group(1))
stream.download(fname)
else:
print "File '%s' already exists, skipping download"%(fname)
return fname
def tsum(t1,t2):
    """Element-wise sum of two tuples (e.g. 2D coordinates)."""
    return tuple(a + b for a, b in zip(t1, t2))
class DialogLocator:
    """Detects whether a dialog box is open by matching its arrow glyph."""
    D_MATCHMINIMUM = 0.5
    D_WINDOW = {'left':396, 'top':61, 'right':418, 'bottom':84}
    def __init__(self):
        pattern_path = os.path.dirname(sys.argv[0]) + "/dialog_pattern.png"
        self.__dialogPattern = cv2.imread(pattern_path,cv2.IMREAD_GRAYSCALE)
    def locate(self, img):
        """Return True when the dialog pattern matches inside the window."""
        view = img[self.D_WINDOW['top']:self.D_WINDOW['bottom'],
                   self.D_WINDOW['left']:self.D_WINDOW['right']]
        res = cv2.matchTemplate(view, self.__dialogPattern, cv2.TM_SQDIFF_NORMED)
        best = cv2.minMaxLoc(res)[0]
        return best <= self.D_MATCHMINIMUM
class SomethingExplainedDetector:
    """Detects the "someone explained something" dialog phrasing.

    Requires both a quotation-mark glyph and the 'explained' word pattern to
    match inside their respective windows of the thresholded dialog image.
    """
    MATCHMINIMUM = 0.4
    QUOT_WINDOW = {'left':20, 'top':15, 'right':50, 'bottom':42}
    EXPL_WINDOW = {'left':20, 'top':15, 'right':400, 'bottom':42}
    def __init__(self):
        self.__count = 0
        self.__ncount = 0
        base = os.path.dirname(sys.argv[0])
        self.__quotPattern = cv2.imread(base + "/quot_pattern.png",cv2.IMREAD_GRAYSCALE)
        self.__explPattern = cv2.imread(base + "/expl_pattern.png",cv2.IMREAD_GRAYSCALE)
        self.__quotDim = self.__quotPattern.shape
        self.__explDim = self.__explPattern.shape
    def __find(self, img, window, pattern, dim):
        # Template-match inside the window; return a bounding box in absolute
        # image coordinates, or None when the best match is too weak.
        view = img[window['top']:window['bottom'], window['left']:window['right']]
        res = cv2.matchTemplate(view, pattern, cv2.TM_SQDIFF_NORMED)
        minmax = cv2.minMaxLoc(res)
        if minmax[0] >= self.MATCHMINIMUM:
            return None
        corner = tsum(minmax[2], (window['left'], window['top']))
        return (corner, tsum(corner, (dim[1], dim[0])))
    def detect(self, img):
        """Return bounding boxes of the phrase parts found in img."""
        found = []
        quot = self.__find(img, self.QUOT_WINDOW, self.__quotPattern, self.__quotDim)
        if quot is None:
            # Nothing new; __count keeps its value until the dialog closes.
            self.__ncount = 0
            return found
        found.append(quot)
        expl = self.__find(img, self.EXPL_WINDOW, self.__explPattern, self.__explDim)
        if expl is None:
            self.__ncount = 0
            return found
        found.append(expl)
        # Both parts are present - counts as (at most) one detection per dialog.
        self.__ncount = 1 if self.__count == 0 else 0
        self.__count = 1
        return found
    def reset(self):
        self.__count = 0
        self.__ncount = 0
    def dialogClosed(self):
        # The dialog arrow vanished: forget everything seen in this dialog.
        self.__ncount = 0
        self.__count = 0
    def name(self):
        return "'someone explained something'"
    def uniqueObjects(self):
        return False
    def objectsCount(self):
        return self.__count
    def newObjectsCount(self):
        return self.__ncount
class CircumstancesExplainedDetector:
    """Detects the 'explained the circumstances' phrase.

    Blocking detector: once seen inside a dialog box, the object is assumed
    to remain present until the dialog closes.
    """
    MATCHMINIMUM = 0.4
    SEARCH_WINDOW = {'left':10, 'top':10, 'right':400, 'bottom':84}
    def __init__(self):
        self.__count = 0
        self.__ncount = 0
        self.__pobj = []
        self.__ec1Pattern = cv2.imread(os.path.dirname(sys.argv[0]) + "/etc_pattern.png",cv2.IMREAD_GRAYSCALE)
        self.__ec1Dim = self.__ec1Pattern.shape
    def detect(self, img):
        """Return bounding boxes of the phrase (possibly remembered)."""
        win = self.SEARCH_WINDOW
        view = img[win['top']:win['bottom'], win['left']:win['right']]
        res = cv2.matchTemplate(view, self.__ec1Pattern, cv2.TM_SQDIFF_NORMED)
        best_val, _, best_loc, _ = cv2.minMaxLoc(res)
        if best_val < self.MATCHMINIMUM:
            corner = tsum(best_loc, (win['left'], win['top']))
            self.__pobj = [(corner, tsum(corner, (self.__ec1Dim[1], self.__ec1Dim[0])))]
            self.__ncount = 1 if self.__count == 0 else 0
            self.__count = 1
        else:
            # Not visible right now, but if it was found earlier in this dialog
            # box, assume it is still present (this detector is a blocking one).
            self.__count = len(self.__pobj)
            self.__ncount = 0
        return self.__pobj
    def reset(self):
        self.__count = 0
        self.__ncount = 0
        self.__pobj = []
    def dialogClosed(self):
        # The dialog arrow vanished: forget remembered objects.
        self.__ncount = 0
        self.__count = 0
        self.__pobj = []
    def name(self):
        return "'explained the circumstances'"
    def uniqueObjects(self):
        return True
    def objectsCount(self):
        return self.__count
    def newObjectsCount(self):
        return self.__ncount
class MeaningfulSilenceDetector:
    """Detects the full-line silence ellipses ('......').

    The template is synthesized at construction time rather than loaded from
    disk: six evenly spaced 2x2 dots drawn onto a dark background.
    """
    MATCHMINIMUM = 0.4
    PATTERN_SIZE = (16,60,1)
    PATTERN_OFFSET = {'x':18,'y':10}
    SEARCH_WINDOW = {'left':10, 'top':10, 'right':110, 'bottom':40}
    PATTERN_COLOR = 127
    def __init__(self):
        self.__pattern = numpy.zeros(self.PATTERN_SIZE, numpy.uint8)
        for dot in xrange(6):
            left = self.PATTERN_OFFSET['x'] + 7 * dot
            top = self.PATTERN_OFFSET['y']
            cv2.rectangle(self.__pattern, (left, top), (left + 1, top + 1), self.PATTERN_COLOR, -1)
        self.__count = 0
        self.__ncount = 0
        self.__pobj = []
    def detect(self, img):
        """Return bounding boxes of the silence line (possibly remembered)."""
        win = self.SEARCH_WINDOW
        view = img[win['top']:win['bottom'], win['left']:win['right']]
        res = cv2.matchTemplate(view, self.__pattern, cv2.TM_SQDIFF_NORMED)
        # Only one '......' can appear per dialog, the global minimum suffices.
        min_val, _, min_loc, _ = cv2.minMaxLoc(res)
        if min_val < self.MATCHMINIMUM:
            corner = tsum(min_loc, (win['left'], win['top']))
            self.__pobj = [(corner, tsum(corner, (self.PATTERN_SIZE[1], self.PATTERN_SIZE[0])))]
            self.__ncount = 1 if self.__count == 0 else 0
            self.__count = 1
        else:
            # Not visible right now; keep anything remembered from earlier
            # frames of the same dialog box.
            self.__count = len(self.__pobj)
            self.__ncount = 0
        return self.__pobj
    def dialogClosed(self):
        self.__count = 0
        self.__ncount = 0
        self.__pobj = []
    def reset(self):
        self.__count = 0
        self.__ncount = 0
        self.__pobj = []
    def name(self):
        return "'......'"
    def uniqueObjects(self):
        return True
    def objectsCount(self):
        return self.__count
    def newObjectsCount(self):
        return self.__ncount
class MidSentenceEllipsesDetector:
    """Detects short mid-sentence ellipses ('...') anywhere in the dialog text.

    Unlike the other detectors, this one can report several objects per frame:
    it recursively searches each text row for local template-match minimums.
    """
    MATCHMINIMUM = 0.5
    # Pattern bitmap dimensions as (height, width, channels).
    PATTERN_SIZE = (8,20,1)
    PATTERN_OFFSET = {'x':1,'y':3}
    PATTERN_COLOR = 127
    def __init__(self):
        # Synthesize the '...' pattern: three evenly spaced 2x2 dots.
        self.__pattern = numpy.zeros(self.PATTERN_SIZE, numpy.uint8)
        for i in xrange(3):
            cv2.rectangle(self.__pattern,
                    (self.PATTERN_OFFSET['x']+7*i,self.PATTERN_OFFSET['y']),
                    (self.PATTERN_OFFSET['x']+1+7*i,self.PATTERN_OFFSET['y']+1),
                    self.PATTERN_COLOR,
                    -1)
        self.__ncount = 0
        self.__count = 0
    def detect(self, img):
        """Return bounding boxes of all '...' occurrences found in img."""
        ret = []
        res = cv2.matchTemplate(img,self.__pattern,cv2.TM_SQDIFF_NORMED)
        # For each row in dialog do recursive search for global minimums
        def localMinInRow(row,offset):
            # Current dimensions
            h,w = row.shape
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(row)
            if min_val < self.MATCHMINIMUM:
                x,y = min_loc
                # Recalculate absolute position and append value
                min_loc = tsum(min_loc,offset)
                ret.append((min_loc,tsum(min_loc, (self.PATTERN_SIZE[1],self.PATTERN_SIZE[0]))))
                # Add threshold around this point
                mthresh = self.PATTERN_SIZE[1]
                # Now search minimums in left region
                if x-mthresh>self.PATTERN_SIZE[1]:
                    localMinInRow(row[0:h,0:x-mthresh],offset)
                # And in right region
                if w-x-mthresh > self.PATTERN_SIZE[1]:
                    localMinInRow(row[0:h,x+mthresh:w],tsum(offset,(x+mthresh,0)))
        # Three fixed text rows inside the dialog box.
        for i in xrange(3):
            yoff = 20+i*20+4
            localMinInRow(res[yoff:yoff+18,20:400],(20,yoff))
        # Sometimes objects may be lost and caught again later;
        # address this by never letting the count drop mid-dialog.
        l = len(ret)
        # Get new objects count
        self.__ncount = l - self.__count
        if self.__ncount<0:
            self.__ncount = 0
        # Store object count, assuming objects cannot disappear during the
        # same dialog line, so it always stays at the maximum seen so far.
        self.__count = max(l,self.__count)
        return ret
    def reset(self):
        self.__ncount = 0
        self.__count = 0
    def dialogClosed(self):
        self.__ncount = 0
        self.__count = 0
    def name(self):
        return "'...'"
    def uniqueObjects(self):
        return False
    def objectsCount(self):
        return self.__count
    def newObjectsCount(self):
        return self.__ncount
class EllipsesSearcher:
    """Runs all ellipsis detectors over video frames and aggregates counts."""
    # Threshold values
    THRESHOLD_VALUE = 90
    THRESHOLD_COLOR = 127
    # Dialog box window
    DIALOG = {'left':104,'top':248,'right':538,'bottom':340}
    # Dialog box highlight
    DIALOG_HIGHLIGHT = {'lt': (1,1), 'br': (432, 90)}
def __init__(self):
# Init detectors
self.__detectors = []
self.__detectors.append(MeaningfulSilenceDetector())
self.__detectors.append(MidSentenceEllipsesDetector())
self.__detectors.append(CircumstancesExplainedDetector())
self.__detectors.append(SomethingExplainedDetector())
# Init dialog locator
self.__dialogLocator = DialogLocator()
# Reset other values
self.__total = len(self.__detectors)*[0]
self.__frames = len(self.__detectors)*[0]
self.snapshots = False
self.statFile = None
self.useStatFile = False
self.ignoreStat = False
self.preview = False
self.detectorMask = 0xff
def __thresh(self,img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, t = cv2.threshold(gray, self.THRESHOLD_VALUE, self.THRESHOLD_COLOR, cv2.THRESH_TOZERO)
return t
def __writeUserStatObject(self, det, m, s):
if self.useStatFile:
self.statFile.write("%s is found at %s:%s\n"%(det,m,s))
self.statFile.flush()
def __writeUserStatHeader(self, fname):
if self.useStatFile:
self.statFile.write("===\n%s\n===\n"%(fname))
self.statFile.flush()
def __writeUserStatTotal(self, lst):
if self.useStatFile:
self.statFile.write("===\n")
for e in lst:
self.statFile.write("%s is said %d times (%d frames)\n"%e)
self.statFile.write("\n")
self.statFile.flush()
def __readStat(self,fname):
if self.ignoreStat:
return False
try:
statfile = open('statistics/'+fname+'.stat','r')
count = len(self.__detectors)*[0]
frames = len(self.__detectors)*[0]
for ln in statfile.readlines():
m = re.search('OBJECT\s([\d]+)\s([\d]+):([\d]+)',ln)
if m:
# Last parameter is object type - i.e. detector number
det = int(m.group(1))
self.__writeUserStatObject(self.__detectors[det].name(),m.group(2),m.group(3))
# And increase counter
count[det]+=1
continue
m = re.search('FRAMES\s([\d]+)\s([\d]+)',ln)
if m:
frames[int(m.group(1))]+=int(m.group(2))
continue
statfile.close()
# Increase total value
self.__total = map(lambda x,y: x+y, count, self.__total)
self.__frames = map(lambda x,y: x+y, frames, self.__frames)
# Display progress
print "Reading statistics from file: Done - %d objects detected"%(sum(count))
# And also write to user specified file
self.__writeUserStatTotal(zip(map(lambda x: x.name(), self.__detectors), count, frames))
# And that's it, this file is done
return True
except (OSError, IOError):
return False
def count(self,fname):
self.__writeUserStatHeader(fname)
# First - try to get statistics from file,
# so we don't have to recalculate stats once again
if self.__readStat(fname):
return
count = len(self.__detectors)*[0]
frames = | |
agent_id in range(self.num_agents):
self.info['explored_reward'].append(agent_explored_area[agent_id])
self.info['explored_ratio'].append(agent_explored_ratio[agent_id])
if self.timestep % self.args.num_local_steps == 0:
agents_explored_map = np.maximum(agents_explored_map, self.transform(self.current_explored_gt[agent_id], agent_id))
if self.timestep % self.args.num_local_steps == 0 and self.merge_ratio < self.explored_ratio_threshold and self.use_repeat_penalty:
self.info['merge_explored_reward'] -= (agents_explored_map[self.prev_merge_exlored_map == 1].sum() * (25./10000) * 0.02)
self.prev_merge_exlored_map = curr_merge_explored_map
self.save_position()
if self.info['time'][0] >= self.args.max_episode_length:
done = [True for _ in range(self.num_agents)]
if self.merge_ratio >= self.explored_ratio_threshold and self.use_complete_reward:
self.info['merge_explored_reward'] += 1.0
if self.args.save_trajectory_data:
self.save_trajectory_data()
else:
done = [False for _ in range(self.num_agents)]
return state, rew, done, self.info
def get_reward_range(self):
# This function is not used, Habitat-RLEnv requires this function
return (0., 1.0)
def get_reward(self, observations, agent_id):
# This function is not used, Habitat-RLEnv requires this function
return 0.
    def get_global_reward(self):
        """Compute per-agent and merged exploration rewards for this step.

        Returns a 5-tuple:
          agent_explored_rewards: per-agent newly explored area, scaled by
            (25/10000) per cell (m^2 conversion) * 0.02 * reward_gamma.
          agent_explored_ratios: per-agent newly explored fraction of that
            agent's explorable area.
          merge_explored_reward: same scaling applied to the merged map.
          merge_explored_ratio: newly explored fraction of the merged map.
          curr_merge_explored_map: merged explored map in the global frame.

        Side effects: updates prev_explored_area and prev_merge_explored_area,
        and decays reward_gamma when use_time_penalty is set.
        """
        agent_explored_rewards = []
        agent_explored_ratios = []
        # calculate individual reward
        curr_merge_explored_map = np.zeros_like(self.explored_map[0]) # global
        merge_explorable_map = np.zeros_like(self.explored_map[0]) # global
        for agent_id in range(self.num_agents):
            # Only cells that are actually explorable count as explored.
            curr_agent_explored_map = self.explored_map[agent_id] * self.explorable_map[agent_id]
            # Merge each agent's map into the global frame via its transform.
            curr_merge_explored_map = np.maximum(curr_merge_explored_map, self.transform(curr_agent_explored_map, agent_id))
            merge_explorable_map = np.maximum(merge_explorable_map, self.transform(self.explorable_map[agent_id], agent_id))
            curr_agent_explored_area = curr_agent_explored_map.sum()
            agent_explored_reward = (curr_agent_explored_area - self.prev_explored_area[agent_id]) * 1.0
            self.prev_explored_area[agent_id] = curr_agent_explored_area
            # converting to m^2 * Reward Scaling 0.02 * reward time penalty
            agent_explored_rewards.append(agent_explored_reward * (25./10000) * 0.02 * self.reward_gamma)
            reward_scale = self.explorable_map[agent_id].sum()
            agent_explored_ratios.append(agent_explored_reward/reward_scale)
        # calculate merge reward
        curr_merge_explored_area = curr_merge_explored_map.sum()
        merge_explored_reward_scale = merge_explorable_map.sum()
        merge_explored_reward = (curr_merge_explored_area - self.prev_merge_explored_area) * 1.0
        self.prev_merge_explored_area = curr_merge_explored_area
        merge_explored_ratio = merge_explored_reward / merge_explored_reward_scale
        merge_explored_reward = merge_explored_reward * (25./10000.) * 0.02 * self.reward_gamma
        if self.use_time_penalty:
            # Decay future rewards over time.
            self.reward_gamma *= self.reward_decay
        return agent_explored_rewards, agent_explored_ratios, merge_explored_reward, merge_explored_ratio, curr_merge_explored_map
def get_done(self, observations, agent_id):
# This function is not used, Habitat-RLEnv requires this function
return False
def get_info(self, observations, agent_id):
# This function is not used, Habitat-RLEnv requires this function
info = {}
return info
def seed(self, seed):
self._env.seed(seed)
self.rng = np.random.RandomState(seed)
def get_spaces(self):
return self.observation_space, self.action_space
def build_mapper(self):
params = {}
params['frame_width'] = self.args.env_frame_width
params['frame_height'] = self.args.env_frame_height
params['fov'] = self.args.hfov
params['resolution'] = self.map_resolution
params['map_size_cm'] = self.map_size_cm
params['agent_min_z'] = 25
params['agent_max_z'] = 150
params['agent_height'] = self.args.camera_height * 100
params['agent_view_angle'] = 0
params['du_scale'] = self.args.du_scale
params['vision_range'] = self.args.vision_range
params['visualize'] = self.use_render
params['obs_threshold'] = self.args.obs_threshold
params['num_local_steps'] = self.args.num_local_steps
self.selem = skimage.morphology.disk(self.args.obstacle_boundary /
self.map_resolution)
mapper = MapBuilder(params)
return mapper
def get_sim_location(self, agent_id):
agent_state = super().habitat_env.sim.get_agent_state(agent_id)
x = -agent_state.position[2]
y = -agent_state.position[0]
axis = quaternion.as_euler_angles(agent_state.rotation)[0]
if (axis % (2*np.pi)) < 0.1 or (axis % (2*np.pi)) > 2*np.pi - 0.1:
o = quaternion.as_euler_angles(agent_state.rotation)[1]
else:
o = 2*np.pi - quaternion.as_euler_angles(agent_state.rotation)[1]
if o > np.pi:
o -= 2 * np.pi
return x, y, o
def get_gt_pose_change(self, agent_id):
curr_sim_pose = self.get_sim_location(agent_id)
dx, dy, do = pu.get_rel_pose_change(
curr_sim_pose, self.last_sim_location[agent_id])
self.last_sim_location[agent_id] = curr_sim_pose
return dx, dy, do
def get_base_pose_change(self, action, gt_pose_change):
dx_gt, dy_gt, do_gt = gt_pose_change
if action == 1: # Forward
x_err, y_err, o_err = self.sensor_noise_fwd.sample()[0][0]
elif action == 3: # Right
x_err, y_err, o_err = self.sensor_noise_right.sample()[0][0]
elif action == 2: # Left
x_err, y_err, o_err = self.sensor_noise_left.sample()[0][0]
else: # Stop
x_err, y_err, o_err = 0., 0., 0.
x_err = x_err * self.args.noise_level
y_err = y_err * self.args.noise_level
o_err = o_err * self.args.noise_level
return dx_gt + x_err, dy_gt + y_err, do_gt + np.deg2rad(o_err)
def transform(self, inputs, agent_id):
inputs = torch.from_numpy(inputs)
n_rotated = F.grid_sample(inputs.unsqueeze(0).unsqueeze(
0).float(), self.n_rot[agent_id].float(), align_corners=True)
n_map = F.grid_sample(
n_rotated.float(), self.n_trans[agent_id].float(), align_corners=True)
n_map = n_map[0, 0, :, :].numpy()
return n_map
    def get_short_term_goal(self, inputs):
        """Compute a short-term goal for every agent.

        ``inputs`` carries per-agent map/exploration predictions
        ('map_pred', 'exp_pred'), the pose estimate plus planning window
        ('pose_pred') and the long-term 'goal'.  Returns one array per
        agent: [discretised relative angle, discretised relative distance,
        ground-truth action].  Side effects: updates visited maps,
        current-location caches and the extrinsic/intrinsic reward lists,
        and optionally renders.
        """
        args = self.args
        self.extrinsic_rew = []
        self.intrinsic_rew = []
        self.relative_angle = []
        def discretize(dist):
            # Piecewise-uniform distance binning: 5 cm bins up to 0.25 m,
            # 25 cm bins up to 3 m, 1 m bins up to 10 m, then one last bin.
            dist_limits = [0.25, 3, 10]
            dist_bin_size = [0.05, 0.25, 1.]
            if dist < dist_limits[0]:
                ddist = int(dist/dist_bin_size[0])
            elif dist < dist_limits[1]:
                ddist = int((dist - dist_limits[0])/dist_bin_size[1]) + \
                    int(dist_limits[0]/dist_bin_size[0])
            elif dist < dist_limits[2]:
                ddist = int((dist - dist_limits[1])/dist_bin_size[2]) + \
                    int(dist_limits[0]/dist_bin_size[0]) + \
                    int((dist_limits[1] - dist_limits[0])/dist_bin_size[1])
            else:
                # Beyond the last limit everything falls in the final bin.
                ddist = int(dist_limits[0]/dist_bin_size[0]) + \
                    int((dist_limits[1] - dist_limits[0])/dist_bin_size[1]) + \
                    int((dist_limits[2] - dist_limits[1])/dist_bin_size[2])
            return ddist
        # Get Map prediction
        map_pred = inputs['map_pred']
        exp_pred = inputs['exp_pred']
        output = [np.zeros((args.goals_size + 1))
                  for _ in range(self.num_agents)]
        for agent_id in range(self.num_agents):
            grid = np.rint(map_pred[agent_id])
            explored = np.rint(exp_pred[agent_id])
            # Get pose prediction and global policy planning window
            start_x, start_y, start_o, gx1, gx2, gy1, gy2 = inputs['pose_pred'][agent_id]
            gx1, gx2, gy1, gy2 = int(gx1), int(gx2), int(gy1), int(gy2)
            planning_window = [gx1, gx2, gy1, gy2]
            # Previous location, converted from metres to map cells relative
            # to the planning window origin.
            last_start_x, last_start_y = self.last_loc[agent_id][0], self.last_loc[agent_id][1]
            r, c = last_start_y, last_start_x
            last_start = [int(r * 100.0/self.map_resolution - gx1),
                          int(c * 100.0/self.map_resolution - gy1)]
            last_start = pu.threshold_poses(last_start, grid.shape)
            # Current location in the same cell coordinates.
            self.curr_loc[agent_id] = [start_x, start_y, start_o]
            r, c = start_y, start_x
            start = [int(r * 100.0/self.map_resolution - gx1),
                     int(c * 100.0/self.map_resolution - gy1)]
            start = pu.threshold_poses(start, grid.shape)
            # TODO: try reducing this
            # Mark a 5x5 patch around the current cell as visited.
            self.visited[agent_id][gx1:gx2, gy1:gy2][start[0]-2:start[0]+3,
                                                    start[1]-2:start[1]+3] = 1
            steps = 25  # ! wrong — hard-coded interpolation count (verify)
            # Rasterise the segment between the previous and current cell
            # into the visualisation map.
            for i in range(steps):
                x = int(last_start[0] + (start[0] -
                                         last_start[0]) * (i+1) / steps)
                y = int(last_start[1] + (start[1] -
                                         last_start[1]) * (i+1) / steps)
                self.visited_vis[agent_id][gx1:gx2, gy1:gy2][x, y] = 1
            # Same bookkeeping for the ground-truth pose, but in absolute
            # map cells (no planning-window offset).
            last_start_x, last_start_y = self.last_loc_gt[agent_id][0], self.last_loc_gt[agent_id][1]
            r, c = last_start_y, last_start_x
            last_start = [int(r * 100.0/self.map_resolution),
                          int(c * 100.0/self.map_resolution)]
            last_start = pu.threshold_poses(
                last_start, self.visited_gt[agent_id].shape)
            # Get ground truth pose
            start_x_gt, start_y_gt, start_o_gt = self.curr_loc_gt[agent_id]
            r, c = start_y_gt, start_x_gt
            start_gt = [int(r * 100.0/self.map_resolution),
                        int(c * 100.0/self.map_resolution)]
            start_gt = pu.threshold_poses(start_gt, self.visited_gt[agent_id].shape)
            steps = 25  # ! wrong — same hard-coded count as above
            for i in range(steps):
                x = int(last_start[0] + (start_gt[0] -
                                         last_start[0]) * (i+1) / steps)
                y = int(last_start[1] + (start_gt[1] -
                                         last_start[1]) * (i+1) / steps)
                self.visited_gt[agent_id][x, y] = 1
            # Get goal
            goal = inputs['goal'][agent_id]
            goal = pu.threshold_poses(goal, grid.shape)
            # Get intrinsic reward for global policy
            # Negative reward for exploring explored areas i.e.
            # for choosing explored cell as long-term goal
            self.extrinsic_rew.append(-pu.get_l2_distance(10, goal[0], 10, goal[1]))
            self.intrinsic_rew.append(-exp_pred[agent_id][goal[0], goal[1]])
            # Plan a reachable short-term goal towards the long-term goal.
            stg = self._get_stg(grid, explored, start, np.copy(goal), planning_window, agent_id)
            # Find GT action (only needed when training the local policy).
            if self.args.use_eval or self.args.use_render or not self.args.train_local:
                gt_action = 0
            else:
                gt_action = self._get_gt_action(1 - self.explorable_map[agent_id], start,
                                                [int(stg[0]), int(stg[1])],
                                                planning_window, start_o, agent_id)
            (stg_x, stg_y) = stg
            relative_dist = pu.get_l2_distance(stg_x, start[0], stg_y, start[1])
            # Cells -> metres (assumes a 5 cm cell size — TODO confirm).
            relative_dist = relative_dist*5./100.
            angle_st_goal = math.degrees(math.atan2(stg_x - start[0],
                                                    stg_y - start[1]))
            angle_agent = (start_o) % 360.0
            if angle_agent > 180:
                angle_agent -= 360
            # Heading error folded into (-180, 180].
            relative_angle = (angle_agent - angle_st_goal) % 360.0
            if relative_angle > 180:
                relative_angle -= 360
            # Discretised angle (5-degree bins), distance bin, GT action.
            output[agent_id][0] = int((relative_angle % 360.)/5.)
            output[agent_id][1] = discretize(relative_dist)
            output[agent_id][2] = gt_action
            self.relative_angle.append(relative_angle)
        if self.use_render:
            gif_dir = '{}/gifs/{}/episode_{}/all/'.format(self.run_dir, self.scene_id, self.episode_no)
            if not os.path.exists(gif_dir):
                os.makedirs(gif_dir)
            self.render(inputs, grid, map_pred, gif_dir)
        if self.render_merge:
            gif_dir = '{}/gifs/{}/episode_{}/merge/'.format(self.run_dir, self.scene_id, self.episode_no)
            if not os.path.exists(gif_dir):
                os.makedirs(gif_dir)
            self.render_merged_map(inputs, grid, map_pred, gif_dir)
        return output
def _get_gt_map(self, full_map_size, agent_id):
self.scene_name = self.habitat_env.sim.config.SCENE
# logger.error('Computing map for %s', self.scene_name)
# Get map in habitat simulator coordinates
self.map_obj = HabitatMaps(self.habitat_env)
if self.map_obj.size[0] < 1 or self.map_obj.size[1] < 1:
logger.error("Invalid map: {}/{}".format(self.scene_name, self.episode_no))
return None
print(self._env.sim.get_agent_state(agent_id).position.tolist())
agent_y = self._env.sim.get_agent_state(agent_id).position.tolist()[1]*100. # cm
if self.use_restrict_map:
sim_map = self.map_obj.get_restrict_map(agent_y, -50., 50.0)
else:
sim_map = self.map_obj.get_map()
sim_map[sim_map > 0] = 1.
# Transform the map to align with the agent
min_x, min_y = self.map_obj.origin/100.0
x, y, o = self.get_sim_location(agent_id)
x, y = -x - min_x, -y - min_y
range_x, range_y = self.map_obj.max/100. - self.map_obj.origin/100.
map_size = sim_map.shape
scale = 2.
self.grid_size = int(scale*max(map_size))
grid_map = np.zeros((self.grid_size, self.grid_size))
grid_map[(self.grid_size - map_size[0])//2:
(self.grid_size - map_size[0])//2 + map_size[0],
(self.grid_size - map_size[1])//2:
(self.grid_size - map_size[1])//2 + map_size[1]] = sim_map
if map_size[0] > map_size[1]:
self.agent_st.append(torch.tensor([[
(x - range_x/2.) * 2. / (range_x * scale) \
* map_size[1] * 1. / map_size[0],
(y - range_y/2.) * 2. / (range_y * scale),
180.0 + np.rad2deg(o)
]]))
else:
self.agent_st.append(torch.tensor([[
(x - range_x/2.) * 2. / (range_x * scale),
(y - range_y/2.) * 2. / (range_y * scale)
* map_size[0] * 1. / map_size[1],
180.0 + np.rad2deg(o)
]]))
rot_mat, trans_mat, n_rot_mat, n_trans_mat = get_grid_full(self.agent_st[agent_id], (1, 1,
self.grid_size, self.grid_size), (1, 1,
full_map_size, full_map_size), torch.device("cpu"))
grid_map = torch.from_numpy(grid_map).float()
grid_map = grid_map.unsqueeze(0).unsqueeze(0)
translated = F.grid_sample(grid_map, trans_mat, align_corners=True)
rotated = F.grid_sample(translated, rot_mat, align_corners=True)
episode_map = torch.zeros((full_map_size, full_map_size)).float()
if full_map_size > self.grid_size:
episode_map[(full_map_size - self.grid_size)//2:
(full_map_size - self.grid_size)//2 + self.grid_size,
(full_map_size - self.grid_size)//2:
(full_map_size - self.grid_size)//2 + self.grid_size] = \
rotated[0, 0]
else:
episode_map = rotated[0, 0,
(self.grid_size | |
# Source path: linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/UI/OpenGL/OpenGLDisplay.py
#!/usr/bin/env python
#
# Copyright (C) 2005 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: <EMAIL>
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
======================
OpenGL Display Service
======================
This component provides an OpenGL window and manages input events,
positioning and drawing of other components. It handles both OpenGL and
Pygame components.
OpenGLDisplay is a service that registers with the Coordinating
Assistant Tracker (CAT).
Example Usage
-------------
If you want to change some of the default parameters, like the viewport,
you first have to create an OpenGLDisplay object and then register it.
The following would show a simple cube from a slightly changed viewer
position::
display = OpenGLDisplay(viewerposition=(0,-10,0), lookat=(0,0,-15)).activate()
OpenGLDisplay.setDisplayService(display)
SimpleCube(position=(0,0,-15)).activate()
If you want to use pygame components, you have to override the
PygameDisplay service before creating any pygame components::
display = OpenGLDisplay.getDisplayService()
PygameDisplay.setDisplayService(display[0])
For examples of how components interact with OpenGLDisplay,
please have a look at OpenGLComponent.py and Interactor.py.
How does it work?
-----------------
OpenGLDisplay is a service. Obtain it by calling the
OpenGLDisplay.getDisplayService(...) static method. Any existing instance
will be returned, otherwise a new one is automatically created.
Alternatively, if you wish to configure OpenGLDisplay with options other
than the defaults, create your own instance, then register it as a
service by calling the PygameDisplay.setDisplayService(...) static
method. NOTE that it is only advisable to do this at the top level of
your system, as other components may have already requested and created
a OpenGLDisplay component!
When using only OpenGL components and no special display settings have
to be made, you won't see OpenGLDisplay as it is registered
automatically when it is first requested (by invoking the
getDisplayService(...) static method).
You can also use an instance of OpenGLDisplay to override the
PygameDisplay service as it implements most of the functionality of
PygameDisplay. You will want to do this when you want to use Pygame
components along with OpenGL components.
Since pygame only supports one display window at a time, you must not make
more than one OpenGLDisplay component.
OpenGLDisplay listens for requests arriving at its "notify" inbox. A request can
currently be to:
- register an OpenGL component (OGL_DISPLAYREQUEST)
- register a pygame component (DISPLAYREQUEST)
- register a pygame wrapper (WRAPPERREQUEST)
- register an eventspy (EVENTSPYREQUEST)
- listen or stop listening to events (ADDLISTENEVENT, REMOVELISTENEVENT)
- update the displaylist of an OpenGL component (UPDATE_DISPLAYLIST)
- update the transform of an OpenGL component (UPDATE_TRANSFORM)
- invoke a redraw of a pygame surface (REDRAW)
OpenGL components
^^^^^^^^^^^^^^^^^
OpenGL components get registered by an OGL_DISPLAYREQUEST. Such a
request is a dictionary with the following keys::
{
"OGL_DISPLAYREQUEST": True, # OpenGL Display request
"objectid" : id(object), # id of requesting object (for identification)
"callback" : (component,"inboxname"), # to send the generated event id to
"events" : (component, "inboxname"), # to send event notification (optional)
"size": (x,y,z), # size of object (not yet used)
}
When OpenGLDisplay received such a request it generates an identifier
and returns it to the box you specify by "callback". This identifier can
later be used to determine if a mouse event "hit" the object.
It is important to note that OpenGL components don't draw and transform
themselves directly but only hand displaylists and Transform objects to the
display service. After an OpenGL component has been registered, it can send
displaylist- and transform-updates. These requests are dictionaries of
the following form::
{
"DISPLAYLIST_UPDATE": True, # update displaylist
"objectid": id(object), # id of requesting object
"displaylist": displaylist # new displaylist
}
If an object is static, i.e. does not change its geometry, it only needs
to send this update one time. Dynamic objects can provide new
displaylists as often as they need to.::
{
"TRANSFORM_UPDATE": True, # update transform
"objectid": id(self), # id of requesting object
"transform": self.transform # new transform
}
A transform update should be sent every time the object transform
changes, i.e. it is moved.
OpenGL components can also request listening to events. See "Listening
to events" below.
It is generally recommended to use the class OpenGLComponent as base
class for OpenGL components. It implements all the functionality
required to create, draw, move OpenGL components and to handle events
(see OpenGLComponent.py for the class and e.g. SimpleCube.py, Button.py
and other components for examples).
Pygame components
^^^^^^^^^^^^^^^^^
OpenGLDisplay is designed to be compatible with PygameDisplay. After
overriding the PygameDisplay service, pygame components can be created
as usual. See the documentation of PygameDisplay
(Kamaelia/UI/PygameDisplay.py) for how to do this.
NOTE: Overlays are not supported yet.
Pygame wrappers
^^^^^^^^^^^^^^^
It is possible, by sending a WRAPPERREQUEST, to wrap an already
registered pygame component in an OpenGL component. The surface of the
pygame component is then excluded from normal drawing and this
responsibility is handed to the requesting component by giving it the
texture name corresponding to the surface. The event processing of mouse
events is then also relinked to be done by the wrapper.
The wrapper request is a dictionary with the following keys::
{
"WRAPPERREQUEST" : True, # wrap a pygame component
"wrapcallback" : (object, "inboxname"), # send response here
"eventrequests" : (object, "inboxname"), # to receive event requests by the wrapped component
"wrap_objectid": id(wrapped_component) # object id of the component to be wrapped
}
When a WRAPPERREQUEST is received for a component which is not
registered yet, it is stored until the component to be wrapped gets
registered.
When a wrapper request was received, the OpenGL display service returns
a dictionary to the box specified by "wrapcallback" containing the
following keys::
{
"texname": texname, # OpenGL texture name
"texsize": (width, height), # texture coordinate size
"size": (width, height) # size of pygame surface in pixels
}
See PygameWrapperPlane.py for an example implementation of a wrapper.
Listening to events
^^^^^^^^^^^^^^^^^^^
Once your component has been registered, it can request to be notified
of specific pygame events. The same requests are used for Pygame and
OpenGL components, only the keys are slightly different.
To request to listen to a given event, send a dictionary to the "notify"
inbox, containing the following::
{
"ADDLISTENEVENT" : pygame_eventtype, # example: pygame.KEYDOWN
"surface" : your_surface, # for pygame components
"objectid" : id(object), # for OpenGL components
}
To unsubscribe from a given event, send a dictionary containing::
{
"REMOVELISTENEVENT" : pygame_eventtype,
"surface" : your_surface, # for pygame components
"objectid" : id(object), # for OpenGL components
}
Events will be sent to the inbox specified in the "events" key of the
"DISPLAYREQUEST" or "OGL_DISPLAYREQUEST" message. They arrive as a list
of pygame event objects.
The event objects are of type Bunch, with the following variables:
- type -- Pygame event type
For events of type pygame.KEYDOWN, pygame.KEYUP:
- key -- Pressed or released key
For events of type pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP:
- pos -- Mouse position
- button -- Pressed or released mouse button number
For events of type pygame.MOUSEMOTION:
- rel -- Relative mouse motion.
- buttons -- Buttons pressed while mousemotion
For events of type pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP, pygame.MOUSEMOTION when sent to OpenGL components:
- viewerposition -- Position of viewer
- dir -- Direction vector generated from the mouse position
- hitobjects -- List of hit objects
NOTE: If the event is MOUSEMOTION, MOUSEBUTTONUP or MOUSEBUTTONDOWN then
you will instead receive a replacement object, with the same attributes
as the pygame event. But for pygame components, the 'pos' attribute
adjusted so that (0,0) is the top left corner of *your* surface. For
OpenGL components the origin and direction of the intersection vector
determined using the mouse position and viewport will be added as well
as a list of identifiers of objects that have been hit.
If a component has requested reception of an event type, it gets every
event that happens of that type, regardless if it is of any concern to
the component. In the case of mouse events there is a list of hit
objects included which are determined by using OpenGL picking.
Eventspies
^^^^^^^^^^
Eventspies are components that basically listen to events for other
components. They are registered by sending an | |
{
'withscores': withscores,
'score_cast_func': score_cast_func,
'callback': callback
}
self._execute_command(*pieces, **options)
def zrank(self, name, value, callback=None):
"""
Returns a 0-based value indicating the rank of ``value`` in sorted set
``name``
"""
self._execute_command('ZRANK', name, value, callback=callback)
def zrem(self, name, *values, **kwargs):
"Remove member ``values`` from sorted set ``name``"
callback = kwargs.get('callback', None)
self._execute_command('ZREM', name, *values, callback=callback)
def zremrangebyrank(self, name, min, max, callback=None):
"""
Remove all elements in the sorted set ``name`` with ranks between
``min`` and ``max``. Values are 0-based, ordered from smallest score
to largest. Values can be negative indicating the highest scores.
Returns the number of elements removed
"""
self._execute_command('ZREMRANGEBYRANK', name, min, max,
callback=callback)
def zremrangebyscore(self, name, min, max, callback=None):
"""
Remove all elements in the sorted set ``name`` with scores
between ``min`` and ``max``. Returns the number of elements removed.
"""
self._execute_command('ZREMRANGEBYSCORE', name, min, max,
callback=callback)
def zrevrange(self, name, start, end, withscores=False,
score_cast_func=float, callback=None):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in descending order.
``start`` and ``end`` can be negative, indicating the end of the range.
``withscores`` indicates to return the scores along with the values
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
pieces = ['ZREVRANGE', name, start, end]
if withscores:
pieces.append('withscores')
options = {
'withscores': withscores,
'score_cast_func': score_cast_func,
'callback': callback
}
self._execute_command(*pieces, **options)
def zrevrangebyscore(self, name, max, min, start=None, num=None,
withscores=False, score_cast_func=float,
callback=None):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max`` in descending order.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZREVRANGEBYSCORE', name, max, min]
if start is not None and num is not None:
pieces.extend(['LIMIT', start, num])
if withscores:
pieces.append('withscores')
options = {
'withscores': withscores,
'score_cast_func': score_cast_func,
'callback': callback
}
self._execute_command(*pieces, **options)
def zrevrank(self, name, value, callback=None):
"""
Returns a 0-based value indicating the descending rank of
``value`` in sorted set ``name``
"""
self._execute_command('ZREVRANK', name, value, callback=callback)
def zscore(self, name, value, callback=None):
"Return the score of element ``value`` in sorted set ``name``"
self._execute_command('ZSCORE', name, value, callback=callback)
def zunionstore(self, dest, keys, aggregate=None, callback=None):
"""
Union multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
self._zaggregate('ZUNIONSTORE', dest, keys, aggregate, callback)
def _zaggregate(self, command, dest, keys, aggregate, callback):
pieces = [command, dest, len(keys)]
if isinstance(keys, dict):
keys, weights = keys.iterkeys(), keys.itervalues()
else:
weights = None
pieces.extend(keys)
if weights:
pieces.append('WEIGHTS')
pieces.extend(weights)
if aggregate:
pieces.append('AGGREGATE')
pieces.append(aggregate)
return self._execute_command(*pieces, callback=callback)
### HASH COMMANDS
def hdel(self, name, *keys, **kwargs):
"Delete ``keys`` from hash ``name``"
callback = kwargs.get('callback')
self._execute_command('HDEL', name, *keys, callback=callback)
def hexists(self, name, key, callback=None):
"Returns a boolean indicating if ``key`` exists within hash ``name``"
self._execute_command('HEXISTS', name, key, callback=callback)
def hget(self, name, key, callback=None):
"Return the value of ``key`` within the hash ``name``"
self._execute_command('HGET', name, key, callback=callback)
def hgetall(self, name, callback=None):
"Return a Python dict of the hash's name/value pairs"
self._execute_command('HGETALL', name, callback=callback)
def hincrby(self, name, key, amount=1, callback=None):
"Increment the value of ``key`` in hash ``name`` by ``amount``"
self._execute_command('HINCRBY', name, key, amount, callback=callback)
def hincrbyfloat(self, name, key, amount=1.0, callback=None):
"""
Increment the value of ``key`` in hash ``name`` by floating ``amount``
"""
self._execute_command('HINCRBYFLOAT', name, key, amount,
callback=callback)
def hkeys(self, name, callback=None):
"Return the list of keys within hash ``name``"
self._execute_command('HKEYS', name, callback=callback)
def hlen(self, name, callback=None):
"Return the number of elements in hash ``name``"
self._execute_command('HLEN', name, callback=callback)
def hset(self, name, key, value, callback=None):
"""
Set ``key`` to ``value`` within hash ``name``
Returns 1 if HSET created a new field, otherwise 0
"""
self._execute_command('HSET', name, key, value, callback=callback)
def hsetnx(self, name, key, value, callback=None):
"""
Set ``key`` to ``value`` within hash ``name`` if ``key`` does not
exist. Returns 1 if HSETNX created a field, otherwise 0.
"""
self._execute_command('HSETNX', name, key, value, callback=callback)
def hmset(self, name, mapping, callback=None):
"""
Set key to value within hash ``name`` for each corresponding
key and value from the ``mapping`` dict.
"""
items = [i for k, v in mapping.iteritems() for i in (k, v)]
self._execute_command('HMSET', name, *items, callback=callback)
def hmget(self, name, keys, callback=None):
"Returns a list of values ordered identically to ``keys``"
self._execute_command('HMGET', name, *keys, callback=callback)
def hvals(self, name, callback=None):
"Return the list of values within hash ``name``"
self._execute_command('HVALS', name, callback=callback)
def publish(self, channel, message, callback=None):
"""
Publish ``message`` on ``channel``.
Returns the number of subscribers the message was delivered to.
"""
self._execute_command('PUBLISH', channel, message, callback=callback)
def eval(self, script, numkeys, *keys_and_args, **kwargs):
"""
Execute the Lua ``script``, specifying the ``numkeys`` the script
will touch and the key names and argument values in ``keys_and_args``.
Returns the result of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
"""
callback = kwargs.get('callback')
self._execute_command(
'EVAL',
script, numkeys, *keys_and_args, callback=callback)
def evalsha(self, sha, numkeys, *keys_and_args, **kwargs):
"""
Use the ``sha`` to execute a Lua script already registered via EVAL
or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the
key names and argument values in ``keys_and_args``. Returns the result
of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
"""
callback = kwargs.get('callback')
self._execute_command(
'EVALSHA',
sha, numkeys, *keys_and_args, callback=callback)
def script_exists(self, *args, **kwargs):
"""
Check if a script exists in the script cache by specifying the SHAs of
each script as ``args``. Returns a list of boolean values indicating if
if each already script exists in the cache.
"""
options = {'parse': 'EXISTS', 'callback': kwargs.get('callback')}
self._execute_command('SCRIPT', 'EXISTS', *args, **options)
def script_flush(self, **options):
"Flush all scripts from the script cache"
options.setdefault('parse', 'FLUSH')
self._execute_command('SCRIPT', 'FLUSH', **options)
def script_kill(self, **options):
"Kill the currently executing Lua script"
options.setdefault('parse', 'KILL')
self._execute_command('SCRIPT', 'KILL', **options)
def script_load(self, script, **options):
"Load a Lua ``script`` into the script cache. Returns the SHA."
options.setdefault('parse', 'LOAD')
self._execute_command('SCRIPT', 'LOAD', script, **options)
def script_run(self, name, keys=[], args=[], **options):
"""
Run a script of server specified by shard_hint. If not present,
use first key as hint
"""
server, _ = self._get_server(options.get('shard_hint', keys[0]))
try:
script = server.script_instances.get(name)
except AttributeError:
script, server.script_instances = None, {}
finally:
if script is None:
script = self.register_script(self.scripts[name])
server.script_instances[name] = script
# run it
script(keys, args, **options)
class RedisProtocol(RedisCommands, ProtocolMixin):
    # URL schemes this protocol registers for (presumably consumed by
    # ProtocolMixin's scheme dispatch — verify against ProtocolMixin).
    SCHEMES = ['rd', 'redis']
    # Default Redis TCP port, used when the address omits one.
    DEFAULT_PORT = 6379
    # Connection class used for each server endpoint.
    CONNECTION = RedisConnection
    def __init__(self, *args, **kwargs):
        # Build the reply parser first.  NOTE(review): it receives the
        # full kwargs, including 'scripts', which is only popped
        # afterwards — presumably RedisParser ignores unknown options;
        # confirm before reordering.
        self.parser = RedisParser(**kwargs)
        # Named Lua scripts available to script_run(); removed from kwargs
        # so the ProtocolMixin initializer does not see the key.
        self.scripts = kwargs.pop('scripts', {})
        super(RedisProtocol, self).__init__(*args, **kwargs)
def _get_redis_server(self, *args, **options):
assert args
if isinstance(args[0], basestring):
sharding = args[1] if len(args) > 1 else None
cmd, sharding = args[0], options.pop('shard_hint', sharding)
connection, _ = self._get_server(sharding)
else:
connection = args.pop(0)
cmd = args[0]
# return tupple
return (connection, cmd, args)
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
"""
return Pipeline(
self,
self.parser.response_callbacks,
transaction,
shard_hint)
    def register_script(self, script):
        """
        Register a Lua ``script``.  Returns a Script object that is
        callable and hides the complexity of dealing with scripts, keys,
        and shas.  This is the preferred way to work with Lua scripts.
        """
        return Script(self, script)
def shard(self, keys, as_pipes=False, **pipe_kwargs):
"""
Group keys based on expected server. By defauult return keys
grouped by server, but if as_pipes is present, it also returns a ready
| |
of columns
for (name, index) in controlGroupNames.items():
data[geneID][CONTROL_GROUP_KEY][name].append(vals[index])
# get values for second group of columns
for (name, index) in dataGroupNames.items():
data[geneID][DATA_GROUP_KEY][name].append(vals[index])
#end else
#endfor
## merge duplicates by averaging
for geneID in data.keys():
for atrName in data[geneID][CONTROL_GROUP_KEY].keys():
values = data[geneID][CONTROL_GROUP_KEY][atrName]
data[geneID][CONTROL_GROUP_KEY][atrName] = sum(values) / float(len(values))
for atrName in data[geneID][DATA_GROUP_KEY].keys():
values = data[geneID][DATA_GROUP_KEY][atrName]
data[geneID][DATA_GROUP_KEY][atrName] = sum(values) / float(len(values))
## merge duplicates by averaging
#if self.ui.meanRadioButton.isChecked():
#for geneID in data.keys():
#for atrName in data[geneID][CONTROL_GROUP_KEY].keys():
#values = data[geneID][CONTROL_GROUP_KEY][atrName]
#data[geneID][CONTROL_GROUP_KEY][atrName] = sum(values) / float(len(values))
#for atrName in data[geneID][DATA_GROUP_KEY].keys():
#values = data[geneID][DATA_GROUP_KEY][atrName]
#data[geneID][DATA_GROUP_KEY][atrName] = sum(values) / float(len(values))
## merge duplicates by median
#elif self.ui.medianRadioButton.isChecked():
#for geneID in data.keys():
#for atrName in data[geneID][CONTROL_GROUP_KEY].keys():
#values = data[geneID][CONTROL_GROUP_KEY][atrName]
#data[geneID][CONTROL_GROUP_KEY][atrName] = median(values)
#for atrName in data[geneID][DATA_GROUP_KEY].keys():
#values = data[geneID][DATA_GROUP_KEY][atrName]
#data[geneID][DATA_GROUP_KEY][atrName] = median(values)
## take one duplicate at random
#elif self.ui.randomRadioButton.isChecked():
#for geneID in data.keys():
#for atrName in data[geneID][CONTROL_GROUP_KEY].keys():
#values = data[geneID][CONTROL_GROUP_KEY][atrName]
#data[geneID][CONTROL_GROUP_KEY][atrName] = choice(values)
#for atrName in data[geneID][DATA_GROUP_KEY].keys():
#values = data[geneID][DATA_GROUP_KEY][atrName]
#data[geneID][DATA_GROUP_KEY][atrName] = choice(values)
##end
namesDict = {CONTROL_GROUP_KEY: controlGroupNames, DATA_GROUP_KEY: dataGroupNames}
table = __makeExampleTable(namesDict, data)
logFCs = {}
if calcMethod == 'ratio':
if dataFormat == 'log2': # log2 data have to be transformed for ratio computation
for geneID in data.keys():
for attrName in namesDict[CONTROL_GROUP_KEY]:
data[geneID][CONTROL_GROUP_KEY][attrName] = math.pow(2, data[geneID][CONTROL_GROUP_KEY][attrName])
for attrName in namesDict[DATA_GROUP_KEY]:
data[geneID][DATA_GROUP_KEY][attrName] = math.pow(2, data[geneID][DATA_GROUP_KEY][attrName])
for geneID in data.keys():
control_array = [data[geneID][CONTROL_GROUP_KEY][attrName] for attrName in namesDict[CONTROL_GROUP_KEY]]
data_array = [data[geneID][DATA_GROUP_KEY][attrName] for attrName in namesDict[DATA_GROUP_KEY]]
numerator = mean(data_array)
denumerator = mean(control_array)
if numerator < 0 or denumerator < 0:
print 'Invalid values, gene %s' % str(geneID)
continue
logFCs[geneID] = numerator / denumerator
# for those less than 1 invert and give negative sign
if logFCs[geneID] < 1:
logFCs[geneID] = -1.0 / logFCs[geneID]
else:
# difference
if dataFormat == 'linear': # linear data have to be transformed for log2 difference computation
for geneID in data.keys():
for attrName in namesDict[CONTROL_GROUP_KEY]:
if data[geneID][CONTROL_GROUP_KEY][attrName] <= 0:
raise ValueError('Cannot transform linear data to log2: value is <= 0 for gene %s' % str(geneID))
else:
data[geneID][CONTROL_GROUP_KEY][attrName] = math.log(data[geneID][CONTROL_GROUP_KEY][attrName], 2)
for attrName in namesDict[DATA_GROUP_KEY]:
if data[geneID][DATA_GROUP_KEY][attrName] <= 0:
raise ValueError('Cannot transform linear data to log2: value is <= 0 for gene %s' % str(geneID))
else:
data[geneID][DATA_GROUP_KEY][attrName] = math.log(data[geneID][DATA_GROUP_KEY][attrName], 2)
for geneID in data.keys():
control_array = [data[geneID][CONTROL_GROUP_KEY][attrName] for attrName in namesDict[CONTROL_GROUP_KEY]]
data_array = [data[geneID][DATA_GROUP_KEY][attrName] for attrName in namesDict[DATA_GROUP_KEY]]
logFCs[geneID] = mean(data_array) - mean(control_array)
#end
# print dataGroupNames
# print controlGroupNames
sortedLogFCs = [(elt[1], elt[0]) for elt in sorted([(logFCs[geneID], geneID) for geneID in logFCs.keys()], reverse=True)] #data.keys()], reverse=True)]
return {'table': table, 'fold_change': sortedLogFCs}
#end
# Build an Orange ExampleTable from SEGS rules, one column per term.
def __make_rule_term_example_table(tableDict, allTerms):
    """Build an Orange ExampleTable from SEGS rules where each attribute
    is a term marked PRESENT/ABSENT; the rule name, its term string and
    its sequence number are attached as meta attributes.
    """
    import orange
    import constants as const
    term_attrs = [orange.EnumVariable(name=str(t),
                                      values=[const.PRESENT, const.ABSENT])
                  for t in allTerms]
    # Three meta attributes: rule name, raw term string, sequence number.
    name_var = orange.StringVariable(const.NAME_ATTR)
    name_id = orange.newmetaid()
    terms_var = orange.StringVariable(const.TERMS_ATTR)
    terms_id = orange.newmetaid()
    seq_var = orange.FloatVariable(const.SEQ_NUM_ATTR, startValue=1,
                                   endValue=len(tableDict), stepValue=1,
                                   numberOfDecimals=0)
    seq_id = orange.newmetaid()
    # Classless domain: the table only carries the term indicators.
    domain = orange.Domain(term_attrs, False)
    domain.addmeta(name_id, name_var, False)
    domain.addmeta(terms_id, terms_var, False)
    domain.addmeta(seq_id, seq_var, False)
    table = orange.ExampleTable(domain)
    for key in sorted(tableDict.keys()):
        rule = tableDict[key]
        row = [orange.Value(attr,
                            const.PRESENT if term in rule[const.RULETERMS_KEY]
                            else const.ABSENT)
               for attr, term in zip(term_attrs, allTerms)]
        example = orange.Example(domain, row)
        # [1:-1] strips the surrounding square brackets from the strings.
        example[const.NAME_ATTR] = orange.Value(name_var,
                                                rule[const.RULENAME_KEY][1:-1])
        example[const.TERMS_ATTR] = orange.Value(terms_var,
                                                 rule[const.RULETERMS_STR_KEY][1:-1])
        example[const.SEQ_NUM_ATTR] = orange.Value(seq_var, key)
        table.append(example)
    return table
# this function creates a table from SEGS rules where columns are genes
def __make_rule_gene_example_table(tableDict, genes):
    """Build a classless Orange ExampleTable with one row per rule and one
    PRESENT/ABSENT attribute per gene in genes.

    A gene is PRESENT for a rule when it appears among the rule's top
    (differentially expressed) covered genes. The rule name, its term list
    and its sequential number are attached as meta attributes.
    """
    import orange
    import constants as const
    attrList = [orange.EnumVariable(name=str(gene), values=[const.PRESENT, const.ABSENT]) for gene in genes]
    # three meta attributes
    ruleName = orange.StringVariable(const.NAME_ATTR)
    mid = orange.newmetaid()
    ruleTerms = orange.StringVariable(const.TERMS_ATTR)
    mid1 = orange.newmetaid()
    ruleNumber = orange.FloatVariable(const.SEQ_NUM_ATTR, startValue=1, endValue=len(tableDict), stepValue=1, numberOfDecimals=0)
    mid2 = orange.newmetaid()
    # this is a classless domain
    domain = orange.Domain(attrList, False)
    # name of the rule is a meta attribute
    domain.addmeta(mid, ruleName, False)
    domain.addmeta(mid1, ruleTerms, False)
    domain.addmeta(mid2, ruleNumber, False)
    table = orange.ExampleTable(domain)
    for k in sorted(tableDict.keys()):
        exampleValues = []
        for (i, gene) in enumerate(genes):
            if gene in tableDict[k][const.TOP_GENES_KEY]:
                exampleValues.append(orange.Value(attrList[i], const.PRESENT))
            else:
                exampleValues.append(orange.Value(attrList[i], const.ABSENT))
        example = orange.Example(domain, exampleValues)
        # Fix: removed three plain-string meta assignments that used to precede
        # these lines -- each was a dead store immediately overwritten by the
        # orange.Value assignment below (the sibling function
        # __make_rule_term_example_table had already dropped them).
        example[const.NAME_ATTR] = orange.Value(ruleName, tableDict[k][const.RULENAME_KEY][1:-1])  # skip square brackets from the string
        example[const.TERMS_ATTR] = orange.Value(ruleTerms, tableDict[k][const.RULETERMS_STR_KEY][1:-1])
        example[const.SEQ_NUM_ATTR] = orange.Value(ruleNumber, k)
        table.append(example)
    return table
def segmine_rules_as_table(input_dict):
    """Convert a list of SEGS rules into two Orange example tables:
    one with ontology terms as columns and one with top (DE) genes as columns.
    Returns {'gene_table': ..., 'term_table': ...}.
    """
    import constants as const
    rules = input_dict['rules']
    tableDict = {}
    allGenes = set()
    allGenesDE = set()
    allTerms = set()
    for idx, rule in enumerate(rules):
        description = rule[const.DESCRIPTION_KEY]
        # beware, there can be also rules with only interacting terms
        if const.RULETERMS_STR_KEY in description:
            termEntries = description[const.RULETERMS_STR_KEY]
            termIDs = [entry[const.TERMID_KEY] for entry in termEntries]
            termNames = [entry[const.TERMNAME_KEY] for entry in termEntries]
        else:
            termIDs = []
            termNames = []
        interactionIDs = []
        interactionNames = []
        if const.INTTERMS_KEY in description:
            intEntries = description[const.INTTERMS_KEY]
            interactionIDs = [entry[const.TERMID_KEY] for entry in intEntries]
            interactionNames = [entry[const.TERMNAME_KEY] for entry in intEntries]
        combinedIDs = termIDs + interactionIDs
        combinedNames = termNames + interactionNames
        coveredGenes = rule[const.COVGENES_KEY]
        coveredGenesDE = rule[const.COVTOPGENES_KEY]
        tableDict[idx] = {
            const.GENES_KEY: dict.fromkeys(coveredGenes),
            const.TOP_GENES_KEY: dict.fromkeys(coveredGenesDE),
            const.RULENAME_KEY: str(combinedNames),
            const.RULETERMS_STR_KEY: str(combinedIDs),
            const.RULETERMS_KEY: combinedIDs,
        }
        allGenes.update(coveredGenes)
        allGenesDE.update(coveredGenesDE)
        allTerms.update(combinedIDs)
    geneTable = __make_rule_gene_example_table(tableDict, sorted(allGenesDE))
    termTable = __make_rule_term_example_table(tableDict, sorted(allTerms))
    return {'gene_table': geneTable, 'term_table': termTable}
def filter_unknown_genes_stu(input_dict):
    """Keep only gene ranks whose (lower-cased) gene name is a known STU gene.

    Reads the known-gene set from data/genes_stu.pickle next to this module
    and logs a warning with the number of discarded genes.
    Returns {'filtered_ranks': [(gene, rank), ...]}.
    """
    import cPickle
    from os.path import normpath, join, dirname
    ranks = input_dict['gene_ranks']
    # Fix: close the pickle file handle (it was previously leaked by a bare open()).
    with open(normpath(join(dirname(__file__), 'data/genes_stu.pickle')), 'rb') as fp:
        genes = cPickle.load(fp)
    result = []
    unknown = 0
    for gene, rank in ranks:
        gene = gene.lower()
        if gene in genes:
            result.append((gene, rank))
        else:
            unknown += 1
    if unknown:
        logging.warning('There were %d unknown STU genes.' % unknown)
    return {'filtered_ranks': result}
def filter_unknown_genes_ath(input_dict):
    """Keep only gene ranks whose (lower-cased) gene name is a known ATH gene.

    Reads the known-gene set from data/genes_ath.pickle next to this module
    and logs a warning with the number of discarded genes.
    Returns {'filtered_ranks': [(gene, rank), ...]}.
    """
    import cPickle
    from os.path import normpath, join, dirname
    ranks = input_dict['gene_ranks']
    # Fix: close the pickle file handle (it was previously leaked by a bare open()).
    with open(normpath(join(dirname(__file__), 'data/genes_ath.pickle')), 'rb') as fp:
        genes = cPickle.load(fp)
    result = []
    unknown = 0
    for gene, rank in ranks:
        gene = gene.lower()
        if gene in genes:
            result.append((gene, rank))
        else:
            unknown += 1
    if unknown:
        logging.warning('There were %d unknown ATH genes.' % unknown)
    return {'filtered_ranks': result}
def segmine_cutoff(input_dict):
    """Filter gene ranks and logFC values, keeping genes whose logFC
    (optionally absolute) lies within [lower, upper].

    input_dict keys:
      'upper', 'lower': bound strings; blank means "use the data extreme"
      'ranks':   iterable of (geneID, rank) pairs
      'logfcs':  iterable of (geneID, logFC) pairs
      'absolute': 'true'/'false' -- compare |logFC| instead of logFC

    Returns {'filtered_ranks': ..., 'filtered_logfcs': ...}, both as
    (geneID, value) pairs sorted by value in decreasing order.
    Raises Exception when both bounds are given and upper <= lower.
    """
    from numpy import array
    ub = float(input_dict['upper']) if input_dict['upper'].strip() else None
    lb = float(input_dict['lower']) if input_dict['lower'].strip() else None
    ranks = input_dict['ranks']
    logfcs = input_dict['logfcs']
    takeAbs = input_dict['absolute'].lower() == 'true'
    # Fix: compare the bounds against None explicitly -- the original
    # truthiness test (`if ub and lb`) silently skipped validation whenever a
    # bound happened to be 0.0 (falsy).
    if ub is not None and lb is not None and ub <= lb:
        raise Exception('Invalid bounds')
    ranksDict = {}
    for pair in ranks:
        ranksDict[pair[0]] = pair[1]
    logfcsarr = array([float(elt[1]) for elt in logfcs])
    if takeAbs:
        logfcsarr = abs(logfcsarr)
    # A missing bound defaults to the corresponding data extreme (no cut).
    UB = max(logfcsarr) if ub is None else ub
    LB = min(logfcsarr) if lb is None else lb
    resultRanks = []
    resultLogFCs = []
    errors = []
    for (ID, value) in logfcs:
        tmp = abs(value) if takeAbs else value
        if LB <= tmp <= UB:
            if ID not in ranksDict:
                # logFC present but no rank for this gene: report, don't crash
                errors.append(ID)
            else:
                resultRanks.append((ranksDict[ID], ID))
                resultLogFCs.append((value, ID))
    # sort by value, descending, then swap back to (geneID, value) pairs
    resultRanks.sort(reverse=True)
    resultRanks = [(elt[1], elt[0]) for elt in resultRanks]
    resultLogFCs.sort(reverse=True)
    resultLogFCs = [(elt[1], elt[0]) for elt in resultLogFCs]
    if errors:
        logging.warning('%d genes ignored because ranks were not present' % len(errors))
    return {'filtered_ranks': resultRanks, 'filtered_logfcs': resultLogFCs}
def resolve_gene_names_STU(input_dict):
    """Map STU microarray probe names to representative gene names.

    Probes missing from the mapping are dropped (and counted in a warning).
    Duplicate (gene, rank) pairs produced by the mapping are removed and the
    result is re-sorted by rank, descending.
    Returns {'mapped_ranks': [(gene, rank), ...]}.
    """
    import cPickle
    from os.path import normpath, join, dirname
    ranks = input_dict['gene_ranks']
    # Fix: close the pickle file handle (it was previously leaked by a bare open()).
    with open(normpath(join(dirname(__file__), 'data/probe2rep_STU.pickle')), 'rb') as fp:
        mapping = cPickle.load(fp)
    result = []
    unknown = 0
    for (gene, rank) in ranks:
        if gene in mapping:
            result.append((mapping[gene], rank))
        else:
            unknown += 1
    if unknown:
        logging.warning('There were %d unknown STU probe names.' % unknown)
    # remove duplicates and sort again by rank, descending
    result = list(set(result))
    result = [(x[1], x[0]) for x in sorted([(x[1], x[0]) for x in result], reverse=True)]
    return {'mapped_ranks': result}
def segmine_do_hclustering(input_dict):
import Orange
table = input_dict['table']
linkage = int(input_dict['linkage'])
metric = int(input_dict['metric'])
linkages = {1: Orange.clustering.hierarchical.SINGLE,
2: Orange.clustering.hierarchical.AVERAGE,
3: Orange.clustering.hierarchical.COMPLETE,
4: Orange.clustering.hierarchical.WARD}
dmetrices = {1: Orange.distance.Euclidean,
2: Orange.distance.Hamming,
3: Orange.distance.Maximal,
4: Orange.distance.Manhattan,
5: Orange.distance.Relief,
6: Orange.distance.PearsonR,
7: Orange.distance.SpearmanR,
8: Orange.distance.Mahalanobis}
dmatrix = Orange.distance.distance_matrix(table, dmetrices[metric])
clustering = Orange.clustering.hierarchical.HierarchicalClustering()
clustering.linkage = linkages[linkage]
hcl = clustering(dmatrix)
hcl.mapping.objects = table
| |
import pip_setup
pip_setup.install("moviepy")
pip_setup.install("pygame")
import Menu
import pygame
from moviepy.editor import *
import random
from Settings import *
from Sprites import *
from Menu import *
import time
import numpy as np
class GAME :
    def __init__(self):
        #GAME ~initialisation~
        # One-time engine setup: window, clock, and the three soundtrack
        # objects used by the menu, the game itself and the game-over screen.
        self.file = "highscore.txt"  # persistent highscore storage
        pygame.init()
        # The display must be created before background.convert(): convert()
        # matches the surface format to the current display mode.
        self.screen = pygame.display.set_mode((WIDTH,HEIGHT))
        self.background = pygame.image.load("background.png").convert()
        pygame.display.set_caption(TITLE)
        self.clock = pygame.time.Clock()
        self.running = True
        # Soundtracks (WAV files are expected in the working directory).
        self.game_music = pygame.mixer.Sound('bip_bip_bap_1.wav')
        self.menu_music = pygame.mixer.Sound('bip_bip_bop_V4.wav')
        self.gameover_music = pygame.mixer.Sound('bip_bip_bup.wav')
        # All tracks play at half volume.
        self.menu_music.set_volume(0.5)
        self.game_music.set_volume(0.5)
        self.gameover_music.set_volume(0.5)
    def new_game(self):
        #GAME ~new_game~
        # Reset the whole game state (timers, sprite groups, entities) for a
        # fresh run, then enter the main loop via self.run().
        self.wave = 1
        self.shoot = False
        # Timers/animation counters (in ms ticks or fractional frame indices).
        self.lastHitTimer = 0
        self.lastHitTimer_e = 0
        self.lastHitennemyTimer = 0
        self.lastShootTimer = 0
        self.last_boss_attack = 0
        self.anim_jar = 0
        self.last_jar_attack = 0
        self.anim_player_attack = 0
        self.anim_ennemi_attack = 0
        self.boss_attack = False
        # Sprite groups: one per entity kind plus a catch-all for drawing.
        self.all_sprites = pygame.sprite.Group()
        self.platforms = pygame.sprite.Group()
        self.ennemis = pygame.sprite.Group()
        self.weapons = pygame.sprite.Group()
        self.skulls = pygame.sprite.Group()
        self.jars = pygame.sprite.Group()
        self.fires = pygame.sprite.Group()
        self.player = PLAYER(self)
        self.ennemy_list = []
        self.all_sprites.add(self.player)
        # Baseline difficulty parameters (tuned further by dynamic_difficulty).
        self.ennemy_speed = 2
        self.ennemy_range = 400
        self.ennemy_attack = 30
        self.life_multiplyer = 0.4
        # Three jars spaced 50px apart, starting near the first platform.
        self.x_jar = round(PLATFORMS_LIST[0][0]) + 30
        for nbr in range(3):
            self.jar = JAR(self.platforms,(self.x_jar+(50*nbr)))
            self.jars.add(self.jar)
            self.all_sprites.add(self.jar)
        # Three starting enemies with slightly randomized speed.
        for nbr in range(3):
            self.dynamic_difficulty()
            ennemi = ENNEMI(random.randrange(self.ennemy_speed-1,self.ennemy_speed+1), self.ennemy_range, self.ennemy_attack, self.platforms, 0.5 , 1)
            self.ennemis.add(ennemi)
            self.all_sprites.add(ennemi)
            self.ennemy_list.append(ennemi)
        # Static level geometry.
        for plat in PLATFORMS_LIST:
            p = PLATFORM(*plat)
            self.all_sprites.add(p)
            self.platforms.add(p)
        # Projectile sprites: player weapon (1), boss skull (2), jar fire (3).
        self.weapon = WEAPON(1)
        self.weapons.add(self.weapon)
        self.all_sprites.add(self.weapon)
        self.skull = WEAPON(2)
        self.skulls.add(self.skull)
        self.all_sprites.add(self.skull)
        self.fire = WEAPON(3)
        self.fires.add(self.fire)
        self.all_sprites.add(self.fire)
        self.player.right = False
        self.attacked = False
        # Blocks here until the current game ends.
        self.run()
def run(self):
#GAME ~loop~
self.play = True
while self.play:
self.clock.tick(FPS)
self.events()
self.update()
self.draw()
def update(self):
# GAME ~update~
self.all_sprites.update()
#jar animation
for self.jar in self.jars:
if self.anim_jar < 8:
self.jar.image = self.jar.sprites[int(self.anim_jar)]
self.anim_jar = self.anim_jar + 0.05
else :
self.anim_jar = 0
#management of the artificial intelligence of the ennemies
for ennemi in self.ennemis:
ennemi.AI(self.player.pos, self.platforms)
#gestion of the collision of the platform if falling
plat_hits = pygame.sprite.spritecollide(self.player , self.platforms , False)
if self.player.vel.y > 0:
if plat_hits :
#if the player is under the platform
self.player.pos.y = plat_hits[0].rect.top
self.player.vel.y = 0
#gestion of the collision of the player and ennimies
self.player_hits = pygame.sprite.spritecollide(self.player , self.ennemis , False)
self.player_toutch = pygame.sprite.spritecollide(self.player , self.jars , False)
self.player_toutch_fire = pygame.sprite.spritecollide(self.player , self.fires , False)
if (pygame.time.get_ticks() < self.lastHitTimer + 1000):
if self.player.right :
self.player.image = self.player.sprites_attaqued[1]
elif not self.player.right:
self.player.image = self.player.sprites_attaqued[0]
else:
if self.player_hits or self.player_toutch or self.player_toutch_fire :
if self.player.right :
self.player.image = self.player.sprites_attaqued[1]
elif not self.player.right:
self.player.image = self.player.sprites_attaqued[0]
self.lastHitTimer = pygame.time.get_ticks()
self.player.life -= 1
self.player.pos.x -= 3
for ennemi in self.ennemis:
ennemi_hit_together = pygame.sprite.collide_rect(self.player, ennemi)
if ennemi.right:
self.ennemi_looking_at = 1
else:
self.ennemi_looking_at = -1
if ennemi_hit_together and ennemi.type_en == 1:
if ennemi.attack_anim < 10 and self.ennemi_looking_at == 1:
ennemi.image = ennemi.sprites_attack_right[int(ennemi.attack_anim)]
ennemi.attack_anim = ennemi.attack_anim + 0.3
elif ennemi.attack_anim < 10 and self.ennemi_looking_at == -1:
ennemi.image = ennemi.sprites_attack_left[int(ennemi.attack_anim)]
ennemi.attack_anim = ennemi.attack_anim + 0.3
else :
ennemi.attack_anim = 0
#
#if ennemi_hit_together == 1 and (pygame.time.get_ticks() > self.lastHitennemyTimer + 100) and ennemi.isFalling == False:
#ennemi.move(random.randint(-10, 10))
#self.lastHitennemyTimer = pygame.time.get_ticks()
ennemi_hits = pygame.sprite.spritecollide(ennemi , self.weapons , False)
ennemi.life_multiplyer = self.life_multiplyer
if ennemi.type_en == 1 and not self.player_hits:
if (pygame.time.get_ticks() < self.lastHitTimer_e + 500) and ennemi_hits :
if ennemi.right :
ennemi.image = ennemi.sprites_walk[2]
elif not ennemi.right:
ennemi.image = ennemi.sprites_walk[3]
else:
if ennemi.right :
ennemi.image = ennemi.sprites_walk[0]
elif not ennemi.right:
ennemi.image = ennemi.sprites_walk[1]
if ennemi_hits and self.player.pos.x > ennemi.rect.x and self.shoot :
if ennemi.right :
ennemi.image = ennemi.sprites_walk[2]
elif not ennemi.right:
ennemi.image = ennemi.sprites_walk[3]
self.lastHitTimer_e = pygame.time.get_ticks()
ennemi.life -= 1
if ennemi.isFalling == False:
ennemi.rect.x -= 10
elif ennemi_hits and self.player.pos.x < ennemi.rect.x and self.shoot:
if ennemi.right :
ennemi.image = ennemi.sprites_walk[2]
elif not ennemi.right:
ennemi.image = ennemi.sprites_walk[3]
self.lastHitTimer_e = pygame.time.get_ticks()
ennemi.life -= 1
if ennemi.isFalling == False:
ennemi.rect.x += 10
for ennemi in self.ennemis:
if (pygame.time.get_ticks() > self.lastHitTimer_e + 500) :
if ennemi_hits and self.shoot and ennemi.type_en == 2:
if ennemi.right :
ennemi.life -= 1
ennemi.image = ennemi.sprites_walk_right[5]
self.lastHitTimer_e = pygame.time.get_ticks()
elif not ennemi.right:
ennemi.image = ennemi.sprites_walk_left[5]
ennemi.life -= 1
self.lastHitTimer_e = pygame.time.get_ticks()
if ennemi.life < 0 :
ennemi.kill()
self.player.score += 1
if len(self.ennemis) == 0:
self.wave += 1
self.new_wave()
if self.shoot and self.looking_at == -1 :
if self.anim_player_attack < 9 :
self.player.image = self.player.sprites_attack[int(self.anim_player_attack)]
self.anim_player_attack = self.anim_player_attack + 0.3
if self.shoot and self.looking_at == 1 :
if self.anim_player_attack < 9 :
self.player.image = self.player.sprites_attack_r[int(self.anim_player_attack)]
self.anim_player_attack = self.anim_player_attack + 0.3
#gestion of shoot or attack
if self.shoot :
if self.looking_at == -1 :
if self.x < self.x_max :
y = (-9.81 /( 2 * (self.v0 * self.v0) * (math.cos(self.alpha) * math.cos(self.alpha)) ) * (self.x*self.x)) + math.tan(self.alpha) * self.x + self.h
self.weapon.rect.midbottom = (self.coord_x , self.h + (self.h - y))
self.x = self.x + 10
self.coord_x = self.coord_x - 20
else:
self.shoot = False
self.x = ( (self.v0*self.v0) * ( math.sin (2*self.alpha)) ) / (2*9.81)
self.coord_x = 0
elif self.looking_at == 1 :
if self.x < self.x_max :
y = (-9.81 /( 2 * (self.v0 * self.v0) * (math.cos(self.alpha) * math.cos(self.alpha)) ) * (self.x*self.x)) + math.tan(self.alpha) * self.x + self.h
self.weapon.rect.midbottom = (self.coord_x , self.h + (self.h - y) )
self.x = self.x + 10
self.coord_x = self.coord_x + 20
else :
self.shoot = False
self.x = ( (self.v0*self.v0) * ( math.sin (2*self.alpha)) ) / (2*9.81)
self.coord_x = 0
else :
#player have weapon
self.weapon.rect.midbottom = (20000,20000)
for ennemi in self.ennemis:
if ennemi.type_en == 2 and pygame.time.get_ticks() > self.last_boss_attack + 5000:
self.last_boss_attack = pygame.time.get_ticks()
self.boss_h = float(ennemi.rect.y) + 30
self.boss_angle = 50.0 # en degres
self.boss_alpha = float(self.boss_angle * 3.14 / 180.0) # conversion en radian
self.boss_v0 = 40.0
self.boss_x_max = self.skull.shoot(self.boss_v0, self.boss_alpha, self.boss_h)
self.boss_coord_x = ennemi.rect.x + 65
self.boss_x = ((self.boss_v0 * 0) * (math.sin(2 * self.boss_alpha))) / (9.81)
self.boss_attack = True
if ennemi.right:
self.boss_looking_at = 1
else:
self.boss_looking_at = -1
if self.boss_attack:
if self.boss_looking_at == -1:
if self.boss_x < self.boss_x_max:
self.boss_y = (-9.81 / (2 * (self.boss_v0 * self.boss_v0) * (math.cos(self.boss_alpha) * math.cos(self.boss_alpha))) * (
self.boss_x * self.boss_x)) + math.tan(
self.boss_alpha) * self.boss_x + self.boss_h
self.skull.rect.midbottom = (self.boss_coord_x, self.boss_h + (self.boss_h - self.boss_y))
self.boss_x = self.boss_x + 10
self.boss_coord_x = self.boss_coord_x - 10
else:
self.boss_attack = False
self.anim_ennemi_attack = 0
self.boss_x = ((self.boss_v0 * self.boss_v0) * (math.sin(2 * self.boss_alpha))) / (2 * 9.81)
self.boss_coord_x = 0
elif self.boss_looking_at == 1:
if self.boss_x < self.boss_x_max:
self.boss_y = (-9.81 / (2 * (self.boss_v0 * self.boss_v0) * (
math.cos(self.boss_alpha) * math.cos(self.boss_alpha))) * (
self.boss_x * self.boss_x)) + math.tan(
self.boss_alpha) * self.boss_x + self.boss_h
self.skull.rect.midbottom = (self.boss_coord_x, self.boss_h + (self.boss_h - self.boss_y))
self.boss_x = self.boss_x + 10
self.boss_coord_x = self.boss_coord_x + 10
else:
self.boss_x = ((self.boss_v0 * self.boss_v0) * (math.sin(2 * self.boss_alpha))) / (2 * 9.81)
self.boss_coord_x = 0
self.boss_attack = False
self.anim_ennemi_attack = 0
else:
# boss have skull
self.skull.rect.midbottom = (20000, 20000)
for ennemi in self.ennemis:
if ennemi.right:
self.boss_looking_at = 1
else:
self.boss_looking_at = -1
if self.boss_attack and self.boss_looking_at == -1 and ennemi.type_en == 2:
if self.anim_ennemi_attack < 7:
ennemi.image = ennemi.sprites_attack_right[int(self.anim_ennemi_attack)]
self.anim_ennemi_attack = self.anim_ennemi_attack + 0.3
if self.boss_attack and self.boss_looking_at == 1 and ennemi.type_en == 2:
if self.anim_ennemi_attack < 7:
ennemi.image = ennemi.sprites_attack_left[int(self.anim_ennemi_attack)]
self.anim_ennemi_attack = self.anim_ennemi_attack + 0.3
ennemi_hit_together = pygame.sprite.collide_rect(self.player, ennemi)
if ennemi_hit_together and ennemi.type_en == 2:
if ennemi.attack_anim_boss < 10 and self.boss_looking_at == -1:
ennemi.image = ennemi.sprites_boss_attack_right[int(ennemi.attack_anim_boss)]
ennemi.attack_anim_boss = ennemi.attack_anim_boss + 0.3
elif ennemi.attack_anim_boss < 10 and self.boss_looking_at == 1:
ennemi.image = ennemi.sprites_boss_attack_left[int(ennemi.attack_anim_boss)]
ennemi.attack_anim_boss = ennemi.attack_anim_boss + 0.3
else:
ennemi.attack_anim_boss = 0
for jar in self.jars:
if pygame.time.get_ticks() > self.last_jar_attack + 3000 :
self.last_jar_attack = pygame.time.get_ticks()
self.jar_h = float(jar.rect.y) + 50
self.jar_angle = random.randint(7.0,11.0) # en degres
self.jar_alpha = float(self.jar_angle * 3.14 / 180.0) #conversion en radian
self.jar_v0 = random.randint(200,280)
self.jar_x_max = self.skull.shoot(self.jar_v0, self.jar_alpha , self.jar_h)
self.jar_coord_x = jar.rect.x + 65
self.jar_x = ( (self.jar_v0*0) * ( math.sin (2*self.jar_alpha)) ) / (9.81)
self.jar_attack = True
if self.jar_attack:
for self.jar in self.jars:
if self.jar_x < self.jar_x_max :
self.jar_y = (-9.81 /( 2 * (self.jar_v0 * self.jar_v0) * (math.cos(self.jar_alpha) * math.cos(self.jar_alpha)) ) * (self.jar_x*self.jar_x)) + math.tan(self.jar_alpha) * self.jar_x + self.jar_h
self.fire.rect.midbottom = (self.jar_coord_x , self.jar_h + (self.jar_h - self.jar_y) )
self.jar_x = self.jar_x + 10
self.jar_coord_x | |
# <gh_stars>0  (repository-scrape artifact, kept as a comment so the module parses)
#!/usr/bin/env python3
import sys
import os
import random
import numpy as np
import cv2
from ACT_utils import iou2d
from Dataset import GetDataset
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'python'))
import caffe
# Photometric distortion parameters (probabilities and ranges) used by
# apply_distort(); values mirror the Caffe SSD data-augmentation defaults.
distort_params = {
    'brightness_prob': 0.5,
    'brightness_delta': 32,
    'contrast_prob': 0.5,
    'contrast_lower': 0.5,
    'contrast_upper': 1.5,
    'hue_prob': 0.5,
    'hue_delta': 18,
    'saturation_prob': 0.5,
    'saturation_lower': 0.5,
    'saturation_upper': 1.5,
    'random_order_prob': 0.0,
}
# Canvas-expansion parameters used by apply_expand().
expand_params = {
    'expand_prob': 0.5,
    'max_expand_ratio': 4.0,
}
# SSD-style crop samplers used by sample_cuboids(): the first always accepts
# the full image; the others sample random boxes constrained by their
# jaccard overlap with the ground-truth tubes.
batch_samplers = [{
    'sampler': {},
    'max_trials': 1,
    'max_sample': 1,
}, {
    'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0,},
    'sample_constraint': {'min_jaccard_overlap': 0.1, },
    'max_trials': 50,
    'max_sample': 1,
}, {
    'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0,},
    'sample_constraint': {'min_jaccard_overlap': 0.3,},
    'max_trials': 50,
    'max_sample': 1,
},{
    'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0,},
    'sample_constraint': {'min_jaccard_overlap': 0.5,},
    'max_trials': 50,
    'max_sample': 1,
},{
    'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0,},
    'sample_constraint': {'min_jaccard_overlap': 0.7,},
    'max_trials': 50,
    'max_sample': 1,
},{
    'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0,},
    'sample_constraint': {'min_jaccard_overlap': 0.9,},
    'max_trials': 50,
    'max_sample': 1,
},{
    'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0,},
    'sample_constraint': {'max_jaccard_overlap': 1.0,},
    'max_trials': 50,
    'max_sample': 1,
},]
def random_brightness(imglist, brightness_prob, brightness_delta):
    """With probability brightness_prob, add a single uniform offset in
    [-brightness_delta, brightness_delta] to every image in imglist
    (mutated in place). Returns imglist."""
    if random.random() < brightness_prob:
        offset = random.uniform(-brightness_delta, brightness_delta)
        for img in imglist:
            img += offset  # in-place: callers holding refs see the change
    return imglist
def random_contrast(imglist, contrast_prob, contrast_lower, contrast_upper):
    """With probability contrast_prob, scale every image by a single factor
    drawn uniformly from [contrast_lower, contrast_upper] (in place).
    Returns imglist."""
    if random.random() >= contrast_prob:
        return imglist
    factor = random.uniform(contrast_lower, contrast_upper)
    for img in imglist:
        img *= factor  # in-place multiply
    return imglist
def random_saturation(imglist, saturation_prob, saturation_lower, saturation_upper):
    """With probability saturation_prob, scale the HSV saturation channel of
    every image by a single factor drawn uniformly from
    [saturation_lower, saturation_upper]. Returns imglist."""
    if random.random() >= saturation_prob:
        return imglist
    factor = random.uniform(saturation_lower, saturation_upper)
    for idx, img in enumerate(imglist):
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hsv[:, :, 1] *= factor
        imglist[idx] = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return imglist
def random_hue(imglist, hue_prob, hue_delta):
    """With probability hue_prob, shift the HSV hue channel of every image by
    a single offset drawn uniformly from [-hue_delta, hue_delta].
    Returns imglist."""
    if random.random() >= hue_prob:
        return imglist
    shift = random.uniform(-hue_delta, hue_delta)
    for idx, img in enumerate(imglist):
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hsv[:, :, 0] += shift
        imglist[idx] = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return imglist
def apply_distort(imglist, distort_param):
    """Apply the photometric distortion pipeline to imglist.

    One of two fixed operation orders is chosen with probability 0.5 each;
    brightness always runs first, contrast runs either second or last.
    Random re-ordering (random_order_prob != 0) is not implemented.
    """
    if distort_param['random_order_prob'] != 0:
        raise NotImplementedError

    def _brightness(imgs):
        return random_brightness(imgs, distort_param['brightness_prob'], distort_param['brightness_delta'])

    def _contrast(imgs):
        return random_contrast(imgs, distort_param['contrast_prob'], distort_param['contrast_lower'], distort_param['contrast_upper'])

    def _saturation(imgs):
        return random_saturation(imgs, distort_param['saturation_prob'], distort_param['saturation_lower'], distort_param['saturation_upper'])

    def _hue(imgs):
        return random_hue(imgs, distort_param['hue_prob'], distort_param['hue_delta'])

    if random.random() > 0.5:
        pipeline = (_brightness, _contrast, _saturation, _hue)
    else:
        pipeline = (_brightness, _saturation, _hue, _contrast)
    out_imglist = imglist
    for op in pipeline:
        out_imglist = op(out_imglist)
    return out_imglist
def apply_expand(imglist, tubes, expand_param, mean_values=None):
    # Tubes: dict of label -> list of tubes with tubes being <x1> <y1> <x2> <y2>
    # With probability expand_param['expand_prob'], paste all images onto a
    # larger canvas (ratio drawn from [1, max_expand_ratio]) filled with
    # mean_values, and translate the tube boxes accordingly.
    # NOTE(review): out_tubes aliases the input dict, and the += below mutates
    # the caller's tube arrays in place -- confirm callers expect this.
    out_imglist = imglist
    out_tubes = tubes
    if random.random() < expand_param['expand_prob']:
        expand_ratio = random.uniform(1, expand_param['max_expand_ratio'])
        oh,ow = imglist[0].shape[:2]
        h = int(oh * expand_ratio)
        w = int(ow * expand_ratio)
        out_imglist = [np.zeros((h, w, 3), dtype=np.float32) for i in range(len(imglist))]
        # NOTE(review): these offsets are deterministic (always the maximum,
        # h - oh and w - ow), so the original image always lands in the
        # bottom-right corner; SSD-style expansion usually samples the offset
        # from uniform(0, h - oh) -- confirm this is intended.
        h_off = int(np.floor(h - oh))
        w_off = int(np.floor(w - ow))
        if mean_values is not None:
            # pre-fill the canvas with the dataset mean so padding is neutral
            for i in range(len(imglist)):
                out_imglist[i] += np.array(mean_values).reshape(1, 1, 3)
        for i in range(len(imglist)):
            out_imglist[i][h_off:h_off+oh, w_off:w_off+ow, :] = imglist[i]
        # project boxes
        for ilabel in tubes:
            for itube in range(len(tubes[ilabel])):
                out_tubes[ilabel][itube] += np.array([[w_off, h_off, w_off, h_off]], dtype=np.float32)
    return out_imglist, out_tubes
def sample_cuboids(tubes, batch_samplers, imheight, imwidth):
    """Sample crop candidates (SSD-style batch sampling) against the gt tubes.

    For each batch sampler, draw up to max_trials random boxes (scale/aspect
    drawn from the sampler ranges) and keep up to max_sample of them that
    satisfy the sampler's jaccard-overlap constraint with the ground truth.
    Returns a list of [x1, y1, x2, y2] float32 arrays in pixel coordinates.
    """
    sampled_cuboids = []
    for batch_sampler in batch_samplers:
        max_trials = batch_sampler['max_trials']
        max_sample = batch_sampler['max_sample']
        itrial = 0
        isample = 0
        sampler = batch_sampler['sampler']
        # Missing range bounds default to 1 (i.e. the full image / square box).
        min_scale = sampler['min_scale'] if 'min_scale' in sampler else 1
        max_scale = sampler['max_scale'] if 'max_scale' in sampler else 1
        min_aspect = sampler['min_aspect_ratio'] if 'min_aspect_ratio' in sampler else 1
        max_aspect = sampler['max_aspect_ratio'] if 'max_aspect_ratio' in sampler else 1
        while itrial < max_trials and isample < max_sample:
            # sample a normalized box
            scale = random.uniform(min_scale, max_scale)
            aspect = random.uniform(min_aspect, max_aspect)
            width = scale * np.sqrt(aspect)
            height = scale / np.sqrt(aspect)
            x = random.uniform(0, 1 - width)
            y = random.uniform(0, 1 - height)
            # rescale the box to pixel coordinates
            sampled_cuboid = np.array([x*imwidth, y*imheight, (x+width)*imwidth, (y+height)*imheight], dtype=np.float32)
            # check constraint
            itrial += 1
            if not 'sample_constraint' in batch_sampler:
                sampled_cuboids.append(sampled_cuboid)
                isample += 1
                continue
            constraints = batch_sampler['sample_constraint']
            # mean per-tube IoU between the candidate box and each gt tube
            ious = np.array([np.mean(iou2d(t, sampled_cuboid)) for t in sum(tubes.values(), [])])
            if ious.size == 0:  # empty gt
                isample += 1
                continue
            # accept when some gt overlaps at least min_jaccard_overlap
            if 'min_jaccard_overlap' in constraints and ious.max() >= constraints['min_jaccard_overlap']:
                sampled_cuboids.append(sampled_cuboid)
                isample += 1
                continue
            # Fix: accept when some gt overlaps at most max_jaccard_overlap,
            # i.e. ious.min() <= bound. The original used >=, which with the
            # configured bound of 1.0 was (almost) never satisfiable and
            # disabled the last batch sampler entirely; <= matches the Caffe
            # SSD batch-sampler semantics these parameters come from.
            if 'max_jaccard_overlap' in constraints and ious.min() <= constraints['max_jaccard_overlap']:
                sampled_cuboids.append(sampled_cuboid)
                isample += 1
                continue
    return sampled_cuboids
def crop_image(imglist, tubes, batch_samplers):
    # Randomly crop all images to one cuboid sampled by sample_cuboids(),
    # translating and clipping the gt tubes to the crop. Tubes whose center
    # leaves the crop on any frame are dropped. If no candidate crop is
    # found, images and tubes are returned unchanged.
    candidate_cuboids = sample_cuboids(tubes, batch_samplers, imglist[0].shape[0], imglist[0].shape[1])
    if not candidate_cuboids:
        return imglist, tubes
    crop_cuboid = random.choice(candidate_cuboids)
    x1, y1, x2, y2 = map(int, crop_cuboid.tolist())
    # imglist is mutated in place (entries replaced by views of the crop)
    for i in range(len(imglist)):
        imglist[i] = imglist[i][y1:y2+1, x1:x2+1, :]
    out_tubes = {}
    wi = x2 - x1
    hi = y2 - y1
    for ilabel in tubes:
        for itube in range(len(tubes[ilabel])):
            t = tubes[ilabel][itube]
            # NOTE(review): this in-place -= translates the caller's tube
            # array even when the tube is then discarded as invalid below --
            # confirm callers always replace their tubes with the return value.
            t -= np.array([[x1, y1, x1, y1]], dtype=np.float32)
            # check if valid: box centers must stay inside the crop
            cx = 0.5 * (t[:, 0] + t[:, 2])
            cy = 0.5 * (t[:, 1] + t[:, 3])
            if np.any(cx < 0) or np.any(cy < 0) or np.any(cx > wi) or np.any(cy > hi):
                continue
            if not ilabel in out_tubes:
                out_tubes[ilabel] = []
            # clip box to the crop bounds
            t[:, 0] = np.maximum(0, t[:, 0])
            t[:, 1] = np.maximum(0, t[:, 1])
            t[:, 2] = np.minimum(wi, t[:, 2])
            t[:, 3] = np.minimum(hi, t[:, 3])
            out_tubes[ilabel].append(t)
    return imglist, out_tubes
# Assisting functions for deciding whether a tubelet is a good/bad sample.
def tubelet_in_tube(tube, i, K):
    """True if frames i and i + K - 1 both occur in the tube.

    Frame indices live in the first column of *tube*; because a tube spans
    contiguous frames, checking the two endpoints is sufficient.
    """
    frames = tube[:, 0]
    return bool(i in frames and (i + K - 1) in frames)
def tubelet_out_tube(tube, i, K):
    """True if none of the frames i .. i + K - 1 occur in the tube's
    frame column (first column)."""
    frames = tube[:, 0]
    return not any(j in frames for j in range(i, i + K))
def tubelet_in_out_tubes(tube_list, i, K):
    """True when every tube in tube_list either fully contains frames
    i .. i + K - 1 (tubelet_in_tube) or contains none of them
    (tubelet_out_tube) -- i.e. no tube partially overlaps the tubelet."""
    for tube in tube_list:
        if not (tubelet_in_tube(tube, i, K) or tubelet_out_tube(tube, i, K)):
            return False
    return True
def tubelet_has_gt(tube_list, i, K):
    """True when at least one tube in tube_list fully contains the frames
    i .. i + K - 1 (i.e. the tubelet has a ground-truth match)."""
    for tube in tube_list:
        if tubelet_in_tube(tube, i, K):
            return True
    return False
class MultiframesLayer(caffe.Layer):
def shuffle(self): # shuffle the list of possible starting frames
self._order = list(range(self._nseqs))
if self._shuffle:
# set seed like that to have exactly the same shuffle even if we restart from a caffemodel
random.seed(self._rand_seed + self._nshuffles)
random.shuffle(self._order)
self._nshuffles += 1
self._next = 0
    def setup(self, bottom, top):
        # Parse layer parameters, index all valid tubelet starting positions
        # in the training videos, and shape the top blobs (K image blobs plus
        # one label blob).
        # NOTE(review): param_str is passed through eval(); this executes
        # arbitrary code from the prototxt -- acceptable only because the
        # prototxt is trusted, but worth flagging.
        layer_params = eval(self.param_str)
        assert 'dataset_name' in layer_params
        dataset_name = layer_params['dataset_name']
        self._dataset = GetDataset(dataset_name)
        assert 'K' in layer_params
        # K = tubelet length (number of consecutive frames per sample)
        self._K = layer_params['K']
        assert self._K > 0
        # parse optional argument
        default_values = {
            'rand_seed': 0,
            'shuffle': True,
            'batch_size': 32 // self._K,
            'mean_values': [104, 117, 123],
            'resize_height': 300,
            'resize_width': 300,
            'restart_iter': 0,
            'flow': False,
            'ninput': 1,
        }
        # Each option becomes an attribute self._<name>, falling back to the
        # default when absent from the prototxt parameters.
        for k in default_values.keys():
            if k in layer_params:
                lay_param = layer_params[k]
            else:
                lay_param = default_values[k]
            setattr(self, '_' + k, lay_param)
        if not self._flow and self._ninput > 1:
            raise NotImplementedError("ACT-detector: Not implemented: ninput > 1 with rgb frames")
        d = self._dataset
        K = self._K
        # build index (v,i) of valid starting chunk
        # A start (v, i) is valid when every gt tube either fully contains or
        # fully misses frames i..i+K-1, and at least one tube contains them.
        self._indices = []
        for v in d.train_vlist():
            vtubes = sum(d.gttubes(v).values(), [])
            self._indices += [(v,i) for i in range(1, d.nframes(v)+2-K) if tubelet_in_out_tubes(vtubes,i,K) and tubelet_has_gt(vtubes,i,K)]
        # self._indices += [(v,i) for i in range(1, d.nframes(v)+2-K) if all([ (i in t[:,0] and i+K-1 in t[:,0]) or all([not j in t[:,0] for j in xrange(i,i+K)]) for t in vtubes]) and any([ (i in t[:,0] and i+K-1 in t[:,0]) for t in vtubes]) ]
        self._nseqs = len(self._indices)
        self._iter = 0
        self._nshuffles = 0
        self.shuffle()
        # When restarting from iteration N, fast-forward the shuffled cursor
        # so the data stream continues exactly where it left off.
        if self._restart_iter > 0:
            assert self._next == 0
            self._iter = self._restart_iter
            iimages = self._restart_iter * self._batch_size
            while iimages > self._nseqs:
                self.shuffle()
                iimages -= self._nseqs
            self._next = iimages
        # tops 0..K-1: image blobs; top K: label blob (reshaped per batch later)
        for i in range(K):
            top[i].reshape(self._batch_size, 3 * self._ninput, self._resize_height, self._resize_width)
        top[K].reshape(1, 1, 1, 8)
def prepare_blob(self):
d = self._dataset
K = self._K
# Have the same data augmentation, even if restarted
random.seed(self._rand_seed + self._iter)
data = [np.empty((self._batch_size, 3 * self._ninput, self._resize_height, self._resize_width), dtype=np.float32) for ii in range(K)]
alltubes = []
for i in range(self._batch_size):
if self._next == self._nseqs:
self.shuffle()
v,frame = self._indices[self._order[self._next]]
# flipping with probability 0.5
do_mirror = random.getrandbits(1) == 1
# load images and tubes and apply mirror
| |
__init__(self, i):
self.count = 0
self.iter = zip(itertools.count(1), i)
def __iter__(self):
return self
def __next__(self):
self.count, result = next(self.iter)
return result
def empty():
    """Return an iterator that yields nothing."""
    return iter(())
def is_empty(iterable):
    """
    Return whether the iterable is empty or not. Consumes at most one item
    from the iterator to test.
    >>> is_empty(iter(range(0)))
    True
    >>> is_empty(iter(range(1)))
    False
    """
    # A private sentinel distinguishes "exhausted" from any real value.
    sentinel = object()
    return next(iter(iterable), sentinel) is sentinel
class Reusable:
    """
    An iterator that may be reset and reused.
    >>> ri = Reusable(range(3))
    >>> tuple(ri)
    (0, 1, 2)
    >>> next(ri)
    0
    >>> tuple(ri)
    (1, 2)
    >>> next(ri)
    0
    >>> ri.reset()
    >>> tuple(ri)
    (0, 1, 2)
    """

    def __init__(self, iterable):
        self._source = iterable
        self.reset()

    def __iter__(self):
        return self

    def reset(self):
        """
        Resets the iterator to the start.
        Any remaining values in the current iteration are discarded.
        """
        # tee() keeps an untouched twin around so we can rewind later.
        self._cursor, self._source = itertools.tee(self._source)

    def __next__(self):
        try:
            return next(self._cursor)
        except StopIteration:
            # Rewind before propagating so the next use starts fresh.
            self.reset()
            raise
def every_other(iterable):
    """
    Yield every other item from the iterable
    >>> ' '.join(every_other('abcdefg'))
    'a c e g'
    """
    # Items at even offsets: 0, 2, 4, ... (lazy, like the original generator).
    return itertools.islice(iterable, 0, None, 2)
def remove_duplicates(iterable, key=None):
    """
    Given an iterable with items that may come in as sequential duplicates,
    remove those duplicates.
    Unlike unique_justseen, this function does not remove triplicates.
    >>> ' '.join(remove_duplicates('abcaabbccaaabbbcccbcbc'))
    'a b c a b c a a b b c c b c b c'
    >>> ' '.join(remove_duplicates('aaaabbbbb'))
    'a a b b b'
    """
    # Group consecutive equal items, then keep every other member of each
    # run (so a pair collapses to one, a triple to two, etc.).
    runs = (run for _, run in itertools.groupby(iterable, key))
    halved = (itertools.islice(run, 0, None, 2) for run in runs)
    return itertools.chain.from_iterable(halved)
def skip_first(iterable):
    """
    Skip the first element of an iterable
    >>> tuple(skip_first(range(10)))
    (1, 2, 3, 4, 5, 6, 7, 8, 9)
    """
    it = iter(iterable)
    # Discard the head (tolerating an empty source), then stream the rest.
    next(it, None)
    yield from it
def peek(iterable):
    """
    Get the next value from an iterable, but also return an iterable
    that will subsequently return that value and the rest of the
    original iterable.
    >>> l = iter([1,2,3])
    >>> val, l = peek(l)
    >>> val
    1
    >>> list(l)
    [1, 2, 3]
    """
    # tee() gives two independent cursors; consuming the probe does not
    # advance the stream handed back to the caller.
    probe, stream = itertools.tee(iterable)
    head = next(probe)
    return head, stream
class Peekable:
    """
    Wrapper for a traditional iterable to give it a peek attribute.
    >>> nums = Peekable(range(2))
    >>> nums.peek()
    0
    >>> next(nums)
    0
    >>> nums.peek()
    1
    Peekable should accept an iterable and not just an iterator.
    >>> list(Peekable(range(2)))
    [0, 1]
    """

    def __new__(cls, iterator):
        # Anything that already offers peek() is used unchanged.
        return iterator if hasattr(iterator, 'peek') else object.__new__(cls)

    def __init__(self, iterator):
        self.iterator = iter(iterator)

    def __iter__(self):
        return self

    def __next__(self):
        return next(self.iterator)

    def peek(self):
        """Return the next value without consuming it (StopIteration if exhausted)."""
        probe, rest = itertools.tee(self.iterator)
        upcoming = next(probe)
        # Only swap in the tee twin once we know a value exists.
        self.iterator = rest
        return upcoming
def takewhile_peek(predicate, iterable):
    """
    Like takewhile, but takes a peekable iterable and doesn't
    consume the non-matching item.

    Yields items while ``predicate`` holds; stops (without consuming)
    at the first non-matching item or when the source is exhausted.
    """
    while True:
        try:
            matched = predicate(iterable.peek())
        except StopIteration:
            # Source exhausted: nothing left to take.
            return
        if not matched:
            return
        yield next(iterable)
def first(iterable, *args):
    """
    Return the first item from the iterable.
    >>> first(range(11))
    0
    >>> first([3,2,1])
    3
    Raises StopIteration if no value is present; pass a default as the
    second positional argument to get it back instead.
    >>> first([], None)
    """
    # next() already implements the optional-default contract.
    return next(iter(iterable), *args)
def last(iterable):
    """
    Return the last item from the iterable, discarding the rest.
    >>> last(range(20))
    19
    >>> last([])
    Traceback (most recent call last):
    ...
    ValueError: Iterable contains no items

    Fixed: the original detected emptiness by catching the NameError from
    an unbound loop variable — fragile and unidiomatic. A sentinel makes
    the "no items" case explicit.
    """
    _missing = item = object()  # sentinel cannot equal any real item
    for item in iterable:
        pass
    if item is _missing:
        raise ValueError("Iterable contains no items")
    return item
def one(item):
    """
    Return the first element from the iterable, but raise an exception
    if elements remain in the iterable after the first.
    >>> one(['val'])
    'val'
    >>> one(['val', 'other'])
    Traceback (most recent call last):
    ...
    ValueError: ...values to unpack...
    >>> one([])
    Traceback (most recent call last):
    ...
    ValueError: ...values to unpack...
    """
    # Single-target unpacking enforces "exactly one element" and raises
    # ValueError ("...values to unpack...") otherwise.
    [sole] = item
    return sole
def nwise(iter, n):
    """
    Like pairwise, except returns n-tuples of adjacent items.
    s -> (s0,s1,...,sn), (s1,s2,...,s(n+1)), ...

    NOTE: the parameter name shadows the builtin ``iter``; kept for
    backward compatibility with keyword callers.
    """
    streams = [iter]
    for _ in range(n - 1):
        # Split the newest stream in two and advance the second copy by
        # one, so stream k lags stream k+1 by exactly one item.
        a, b = itertools.tee(streams.pop())
        next(b, None)
        streams.extend((a, b))
    return zip(*streams)
def window(iter, pre_size=1, post_size=1):
    """
    Given an iterable, return a new iterable which yields triples of
    (pre, item, post), where pre and post are the items preceeding and
    following the item (or None if no such item is appropriate). pre
    and post will always be pre_size and post_size in length.
    >>> example = window(range(10), pre_size=2)
    >>> pre, item, post = next(example)
    >>> pre
    (None, None)
    >>> post
    (1,)
    >>> next(example)
    ((None, 0), 1, (2,))
    >>> list(example)[-1]
    ((7, 8), 9, (None,))

    NOTE: the parameter name shadows the builtin ``iter``; kept for
    backward compatibility with keyword callers.
    """
    # Context before each item: pad the front with Nones, then group.
    source, pre_src = itertools.tee(iter)
    padded_pre = itertools.chain((None,) * pre_size, pre_src)
    pre_groups = nwise(padded_pre, pre_size)
    # Context after each item: pad the tail with Nones, group, and drop
    # the first group so it lines up one step ahead of the item.
    source, post_src = itertools.tee(source)
    padded_post = itertools.chain(post_src, (None,) * post_size)
    post_groups = nwise(padded_post, post_size)
    next(post_groups, None)
    return zip(pre_groups, source, post_groups)
class IterSaver:
    """Iterator wrapper that holds back the last *n* items.

    Each item is released only after *n* newer items have been read from
    the underlying iterator, so once the source is exhausted the final
    *n* items remain available in ``self.buffer``.
    """

    def __init__(self, n, iterable):
        self.n = n
        self.iterable = iterable
        self.buffer = collections.deque()

    def __next__(self):
        # Keep n+1 items queued before releasing the oldest one; the
        # StopIteration from an exhausted source propagates to the caller
        # while the buffer still holds the saved tail.
        while not len(self.buffer) > self.n:
            self.buffer.append(next(self.iterable))
        return self.buffer.popleft()
def partition_items(count, bin_size):
    """
    Given the total number of items, determine the number of items that
    can be added to each bin with a limit on the bin size.
    So if you want to partition 11 items into groups of 3, you'll want
    three of three and one of two.
    >>> partition_items(11, 3)
    [3, 3, 3, 2]
    But if you only have ten items, you'll have two groups of three and
    two of two.
    >>> partition_items(10, 3)
    [3, 3, 2, 2]
    """
    num_bins = int(math.ceil(count / float(bin_size)))
    # Distribute evenly: every bin gets the base share, and the first
    # `extra` bins take one more.
    base, extra = divmod(count, num_bins) if num_bins else (0, 0)
    return [base + (i < extra) for i in range(num_bins)]
def balanced_rows(n, iterable, fillvalue=None):
    """
    Like grouper, but balance the rows to minimize fill per row.
    balanced_rows(3, 'ABCDEFG', 'x') --> ABC DEx FGx"
    """
    # Count the items on a tee'd copy without losing the originals.
    pool, probe = itertools.tee(iterable)
    total = sum(1 for _ in probe)
    for size in partition_items(total, n):
        chunk = tuple(itertools.islice(pool, size))
        if size < n:
            # Balanced sizes differ by at most one, so a single fill
            # value tops the short rows up to length n.
            chunk += (fillvalue,)
        yield chunk
def reverse_lists(lists):
    """
    >>> reverse_lists([[1,2,3], [4,5,6]])
    [[3, 2, 1], [6, 5, 4]]
    """
    return [list(reversed(seq)) for seq in lists]
def always_iterable(item):
    """
    Given an object, always return an iterable. If the item is not
    already iterable, return a tuple containing only the item. If item is
    None, an empty iterable is returned.
    >>> always_iterable([1,2,3])
    <list_iterator...>
    >>> always_iterable('foo')
    <tuple_iterator...>
    >>> always_iterable(None)
    <tuple_iterator...>
    >>> always_iterable(range(10))
    <range_iterator...>
    >>> def _test_func(): yield "I'm iterable"
    >>> print(next(always_iterable(_test_func())))
    I'm iterable
    Although mappings are iterable, treat each like a singleton, as
    it's more like an object than a sequence.
    >>> next(always_iterable(dict(a=1)))
    {'a': 1}
    """
    # Strings, bytes and mappings are technically iterable but are treated
    # as single items; the actual wrapping is delegated to more_itertools.
    base_types = str, bytes, collections.abc.Mapping
    return more_itertools.always_iterable(item, base_type=base_types)
def suppress_exceptions(callables, *exceptions):
    """
    Call each callable in callables, suppressing any exceptions supplied. If
    no exception classes are supplied, all Exceptions will be suppressed.
    >>> import functools
    >>> c1 = functools.partial(int, 'a')
    >>> c2 = functools.partial(int, '10')
    >>> list(suppress_exceptions((c1, c2)))
    [10]
    >>> list(suppress_exceptions((c1, c2), KeyError))
    Traceback (most recent call last):
    ...
    ValueError: invalid literal for int() with base 10: 'a'
    """
    # Default to suppressing any Exception when none are specified.
    caught = exceptions or (Exception,)
    for factory in callables:
        try:
            yield factory()
        except caught:
            pass
def apply(func, iterable):
    """
    Like 'map', invoking func on each item in the iterable,
    except return the original item and not the return
    value from the function.
    Useful when the side-effect of the func is what's desired.
    >>> res = apply(print, range(1, 4))
    >>> list(res)
    1
    2
    3
    [1, 2, 3]
    """
    for element in iterable:
        # Invoke purely for the side effect; pass the item through.
        func(element)
        yield element
def list_or_single(iterable):
"""
Given an iterable, return the items as a list. If the iterable contains
exactly one | |
Associate ( self ) :
seg = self.CurrentSegmentation ()
if seg :
debug(" - seg: ", seg.name)
if self.chosen_map :
debug(" - map: " + self.chosen_map.name)
seg.set_volume_data ( self.chosen_map )
self.status ( "Map %s is now associated with %s" % (self.chosen_map.name, seg.name) )
else :
self.status ( "No map selected" )
def SmoothAndGroup ( self, smod, task = None ) :
    """Run iterative smooth-and-group on segmentation *smod*.

    Reads iteration count, smoothing step size and target region count
    from the dialog fields, validates the symmetry settings, then
    delegates to smod.smooth_and_group() and reports the new region
    count in the status line.
    """
    # Parameters from the dialog widgets.
    numit = self._num_steps.value
    sdev = self._step_size.value
    targNRegs = self._target_num_regions.value
    # Optional symmetry constraint; abort and show the error if invalid.
    csyms, sym_err = self.GetUseSymmetry ()
    if sym_err :
        self.status ( sym_err )
        return
    if targNRegs <= 0 :
        self.status ( "# of regions" )
        return
    smod.smooth_and_group(numit, sdev, targNRegs, csyms, task)
    self.ReportRegionCount(smod)
def GroupByCons ( self, smod, task = None ) :
    """Group regions of *smod* by connectivity for several steps.

    Reads step count and target region count from the dialog fields,
    validates symmetry, then delegates to smod.group_connected_n() and
    reports the resulting region count.
    """
    numit = self._num_steps_con.value
    targNRegs = self._target_num_regions_con.value
    # Optional symmetry constraint; abort and show the error if invalid.
    csyms, sym_err = self.GetUseSymmetry ()
    if sym_err :
        self.status ( sym_err )
        return
    if targNRegs <= 0 :
        self.status ( "Enter an integer > 0 for target # of regions" )
        return
    debug(" - grouping %d steps, target %d" % (numit, targNRegs))
    #smod.smooth_and_group(numit, sdev, targNRegs, csyms, task)
    smod.group_connected_n ( numit, targNRegs, None, csyms, task )
    self.ReportRegionCount(smod)
def GroupByConsOneStep ( self, task = None ) :
    """Perform a single grouping-by-connectivity step on the current
    segmentation, optionally restricted to the visible regions.

    Updates region surfaces for created/removed regions, refreshes the
    adjacency graph if one is shown, and reports the region count.
    """
    smod = self.CurrentSegmentation()
    if smod is None:
        return
    if smod.volume_data() is None:
        self.status ('Segmentation map not opened')
        return
    # Nothing to group with one (or zero) regions.
    if len(smod.regions) <= 1:
        self.status ('%s has %d regions' % (smod.name, len(smod.regions)))
        return
    csyms, sym_err = self.GetUseSymmetry ()
    if sym_err :
        self.status ( sym_err )
        return
    # None means "apply to all regions"; NOTE this local shadows the
    # module-level `regions` used by other methods.
    regions = None
    if self._group_by_con_only_visible.enabled :
        regions = smod.visible_regions()
        if len(regions) == 0 :
            self.status ("Grouping by connections: no visible regions found or they are from a different model" )
            return
        self.status ("Grouping by connections: applying only to %d regions visible" % len(regions) )
    # Single step (1 iteration, target 1) of connectivity grouping.
    newRegs, removedRegs = smod.group_connected_n ( 1, 1, regions, csyms )
    #self.RegsDispUpdate ( ) # Display region surfaces
    # Only touch the surfaces that actually changed.
    for r in newRegs : r.make_surface (None, None, smod.regions_scale)
    for r in removedRegs : r.remove_surface()
    self.ReportRegionCount(smod)
    if smod.adj_graph :
        graph.create_graph ( smod, smod.graph_links )
    self.status ( "Got %d regions after grouping by connections" % (len(smod.regions)) )
def SmoothAndGroupOneStep ( self ) :
    """Perform one smooth-and-group step on the current segmentation,
    increasing the smoothing deviation until at least one new group forms
    (unless symmetry is in use, in which case exactly one step is run).
    """
    smod = self.CurrentSegmentation()
    if smod is None:
        return
    if smod.volume_data() is None:
        self.status ('Segmentation map not opened')
        return
    # Nothing to group with one (or zero) regions.
    if len(smod.regions) <= 1:
        self.status ('%s has %d regions' % (smod.name, len(smod.regions)))
        return
    # Start from the model's current smoothing level plus one step.
    step = self._step_size.value
    sdev = step + smod.smoothing_level
    self.status ( "Smoothing and grouping, standard deviation %.3g voxels" % sdev)
    csyms, sym_err = self.GetUseSymmetry ()
    if sym_err :
        self.status ( sym_err )
        return
    while 1:
        new_regs = len(smod.smooth_and_group(1, sdev, 1, csyms))
        # if symmetry is being used we should stop after one step
        # since symmetry can block regions from joining indefinitely
        if csyms or new_regs > 0 : break
        self.status ('No new groups smoothing %.3g voxels' % sdev)
        sdev += step
    self.RegsDispUpdate ( ) # Display region surfaces
    self.ReportRegionCount(smod)
    if smod.adj_graph :
        graph.create_graph ( smod, smod.graph_links )
    self.status ( "Got %d regions after smoothing %.3g voxels." %
                  (len(smod.regions), sdev) )
def Overlapping ( self ) :
    """Select the regions of the current segmentation that overlap the
    current selection (selected atoms, or the contour of a single
    selected volume model) by more than 50% of their points.
    """
    dmap = self.SegmentationMap()
    if dmap == None :
        self.status ( "No map selected" )
        return
    smod = self.CurrentSegmentation()
    if smod == None :
        self.status ( "No segmentation selected" )
        return
    if len(smod.regions) == 0 :
        self.status ( "No regions found in %s" % smod.name )
        return
    from chimerax.atomic import selected_atoms
    selatoms = selected_atoms(self.session)
    spoints = None
    if len ( selatoms ) > 0 :
        # Use selected atom positions as the reference point set.
        spoints = selatoms.scene_coords
    else :
        # No atoms selected: fall back to the contour points of a single
        # selected Volume model.
        from chimerax.map import Volume
        mods = [v for v in self.session.selection.models() if isinstance(v, Volume)]
        if len(mods) == 1 :
            mod = mods[0]
            debug("Using for selection:", mod.name)
            from . import axes
            spoints, weights = axes.map_points ( mod, True )
            debug(" - map - got %d points in contour" % len (spoints))
            # Transform contour points into scene coordinates in place.
            mod.scene_position.transform_points( spoints, in_place = True )
        else :
            self.status ("0 or more than 1 volume model selected")
            return
    # Map the reference points to grid indexes of the segmentation map.
    simap = self.PointIndexesInMap ( spoints, dmap )
    self.status ( "Overlapping %d atoms with %d regions" % (
        len(selatoms), len(smod.regions) ) )
    #ovp = float ( self.overlappingPercentage.get() )
    ovp = 50.0  # required overlap percentage (hard-coded)
    ovRatio = ovp / 100.0
    debug(" - overlap ratio: %f" % ovRatio)
    oregs = []
    for ri, r in enumerate ( smod.regions ) :
        ipoints = r.points()
        noverlap = 0
        # Count region points that fall on an occupied index; the bare
        # except treats missing indexes as "no overlap".
        for i,j,k in ipoints :
            try : simap[i][j][k]
            except: continue
            noverlap += 1
        ov = float ( noverlap ) / float ( len(ipoints) )
        if ov > ovRatio : oregs.append ( r )
        #if noverlap > 0 : oregs.append ( r )
    regions.select_regions ( oregs )
    self.status ( "Selected %d regions" % ( len(oregs) ) )
def GroupUsingFits ( self ) :
    """Prepare grouping of regions by overlap with structures previously
    fitted into the segmentation map: build per-fit index maps and store
    them (with random surface colors) as dmap.chain_maps.
    """
    dmap = self.SegmentationMap()
    if dmap == None : debug("Map %s not open" % self.map_name); return
    smod = self.CurrentSegmentation()
    if smod == None : return
    if len(smod.regions) == 0 : debug("No regions in", smod.name); return
    # Lazily create the fitted-molecules list on the map object.
    try : dmap.fitted_mols
    except : dmap.fitted_mols = []
    if len(dmap.fitted_mols) == 0 : debug("No fits found for", dmap.name); return
    debug("Grouping %d regions by overlap to %d fitted structures" % (
        len(smod.regions), len(dmap.fitted_mols) ))
    dmap.chain_maps = []
    for mol in dmap.fitted_mols :
        # Cache each fit's index map; computed only once per molecule.
        try : mol.fmap.imap
        except : mol.fmap.imap = self.MapIndexesInMap ( dmap, mol.fmap )
        from random import random as rand
        # Random opaque color so fits are visually distinguishable.
        mol.fmap.surf_color = ( rand(), rand(), rand(), 1 )
        dmap.chain_maps.append ( mol.fmap )
    # self.SegAccuracy ( "_fits_acc", True )
def RegSurfsShowNone ( self ) :
    """Hide every region surface of the current segmentation."""
    seg = self.CurrentSegmentation()
    if seg == None : return
    for region in seg.regions :
        piece = region.surface_piece
        if piece:
            piece.display = False
def RegSurfsShowAll ( self ) :
    """Show all region surfaces by refreshing the region display."""
    seg = self.CurrentSegmentation()
    if seg == None : return
    self.RegsDispUpdate()
def RegSurfsShowOnlySelected ( self ) :
    """Show only the surfaces of the currently selected regions."""
    seg = self.CurrentSegmentation()
    if seg == None : return
    regions.show_only_regions(seg.selected_regions())
def RegSurfsHide ( self ) :
    """Hide the surfaces of the currently selected regions."""
    seg = self.CurrentSegmentation()
    if seg == None : return
    #if nothing is selected, nothing is hidden
    for region in seg.selected_regions() :
        region.hide_surface()
def RegSurfsShow ( self ) :
    """Show the surfaces of the currently selected regions."""
    seg = self.CurrentSegmentation()
    if seg == None : return
    #if nothing is selected, nothing is shown
    for region in seg.selected_regions() :
        region.show_surface()
def RegSurfsShowAdjacent ( self ) :
    """Show the surfaces of all regions touching the selected regions."""
    seg = self.CurrentSegmentation()
    if seg == None : return
    picked = seg.selected_regions()
    if len(picked) == 0 :
        return
    # Union of contacting regions over the whole selection.
    neighbors = set()
    for region in picked :
        neighbors.update(region.contacting_regions())
    self.status ( "Region has %d adjacent regions" % len(neighbors) )
    for region in neighbors :
        region.show_surface()
def RegSurfsShowNotGrouped ( self ) :
    """Display only the regions that are not part of any group."""
    debug("Showing not-grouped regions...")
    seg = self.CurrentSegmentation()
    if seg == None : return
    for region in seg.regions :
        piece = region.surface_piece
        if piece:
            # Visible iff the region has no child (group) regions.
            piece.display = len(region.cregs) == 0
def SelectGrouped ( self ) :
    """Select every region that belongs to a group."""
    debug("Selecting grouped regions...")
    seg = self.CurrentSegmentation()
    if seg == None : return
    grouped = [region for region in seg.regions if len(region.cregs) > 0]
    seg.select_regions(grouped, only = True)
def SelectNotGrouped ( self ) :
    """Select every region that is not part of any group.

    Fixed: the debug message previously said "Showing not-grouped
    regions..." (copy-pasted from RegSurfsShowNotGrouped) although this
    method selects rather than shows.
    """
    debug("Selecting not-grouped regions...")
    smod = self.CurrentSegmentation()
    if smod == None : return
    smod.select_regions([reg for reg in smod.regions if len(reg.cregs) == 0], only = True)
def RegSurfsShowGrouped ( self ) :
    """Show only the grouped regions, hiding all others."""
    debug("Showing grouped regions...")
    seg = self.CurrentSegmentation()
    if seg == None : return
    grouped = seg.grouped_regions()
    if not grouped :
        self.status ( "No grouped regions" )
        return
    self.status ( "Showing %d grouped regions" % len(grouped) )
    regions.show_only_regions(grouped)
def RegSurfsTransparent ( self ) :
    """Make the selected regions' surfaces transparent (all regions if
    nothing is selected), keeping each surface's RGB and using the
    module-level REG_OPACITY for the alpha channel.
    """
    smod = self.CurrentSegmentation()
    if smod == None : return
    sregs = smod.selected_regions()
    if len(sregs) == 0 : sregs = smod.all_regions()
    for r in sregs :
        if r.has_surface():
            # Keep current RGB; only the opacity changes.
            cr,cg,cb = r.surface_piece.color[:3] #r.color[:3]
            r.surface_piece.color = ( cr, cg, cb, int(255*REG_OPACITY) )
            r.surface_piece.display_style = r.surface_piece.Solid
def RegSurfsOpaque ( self ) :
    """Make the selected regions' surfaces fully opaque (all regions if
    nothing is selected), keeping each surface's RGB color.
    """
    smod = self.CurrentSegmentation()
    if smod == None : return
    sregs = smod.selected_regions()
    if len(sregs) == 0 : sregs = smod.all_regions()
    for r in sregs :
        if r.has_surface():
            # Keep current RGB; set alpha to fully opaque.
            cr,cg,cb = r.surface_piece.color[:3] #r.color[:3]
            r.surface_piece.color = ( cr, cg, cb, 255 )
            r.surface_piece.display_style = r.surface_piece.Solid
def RegSurfsMesh ( self ) :
smod = self.CurrentSegmentation()
if smod == None : return
sregs = smod.selected_regions()
if len(sregs) == 0 : sregs = | |
import json
import logging
from copy import deepcopy
from collections import Counter
from os import getenv
import numpy as np
import sentry_sdk
from nltk.tokenize import sent_tokenize
from common.greeting import greeting_spec
from common.link import skills_phrases_map
from common.constants import CAN_CONTINUE_PROMPT, CAN_CONTINUE_SCENARIO, MUST_CONTINUE, CAN_NOT_CONTINUE
from common.sensitive import is_sensitive_situation
from common.universal_templates import (
if_chat_about_particular_topic,
is_switch_topic,
is_any_question_sentence_in_utterance,
if_not_want_to_chat_about_particular_topic,
if_choose_topic,
)
from common.utils import (
get_intent_name,
get_intents,
get_topics,
get_entities,
get_common_tokens_in_lists_of_strings,
is_no,
)
from utils import (
calculate_single_convers_evaluator_score,
CONV_EVAL_STRENGTH,
CONFIDENCE_STRENGTH,
how_are_you_spec,
what_i_can_do_spec,
misheard_with_spec1,
psycho_help_spec,
misheard_with_spec2,
alexa_abilities_spec,
join_used_links_in_attributes,
get_updated_disliked_skills,
)
from common.response_selection import (
ACTIVE_SKILLS,
ALMOST_ACTIVE_SKILLS,
CAN_NOT_BE_DISLIKED_SKILLS,
NOT_ADD_PROMPT_SKILLS,
)
sentry_sdk.init(getenv("SENTRY_DSN"))
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.DEBUG)
logger = logging.getLogger(__name__)

force_intents_fname = "force_intents_intent_catcher.json"
# Use context managers so the config file handles are closed promptly;
# the original json.load(open(...)) leaked the descriptors.
with open(force_intents_fname) as fin:
    FORCE_INTENTS_IC = json.load(fin)
lets_chat_about_triggers_fname = "lets_chat_about_triggers.json"
with open(lets_chat_about_triggers_fname) as fin:
    LETS_CHAT_ABOUT_PARTICULAR_TOPICS = json.load(fin)
require_action_intents_fname = "require_action_intents.json"
with open(require_action_intents_fname) as fin:
    REQUIRE_ACTION_INTENTS = json.load(fin)

PROMPT_PROBA = 0.3
ACKNOWLEDGEMENT_PROBA = 0.5
LINK_TO_PHRASES = sum([list(list_el) for list_el in skills_phrases_map.values()], [])
# this is a list of skills which are not one-lines
GENERAL_TOPICS = ["Phatic", "Other"]
def categorize_candidate(
    cand_id,
    skill_name,
    categorized_hyps,
    categorized_prompts,
    _is_just_prompt,
    _is_active_skill,
    _can_continue,
    _same_topic_entity,
    _is_dialog_abandon,
    _is_required_da=False,
):
    """File candidate *cand_id* into the tag-based category dicts.

    The category key is "<act>_<topic>_<da>" where:
      <act>   -- "active" (MUST_CONTINUE or already-active skill),
                 "continued" (CAN_CONTINUE_SCENARIO / CAN_CONTINUE_PROMPT),
                 or "finished" (anything else, incl. one-liners);
      <topic> -- whether the hypothesis shares a topic/entity/noun phrase
                 with the user turn, combined with whether it is a dialog
                 breakdown ("db");
      <da>    -- "reqda" when the hypothesis carries a dialog act the user
                 explicitly required, otherwise "".

    The id is appended to categorized_hyps[key] and, for prompt-only
    hypotheses, also to categorized_prompts[key]. Both (mutated) dicts are
    returned. Keys are expected to be pre-initialized by the caller.
    """
    if _can_continue == MUST_CONTINUE or _is_active_skill:
        # Skills in CAN_CONTINUE_PROMPT state are deliberately NOT treated
        # as active — this gives other skills a chance to take over.
        actsuffix = "active"
    elif _can_continue in [CAN_CONTINUE_SCENARIO, CAN_CONTINUE_PROMPT]:
        actsuffix = "continued"
    else:
        actsuffix = "finished"

    # (shares topic/entity, is dialog breakdown) -> middle key segment.
    suffix_table = {
        (True, False): "same_topic_entity_no_db",
        (True, True): "same_topic_entity_db",
        (False, False): "othr_topic_entity_no_db",
        (False, True): "othr_topic_entity_db",
    }
    suffix = suffix_table[(bool(_same_topic_entity), bool(_is_dialog_abandon))]

    dasuffix = "reqda" if _is_required_da else ""
    key = f"{actsuffix}_{suffix}_{dasuffix}"
    categorized_hyps[key] += [cand_id]
    if _is_just_prompt:
        categorized_prompts[key] += [cand_id]
    return categorized_hyps, categorized_prompts
def choose_best_with_scores(curr_cands_ids, curr_single_scores, candidates, bot_utterances):
    """Return the candidate id with the highest score among *curr_cands_ids*.

    Question-asking hypotheses from generic/retrieval skills are penalized
    when the bot already asked a question in the previous turn(s), to avoid
    long runs of consecutive questions. *curr_single_scores* is indexed by
    candidate id over the full candidates list and is mutated in place.

    BUG FIX: the penalty loop previously indexed curr_single_scores with the
    enumerate() position instead of the candidate id, so whenever
    curr_cands_ids was a non-prefix subset the penalty landed on an
    unrelated candidate's score (the final argmax line indexes by cand_id,
    which shows the list is id-indexed).
    """
    generic_skills = [
        "dummy_skill",
        "convert_reddit",
        "alice",
        "eliza",
        "tdidf_retrieval",
        "program_y",
    ]
    for cand_id in curr_cands_ids:
        if candidates[cand_id]["skill_name"] in generic_skills:
            if "question" in candidates[cand_id].get("type", "") or "?" in candidates[cand_id]["text"]:
                # program_y gets a grace period of a few turns before penalties.
                penalty_start_utt = 1
                if candidates[cand_id]["skill_name"] == "program_y":
                    penalty_start_utt = 4
                n_questions = 0
                if len(bot_utterances) >= penalty_start_utt and "?" in bot_utterances[-1]:
                    curr_single_scores[cand_id] /= 1.5
                    n_questions += 1
                if len(bot_utterances) >= penalty_start_utt + 1 and "?" in bot_utterances[-2]:
                    curr_single_scores[cand_id] /= 1.1
                    n_questions += 1
                if n_questions == 2:
                    # two subsequent questions (1 / (1.5 * 1.1 * 1.2) = ~0.5)
                    curr_single_scores[cand_id] /= 1.2
    curr_scores = [curr_single_scores[i] for i in curr_cands_ids]
    best_id = np.argmax(curr_scores)
    return curr_cands_ids[best_id]
def get_main_info_annotations(annotated_utterance):
    """Return (intents, topics, named entities, noun phrases) extracted
    from an annotated utterance's annotations.
    """
    annotations = annotated_utterance.get("annotations", {})
    entity_names = [ent[0]["text"] for ent in annotations.get("ner", []) if ent]
    noun_phrases = [phrase for phrase in annotations.get("spacy_nounphrases", []) if phrase]
    intents = get_intents(annotated_utterance, which="all")
    topics = get_topics(annotated_utterance, which="all")
    return intents, topics, entity_names, noun_phrases
def pickup_best_id(categorized, candidates, curr_single_scores, bot_utterances):
    """Choose the best hypothesis id using category priorities.

    Priority order:
      1. hypotheses carrying a user-required dialog act ("reqda") before
         those without (the "" suffix also covers "user required nothing");
      2. the active skill first, then skills that can continue, then
         finished ones;
      3. same topic/entity without dialog breakdown, same with breakdown,
         other topic/entity without breakdown, other with breakdown.
    Falls back to candidate 0 when every category is empty.
    """
    # (act suffixes, topic/db suffixes) groups, highest priority first:
    # the active skill with any topic/db state, then continuing/finished
    # skills sharing a topic/entity, then the rest.
    search_plan = [
        (
            ["active"],
            [
                "same_topic_entity_no_db",
                "same_topic_entity_db",
                "othr_topic_entity_no_db",
                "othr_topic_entity_db",
            ],
        ),
        (["continued", "finished"], ["same_topic_entity_no_db", "same_topic_entity_db"]),
        (["continued", "finished"], ["othr_topic_entity_no_db", "othr_topic_entity_db"]),
    ]
    for dasuffix in ["reqda", ""]:
        for actsuffixes, suffixes in search_plan:
            for actsuffix in actsuffixes:
                for suffix in suffixes:
                    key = f"{actsuffix}_{suffix}_{dasuffix}"
                    if len(categorized[key]) > 0:
                        best_cand_id = choose_best_with_scores(
                            categorized[key], curr_single_scores, candidates, bot_utterances
                        )
                        logger.info(f"==========Found {key} hyp: {candidates[best_cand_id]}")
                        return best_cand_id
    return 0
def prompt_decision():
    """Randomly decide (with probability PROMPT_PROBA) whether to append a prompt."""
    return np.random.uniform() < PROMPT_PROBA
def acknowledgement_decision(all_user_intents):
    """Randomly decide whether to prepend an acknowledgement.

    Opinion utterances are acknowledged with probability
    ACKNOWLEDGEMENT_PROBA; any other utterance with a fifth of that.
    Exactly one random draw is consumed, as in the original.
    """
    threshold = ACKNOWLEDGEMENT_PROBA if "opinion" in all_user_intents else ACKNOWLEDGEMENT_PROBA / 5
    return np.random.uniform() < threshold
def compute_curr_single_scores(candidates, scores, confidences):
    """Return one scalar score per hypothesis.

    Prefers the precomputed `hypothesis_scorer` annotation when every
    candidate carries one; otherwise combines the conversation-evaluator
    score with the skill confidence (toxicity is only logged).
    """
    if all("hypothesis_scorer" in cand["annotations"] for cand in candidates):
        return [cand["annotations"]["hypothesis_scorer"] for cand in candidates]

    curr_single_scores = []
    for idx in range(len(scores)):
        cand_scores = scores[idx]
        confidence = confidences[idx]
        skill_name = candidates[idx]["skill_name"]
        score_conv_eval = calculate_single_convers_evaluator_score(cand_scores)
        score = CONV_EVAL_STRENGTH * score_conv_eval + CONFIDENCE_STRENGTH * confidence
        toxicity = max(candidates[idx].get("annotations", {}).get("toxic_classification", {"toxic": 0.0}).values())
        logger.info(
            f"Skill {skill_name} has final score: {score}. Confidence: {confidence}. "
            f"Toxicity: {toxicity}. Cand scores: {cand_scores}"
        )
        curr_single_scores.append(score)
    return curr_single_scores
def add_to_top1_category(cand_id, categorized, _is_require_action_intent):
    """Force *cand_id* into the top-priority category bucket
    (active skill, same topic/entity, no dialog breakdown), with or
    without the required-dialog-act marker. Returns the mutated dict.
    """
    dasuffix = "reqda" if _is_require_action_intent else ""
    categorized[f"active_same_topic_entity_no_db_{dasuffix}"].append(cand_id)
    return categorized
def does_not_require_prompt(candidates, best_cand_id):
    """Return True when no extra prompt should be appended to the chosen
    hypothesis (it already is/contains a prompt or question, is long, asks
    the user to act, comes from a no-prompt skill, or can continue).
    """
    best = candidates[best_cand_id]
    best_intents = get_intents(best, which="all")
    reasons = (
        "prompt" in best.get("response_parts", []),  # already carries a prompt
        "?" in best["text"],  # plain question mark check
        len(best["text"]) > 200,  # response is already long
        any(intent in best_intents for intent in REQUIRE_ACTION_INTENTS.keys()),
        best["skill_name"] in NOT_ADD_PROMPT_SKILLS,
        is_any_question_sentence_in_utterance(best),
        best.get("can_continue", CAN_NOT_CONTINUE) != CAN_NOT_CONTINUE,
    )
    return any(reasons)
def if_acknowledgement_in_previous_bot_utterance(dialog):
    """Return True if the previous bot reply contained one of the
    acknowledgement hypotheses generated for the user turn before it.
    """
    if not (dialog["bot_utterances"] and len(dialog["human_utterances"]) > 1):
        return False
    prev_bot_text = dialog["bot_utterances"][-1]["text"].lower()
    prev_human_uttr = dialog["human_utterances"][-2]
    # Only hypotheses that are purely acknowledgements count.
    return any(
        hyp["text"].lower() in prev_bot_text
        for hyp in prev_human_uttr["hypotheses"]
        if hyp.get("response_parts", []) == ["acknowledgment"]
    )
def tag_based_response_selection(dialog, candidates, scores, confidences, bot_utterances, all_prev_active_skills=None):
all_prev_active_skills = all_prev_active_skills if all_prev_active_skills is not None else []
all_prev_active_skills = Counter(all_prev_active_skills)
annotated_uttr = dialog["human_utterances"][-1]
all_user_intents, all_user_topics, all_user_named_entities, all_user_nounphrases = get_main_info_annotations(
annotated_uttr
)
_is_switch_topic_request = is_switch_topic(annotated_uttr)
_is_force_intent = any([_intent in all_user_intents for _intent in FORCE_INTENTS_IC.keys()])
# if user utterance contains any question (REGEXP & punctuation check!)
_is_require_action_intent = is_any_question_sentence_in_utterance(
{"text": annotated_uttr.get("annotations", {}).get("sentseg", {}).get("punct_sent", annotated_uttr["text"])}
)
# if user utterance contains any question AND requires some intent by socialbot
_is_require_action_intent = _is_require_action_intent and any(
[_intent in all_user_intents for _intent in REQUIRE_ACTION_INTENTS.keys()]
)
_force_intents_detected = [_intent for _intent in FORCE_INTENTS_IC.keys() if _intent in all_user_intents]
# list of user intents which require some action by socialbot
_require_action_intents_detected = [
_intent for _intent in REQUIRE_ACTION_INTENTS.keys() if _intent in all_user_intents
]
_force_intents_skills = sum([FORCE_INTENTS_IC.get(_intent, []) for _intent in _force_intents_detected], [])
# list of intents required by the socialbot
_required_actions = sum(
[REQUIRE_ACTION_INTENTS.get(_intent, []) for _intent in _require_action_intents_detected], []
)
_contains_entities = len(get_entities(annotated_uttr, only_named=False, with_labels=False)) > 0
_is_active_skill_can_not_continue = False
_prev_bot_uttr = dialog["bot_utterances"][-1] if len(dialog["bot_utterances"]) > 0 else {}
_prev_active_skill = dialog["bot_utterances"][-1]["active_skill"] if len(dialog["bot_utterances"]) > 0 else ""
_prev_prev_active_skill = dialog["bot_utterances"][-2]["active_skill"] if len(dialog["bot_utterances"]) > 1 else ""
_no_script_two_times_in_a_row = False
if _prev_active_skill and _prev_prev_active_skill:
if all(
[
skill not in ACTIVE_SKILLS + ALMOST_ACTIVE_SKILLS
for skill in [_prev_active_skill, _prev_prev_active_skill]
]
):
_no_script_two_times_in_a_row = True
disliked_skills = get_updated_disliked_skills(dialog, can_not_be_disliked_skills=CAN_NOT_BE_DISLIKED_SKILLS)
_is_dummy_linkto_available = any(
[
cand_uttr["skill_name"] == "dummy_skill" and cand_uttr.get("type", "") == "link_to_for_response_selector"
for cand_uttr in candidates
]
)
categorized_hyps = {}
categorized_prompts = {}
for dasuffix in ["reqda", ""]:
for actsuffix in ["active", "continued", "finished"]:
for suffix in [
"same_topic_entity_no_db",
"same_topic_entity_db",
"othr_topic_entity_no_db",
"othr_topic_entity_db",
]:
categorized_hyps[f"{actsuffix}_{suffix}_{dasuffix}"] = []
categorized_prompts[f"{actsuffix}_{suffix}_{dasuffix}"] = []
CASE = ""
acknowledgement_hypothesis = {}
for cand_id, cand_uttr in enumerate(candidates):
if confidences[cand_id] == 0.0 and cand_uttr["skill_name"] not in ACTIVE_SKILLS:
logger.info(f"Dropping cand_id: {cand_id} due to toxicity/badlists")
continue
all_cand_intents, all_cand_topics, all_cand_named_entities, all_cand_nounphrases = get_main_info_annotations(
cand_uttr
)
skill_name = cand_uttr["skill_name"]
_is_dialog_abandon = False # "abandon" in all_cand_intents
_is_just_prompt = (
cand_uttr["skill_name"] == "dummy_skill"
and any(
[
question_type in cand_uttr.get("type", "")
for question_type in ["normal_question", | |
# Repository: eliasall/BetterX-Cloud
## Tickets File
def insertTickets(filetype, json, cursor, conn, uid):
    """Insert a 'tickets' payload into the `tickets` table.

    Args:
        filetype: file-type tag; the function is a no-op unless it is 'tickets'.
        json: parsed JSON document holding the ticket fields.
        cursor, conn: open DB cursor/connection forwarded to dbinsert().
        uid: user id (unused here; kept so all insert* helpers share a signature).
    """
    if (filetype == 'tickets'):
        featureAttrs = {'tickets', 'timestamp', 'uid'}
        tblName = 'tickets'
        cntattr = 0
        keytypevals = {}
        values = []
        # Collect each attribute that is present in the JSON document.
        for tis in featureAttrs:
            keytypevals,values = appendJsonKey(json, tis, keytypevals, values, cntattr)
            cntattr = cntattr + 1
        # The DB column is named 'epoch', not 'timestamp'.
        renameArrayItem(keytypevals, 'timestamp', 'epoch')
        attrsInJson,typesInJson = toCommaStringDict(keytypevals)
        dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Setup File
def insertSetup(filetype, json, cursor, conn, uid):
    """Insert a 'setup' (user profile / device settings) payload into `setup`.

    Args:
        filetype: file-type tag; the function is a no-op unless it is 'setup'.
        json: parsed JSON document holding the setup fields.
        cursor, conn: open DB cursor/connection forwarded to dbinsert().
        uid: user id (unused; the payload carries its own 'uid' attribute).
    """
    if (filetype == 'setup'):
        featureAttrs = {'age', 'city', 'country', 'datatransmit_time', 'education', 'gender', 'phoneusefrequency', 'timezone', 'uid', 'webusefrequency', 'latitude', 'longitude', 'timestamp', 'datatransmit_charging', 'datatransmit_wifi'}
        tblName = 'setup'
        cntattr = 0
        keytypevals = {}
        values = []
        # Only attributes actually present in the JSON end up as columns.
        for tis in featureAttrs:
            keytypevals,values = appendJsonKey(json, tis, keytypevals, values, cntattr)
            cntattr = cntattr + 1
        attrsInJson,typesInJson = toCommaStringDict(keytypevals)
        dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Web File
def insertWeb(filetype, json, cursor, conn, uid):
    """Dispatch a 'web' HAR-style payload to the page and entry loaders."""
    if filetype != 'web':
        return
    # Page records first ([pages]/[pageNode]), then their entries
    # ([pages]/[entriesNode]) which reference the freshly inserted rows.
    web_page_node(json, uid, cursor, conn)
    web_entry_node(json, uid, cursor, conn)
def web_entry_response(json_entries_node, uid, cursor, conn, parentid):
    """Insert one HAR entry's response section into `web_entries_response`.

    `parentid` is the id of the parent `web_entries` row; `uid` is unused.
    Builds three column groups (top-level response fields, selected HTTP
    headers, content metadata) and inserts them as a single row.
    """
    tblName = 'web_entries_response'
    # Top-level response fields are read with direct indexing (KeyError if
    # absent), unlike the header/content groups below which go through
    # appendJsonKey and tolerate missing keys.
    # NOTE(review): confirm these fields are always present in the payload.
    featureAttrs = {'status', 'statusText', 'httpVersion', 'cookieNumber', 'redirectURL', 'headersSize', 'bodySize'}
    featureAttrs2 = {'Date', 'Server', 'X-Powered-By', 'Content-Encoding', 'Content-Length', 'Keep-Alive', 'Connection', 'Content-Type'}
    featureAttrs3 = {'size', 'compression', 'mimeType', 'encoding'}
    vals = {}
    values = []
    cntattr = 0
    for tis in featureAttrs:
        vals[cntattr] = tis
        values.append(json_entries_node['response'][tis])
        cntattr = cntattr + 1
    # Foreign key back to the parent web_entries row.
    vals[cntattr] = 'web_entries_id'
    values.append(parentid)
    cntattr = cntattr + 1
    attrsInJson,typesInJson = toCommaStringDict(vals)
    #print type(attrsInJson)
    #print attrsInJson
    # Optional HTTP response headers, renamed to header_* column names.
    vals2 = {}
    values2 = []
    cntattr2 = 0
    for tis2 in featureAttrs2:
        vals2,values2 = appendJsonKey(json_entries_node['response']['headers'], tis2, vals2, values2, cntattr2)
        cntattr2 = cntattr2 + 1
    renameArrayItem(vals2, 'Date', 'header_Date')
    renameArrayItem(vals2, 'Server', 'header_Server')
    renameArrayItem(vals2, 'X-Powered-By', 'header_XPoweredBy')
    renameArrayItem(vals2, 'Content-Encoding', 'header_ContentEncoding')
    renameArrayItem(vals2, 'Content-Length', 'header_ContentLength')
    renameArrayItem(vals2, 'Keep-Alive', 'header_KeepAlive')
    renameArrayItem(vals2, 'Connection', 'header_Connection')
    renameArrayItem(vals2, 'Content-Type', 'header_ContentType')
    attrsInJson2,typesInJson2 = toCommaStringDict(vals2)
    #print type(attrsInJson2)
    #print attrsInJson2
    # Optional content metadata, renamed to content_* column names.
    vals3 = {}
    values3 = []
    cntattr3 = 0
    for tis3 in featureAttrs3:
        vals3,values3 = appendJsonKey(json_entries_node['response']['content'], tis3, vals3, values3, cntattr3)
        cntattr3 = cntattr3 + 1
    renameArrayItem(vals3, 'size', 'content_size')
    renameArrayItem(vals3, 'compression', 'content_compression')
    renameArrayItem(vals3, 'mimeType', 'content_mimeType')
    renameArrayItem(vals3, 'encoding', 'content_encoding')
    attrsInJson3,typesInJson3 = toCommaStringDict(vals3)
    #print type(attrsInJson3)
    #print attrsInJson3
    # Merge the optional column groups (skipping empty ones) and insert.
    attrsInJsonCombined = attrsInJson
    typesInJsonCombined = typesInJson
    if ( attrsInJson2 != ''):
        attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson2
        typesInJsonCombined = typesInJsonCombined + ',' + typesInJson2
        values.extend(values2)
    if ( attrsInJson3 != ''):
        attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson3
        typesInJsonCombined = typesInJsonCombined + ',' + typesInJson3
        values.extend(values3)
    dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
def web_entry_request(json_entries_node, uid, cursor, conn, parentid):
    """Insert one HAR entry's request section into `web_entries_request`.

    `parentid` is the id of the parent `web_entries` row; `uid` is unused.
    """
    tblName = 'web_entries_request'
    # Top-level request fields are read with direct indexing (KeyError if
    # absent), unlike the optional headers below.
    # NOTE(review): confirm these fields are always present in the payload.
    featureAttrs = {'method', 'url', 'httpVersion', 'cookieNumber', 'headerSize', 'bodySize'}
    featureAttrs2 = {'Host', 'User-Agent', 'Accept', 'Accept-Encoding', 'Connection', 'Content-Length', 'Keep-Alive'}
    vals = {}
    values = []
    cntattr = 0
    for tis in featureAttrs:
        vals[cntattr] = tis
        values.append(json_entries_node['request'][tis])
        cntattr = cntattr + 1
    # Foreign key back to the parent web_entries row.
    vals[cntattr] = 'web_entries_id'
    values.append(parentid)
    cntattr = cntattr + 1
    attrsInJson,typesInJson = toCommaStringDict(vals)
    #print type(attrsInJson)
    #print attrsInJson
    # Optional HTTP request headers, renamed to header_* column names.
    vals2 = {}
    values2 = []
    cntattr2 = 0
    for tis2 in featureAttrs2:
        vals2,values2 = appendJsonKey(json_entries_node['request']['headers'], tis2, vals2, values2, cntattr2)
        cntattr2 = cntattr2 + 1
    renameArrayItem(vals2, 'Host', 'header_Host')
    renameArrayItem(vals2, 'User-Agent', 'header_UserAgent')
    renameArrayItem(vals2, 'Accept', 'header_Accept')
    renameArrayItem(vals2, 'Accept-Encoding', 'header_AcceptEncoding')
    renameArrayItem(vals2, 'Connection', 'header_Connection')
    renameArrayItem(vals2, 'Content-Length', 'header_ContentLength')
    renameArrayItem(vals2, 'Keep-Alive', 'header_KeepAlive')
    attrsInJson2,typesInJson2 = toCommaStringDict(vals2)
    #print type(attrsInJson2)
    #print attrsInJson2
    # Merge the header columns (if any) into the combined insert.
    attrsInJsonCombined = attrsInJson
    typesInJsonCombined = typesInJson
    if ( attrsInJson2 != ''):
        attrsInJsonCombined = attrsInJson + ',' + attrsInJson2
        typesInJsonCombined = typesInJson + ',' + typesInJson2
        values.extend(values2)
    dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
def web_entry_node(json, uid, cursor, conn):
    """Insert every [pages]/[entriesNode] record into `web_entries`, then
    insert its request and response sub-records keyed to the new row id.
    """
    tblName = 'web_entries'
    # Three column groups: top-level entry fields, timing breakdown, cache info.
    featureAttrs = {'pageid', 'entryStartTime', 'time', 'serverIPAddress', 'connection'}
    featureAttrs2 = {'blocked', 'dns', 'connect', 'send', 'wait', 'receive', 'ssl'}
    featureAttrs3 = {'beforeRequestCacheEntries', 'afterRequestCacheEntries', 'hitCount'}
    for jiv in json['pages']:
        for innerjiv in jiv['entriesNode']:
            cntattr = 0
            attrsInJson = ''
            typesInJson = ''
            keytypevals = {}
            values = []
            for tis in featureAttrs:
                keytypevals,values = appendJsonKey(innerjiv, tis, keytypevals, values, cntattr)
                cntattr = cntattr + 1
            attrsInJson,typesInJson = toCommaStringDict(keytypevals)
            # Timing breakdown from the entry's 'timings' sub-object.
            cntattr2 = 0
            attrsInJson2 = ''
            typesInJson2 = ''
            keytypevals2 = {}
            values2 = []
            for tis2 in featureAttrs2:
                keytypevals2,values2 = appendJsonKey(innerjiv['timings'], tis2, keytypevals2, values2, cntattr2)
                cntattr2 = cntattr2 + 1
            attrsInJson2,typesInJson2 = toCommaStringDict(keytypevals2)
            # Cache statistics from the entry's 'cache' sub-object.
            cntattr3 = 0
            attrsInJson3 = ''
            typesInJson3 = ''
            keytypevals3 = {}
            values3 = []
            for tis3 in featureAttrs3:
                keytypevals3,values3 = appendJsonKey(innerjiv['cache'], tis3, keytypevals3, values3, cntattr3)
                cntattr3 = cntattr3 + 1
            attrsInJson3,typesInJson3 = toCommaStringDict(keytypevals3)
            ##combine
            # NOTE(review): unconditional joins assume all three groups are
            # non-empty; empty groups would yield ',,'-style column lists.
            attrsInJsonCombined = attrsInJson + ',' + attrsInJson2 + ',' + attrsInJson3
            typesInJsonCombined = typesInJson + ',' + typesInJson2 + ',' + typesInJson3
            values.extend(values2)
            values.extend(values3)
            #insert
            dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
            ##entry request
            # Children reference the id of the row just inserted above.
            web_entry_id = getMaxId(tblName,cursor,conn)
            web_entry_request(innerjiv, uid, cursor, conn, web_entry_id)
            web_entry_response(innerjiv, uid, cursor, conn, web_entry_id)
def web_page_node(json, uid, cursor, conn):
    """Insert one row per [pages]/[pageNode] record into `web_pages`.

    Args:
        json: HAR-style document with a top-level 'pages' list.
        uid: owning user id, stored on every row.
        cursor, conn: open DB cursor/connection forwarded to dbinsert().
    """
    tblName = 'web_pages'
    featureAttrs = {'tabid', 'pageStartTime', 'pageid', 'pagetitle', 'pageOnContentLoad', 'pageOnLoad', 'origin'}
    for jiv in json['pages']:
        # Reset the attribute counter for each page. Previously it was
        # initialised once before the loop and kept growing across pages,
        # unlike every other insert helper in this module.
        cntattr = 0
        keytypevals = {}
        values = []
        for tis in featureAttrs:
            keytypevals,values = appendJsonKey(jiv['pageNode'], tis, keytypevals, values, cntattr)
            cntattr = cntattr + 1
        # Attach the owning user id as an extra column.
        keytypevals[cntattr] = 'uid'
        cntattr = cntattr + 1
        values.append(uid)
        # The DB column is named 'id', not 'pageid'.
        renameArrayItem(keytypevals, 'pageid', 'id')
        attrsInJson,typesInJson = toCommaStringDict(keytypevals)
        dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Tab File
def insertTabs(filetype, json, cursor, conn, uid):
    """Insert each record of a 'tabs' payload into the `web_tabs` table.

    Args:
        filetype: file-type tag; the function is a no-op unless it is 'tabs'.
        json: parsed JSON document with a top-level 'tabs' list.
        cursor, conn: open DB cursor/connection forwarded to dbinsert().
        uid: owning user id, stored on every row.
    """
    if (filetype == 'tabs'):
        featureAttrs = {'timestamp', 'tabid', 'tabstatus'}
        for jiv in json['tabs']:
            tblName = 'web_tabs'
            cntattr = 0
            keytypevals = {}
            values = []
            for tis in featureAttrs:
                keytypevals,values = appendJsonKey(jiv, tis, keytypevals, values, cntattr)
                cntattr = cntattr + 1
            # Attach the owning user id as an extra column.
            keytypevals[cntattr] = 'uid'
            cntattr = cntattr + 1
            values.append(uid)
            attrsInJson,typesInJson = toCommaStringDict(keytypevals)
            dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Info File
def insertInfo(filetype, json, cursor, conn, uid):
    """Store a single browser-info record in the `web_info` table."""
    if filetype == 'info':
        featureAttrs = {'timestamp', 'version', 'browser'}
        tblName = 'web_info'
        cols = {}
        vals = []
        idx = 0
        # Pull each info field that exists in the JSON payload.
        for attr in featureAttrs:
            cols, vals = appendJsonKey(json, attr, cols, vals, idx)
            idx = idx + 1
        # Append the owning user id as an extra column.
        cols[idx] = 'uid'
        idx = idx + 1
        vals.append(uid)
        colNames, colTypes = toCommaStringDict(cols)
        dbinsert(tblName, colNames, colTypes, cursor, vals, conn)
## App File
def insertApps(filetype, json, cursor, conn, uid):
    """Insert each record of an 'apps' payload into the `apps` table.

    Args:
        filetype: file-type tag; the function is a no-op unless it is 'apps'.
        json: list of app-usage dicts.
        cursor, conn: open DB cursor/connection forwarded to dbinsert().
        uid: user id (unused; each record carries its own 'uid' attribute).
    """
    if (filetype == 'apps'):
        featureAttrs = {'uid', 'timestamp', 'app'}
        for jiv in json:
            tblName = 'apps'
            cntattr = 0
            keytypevals = {}
            values = []
            for tis in featureAttrs:
                keytypevals,values = appendJsonKey(jiv, tis, keytypevals, values, cntattr)
                cntattr = cntattr + 1
            attrsInJson,typesInJson = toCommaStringDict(keytypevals)
            dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Feature File
def insertFeatures(filetype, json, cursor, conn, uid):
    """Insert each record of a 'features' (device info) payload into `features`.

    Args:
        filetype: file-type tag; the function is a no-op unless it is 'features'.
        json: list of device-feature dicts, each optionally carrying a
            'screensize' sub-object.
        cursor, conn: open DB cursor/connection forwarded to dbinsert().
        uid: user id (unused; each record carries its own 'uid' attribute).
    """
    if (filetype == 'features'):
        featureAttrs = {'manufacturer', 'model', 'timestamp', 'uid', 'version'}
        for jiv in json:
            tblName = 'features'
            cntattr = 0
            keytypevals = {}
            values = []
            for tis in featureAttrs:
                keytypevals,values = appendJsonKey(jiv, tis, keytypevals, values, cntattr)
                cntattr = cntattr + 1
            # The DB column is named 'epoch', not 'timestamp'.
            renameArrayItem(keytypevals, 'timestamp', 'epoch')
            attrsInJson,typesInJson = toCommaStringDict(keytypevals)
            # Initialise the optional screen-size group before the guard so the
            # combine step below never reads an unassigned (or stale) value
            # when a record has no 'screensize' key.
            attrsInJson2 = ''
            typesInJson2 = ''
            values2 = []
            if (isJsonKey(jiv, 'screensize')):
                featureAttrs2 = {'height', 'width'}
                cntattr2 = 0
                keytypevals2 = {}
                for tis2 in featureAttrs2:
                    keytypevals2,values2 = appendJsonKey(jiv['screensize'], tis2, keytypevals2, values2, cntattr2)
                    cntattr2 = cntattr2 + 1
                renameArrayItem(keytypevals2, 'height', 'screen_height')
                renameArrayItem(keytypevals2, 'width', 'screen_width')
                attrsInJson2,typesInJson2 = toCommaStringDict(keytypevals2)
            #combine
            attrsInJsonCombined = attrsInJson
            typesInJsonCombined = typesInJson
            if ( attrsInJson2 != ''):
                attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson2
                typesInJsonCombined = typesInJsonCombined + ',' + typesInJson2
                values.extend(values2)
            dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
## Network File
def insertNetwork(filetype, json, cursor, conn, uid):
    """Insert each network snapshot into `network`, plus its nested
    availableNetworks / capabilities / linkProperties child records.

    Args:
        filetype: only 'network' payloads are processed; anything else is a no-op.
        json: list of network snapshot dicts.
        cursor, conn: open DB cursor/connection used by dbinsert()/getMaxId().
        uid: owning user id, stored on every top-level network row.
    """
    if (filetype == 'network'):
        networkAttrs = {'BSSID', 'IP', 'MAC', 'RSSI', 'SSID', 'detailedState', 'extraInfo', 'frequency', 'hasInternet', 'linkSpeed', 'mobileStatus', 'netID', 'signalStrength', 'timestamp', 'wiMaxStatus', 'wifiStatus'}
        for jiv in json:
            tblName = 'network'
            cntattr = 0
            keytypevals = {}
            values = []
            for tis in networkAttrs:
                keytypevals,values = appendJsonKey(jiv, tis, keytypevals, values, cntattr)
                cntattr = cntattr + 1
            keytypevals[cntattr] = 'uid'
            cntattr = cntattr + 1
            values.append(uid)
            # The DB column is named 'epoch', not 'timestamp'.
            renameArrayItem(keytypevals, 'timestamp', 'epoch')
            attrsInJson,typesInJson = toCommaStringDict(keytypevals)
            dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
            if isJsonKey(jiv, 'availableNetworks'):
                for innerjiv in jiv['availableNetworks']:
                    innernetworkAttrs = {'BSSID', 'SSID', 'capabilities'}
                    tblNameinner = 'network_availableNetwork'
                    keytypevalsinner = {}
                    valuesinner = []
                    cntattrinner = 0
                    for tisinner in innernetworkAttrs:
                        keytypevalsinner,valuesinner = appendJsonKey(innerjiv, tisinner, keytypevalsinner, valuesinner, cntattrinner)
                        cntattrinner = cntattrinner + 1
                    # Foreign key back to the network row inserted above.
                    maxID = getMaxId(tblName, cursor, conn)
                    keytypevalsinner[cntattrinner] = 'network_id'
                    cntattrinner = cntattrinner + 1
                    valuesinner.append(maxID)
                    attrsInJsoninner,typesInJsoninner = toCommaStringDict(keytypevalsinner)
                    dbinsert(tblNameinner,attrsInJsoninner,typesInJsoninner,cursor,valuesinner,conn)
            if isJsonKey(jiv, 'capabilities'):
                for innerjiv2 in jiv['capabilities']:
                    try:
                        innernetworkAttrs2 = {'mLinkDownBandwidthKbps', 'mLinkUpBandwidthKbps', 'mNetworkCapabilities', 'mSignalStrength', 'mTransportTypes'}
                        tblNameinner2 = 'network_capabilities'
                        keytypevalsinner2 = {}
                        valuesinner2 = []
                        cntattrinner2 = 0
                        for tisinner2 in innernetworkAttrs2:
                            keytypevalsinner2,valuesinner2 = appendJsonKey(innerjiv2, tisinner2, keytypevalsinner2, valuesinner2, cntattrinner2)
                            cntattrinner2 = cntattrinner2 + 1
                        maxID2 = getMaxId(tblName, cursor, conn)
                        keytypevalsinner2[cntattrinner2] = 'network_id'
                        cntattrinner2 = cntattrinner2 + 1
                        valuesinner2.append(maxID2)
                        attrsInJsoninner2,typesInJsoninner2 = toCommaStringDict(keytypevalsinner2)
                        dbinsert(tblNameinner2,attrsInJsoninner2,typesInJsoninner2,cursor,valuesinner2,conn)
                    except Exception:
                        # Best-effort ingest: skip malformed capability records.
                        # Narrowed from a bare `except:` which also swallowed
                        # KeyboardInterrupt/SystemExit.
                        pass
            if isJsonKey(jiv, 'linkProperties'):
                for innerjiv3 in jiv['linkProperties']:
                    try:
                        innernetworkAttrs3 = {'mDomains', 'mIfaceName', 'mMtu', 'mTcpBufferSizes'}
                        innernetworkAttrs3b = {'mDnses'}
                        tblNameinner3 = 'network_linkProperties'
                        keytypevalsinner3 = {}
                        valuesinner3 = []
                        cntattrinner3 = 0
                        for tisinner3 in innernetworkAttrs3:
                            keytypevalsinner3,valuesinner3 = appendJsonKey(innerjiv3, tisinner3, keytypevalsinner3, valuesinner3, cntattrinner3)
                            cntattrinner3 = cntattrinner3 + 1
                        # mDnses is a list: concatenated into one column value.
                        keytypevalsinner3b = {}
                        valuesinner3b = []
                        for tisinner3b in innernetworkAttrs3b:
                            keytypevalsinner3b,valuesinner3b = appendJsonKeyConcat(innerjiv3, tisinner3b, keytypevalsinner3b, valuesinner3b, cntattrinner3)
                            cntattrinner3 = cntattrinner3 + 1
                        maxID3 = getMaxId(tblName, cursor, conn)
                        keytypevalsinner3[cntattrinner3] = 'network_id'
                        cntattrinner3 = cntattrinner3 + 1
                        valuesinner3.append(maxID3)
                        attrsInJsoninner3,typesInJsoninner3 = toCommaStringDict(keytypevalsinner3)
                        attrsInJsoninner3b,typesInJsoninner3b = toCommaStringDict(keytypevalsinner3b)
                        #combine
                        attrsInJsonCombined = attrsInJsoninner3
                        typesInJsonCombined = typesInJsoninner3
                        if ( attrsInJsoninner3b != ''):
                            attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJsoninner3b
                            typesInJsonCombined = typesInJsonCombined + ',' + typesInJsoninner3b
                            valuesinner3.extend(valuesinner3b)
                        dbinsert(tblNameinner3,attrsInJsonCombined,typesInJsonCombined,cursor,valuesinner3,conn)
                        if isJsonKey(innerjiv3, 'mLinkAddresses'):
                            for innerjiv5 in innerjiv3['mLinkAddresses']:
                                innernetworkAttrs5 = {'address', 'flags', 'prefixLength', 'scope'}
                                tblNameinner5 = 'network_linkProperties_mLinkAddresses'
                                keytypevalsinner5 = {}
                                valuesinner5 = []
                                cntattrinner5 = 0
                                for tisinner5 in innernetworkAttrs5:
                                    keytypevalsinner5,valuesinner5 = appendJsonKey(innerjiv5, tisinner5, keytypevalsinner5, valuesinner5, cntattrinner5)
                                    cntattrinner5 = cntattrinner5 + 1
                                maxID5 = getMaxId(tblNameinner3, cursor, conn)
                                keytypevalsinner5[cntattrinner5] = 'network_linkProperties_id'
                                cntattrinner5 = cntattrinner5 + 1
                                valuesinner5.append(maxID5)
                                attrsInJsoninner5,typesInJsoninner5 = toCommaStringDict(keytypevalsinner5)
                                dbinsert(tblNameinner5,attrsInJsoninner5,typesInJsoninner5,cursor,valuesinner5,conn)
                        if isJsonKey(innerjiv3, 'mRoutes'):
                            for innerjiv6 in innerjiv3['mRoutes']:
                                innernetworkAttrs6 = {'mGateway', 'mHasGateway', 'mInterface', 'mIsHost', 'mType'}
                                tblNameinner6 = 'network_linkProperties_mRoutes'
                                keytypevalsinner6 = {}
                                valuesinner6 = []
                                cntattrinner6 = 0
                                for tisinner6 in innernetworkAttrs6:
                                    keytypevalsinner6,valuesinner6 = appendJsonKey(innerjiv6, tisinner6, keytypevalsinner6, valuesinner6, cntattrinner6)
                                    cntattrinner6 = cntattrinner6 + 1
                                maxID6 = getMaxId(tblNameinner3, cursor, conn)
                                keytypevalsinner6[cntattrinner6] = 'network_linkProperties_id'
                                cntattrinner6 = cntattrinner6 + 1
                                valuesinner6.append(maxID6)
                                attrsInJsoninner6,typesInJsoninner6 = toCommaStringDict(keytypevalsinner6)
                                dbinsert(tblNameinner6,attrsInJsoninner6,typesInJsoninner6,cursor,valuesinner6,conn)
                    except Exception:
                        # Best-effort ingest: skip malformed linkProperties
                        # records (narrowed from a bare `except:`).
                        pass
## Sensor File (temperature and humidity not coded)
def insertSensor(filetype, json, cursor, conn, session):
if (filetype == 'sensors' and json['sensor'] == 'Location'):
tblName = 'sensor_location'
fields = 'uid, epoch, configAccuracy'
fieldTypes = '%s, %s, %s'
values = [json['uid'], json['timestamp'], json['sensorData']['configAccuracy']]
dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
maxID = getMaxId(tblName, cursor, conn)
cnt = 0
for jiv in json['sensorData']['locations']:
tblName = 'sensor_location_data'
fields = 'sensor_location_id, latitude, longitude, accuracy, speed, bearing, provider, time, local_time'
fieldTypes = '%s, %s, %s, %s, %s, %s, %s, %s, %s'
values = [maxID,
json['sensorData']['locations'][cnt]['latitude'],
json['sensorData']['locations'][cnt]['longitude'],
json['sensorData']['locations'][cnt]['accuracy'],
json['sensorData']['locations'][cnt]['speed'],
json['sensorData']['locations'][cnt]['bearing'],
json['sensorData']['locations'][cnt]['provider'],
json['sensorData']['locations'][cnt]['time'],
json['sensorData']['locations'][cnt]['local_time']]
dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
cnt = cnt + 1
if (filetype == 'sensors' and json['sensor'] == 'WiFi'):
tblName = 'sensor_WiFi'
fields = 'uid, epoch, senseCycles'
fieldTypes = '%s, %s, %s'
values = [json['uid'], json['timestamp'], json['sensorData']['senseCycles']]
dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
maxID = getMaxId(tblName, cursor, conn)
cnt = 0
for jiv in json['sensorData']['scanResult']:
tblName = 'sensor_WiFi_scanResult'
fields = 'sensor_WiFi_id, ssid, bssid, capabilities, level, frequency'
fieldTypes = '%s, %s, %s, %s, %s, %s'
values = [maxID,
json['sensorData']['scanResult'][cnt]['ssid'],
json['sensorData']['scanResult'][cnt]['bssid'],
json['sensorData']['scanResult'][cnt]['capabilities'],
json['sensorData']['scanResult'][cnt]['level'],
json['sensorData']['scanResult'][cnt]['frequency'] ]
dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
cnt = cnt + 1
if (filetype == 'sensors' and json['sensor'] == 'MagneticField'):
tblName = 'betterXkeyspace.sensor_magneticfield'
cnt = 0
for jiv in json['sensorData']['sensorTimeStamps']:
fields = 'uid, xAxis, yAxis, zAxis, epoch'
fieldTypes = '%s, | |
import os
import numpy as np
from numpy import count_nonzero
import pandas as pd
import matplotlib.pyplot as plt
from dimred import DimRed
from scipy.sparse import random as sparse_random
from scipy.sparse import csr_matrix, isspmatrix
from sklearn.datasets import load_iris, load_digits, make_friedman1, make_sparse_spd_matrix
from sklearn.decomposition import TruncatedSVD, SparsePCA
from sklearn.utils.extmath import svd_flip, stable_cumsum
from sklearn.preprocessing import StandardScaler
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
def test_init():
    """Constructor stores the requested algorithm and component count."""
    cases = [
        (dict(), 'auto', 0.95),
        (dict(algo='dimred_svd'), 'dimred_svd', 0.95),
        (dict(algo='dimred_evd', n_components=3), 'dimred_evd', 3),
        (dict(algo='sklearn_truncated_svd', n_components=1), 'sklearn_truncated_svd', 1),
        (dict(algo='sklearn_sparse_pca', n_components=2), 'sklearn_sparse_pca', 2),
    ]
    for kwargs, expected_algo, expected_n in cases:
        model = DimRed(**kwargs)
        assert(model.algo == expected_algo)
        assert(model.n_components == expected_n)
def test_np_array_2_components():
    """PCA with 2 components on a tiny 6x2 matrix keeps both dimensions."""
    data = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    model = DimRed(n_components=2)
    projected = model.fit_transform(data)
    ratios = model.explained_variance_ratio_
    assert(ratios[0] == 0.9924428900898052)
    assert(ratios[1] == 0.007557109910194766)
    assert(data.shape == (6, 2))
    assert(projected.shape == (6, 2))
    assert(model.algo == 'sklearn_pca')
def test_np_array_default_components():
    """Default (0.95) and low (0.40) variance targets both keep one PC here."""
    data = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    default_model = DimRed()  # n_components defaults to 0.95
    low_model = DimRed(n_components=0.40)
    default_out = default_model.fit_transform(data)
    low_out = low_model.fit_transform(data)
    assert(default_model.explained_variance_ratio_[0] == 0.9924428900898052)
    assert(low_model.explained_variance_ratio_[0] == 0.9924428900898052)
    assert(data.shape == (6, 2))
    assert(default_out.shape == (6, 1))
    assert(low_out.shape == (6, 1))
    assert(default_model.algo == 'sklearn_pca')
    assert(low_model.algo == 'sklearn_pca')
def test_np_array_sparse_noncsr():
    """A dense ndarray that is mostly zeros is detected as sparse (non-scipy)."""
    # create sparse matrix: 15 of the 18 entries are zero (83.3% sparsity)
    X_sparse = np.array([[1,0,0,0,0,0], [0,0,2,0,0,0], [0,0,0,2,0,0]])
    # calculate sparsity — previously computed but never asserted
    sparsity = 1.0 - count_nonzero(X_sparse) / X_sparse.size
    assert(sparsity == 0.8333333333333334)
    dimred = DimRed(n_components=1)
    dimred.fit_transform(X_sparse)
    assert(dimred.issparse == True)
    assert(dimred.sparsity == 0.8333333333333334)
    assert(dimred.sp_issparse == False)
def test_np_array_sparse_csr():
    """A scipy CSR matrix is flagged as scipy-sparse by fit_transform."""
    # create sparse matrix: 15 of the 18 entries are zero
    X_sparse = np.array([[1,0,0,0,0,0], [0,0,2,0,0,0], [0,0,0,2,0,0]])
    X_sparse_csr = csr_matrix(X_sparse)
    # calculate sparsity — previously computed but never asserted
    sparsity = 1.0 - csr_matrix.getnnz(X_sparse_csr) / (X_sparse_csr.shape[0] * X_sparse_csr.shape[1])
    assert(sparsity == 0.8333333333333334)
    dimred = DimRed(n_components=1)
    dimred.fit_transform(X_sparse_csr)
    assert(dimred.issparse)
    assert(dimred.sp_issparse)
def test_iris_data_sklearn_pca():
    """Default algo on iris must match the sklearn PCA reference values."""
    X = load_iris().data  # target labels are not needed for PCA
    dimred = DimRed(n_components=2)
    dimred.fit_transform(X)
    explained_variance_ratio = dimred.explained_variance_ratio_
    singular_values = dimred.singular_values_
    assert(dimred.algo == 'sklearn_pca')
    assert(explained_variance_ratio[0] == 0.9246187232017271)
    assert(explained_variance_ratio[1] == 0.05306648311706783)
    assert(singular_values[0] == 25.099960442183864)
    assert(singular_values[1] == 6.013147382308734)
def test_iris_data_dimred_svd():
    """dimred_svd on iris reproduces the reference variance/singular values."""
    data = load_iris().data
    model = DimRed(n_components=2, algo="dimred_svd")
    projected = model.fit_transform(data)
    assert(data.shape == (150, 4))
    assert(projected.shape == (150, 2))
    assert(model.algo == 'dimred_svd')
    assert(model.explained_variance_ratio_[0] == 0.9246187232017271)
    assert(model.explained_variance_ratio_[1] == 0.05306648311706782)
    assert(model.singular_values_[0] == 25.099960442183864)
    assert(model.singular_values_[1] == 6.013147382308733)
    assert(model.n_components_ == 2)
def test_iris_data_sklearn_pca_repeated():
    """Repeated sklearn_pca run on iris (renamed: this function previously
    redefined `test_iris_data_sklearn_pca`, silently shadowing the earlier
    test so pytest collected it only once)."""
    iris = load_iris()
    X = iris.data
    dimred = DimRed(n_components=2)
    dimred.fit_transform(X)
    explained_variance_ratio = dimred.explained_variance_ratio_
    singular_values = dimred.singular_values_
    assert(dimred.algo == 'sklearn_pca')
    assert(explained_variance_ratio[0] == 0.9246187232017271)
    assert(explained_variance_ratio[1] == 0.05306648311706783)
    assert(singular_values[0] == 25.099960442183864)
    assert(singular_values[1] == 6.013147382308734)
def test_iris_data_dimred_svd_equal_sklearn_pca():
    """dimred_svd and sklearn_pca agree on iris with 2 components."""
    data = load_iris().data
    svd_model = DimRed(n_components=2, algo="dimred_svd")
    pca_model = DimRed(n_components=2, algo="sklearn_pca")
    svd_out = svd_model.fit_transform(data)
    pca_out = pca_model.fit_transform(data)
    # Components are fetched but not compared (their sign may differ).
    svd_components = svd_model.components_
    pca_components = pca_model.components_
    assert(data.shape == (150, 4))
    assert(svd_out.shape == (150, 2))
    assert(pca_out.shape == (150, 2))
    assert(svd_model.algo == 'dimred_svd')
    assert(pca_model.algo == 'sklearn_pca')
    assert(np.allclose(svd_model.explained_variance_ratio_, pca_model.explained_variance_ratio_))
    assert(np.allclose(svd_model.singular_values_, pca_model.singular_values_))
    assert(np.allclose(svd_model.n_components_, pca_model.n_components_))
def test_iris_data_dimred_svd_equal_sklearn_pca_1_comp():
    """dimred_svd and sklearn_pca also agree on iris with a single component."""
    data = load_iris().data
    svd_model = DimRed(n_components=1, algo="dimred_svd")
    pca_model = DimRed(n_components=1, algo="sklearn_pca")
    svd_out = svd_model.fit_transform(data)
    pca_out = pca_model.fit_transform(data)
    assert(data.shape == (150, 4))
    assert(svd_out.shape == (150, 1))
    assert(pca_out.shape == (150, 1))
    assert(svd_model.algo == 'dimred_svd')
    assert(pca_model.algo == 'sklearn_pca')
    assert(np.allclose(svd_model.explained_variance_ratio_, pca_model.explained_variance_ratio_))
    assert(np.allclose(svd_model.singular_values_, pca_model.singular_values_))
    assert(np.allclose(svd_model.n_components_, pca_model.n_components_))
def test_mnist_data_dimred_svd_90():
    """dimred_svd keeping 90% of the variance reduces digits from 64 to 21 dims."""
    # Load and return the digits dataset (classification).
    # Each datapoint is a 8x8 image of a digit.
    # Dimensionality = 64
    # Features = integers 0-16
    # Observations = 1797
    digits = load_digits(as_frame=True)
    X = digits.data
    y = digits.target  # NOTE(review): unused in this test
    pixel_colnames = digits.feature_names  # NOTE(review): unused in this test
    n_samples, n_features = X.shape
    scaler = StandardScaler()
    # NOTE(review): the scaler is fitted but its transform is never applied;
    # the reduction below runs on the unscaled data — confirm this is intended.
    scaler.fit(X)
    dimred = DimRed(algo='dimred_svd', n_components = .90)
    X_pca = dimred.fit_transform(X)
    mnist_dimensions_before_pca = X.shape[1]
    mnist_dimensions_after_pca = X_pca.shape[1]
    components = dimred.n_components_
    assert(mnist_dimensions_before_pca == 64)
    assert(mnist_dimensions_after_pca == 21)
    assert(components == 21)
    # Smoke-test the variance plot: show briefly, then close the figure.
    fig, ax = dimred.draw_varianceplot('MNIST Data')
    plt.show(block=False)
    plt.pause(1.5)
    plt.close()
def test_mnist_data_dimred_svd_60():
    """dimred_svd keeping 60% of the variance reduces digits from 64 to 7 dims."""
    # Load and return the digits dataset (classification).
    # Each datapoint is a 8x8 image of a digit.
    # Dimensionality = 64
    # Features = integers 0-16
    # Observations = 1797
    digits = load_digits(as_frame=True)
    X = digits.data
    y = digits.target  # NOTE(review): unused in this test
    pixel_colnames = digits.feature_names  # NOTE(review): unused in this test
    n_samples, n_features = X.shape
    scaler = StandardScaler()
    # NOTE(review): the scaler is fitted but its transform is never applied;
    # the reduction below runs on the unscaled data — confirm this is intended.
    scaler.fit(X)
    dimred = DimRed(algo='dimred_svd', n_components = .60)
    X_pca = dimred.fit_transform(X)
    mnist_dimensions_before_pca = X.shape[1]
    mnist_dimensions_after_pca = X_pca.shape[1]
    components = dimred.n_components_
    assert(mnist_dimensions_before_pca == 64)
    assert(mnist_dimensions_after_pca == 7)
    assert(components == 7)
    # Smoke-test the variance plot: show briefly, then close the figure.
    fig, ax = dimred.draw_varianceplot('MNIST Data')
    plt.show(block=False)
    plt.pause(1.5)
    plt.close()
def test_center():
    """_center subtracts the per-column mean from every row."""
    data = np.array([[0, 3, 4], [1, 2, 4], [3, 4, 5]])
    expected = np.array([[-1.33333333, 0., -0.33333333],
                         [-0.33333333, -1., -0.33333333],
                         [1.66666667, 1., 0.66666667]])
    # allclose: avoid rounding float errors
    assert(np.allclose(DimRed._center(data), expected))
def test_covariance():
    """_cov reproduces the sample covariance matrix of a small input."""
    X = np.array([[0, 3, 4], [1, 2, 4], [3, 4, 5]])
    X_cov_ref = np.array([[2.3333333333333335, 1., 0.8333333333333334],[1. , 1., 0.5], [0.8333333333333334, 0.5, 0.3333333333333333]])
    X_cov = DimRed._cov(X)
    # allclose instead of array_equal: exact float equality is brittle and
    # inconsistent with the sibling tests in this file.
    assert(np.allclose(X_cov, X_cov_ref))
def test_preprocess():
    """_preprocess centers the matrix and reports its dimensions."""
    data = np.array([[0, 3, 4], [1, 2, 4], [3, 4, 5]])
    expected_centered = np.array([[-1.33333333, 0., -0.33333333],
                                  [-0.33333333, -1., -0.33333333],
                                  [1.66666667, 1., 0.66666667]])
    model = DimRed()
    centered, rows, cols = model._preprocess(data)
    assert(np.allclose(centered, expected_centered))
    assert(rows == centered.shape[0])
    assert(cols == centered.shape[1])
def test_preprocess_feature_is_one():
    """fit_transform must reject a single-feature matrix.

    The previous version did `assert False` inside the `try` with a bare
    `except`, so the AssertionError itself was swallowed and the test passed
    even when fit_transform raised nothing. Track the exception explicitly.
    """
    X = np.array([[-1], [2]])
    dimred = DimRed()
    raised = False
    try:
        dimred.fit_transform(X)
    except Exception:
        raised = True
    assert(raised)
def test_preprocess_components_high():
    """Requesting more components than features falls back to n_features - 1."""
    data = np.array([[0, 3, 4], [1, 2, 4], [3, 4, 5]])
    too_many = 5
    assert(data.shape[1] < too_many)
    model = DimRed(n_components=too_many)
    model.fit_transform(data)
    assert(model.n_components == data.shape[1] - 1)
def test_eigen_sorted():
    """_eigen_sorted returns eigenpairs ordered by decreasing eigenvalue."""
    cov = np.array([[2.3333333333333335, 1., 0.8333333333333334],
                    [1. , 1., 0.5],
                    [0.8333333333333334, 0.5, 0.3333333333333333]])
    expected_vals = np.array([ 3.19755880e+00, 4.69107871e-01, -3.13055232e-18])
    expected_vecs = np.array([[-0.83234965, -0.50163583, -0.23570226],
                              [-0.45180545, 0.86041634, -0.23570226],
                              [-0.32103877, 0.08969513, 0.94280904]])
    actual_vals, actual_vecs = DimRed._eigen_sorted(cov)
    # allclose: avoid rounding float errors
    assert(np.allclose(actual_vals, expected_vals))
    assert(np.allclose(actual_vecs, expected_vecs))
def test_dimred_evd():
    """dimred_evd matches a hand-rolled covariance/eigendecomposition PCA."""
    X = np.array([[0, 3, 4], [1, 2, 4], [3, 4, 5]])
    # Reference projections and eigenpairs computed once and pinned below.
    X_vecs_ref = np.array([[ 1.21681246e+00, 6.38949394e-01, 3.34638699e-16],
                           [ 8.36268258e-01, -7.23102775e-01, 1.68105246e-16],
                           [-2.05308072e+00, 8.41533816e-02, 2.79127548e-16]])
    e_vals_ref = np.array([ 3.19755880e+00, 4.69107871e-01, -3.13055232e-18])
    e_vecs_ref = np.array([[-0.83234965, -0.50163583, -0.23570226],
                           [-0.45180545, 0.86041634, -0.23570226],
                           [-0.32103877, 0.08969513, 0.94280904]])
    X_vecs_pca_ref3 = np.array([[ 1.21681246e+00, 6.38949394e-01, 3.34638699e-16],
                                [ 8.36268258e-01, -7.23102775e-01, 1.68105246e-16],
                                [-2.05308072e+00, 8.41533816e-02, 2.79127548e-16]])
    X_vecs_pca_ref2 = np.array([[ 1.21681246, 0.63894939],
                                [ 0.83626826, -0.72310278],
                                [-2.05308072, 0.08415338]])
    X_vecs_pca_ref1 = np.array([[ 1.21681246],
                                [ 0.83626826],
                                [-2.05308072]])
    # Covariance (implemented by _cov())
    n_samples, n_features = X.shape
    x_mean_vec = np.mean(X, axis=0)
    X_centered = X - x_mean_vec
    X_cov = X_centered.T.dot(X_centered) / (n_samples - 1)
    # Eigen values (implemented by _eigen_sorted)
    eig_vals, eig_vecs = np.linalg.eig(X_cov)
    idx = eig_vals.argsort()[::-1]   # idx= array([0, 1, 2])
    e_vals, e_vecs = eig_vals[idx], eig_vecs[:, idx]
    # Project the centered data onto the sorted eigenvectors.
    X_vecs = X_centered.dot(e_vecs)
    X_vecs_pca_1 = X_vecs[:, :1]  # keep 1 component
    X_vecs_pca_2 = X_vecs[:, :2]  # keep 2 components
    X_vecs_pca_3 = X_vecs[:, :3]  # keep 3 components
    # DimRed should reproduce the manual pipeline for each component count.
    dimred = DimRed(algo='dimred_evd')
    dimred1 = DimRed(algo='dimred_evd', n_components=1)
    dimred2 = DimRed(algo='dimred_evd', n_components=2)
    dimred3 = DimRed(algo='dimred_evd', n_components=3)
    X_transf = dimred.fit_transform(X)
    X_transf1 = dimred1.fit_transform(X)
    X_transf2 = dimred2.fit_transform(X)
    X_transf3 = dimred3.fit_transform(X)
    assert(np.allclose(e_vals, e_vals_ref))  # avoiding rounding float errors
    assert(np.allclose(e_vecs, e_vecs_ref))  # avoiding rounding float errors
    assert(np.allclose(X_vecs, X_vecs_ref))  # avoiding rounding float errors
    assert(np.allclose(X_vecs_pca_1, X_vecs_pca_ref1))  # avoiding rounding float errors
    assert(np.allclose(X_vecs_pca_2, X_vecs_pca_ref2))  # avoiding rounding float errors
    assert(np.allclose(X_vecs_pca_3, X_vecs_pca_ref3))  # avoiding rounding float errors
    # Default (0.95 variance) keeps 2 components on this data.
    assert(np.allclose(X_transf, X_vecs_pca_ref2))  # avoiding rounding float errors
    assert(np.allclose(X_transf1, X_vecs_pca_ref1))  # avoiding rounding float errors
    assert(np.allclose(X_transf2, X_vecs_pca_ref2))  # avoiding rounding float errors
    assert(np.allclose(X_transf3, X_vecs_pca_ref3))  # avoiding rounding float errors
def test_dimred_svd():
    """dimred_svd matches a manual numpy SVD of the centered matrix."""
    X = np.array([[0, 3, 4], [1, 2, 4], [3, 4, 5]])
    # Pinned reference factors of the centered matrix.
    U_ref = np.array([[-0.48117093, -0.65965234, 0.57735027],
                      [-0.33069022, 0.74653242, 0.57735027],
                      [ 0.81186114, -0.08688008, 0.57735027]])
    Sigma_ref = np.array([2.52885697e+00, 9.68615374e-01, 5.82986245e-16])
    Vt_ref = np.array([[ 0.83234965, 0.45180545, 0.32103877],
                       [ 0.50163583, -0.86041634, -0.08969513],
                       [-0.23570226, -0.23570226, 0.94280904]])
    dimred = DimRed(algo='dimred_svd')  #0.95 default
    # Center matrix
    x_mean_vec = np.mean(X, axis=0)
    X_centered = X - x_mean_vec
    # SVD - manual
    U, Sigma, Vt = np.linalg.svd(X_centered, full_matrices=False)
    U, Vt = svd_flip(U, Vt)
    # flip eigenvectors' sign to enforce deterministic output
    X_transf = dimred._postprocess_dimred_pca_svd(U, Sigma, Vt)
    # SVD - function
    X_transformed = dimred.fit_transform(X)
    assert(X.shape == (3,3))
    # 0.95 variance target keeps 2 of the 3 components.
    assert(X_transf.shape == (3,2))
    assert(X_transformed.shape == (3,2))
    assert(np.allclose(U, U_ref))  # avoiding rounding float errors
    assert(np.allclose(Vt, Vt_ref))  # avoiding rounding float errors
    assert(np.allclose(Sigma, Sigma_ref))  # avoiding rounding float errors
    assert(np.allclose(X_transf, X_transformed))  # avoiding rounding float errors
def test_sparse_pca_forced():
X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0)
transformer = SparsePCA(n_components=5, random_state=0)
transformer.fit(X)
X_transformed = transformer.transform(X)
dimred = DimRed(algo='sklearn_sparse_pca', n_components=5, random_int=0)
X_pca = dimred.fit_transform(X)
assert(X.shape == | |
"""
Core classes for XBlocks.
This code is in the Runtime layer, because it is authored once by edX
and used by all runtimes.
"""
import functools
import pkg_resources
try:
    # Prefer simplejson (has C speedups) when installed; fall back to stdlib.
    # BUG FIX: the module was misspelled "simplesjson", so the fast path
    # could never be taken.
    import simplejson as json  # pylint: disable=F0401
except ImportError:
    import json
from webob import Response
from xblock.exceptions import XBlockSaveError, KeyValueMultiSaveError, JsonHandlerError, DisallowedFileError
from xblock.fields import ChildrenModelMetaclass, ModelMetaclass, String, List, Scope, Reference
from xblock.plugin import Plugin
# __all__ controls what classes end up in the docs.
__all__ = ['XBlock']
class TagCombiningMetaclass(type):
    """
    Collects and combines `._class_tags` from all base classes and
    puts them together in one `.class_tags` attribute.
    """
    def __new__(mcs, name, bases, attrs):
        # Merge every base's tag set into a fresh set for this class;
        # bases without `_class_tags` simply contribute nothing.
        # pylint: disable=W0212
        merged_tags = set()
        for base in bases:
            merged_tags.update(getattr(base, '_class_tags', ()))
        attrs['_class_tags'] = merged_tags
        return super(TagCombiningMetaclass, mcs).__new__(mcs, name, bases, attrs)
class ServiceRequestedMetaclass(type):
    """
    Creates the _services_requested dict on the class.
    Keys are service names, values are "need" or "want".
    """
    def __new__(mcs, name, bases, attrs):
        # Every class gets its own, initially empty, declaration dict;
        # the @needs/@wants decorators fill it in later.
        attrs.update(_services_requested={})
        return super(ServiceRequestedMetaclass, mcs).__new__(mcs, name, bases, attrs)
class XBlockMetaclass(
    ChildrenModelMetaclass,
    ModelMetaclass,
    TagCombiningMetaclass,
    ServiceRequestedMetaclass,
):
    """
    Metaclass for XBlock.
    Combines all the metaclasses XBlocks needs:
    * `ChildrenModelMetaclass`
    * `ModelMetaclass`
    * `TagCombiningMetaclass`
    * `ServiceRequestedMetaclass`
    """
# -- Base Block
class XBlock(Plugin):
    """Base class for XBlocks.
    Derive from this class to create a new kind of XBlock. There are no
    required methods, but you will probably need at least one view.
    Don't provide the ``__init__`` method when deriving from this class.
    """
    __metaclass__ = XBlockMetaclass
    # Entry-point group name under which XBlock implementations register.
    entry_point = 'xblock.v1'
    parent = Reference(help='The id of the parent of this XBlock', default=None, scope=Scope.parent)
    name = String(help="Short name for the block", scope=Scope.settings)
    tags = List(help="Tags for this block", scope=Scope.settings)
    # Class-level tag set; populated by the @XBlock.tag decorator and merged
    # across base classes by TagCombiningMetaclass.
    _class_tags = set()
@classmethod
def json_handler(cls, func):
"""Wrap a handler to consume and produce JSON.
Rather than a Request object, the method will now be passed the
JSON-decoded body of the request. Any data returned by the function
will be JSON-encoded and returned as the response.
The wrapped function can raise JsonHandlerError to return an error
response with a non-200 status code.
"""
@XBlock.handler
@functools.wraps(func)
def wrapper(self, request, suffix=''):
"""The wrapper function `json_handler` returns."""
if request.method != "POST":
return JsonHandlerError(405, "Method must be POST").get_response(allow=["POST"])
try:
request_json = json.loads(request.body)
except ValueError:
return JsonHandlerError(400, "Invalid JSON").get_response()
try:
response = func(self, request_json, suffix)
except JsonHandlerError as err:
return err.get_response()
if isinstance(response, Response):
return response
else:
return Response(json.dumps(response), content_type='application/json')
return wrapper
    @classmethod
    def handler(cls, func):
        """A decorator to indicate a function is usable as a handler."""
        # Marker attribute that runtimes check when dispatching requests.
        func._is_xblock_handler = True  # pylint: disable=protected-access
        return func
@staticmethod
def tag(tags):
"""Returns a function that adds the words in `tags` as class tags to this class."""
def dec(cls):
"""Add the words in `tags` as class tags to this class."""
# Add in this class's tags
cls._class_tags.update(tags.replace(",", " ").split()) # pylint: disable=protected-access
return cls
return dec
@classmethod
def load_tagged_classes(cls, tag):
"""Produce a sequence of all XBlock classes tagged with `tag`."""
# Allow this method to access the `_class_tags`
# pylint: disable=W0212
for name, class_ in cls.load_classes():
if tag in class_._class_tags:
yield name, class_
    @classmethod
    def open_local_resource(cls, uri):
        """Open a local resource.
        The container calls this method when it receives a request for a
        resource on a URL which was generated by Runtime.local_resource_url().
        It will pass the URI from the original call to local_resource_url()
        back to this method. The XBlock must parse this URI and return an open
        file-like object for the resource.
        For security reasons, the default implementation will return only a
        very restricted set of file types, which must be located in a folder
        called "public". XBlock authors who want to override this behavior will
        need to take care to ensure that the method only serves legitimate
        public resources. At the least, the URI should be matched against a
        whitelist regex to ensure that you do not serve an unauthorized
        resource.
        """
        # Verify the URI is in whitelisted form before opening for serving.
        # URI must begin with public/, and no file path component can start
        # with a dot, which prevents ".." and ".hidden" files.
        if not uri.startswith("public/"):
            raise DisallowedFileError("Only files from public/ are allowed: %r" % uri)
        if "/." in uri:
            raise DisallowedFileError("Only safe file names are allowed: %r" % uri)
        # Resolve the resource relative to the package the class lives in.
        return pkg_resources.resource_stream(cls.__module__, uri)
@staticmethod
def needs(service_name):
"""A class decorator to indicate that an XBlock class needs a particular service."""
def _decorator(cls): # pylint: disable=missing-docstring
cls._services_requested[service_name] = "need" # pylint: disable=protected-access
return cls
return _decorator
@staticmethod
def wants(service_name):
"""A class decorator to indicate that an XBlock class wants a particular service."""
def _decorator(cls): # pylint: disable=missing-docstring
cls._services_requested[service_name] = "want" # pylint: disable=protected-access
return cls
return _decorator
@classmethod
def service_declaration(cls, service_name):
"""
Find and return a service declaration.
XBlocks declare their service requirements with @XBlock.needs and
@XBlock.wants decorators. These store information on the class.
This function finds those declarations for a block.
Arguments:
service_name (string): the name of the service requested.
Returns:
One of "need", "want", or None.
"""
# The class declares what services it desires. To deal with subclasses,
# especially mixins, properly, we have to walk up the inheritance
# hierarchy, and combine all the declared services into one dictionary.
# We do this once per class, then store the result on the class.
if "_combined_services" not in cls.__dict__:
# Walk the MRO chain, collecting all the services together.
combined = {}
for parent in reversed(cls.__mro__):
combined.update(getattr(parent, "_services_requested", {}))
cls._combined_services = combined
declaration = cls._combined_services.get(service_name)
return declaration
    def __init__(self, runtime, field_data, scope_ids):
        """
        Construct a new XBlock.
        This class should only be instantiated by runtimes.
        Arguments:
            runtime (:class:`.Runtime`): Use it to access the environment.
                It is available in XBlock code as ``self.runtime``.
            field_data (:class:`.FieldData`): Interface used by the XBlock
                fields to access their data from wherever it is persisted.
            scope_ids (:class:`.ScopeIds`): Identifiers needed to resolve
                scopes.
        """
        super(XBlock, self).__init__()
        self.runtime = runtime
        self._field_data = field_data
        # Per-instance cache of field values already read from _field_data.
        self._field_data_cache = {}
        # Fields modified since the last save(); consumed by save().
        self._dirty_fields = {}
        self.scope_ids = scope_ids
        # A cache of the parent block, retrieved from .parent
        self._parent_block = None
        self._parent_block_id = None
def __repr__(self):
# `XBlock` obtains the `fields` attribute from the `ModelMetaclass`.
# Since this is not understood by static analysis, silence this error.
# pylint: disable=E1101
attrs = []
for field in self.fields.values():
try:
value = getattr(self, field.name)
except Exception: # pylint: disable=W0703
# Ensure we return a string, even if unanticipated exceptions.
attrs.append(" %s=???" % (field.name,))
else:
if isinstance(value, basestring):
value = value.strip()
if len(value) > 40:
value = value[:37] + "..."
attrs.append(" %s=%r" % (field.name, value))
return "<%s @%04X%s>" % (
self.__class__.__name__,
id(self) % 0xFFFF,
','.join(attrs)
)
def get_parent(self):
"""Return the parent block of this block, or None if there isn't one."""
if self._parent_block_id != self.parent:
if self.parent is not None:
self._parent_block = self.runtime.get_block(self.parent)
else:
self._parent_block = None
self._parent_block_id = self.parent
return self._parent_block
    def render(self, view, context=None):
        """Render `view` with this block's runtime and the supplied `context`"""
        # Delegates to the runtime, which owns view rendering.
        return self.runtime.render(self, view, context)
    def handle(self, handler_name, request, suffix=''):
        """Handle `request` with this block's runtime."""
        # Delegates to the runtime, which owns handler dispatch.
        return self.runtime.handle(self, handler_name, request, suffix)
def save(self):
"""Save all dirty fields attached to this XBlock."""
if not self._dirty_fields:
# nop if _dirty_fields attribute is empty
return
try:
fields_to_save = self._get_fields_to_save()
# Throws KeyValueMultiSaveError if things go wrong
self._field_data.set_many(self, fields_to_save)
except KeyValueMultiSaveError as save_error:
saved_fields = [field for field in self._dirty_fields if field.name in save_error.saved_field_names]
for field in saved_fields:
# should only find one corresponding field
del self._dirty_fields[field]
raise XBlockSaveError(saved_fields, self._dirty_fields.keys())
# Remove all dirty fields, since the save was successful
self._clear_dirty_fields()
def _get_fields_to_save(self):
"""
Create dictionary mapping between dirty fields and data cache values.
A `field` is an instance of `Field`.
"""
fields_to_save = {}
for field in self._dirty_fields.keys():
# If the field value isn't the same as the baseline we recorded
# when it was read, then save it
if field._is_dirty(self): # pylint: disable=protected-access
fields_to_save[field.name] = field.to_json(self._field_data_cache[field.name])
return fields_to_save
def _clear_dirty_fields(self):
"""
Remove all dirty fields | |
# gdb/gdb.py (gh_stars: 10-100)
# To use, add source /path/to/gdb.py to your $HOME/.gdbinit file.
import gdb
import atexit
import os
import re
import subprocess
import tempfile
import textwrap as tw
from typing import Optional, List
from collections import defaultdict
from dataclasses import dataclass
def add_symbol_file(filename, baseaddr):
    """Load symbols for `filename` into gdb, relocated by `baseaddr`.

    Parses `readelf -SW` output to find each section's link-time address,
    then issues one `add-symbol-file` command mapping .text (and every
    other section with a non-zero address) to its run-time location.
    """
    sections = []
    textaddr = '0'
    # BUG FIX: the old Popen-based code never waited on or closed the child,
    # leaking a pipe fd and a zombie process per call.  subprocess.run reaps
    # the child and raises if readelf fails outright.
    out = subprocess.run(["readelf", "-SW", filename],
                         stdout=subprocess.PIPE, check=True).stdout
    for line in out.decode("utf-8").splitlines():
        line = line.strip()
        if not line.startswith('[') or line.startswith('[Nr]'):
            continue
        # Strip the "[ 12]" section index so the columns split cleanly.
        line = re.sub(r'\[ *(\d+)\]', '\\1', line)
        sec = dict(zip(['nr', 'name', 'type', 'addr'], line.split()))
        if sec['nr'] == '0':
            continue
        if sec['name'] == '.text':
            textaddr = sec['addr']
        elif int(sec['addr'], 16) != 0:
            sections.append(sec)
    cmd = "add-symbol-file %s 0x%08x" % (filename, int(textaddr, 16) + baseaddr)
    for s in sections:
        addr = int(s['addr'], 16)
        if s['name'] == '.text' or addr == 0:
            continue
        cmd += " -s %s 0x%x" % (s['name'], int(baseaddr + addr))
    gdb.execute(cmd)
class StarterExecBreakpoint(gdb.Breakpoint):
    """Fires once the starter is ready; loads enclave symbols (sim mode)
    and arms the in-enclave symbol-loading hooks."""

    STARTER_HAS_LOADED = '__gdb_hook_starter_ready'

    def __init__(self):
        super(StarterExecBreakpoint, self).__init__(self.STARTER_HAS_LOADED, internal=True)
        self.inited = False

    def stop(self):
        gdb.write('__gdb_hook_starter_ready.\n')
        base_addr = gdb.parse_and_eval('conf->base')
        if gdb.parse_and_eval('conf->mode == SGXLKL_HW_MODE'):
            gdb.write('Running on hardware... skipping simulation load.\n')
        else:
            libsgxlkl = gdb.execute('printf "%s", libsgxlkl_path', to_string=True)
            gdb.write('Loading symbols for %s at base 0x%x...\n' % (
                libsgxlkl, int(base_addr)))
            add_symbol_file(libsgxlkl, int(base_addr))
        gdb.write('Looking up __gdb_load_debug_symbols_alive symbol.\n')
        if not self.inited and gdb.lookup_global_symbol("__gdb_load_debug_symbols_alive"):
            gdb.write('Enabled loading in-enclave debug symbols\n')
            gdb.execute('set __gdb_load_debug_symbols_alive = 1')
            gdb.write('set __gdb_load_debug_symbols_alive = 1\n')
            self.inited = True
            LoadLibraryBreakpoint()
            LoadLibraryFromFileBreakpoint()
        # Never suspend execution at this hook.
        return False
class LoadLibraryBreakpoint(gdb.Breakpoint):
    """Dumps in-memory symbol data to a temp file and loads it into gdb."""

    LDSO_LOAD_LIBRARY = '__gdb_hook_load_debug_symbols'

    def __init__(self):
        super(LoadLibraryBreakpoint, self).__init__(self.LDSO_LOAD_LIBRARY, internal=True)

    def stop(self):
        # dump symbols out to disk
        uintptr_t = gdb.lookup_type('uintptr_t')
        ssize_t = gdb.lookup_type('ssize_t')
        sym_addr = int(gdb.parse_and_eval('symmem').cast(uintptr_t))
        sym_size = int(gdb.parse_and_eval('symsz').cast(ssize_t))
        blob = gdb.selected_inferior().read_memory(sym_addr, sym_size)
        # work out where new library is loaded
        base_addr = int(gdb.parse_and_eval('dso->base').cast(uintptr_t))
        with tempfile.NamedTemporaryFile(suffix='.so', delete=False) as tmp:
            tmp.write(blob)
            path = tmp.name
        gdb.write('Loading symbols at base 0x%x...\n' % (int(base_addr)))
        add_symbol_file(path, int(base_addr))
        # Clean the temp file up when gdb exits.
        atexit.register(os.unlink, path)
        return False
class LoadLibraryFromFileBreakpoint(gdb.Breakpoint):
    """Loads symbols for a library the loader mapped directly from a file."""

    LDSO_LOAD_LIBRARY_FROM_FILE = '__gdb_hook_load_debug_symbols_from_file'

    def __init__(self):
        super(LoadLibraryFromFileBreakpoint, self).__init__(
            self.LDSO_LOAD_LIBRARY_FROM_FILE, internal=True)

    def stop(self):
        pointer_type = gdb.lookup_type('uintptr_t')
        libpath = gdb.execute('printf "%s", libpath', to_string=True)
        base_addr = int(gdb.parse_and_eval('dso->base').cast(pointer_type))
        gdb.write('Loading symbols at base 0x%x...\n' % (int(base_addr)))
        add_symbol_file(libpath, int(base_addr))
        return False
def get_lthread_backtrace(lt_addr: str,
                          btdepth: str,
                          capture: bool = False
                          ) -> Optional[str]:
    """Backtrace the lthread at `lt_addr` by temporarily switching the CPU
    registers to its saved context.

    :param lt_addr: address of the `struct lthread` (as a gdb expression).
    :param btdepth: depth argument forwarded to gdb's `bt` (may be "").
    :param capture: if True, return the backtrace text instead of printing.
    :return: the backtrace string when `capture` is True, else None.
    """
    old_fp = gdb.execute('p/x $rbp', to_string=True).split('=')[1].strip()
    old_sp = gdb.execute('p/x $rsp', to_string=True).split('=')[1].strip()
    old_ip = gdb.execute('p/x $rip', to_string=True).split('=')[1].strip()
    gdb.execute('set $rbp = ((struct lthread *)%s)->ctx.ebp' % lt_addr)
    gdb.execute('set $rsp = ((struct lthread *)%s)->ctx.esp' % lt_addr)
    gdb.execute('set $rip = ((struct lthread *)%s)->ctx.eip' % lt_addr)
    try:
        output = gdb.execute('bt %s' % btdepth, to_string=capture)
    finally:
        # BUG FIX: restore the registers even if `bt` raises; otherwise the
        # debugged thread is left running on the lthread's stack.
        gdb.execute('set $rbp = %s' % old_fp)
        gdb.execute('set $rsp = %s' % old_sp)
        gdb.execute('set $rip = %s' % old_ip)
    return output
class LthreadBacktrace(gdb.Command):
    """
    Print backtrace for an lthread
    Param 1: Address of lthread
    Param 2: Backtrace depth (optional)
    """
    def __init__(self):
        super(LthreadBacktrace, self).__init__("lthread-bt", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        if not argv:
            gdb.write('No lthread address provided. Usage: lthread-bt <addr> [<btdepth>]\n')
            gdb.flush()
            return False
        depth = argv[1] if len(argv) > 1 else ""
        get_lthread_backtrace(argv[0], depth)
        return False
class LthreadStats(gdb.Command):
    """
    Prints the number of lthreads in the futex, scheduler, and syscall queues.
    """
    def __init__(self):
        super(LthreadStats, self).__init__("lthread-stats", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # NOTE: the previous implementation parsed a backtrace-depth argument
        # it never used; this command takes no arguments.
        schedq_lts = self.count_queue_elements('__scheduler_queue')
        syscall_req_lts = self.count_queue_elements('__syscall_queue')
        syscall_ret_lts = self.count_queue_elements('__return_queue')
        # Walk the singly-linked futex wait list.
        fxq_lts = 0
        fxq = gdb.execute('p/x futex_queues->slh_first', to_string=True).split('=')[1].strip()
        while int(fxq, 16) != 0:
            fxq_lts += 1
            fxq = gdb.execute('p/x ((struct futex_q*)%s)->entries.sle_next'%fxq, to_string=True).split('=')[1].strip()
        waiting_total = schedq_lts + syscall_req_lts + syscall_ret_lts + fxq_lts
        gdb.write('Waiting lthreads:\n')
        gdb.write(' scheduler queue: %s\n'%schedq_lts)
        gdb.write(' syscall request queue: %s\n'%syscall_req_lts)
        gdb.write(' syscall return queue: %s\n'%syscall_ret_lts)
        gdb.write(' waiting for futex: %s\n'%fxq_lts)
        gdb.write(' Total: %s\n'%waiting_total)
        gdb.flush()
        return False

    def count_queue_elements(self, queue):
        """Entries currently in a ring queue: enqueue_pos - dequeue_pos."""
        enqueue_pos = int(gdb.execute('p %s->enqueue_pos'%queue, to_string=True).split('=')[1].strip())
        dequeue_pos = int(gdb.execute('p %s->dequeue_pos'%queue, to_string=True).split('=')[1].strip())
        return enqueue_pos - dequeue_pos
class LogAllLts(gdb.Command):
    """
    Do a backtrace of all active lthreads.
    Param: Depth of backtrace (optional)
    """
    def __init__(self):
        super(LogAllLts, self).__init__("bt-lts", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        btdepth = argv[0] if argv else ""
        # Walk the linked list of active lthreads.
        node = gdb.execute('p/x __active_lthreads', to_string=True).split('=')[1].strip()
        index = 1
        while int(node, 16) != 0:
            lt = gdb.execute('p/x ((struct lthread_queue*)%s)->lt'%node, to_string=True).split('=')[1].strip()
            lt_tid = gdb.execute('p/d ((struct lthread_queue*)%s)->lt->tid'%node, to_string=True).split('=')[1].strip()
            lt_name = gdb.execute('p/s ((struct lthread_queue*)%s)->lt->funcname'%node, to_string=True).split('=')[1].strip().split(',')[0]
            lt_cpu = gdb.execute('p/d ((struct lthread_queue*)%s)->lt->cpu'%node, to_string=True).split('=')[1].strip()
            gdb.write('#%3d Lthread: TID: %3s, Addr: %s, Name: %s, CPU: %s\n'%(index, lt_tid, lt, lt_name, lt_cpu))
            gdb.execute('lthread-bt %s %s'%(lt, btdepth))
            gdb.write('\n')
            gdb.flush()
            node = gdb.execute('p/x ((struct lthread_queue*)%s)->next'%node, to_string=True).split('=')[1].strip()
            index += 1
        return False
class LogAllLtsCsv(gdb.Command):
    """
    Do a backtrace of all active lthreads.
    Param: Depth of backtrace (optional)
    """
    def __init__(self) -> None:
        super(LogAllLtsCsv, self).__init__("bt-lts-csv", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty) -> bool:
        import csv
        argv = gdb.string_to_argv(arg)
        btdepth = argv[0] if argv else ""
        # Collect one row per active lthread, then dump them all at once.
        node = gdb.execute('p/x __active_lthreads', to_string=True).split('=')[1].strip()
        rows = []
        while int(node, 16) != 0:
            lt = gdb.execute('p/x ((struct lthread_queue*)%s)->lt'%node, to_string=True).split('=')[1].strip()
            tid = gdb.execute('p/d ((struct lthread_queue*)%s)->lt->tid'%node, to_string=True).split('=')[1].strip()
            name = gdb.execute('p/s ((struct lthread_queue*)%s)->lt->funcname'%node, to_string=True).split('=')[1].strip().split(',')[0]
            cpu = gdb.execute('p/d ((struct lthread_queue*)%s)->lt->cpu'%node, to_string=True).split('=')[1].strip()
            bt = get_lthread_backtrace(lt, btdepth, capture=True)
            rows.append([lt, tid, name, cpu, bt])
            node = gdb.execute('p/x ((struct lthread_queue*)%s)->next'%node, to_string=True).split('=')[1].strip()
        dest = "/tmp/backtrace.csv"
        print(f"write to {dest}")
        with open(dest, "w") as out:
            writer = csv.writer(out)
            writer.writerow(["thread", "tid", "name", "cpu", "backtrace"])
            for row in rows:
                writer.writerow(row)
        return False
@dataclass
class FxWaiter:
    """Snapshot of one lthread blocked on a futex."""
    key: str        # futex key the thread waits on
    lt: str         # lthread address (hex string as printed by gdb)
    deadline: str   # wakeup deadline as printed by gdb
    backtrace: str  # captured backtrace text
def get_fx_waiters(btdepth: str) -> List[FxWaiter]:
    """Walk the futex wait list and capture one FxWaiter per entry."""
    result: List[FxWaiter] = []
    node = gdb.execute('p/x futex_queues->slh_first', to_string=True).split('=')[1].strip()
    while int(node, 16) != 0:
        lt = gdb.execute('p/x ((struct futex_q*)%s)->futex_lt'%node, to_string=True).split('=')[1].strip()
        key = gdb.execute('p ((struct futex_q*)%s)->futex_key'%node, to_string=True).split('=')[1].strip()
        deadline = gdb.execute('p ((struct futex_q*)%s)->futex_deadline'%node, to_string=True).split('=')[1].strip()
        bt = gdb.execute('lthread-bt %s %s'%(lt, btdepth), to_string=True)
        result.append(FxWaiter(key=key, lt=lt, deadline=deadline, backtrace=bt))
        node = gdb.execute('p/x ((struct futex_q*)%s)->entries.sle_next'%node, to_string=True).split('=')[1].strip()
    return result
class LogFxWaiters(gdb.Command):
    """
    Do a backtrace of all lthreads waiting on a futex
    Param: Depth of backtrace (optional)
    """
    def __init__(self):
        super(LogFxWaiters, self).__init__("bt-fxq", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        btdepth = argv[0] if argv else ""
        for entry in get_fx_waiters(btdepth):
            gdb.write('FX entry: key: %s, lt: %s, deadline: %s\n'%(entry.key, entry.lt, entry.deadline))
            gdb.write(entry.backtrace)
            gdb.write("\n")
            gdb.flush()
        return False
class LogFxWaitersCSV(gdb.Command):
    """
    Do a backtrace of all lthreads waiting on a futex
    Param: Depth of backtrace (optional)
    """
    def __init__(self):
        super(LogFxWaitersCSV, self).__init__("bt-fxq-csv", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        import csv
        argv = gdb.string_to_argv(arg)
        btdepth = argv[0] if argv else ""
        waiters = get_fx_waiters(btdepth)
        print(len(waiters))
        dest = "/tmp/waiters.csv"
        print(f"write to {dest}")
        with open(dest, "w") as out:
            writer = csv.writer(out)
            writer.writerow(["key", "lt", "deadline", "backtrace"])
            for w in waiters:
                writer.writerow((w.key, w.lt, w.deadline, w.backtrace))
        return False
class LogSchedQueueTids(gdb.Command):
    """
    Print thread id of each lthread in scheduler queue.
    """
    def __init__(self):
        super(LogSchedQueueTids, self).__init__("schedq-tids", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        enqueue_pos = int(gdb.execute('p __scheduler_queue->enqueue_pos', to_string=True).split('=')[1].strip())
        dequeue_pos = int(gdb.execute('p __scheduler_queue->dequeue_pos', to_string=True).split('=')[1].strip())
        if enqueue_pos < dequeue_pos:
            raise Exception("Logic error: %d < %d"%(enqueue_pos, dequeue_pos))
        buffer_mask = int(gdb.execute('p __scheduler_queue->buffer_mask', to_string=True).split('=')[1].strip())
        tids = []
        # Ring buffer: slots live at position & buffer_mask.
        for slot in range(dequeue_pos, enqueue_pos):
            gdb.write('p ((struct lthread*)__scheduler_queue->buffer[%d & %d].data)->tid\n'%(slot, buffer_mask))
            tid = int(gdb.execute('p ((struct lthread*)__scheduler_queue->buffer[%d & %d].data)->tid'%(slot, buffer_mask), to_string=True).split('=')[1].strip())
            tids.append(tid)
        gdb.write('\nScheduler queue lthreads:\n'+tw.fill(str(tids))+'\n')
        gdb.flush()
class LogSyscallBacktraces(gdb.Command):
    """
    Print backtraces for all lthreads waiting in the syscall queues.
    Param: Depth of backtrace (optional)
    """
    def __init__(self):
        super(LogSyscallBacktraces, self).__init__("bt-syscallqueues", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        btdepth = argv[0] if argv else ""
        gdb.write('Lthreads in system call request queue:\n')
        self.print_bts_for_queue('__syscall_queue', btdepth)
        gdb.write('\nLthreads in system call return queue:\n')
        self.print_bts_for_queue('__return_queue', btdepth)
        return False

    def print_bts_for_queue(self, queue, btdepth):
        """Walk one ring queue and backtrace each associated lthread."""
        enqueue_pos = int(gdb.execute('p %s->enqueue_pos'%queue, to_string=True).split('=')[1].strip())
        dequeue_pos = int(gdb.execute('p %s->dequeue_pos'%queue, to_string=True).split('=')[1].strip())
        if enqueue_pos < dequeue_pos:
            raise Exception("Logic error: %d < %d"%(enqueue_pos, dequeue_pos))
        buffer_mask = int(gdb.execute('p %s->buffer_mask'%queue, to_string=True).split('=')[1].strip())
        for slot in range(dequeue_pos, enqueue_pos):
            lt = gdb.execute('p/x slotlthreads[%s->buffer[%d & %d].data]'%(queue, slot, buffer_mask), to_string=True).split('=')[1].strip()
            if lt == '0x0':
                gdb.write('Queue entry without associated lthread...\n')
                continue
            tid = int(gdb.execute('p ((struct lthread*)%s)->tid'%lt, to_string=True).split('=')[1].strip())
            gdb.write('Lthread [tid=%d]\n'%tid)
            gdb.execute('lthread-bt %s %s'%(lt, btdepth))
            gdb.write('\n')
        gdb.flush()
class LogSyscallTids(gdb.Command):
"""
Print tids of lthreads in syscall and return queues.
"""
def __init__(self):
super(LogSyscallTids, self).__init__("syscall-tids", gdb.COMMAND_USER)
def invoke(self, arg, from_tty):
gdb.write('\nSlot tids:\n'+tw.fill(str(self.slot_tids())))
gdb.write('\nSlot syscallnos:\n'+tw.fill(str(self.syscall_nos())))
gdb.write('\nSyscall tids:\n'+tw.fill(str(self.queue_tids('syscall'))))
gdb.write('\nReturn tids:\n'+tw.fill(str(self.queue_tids('return'))))
gdb.flush()
def slot_tids(self):
maxsyscalls = int(gdb.execute('p maxsyscalls', to_string=True).split('=')[1].strip())
slot_tids | |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 26 12:06:07 2013
Copyright (c) 2013-2014, CEA/DSV/I2BM/Neurospin. All rights reserved.
@author: <NAME>, <NAME>
@email: <EMAIL>, <EMAIL>
@license: BSD 3-clause.
"""
from six import with_metaclass
import abc
import numpy as np
from .utils import TOLERANCE
from .utils import RandomUniform
from .utils import norm2
__all__ = ["grad_l1", "grad_l1mu", "grad_l2", "grad_l2", "grad_l2_squared",
"grad_tv", "grad_tvmu", "grad_grouptvmu"]
class Function(with_metaclass(abc.ABCMeta, object)):
    """Base class for penalty gradients: stores the weight `l` and any
    extra keyword arguments as attributes."""

    def __init__(self, l, **kwargs):
        self.l = float(l)
        for key, value in kwargs.items():
            setattr(self, key, value)

    @abc.abstractmethod
    def grad(self, x):
        raise NotImplementedError("Abstract method 'grad' must be "
                                  "specialised!")
class L1(Function):
    def __init__(self, l, rng=RandomUniform(-1, 1)):
        super(L1, self).__init__(l, rng=rng)

    def grad(self, x):
        """Sub-gradient of the function
        f(x) = |x|_1,
        where |x|_1 is the L1-norm.
        """
        sub = np.zeros((x.shape[0], 1))
        sub[x >= TOLERANCE] = 1.0
        sub[x <= -TOLERANCE] = -1.0
        # Within tolerance of zero the subdifferential is [-1, 1]:
        # draw a random element from it.
        undecided = (x > -TOLERANCE) & (x < TOLERANCE)
        sub[undecided] = self.rng(undecided.sum())
        return self.l * sub
def grad_l1(beta, rng=RandomUniform(-1, 1)):
    """Sub-gradient of the function
    f(x) = |x|_1,
    where |x|_1 is the L1-norm.
    """
    sub = np.zeros((beta.shape[0], 1))
    sub[beta >= TOLERANCE] = 1.0
    sub[beta <= -TOLERANCE] = -1.0
    # Within tolerance of zero pick a random element of [-1, 1].
    undecided = (beta > -TOLERANCE) & (beta < TOLERANCE)
    sub[undecided] = rng(undecided.sum())
    return sub
class SmoothedL1(Function):
    def __init__(self, l, mu=TOLERANCE):
        super(SmoothedL1, self).__init__(l, mu=mu)

    def grad(self, x):
        """Gradient of the function
        f(x) = L1(mu, x),
        where L1(mu, x) is the Nesterov smoothed L1-norm.
        """
        # Dual variable, clipped element-wise into [-1, 1].
        alpha = (1.0 / self.mu) * x
        magnitude = np.abs(alpha)
        outside = magnitude > 1.0
        alpha[outside] = np.divide(alpha[outside], magnitude[outside])
        return self.l * alpha
def grad_l1mu(beta, mu):
    """Gradient of the function
    f(x) = L1(mu, x),
    where L1(mu, x) is the Nesterov smoothed L1-norm.
    """
    # Dual variable, clipped element-wise into [-1, 1].
    alpha = (1.0 / mu) * beta
    magnitude = np.abs(alpha)
    outside = magnitude > 1.0
    alpha[outside] = np.divide(alpha[outside], magnitude[outside])
    return alpha
class L2(Function):
    def __init__(self, l, rng=RandomUniform(0, 1)):
        super(L2, self).__init__(l, rng=rng)

    def grad(self, x):
        """Sub-gradient of the function
        f(x) = |x|_2,
        where |x|_2 is the L2-norm.

        Away from the origin this is l * x / |x|_2.  Near the origin the
        subdifferential is the full ball of radius l, so a random direction
        with random length is drawn.
        """
        norm_x = norm2(x)
        if norm_x > TOLERANCE:
            # BUG FIX: the weight self.l was previously dropped in this
            # branch while the near-zero branch below applied it, making the
            # two branches (and the class vs. module-level grad_l1/L1 pair)
            # inconsistent.
            return x * (self.l / norm_x)
        else:
            D = x.shape[0]
            u = (self.rng(D, 1) * 2.0) - 1.0  # [-1, 1]^D
            norm_u = norm2(u)
            a = self.rng()  # [0, 1]
            return (self.l * (a / norm_u)) * u
def grad_l2(beta, rng=RandomUniform(0, 1)):
    """Sub-gradient of the function
    f(x) = |x|_2,
    where |x|_2 is the L2-norm.
    """
    norm_beta = norm2(beta)
    if norm_beta <= TOLERANCE:
        # At (or numerically near) the origin the subdifferential is the
        # unit ball: return a random direction with random length in [0, 1].
        D = beta.shape[0]
        u = (rng(D, 1) * 2.0) - 1.0  # [-1, 1]^D
        norm_u = norm2(u)
        a = rng()  # [0, 1]
        return u * (a / norm_u)
    return beta * (1.0 / norm_beta)
class L2Squared(Function):
    def __init__(self, l):
        super(L2Squared, self).__init__(l)

    def grad(self, x):
        """Gradient of the function
        f(x) = (1 / 2) * |x|²_2,
        where |x|²_2 is the squared L2-norm.
        """
        # d/dx (1/2)|x|² = x, scaled by the penalty weight.
        return self.l * x
def grad_l2_squared(beta, rng=None):
    """Gradient of the function
    f(x) = (1 / 2) * |x|²_2,
    where |x|²_2 is the squared L2-norm.
    """
    # The gradient of (1/2)|x|² is x itself; `rng` is accepted only to keep
    # the signature uniform with the other gradient helpers.
    return beta
class NesterovFunction(with_metaclass(abc.ABCMeta, Function)):
    """Base class for Nesterov-smoothed penalties.

    Holds the linear operator `A` (a list of matrices) and the smoothing
    parameter `mu`, and provides the smoothed gradient machinery shared by
    the Smoothed* subclasses below.
    """
    def __init__(self, l, A, mu=TOLERANCE, rng=RandomUniform(-1, 1),
                 norm=L2.grad, **kwargs):
        # NOTE(review): `norm=L2.grad` stores the *unbound* method, which
        # `grad` below calls as norm(Ab, self.rng) — i.e. with Ab in the
        # `self` slot.  Confirm this is intentional; subclasses in this
        # module override `grad` and never hit that path.
        super(NesterovFunction, self).__init__(l, rng=rng, norm=norm, **kwargs)
        self.A = A
        self.mu = mu

    def grad(self, x):
        # Non-smoothed sub-gradient: l * sum_i A_i' * norm_grad(A_i x).
        grad_Ab = 0
        for i in range(len(self.A)):
            Ai = self.A[i]
            Ab = Ai.dot(x)
            grad_Ab += Ai.T.dot(self.norm(Ab, self.rng))
        return self.l * grad_Ab

    def smoothed_grad(self, x):
        # Smoothed gradient: l * A' * alpha*(x), with alpha the projected
        # dual variable.
        alpha = self.alpha(x)
        Aa = self.A[0].T.dot(alpha[0])
        for i in range(1, len(self.A)):
            Aa += self.A[i].T.dot(alpha[i])
        return self.l * Aa

    def alpha(self, x):
        """ Dual variable of the Nesterov function.
        """
        alpha = [0] * len(self.A)
        for i in range(len(self.A)):
            alpha[i] = self.A[i].dot(x) * (1.0 / self.mu)
        # Apply projection
        alpha = self.project(alpha)
        return alpha

    def project(self, alpha):
        # Default projection: scale each dual block into the L2 unit ball.
        # Subclasses override this for their own compact sets.
        for i in range(len(alpha)):
            astar = alpha[i]
            normas = np.sqrt(np.sum(astar ** 2))
            if normas > 1.0:
                astar *= 1.0 / normas
            alpha[i] = astar
        return alpha
class TotalVariation(Function):
    def __init__(self, l, A, rng=RandomUniform(0, 1)):
        super(TotalVariation, self).__init__(l, A=A, rng=rng)

    def grad(self, x):
        """Gradient of the function
        f(x) = TV(x),
        where TV(x) is the total variation function.
        """
        beta_flat = x.ravel()
        # One column per operator block in A (e.g. one per spatial axis).
        Ab = np.vstack([Ai.dot(beta_flat) for Ai in self.A]).T
        Ab_norm2 = np.sqrt(np.sum(Ab ** 2, axis=1))
        upper = Ab_norm2 > TOLERANCE
        # NOTE: grad_Ab_norm2 aliases Ab — rows are normalised in place.
        grad_Ab_norm2 = Ab
        grad_Ab_norm2[upper] = (Ab[upper].T / Ab_norm2[upper]).T
        lower = Ab_norm2 <= TOLERANCE
        n_lower = lower.sum()
        if n_lower:
            # Rows with near-zero norm: the subdifferential is the unit
            # ball, so draw a random direction with random length.
            D = len(self.A)
            vec_rnd = (self.rng(n_lower, D) * 2.0) - 1.0
            norm_vec = np.sqrt(np.sum(vec_rnd ** 2, axis=1))
            a = self.rng(n_lower)
            grad_Ab_norm2[lower] = (vec_rnd.T * (a / norm_vec)).T
        grad = np.vstack([self.A[i].T.dot(grad_Ab_norm2[:, i])
                          for i in range(len(self.A))])
        grad = grad.sum(axis=0)
        return self.l * grad.reshape(x.shape)
def grad_tv(beta, A, rng=RandomUniform(0, 1)):
    """Sub-gradient of the (non-smoothed) total variation penalty TV(beta)."""
    beta_flat = beta.ravel()
    # One column per operator block in A (e.g. one per spatial axis).
    Ab = np.vstack([Ai.dot(beta_flat) for Ai in A]).T
    Ab_norm2 = np.sqrt(np.sum(Ab ** 2, axis=1))
    upper = Ab_norm2 > TOLERANCE
    # NOTE: grad_Ab_norm2 aliases Ab — rows are normalised in place.
    grad_Ab_norm2 = Ab
    grad_Ab_norm2[upper] = (Ab[upper].T / Ab_norm2[upper]).T
    lower = Ab_norm2 <= TOLERANCE
    n_lower = lower.sum()
    if n_lower:
        # Rows with near-zero norm: draw a random sub-gradient in the
        # unit ball.
        D = len(A)
        vec_rnd = (rng(n_lower, D) * 2.0) - 1.0
        norm_vec = np.sqrt(np.sum(vec_rnd ** 2, axis=1))
        a = rng(n_lower)
        grad_Ab_norm2[lower] = (vec_rnd.T * (a / norm_vec)).T
    grad = np.vstack([A[i].T.dot(grad_Ab_norm2[:, i]) for i in range(len(A))])
    grad = grad.sum(axis=0)
    return grad.reshape(beta.shape)
class GroupLasso(Function):
    def __init__(self, l, A, rng=RandomUniform(-1, 1)):
        # BUG FIX: Function.__init__ accepts only (l, **kwargs); passing A
        # positionally raised TypeError.  Pass it as a keyword, the same way
        # TotalVariation does.
        super(GroupLasso, self).__init__(l, A=A, rng=rng)
def grad_gl(beta, A, rng=RandomUniform(-1, 1)):
    """Sub-gradient of the (non-smoothed) group lasso penalty: sums
    A_i' * grad_l2(A_i beta) over the operator blocks in A."""
    return _Nesterov_grad(beta, A, rng, grad_l2)
class SmoothedTotalVariation(NesterovFunction):
    def __init__(self, l, A, mu=TOLERANCE):
        super(SmoothedTotalVariation, self).__init__(l, A, mu=mu)

    def grad(self, x):
        """Gradient of the function
        f(x) = TV(mu, x),
        where TV(mu, x) is the Nesterov smoothed total variation function.
        """
        return self.smoothed_grad(x)

    def project(self, alpha):
        """ Projection onto the compact space of the smoothed TV function.
        """
        # One dual block per spatial axis; scale the joint magnitude of
        # each voxel into the unit ball.
        ax, ay, az = alpha[0], alpha[1], alpha[2]
        sq_norm = ax ** 2 + ay ** 2 + az ** 2
        over = sq_norm > 1.0
        scale = sq_norm[over] ** 0.5  # Square root only where needed. Faster.
        ax[over] = np.divide(ax[over], scale)
        ay[over] = np.divide(ay[over], scale)
        az[over] = np.divide(az[over], scale)
        return [ax, ay, az]
def grad_tvmu(beta, A, mu):
    """Gradient of the Nesterov-smoothed TV penalty: A' * alpha*(beta)."""
    alpha = _Nestetov_alpha(beta, A, mu, _Nesterov_TV_project)
    return _Nesterov_grad_smoothed(A, alpha)
class SmoothedGroupLasso(NesterovFunction):
    # Uses NesterovFunction's default per-block L2-ball projection.
    def __init__(self, l, A, mu=TOLERANCE):
        super(SmoothedGroupLasso, self).__init__(l, A, mu=mu)

    def grad(self, x):
        """Gradient of the function
        f(x) = GL(mu, x),
        where GL(mu, x) is the Nesterov smoothed group lasso function.
        """
        return self.smoothed_grad(x)
def grad_glmu(beta, A, mu):
    """Gradient of the Nesterov-smoothed group lasso penalty."""
    alpha = _Nestetov_alpha(beta, A, mu, _Nesterov_project)
    return _Nesterov_grad_smoothed(A, alpha)
class SmoothedGroupTotalVariation(NesterovFunction):
    def __init__(self, l, A, mu=TOLERANCE):
        super(SmoothedGroupTotalVariation, self).__init__(l, A, mu=mu)

    def grad(self, x):
        """Gradient of the function
        f(x) = GroupTV(mu, x),
        where GroupTV(mu, x) is the Nesterov smoothed group total variation
        function.
        """
        return self.smoothed_grad(x)

    def project(self, a):
        """ Projection onto the compact space of the smoothed Group TV
        function.
        """
        # The dual blocks come in (x, y, z) triples, one triple per group.
        for g in range(0, len(a), 3):
            ax, ay, az = a[g], a[g + 1], a[g + 2]
            sq_norm = ax ** 2 + ay ** 2 + az ** 2
            over = sq_norm > 1.0
            scale = sq_norm[over] ** 0.5  # Square root only where needed.
            ax[over] = np.divide(ax[over], scale)
            ay[over] = np.divide(ay[over], scale)
            az[over] = np.divide(az[over], scale)
            a[g], a[g + 1], a[g + 2] = ax, ay, az
        return a
def grad_grouptvmu(beta, A, mu):
    """Gradient of the mu-smoothed group total variation at ``beta``.

    Computes the dual variable alpha via the Group TV projection and
    maps it back through the adjoints of the operators in ``A``.
    """
    alpha = _Nestetov_alpha(beta, A, mu, _Nesterov_GroupTV_project)
    return _Nesterov_grad_smoothed(A, alpha)
def _Nesterov_GroupTV_project(a):
""" Projection onto the compact space of the smoothed Group TV function.
"""
for g in range(0, len(a), 3):
ax = a[g + 0]
ay = a[g + 1]
az = a[g + 2]
anorm = ax ** 2 + ay ** 2 + az ** 2
i = anorm > 1.0
anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.
ax[i] = np.divide(ax[i], anorm_i)
ay[i] = np.divide(ay[i], anorm_i)
az[i] = np.divide(az[i], anorm_i)
a[g + 0] = ax
a[g + 1] = ay
a[g + 2] = az
return a
def _Nesterov_grad(beta, A, rng=RandomUniform(-1, 1), grad_norm=grad_l2):
    """Sum of per-operator (sub)gradients: sum_i A_i' grad_norm(A_i beta).

    ``rng`` is forwarded to ``grad_norm`` to break ties where the
    subgradient is not unique.
    """
    total = 0
    for Ai in A:
        total += Ai.T.dot(grad_norm(Ai.dot(beta), rng))
    return total
def _Nesterov_grad_smoothed(A, alpha):
Aa = A[0].T.dot(alpha[0])
for i in range(1, len(A)):
Aa += A[i].T.dot(alpha[i])
return Aa
def _Nestetov_alpha(beta, A, mu, proj):
""" Dual variable of the Nesterov function.
"""
alpha = [0] * len(A)
for i in range(len(A)):
alpha[i] = A[i].dot(beta) * (1.0 / mu)
# Apply projection.
alpha = proj(alpha)
return alpha
def _Nesterov_project(alpha):
for i in range(len(alpha)):
astar = alpha[i]
normas = np.sqrt(np.sum(astar ** 2))
if | |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import secrets
from urllib.parse import parse_qs, quote
import bottle
import sqlalchemy as db
import uuid
import yaml
import common.auth as _auth
import common.helpers as util
import common.mail_service as mail
from common.logging import logger
from models.context import Context
from models.dataset import Dataset, DatasetModel
from models.leaderboard_configuration import LeaderboardConfigurationModel
from models.leaderboard_snapshot import LeaderboardSnapshotModel
from models.model import DeploymentStatusEnum, Model, ModelModel
from models.round import Round, RoundModel
from models.round_user_example_info import RoundUserExampleInfoModel
from models.score import ScoreModel
from models.task import Task, TaskModel
from models.task_proposal import TaskProposal, TaskProposalModel
from models.task_user_permission import TaskUserPermission
from models.user import User, UserModel
@bottle.put("/tasks/process_proposal/<tpid:int>")
@_auth.requires_auth
def process_proposal(credentials, tpid):
    """Accept or reject task proposal ``tpid`` (site admins only).

    Request body must contain a boolean ``accept``. On accept: creates
    the Task (seeded with the sentiment-example annotation config
    below), grants the proposer ownership, creates round 1 and emails
    an approval notice. On reject: emails a rejection notice containing
    ``data["changes"]``. In both cases the proposal row is deleted.
    """
    um = UserModel()
    user = um.get(credentials["id"])
    # Only site admins may process proposals.
    if not user.admin:
        bottle.abort(403, "Access denied")
    data = bottle.request.json
    if not util.check_fields(data, ["accept"]):
        bottle.abort(400, "Missing data")
    tpm = TaskProposalModel()
    tp = tpm.get(tpid)
    # Resolve the proposer's email before the proposal row is deleted below.
    tp_creator = um.get(tp.uid)
    tp_creator_email = tp_creator.email
    if data["accept"]:
        t = Task(
            task_code=tp.task_code,
            name=tp.name,
            desc=tp.desc,
            config_yaml="""
aggregation_metric:
  type: dynascore
context:
- name: context
  placeholder: Enter context...
  type: string
delta_metrics:
- type: fairness
- type: robustness
input:
- name: statement
  placeholder: Enter statement...
  type: string
- labels:
  - negative
  - positive
  - neutral
  name: label
  type: multiclass
  as_goal_message: true
metadata:
  create:
  - display_name: example explanation
    name: example_explanation
    placeholder: Explain why your example is correct...
    type: string
  - display_name: model explanation
    model_wrong_condition: false
    name: model_explanation_right
    placeholder: Explain why you thought the model would make a mistake...
    type: string
  - display_name: model explanation
    model_wrong_condition: true
    name: model_explanation_wrong
    placeholder: Explain why you think the model made a mistake...
    type: string
  validate:
  - labels:
    - negative
    - positive
    - entailed
    name: corrected_label
    placeholder: Enter corrected label
    type: multiclass
    validated_label_condition: incorrect
  - name: target_explanation
    placeholder: Explain why your proposed target is correct...
    type: string
    validated_label_condition: incorrect
  - name: flag_reason
    placeholder: Enter the reason for flagging...
    type: string
    validated_label_condition: flagged
  - name: validator_example_explanation
    placeholder: Explain why the example is correct...
    type: string
    validated_label_condition: correct
  - name: validator_model_explanation
    placeholder: Enter what you think was done to try to trick the model...
    type: string
model_wrong_metric:
  reference_names:
  - label
  type: exact_match
output:
- name: label
- name: prob
  reference_name: label
  type: prob
perf_metric:
  reference_name: label
  type: macro_f1
""",
            cur_round=1,
            last_updated=db.sql.func.now(),
        ) # Annotation config is sentiment example.
        tpm.dbs.add(t)
        tpm.dbs.flush()
        logger.info("Added task (%s)" % (t.id))
        # Make the proposer the owner of the new task.
        tup = TaskUserPermission(uid=tp.uid, type="owner", tid=t.id)
        tpm.dbs.add(tup)
        tpm.dbs.flush()
        logger.info("Added task owner")
        r = Round(tid=t.id, rid=1, secret=secrets.token_hex())
        tpm.dbs.add(r)
        tpm.dbs.flush()
        tpm.dbs.commit()
        logger.info("Added round (%s)" % (r.id))
        config = bottle.default_app().config
        mail.send(
            config["mail"],
            config,
            [tp_creator_email],
            template_name="templates/task_proposal_approval.txt",
            subject="Your Task Proposal has been Accepted",
        )
    else:
        config = bottle.default_app().config
        # NOTE(review): "changes" is read without a check_fields guard; a
        # rejection request missing it raises KeyError — confirm the
        # client always sends it.
        msg = {
            "rejection_message": data["changes"],
        }
        mail.send(
            config["mail"],
            config,
            [tp_creator_email],
            template_name="templates/task_proposal_rejection.txt",
            msg_dict=msg,
            subject="Your Task Proposal has been Rejected",
        )
    # The proposal itself is removed whether accepted or rejected.
    tpm.dbs.query(TaskProposal).filter(TaskProposal.id == tpid).delete()
    tpm.dbs.flush()
    tpm.dbs.commit()
    return util.json_encode({"success": "ok"})
@bottle.get("/tasks/owners/<tid:int>")
@_auth.requires_auth
def get_owners(credentials, tid):
    """Return id/username for every owner of task ``tid``.

    Caller must be an owner of the task or a site admin.
    """
    ensure_owner_or_admin(tid, credentials["id"])
    tm = TaskModel()
    owner_perms = tm.dbs.query(TaskUserPermission).filter(
        db.and_(TaskUserPermission.type == "owner", TaskUserPermission.tid == tid)
    )
    um = UserModel()
    owners = [
        {"id": owner.id, "username": owner.username}
        for owner in (um.get(perm.uid) for perm in owner_perms)
    ]
    return util.json_encode(owners)
def ensure_owner_or_admin(tid, uid):
    """Abort with 403 unless user ``uid`` is an admin or owns task ``tid``."""
    um = UserModel()
    user = um.get(uid)
    if user.admin:
        return
    owns_task = any(
        perm.tid == tid and perm.type == "owner"
        for perm in user.task_permissions
    )
    if not owns_task:
        bottle.abort(
            403, "Access denied (you are not an admin or owner of this task)"
        )
@bottle.post("/tasks/<tid:int>/convert_to_model_io")
def convert_to_model_io(tid):
    """Convert the posted example payload into task ``tid``'s model I/O format."""
    payload = bottle.request.json
    task = TaskModel().get(tid)
    return util.json_encode(task.convert_to_model_io(payload))
@bottle.get("/tasks/get_all_rounds/<tid:int>")
@_auth.requires_auth
def get_all_rounds(credentials, tid):
    """Return every round of task ``tid`` as dicts, sorted by round id."""
    ensure_owner_or_admin(tid, credentials["id"])
    rm = RoundModel()
    round_dicts = sorted(
        (r.to_dict() for r in rm.getByTid(tid)), key=lambda d: d["rid"]
    )
    return util.json_encode(round_dicts)
@bottle.get("/tasks/datasets/<tid:int>")
@_auth.requires_auth
def get_datasets(credentials, tid):
    """Return all datasets attached to task ``tid`` (empty list if none)."""
    datasets = DatasetModel().getByTid(tid)
    if not datasets:
        return util.json_encode([])
    return util.json_encode([dataset.to_dict() for dataset in datasets])
@bottle.get("/tasks/admin_or_owner/<tid:int>")
@_auth.requires_auth_or_turk
@_auth.turk_endpoint
def get_admin_or_owner(credentials, tid):
    """Report whether the caller is an admin or an owner of task ``tid``.

    Turk callers are never admins or owners.
    """
    if credentials["id"] == "turk":
        return util.json_encode({"admin_or_owner": False})
    user = UserModel().get(credentials["id"])
    is_privileged = user.admin or any(
        perm.tid == tid and perm.type == "owner"
        for perm in user.task_permissions
    )
    return util.json_encode({"admin_or_owner": is_privileged})
@bottle.post("/tasks/create_round/<tid:int>")
@_auth.requires_auth
def create_round(credentials, tid):
    """Bump task ``tid``'s current round counter and create the new Round row."""
    ensure_owner_or_admin(tid, credentials["id"])
    tm = TaskModel()
    task = tm.get(tid)
    task.cur_round += 1
    tm.dbs.add(task)
    tm.dbs.flush()
    new_round = Round(tid=tid, rid=task.cur_round, secret=secrets.token_hex())
    tm.dbs.add(new_round)
    tm.dbs.flush()
    tm.dbs.commit()
    logger.info("Added round (%s)" % (new_round.id))
    return util.json_encode({"success": "ok"})
@bottle.put("/tasks/update_round/<tid:int>/<rid:int>")
@_auth.requires_auth
def update_round(credentials, tid, rid):
    """Update round ``rid`` of task ``tid``.

    Accepts an optional ``model_ids`` list (published models of the same
    task to use as target models; their endpoint URLs are joined with
    '|' into ``round.url``) and an optional ``longdesc``.
    """
    data = bottle.request.json
    ensure_owner_or_admin(tid, credentials["id"])
    rm = RoundModel()
    round = rm.getByTidAndRid(tid, rid)
    if "model_ids" in data:
        tm = TaskModel()
        task = tm.get(tid)
        # Fix: ModelModel was instantiated inside the loop on every
        # iteration; one accessor serves all lookups.
        mm = ModelModel()
        endpoint_urls = []
        for model_id in data["model_ids"]:
            model = mm.get(model_id)
            if not model.is_published:
                bottle.abort(400, "Can't use an unpublished model as a target model")
            if model.tid != tid:
                bottle.abort(
                    400, "Can't add a model for another task as a target model"
                )
            # TODO: store the endpoint url in the models table?
            endpoint_url = (
                "https://obws766r82.execute-api."
                + task.aws_region
                + ".amazonaws.com/predict?model="
                + model.endpoint_name
            )
            endpoint_urls.append(endpoint_url)
        # An empty selection clears the round's target models.
        round.url = "|".join(endpoint_urls) if endpoint_urls else None
    round.longdesc = data.get("longdesc", round.longdesc)
    rm.dbs.add(round)
    rm.dbs.flush()
    rm.dbs.commit()
    logger.info("Updated round (%s)" % (round.id))
    return util.json_encode({"success": "ok"})
@bottle.get("/tasks/get_model_identifiers_for_target_selection/<tid:int>")
@_auth.requires_auth
def get_model_identifiers_for_target_selection(credentials, tid):
    """For each round of task ``tid``, list the models selectable as targets.

    A model is listed for a round when it is already a target of that
    round (its endpoint URL appears in ``round.url``) or when it is
    published and deployed. Models without an ``endpoint_name`` (they
    predate dynalab) are skipped. Returns ``{rid: [identifier dicts]}``.
    """
    ensure_owner_or_admin(tid, credentials["id"])
    tm = TaskModel()
    task = tm.get(tid)
    mm = ModelModel()
    models = mm.getByTid(tid)
    rm = RoundModel()
    rounds = rm.getByTid(tid)
    # Fix: the endpoint URL only depends on the task region and the
    # model, so compute it once per model instead of once per
    # round x model pair.
    # TODO: store the endpoint url in the models table?
    models_with_endpoints = [
        (
            model,
            "https://obws766r82.execute-api."
            + task.aws_region
            + ".amazonaws.com/predict?model="
            + model.endpoint_name,
        )
        for model in models
        # This filter is needed for models that predate dynalab.
        if model.endpoint_name is not None
    ]
    rid_to_model_identifiers = {}
    for round in rounds:
        model_identifiers = []
        for model, endpoint_url in models_with_endpoints:
            is_target = round.url is not None and endpoint_url in round.url
            if is_target or (
                model.is_published
                and model.deployment_status == DeploymentStatusEnum.deployed
            ):
                model_identifiers.append(
                    {
                        "model_name": model.name,
                        "model_id": model.id,
                        "uid": model.uid,
                        "username": model.user.username,
                        "is_target": is_target,
                    }
                )
        rid_to_model_identifiers[round.rid] = model_identifiers
    return util.json_encode(rid_to_model_identifiers)
@bottle.get("/tasks/get_model_identifiers/<tid:int>")
@_auth.requires_auth
def get_model_identifiers(credentials, tid):
    """List identifier and status info for every model of task ``tid``."""
    ensure_owner_or_admin(tid, credentials["id"])
    mm = ModelModel()
    identifiers = [
        {
            "model_name": model.name,
            "model_id": model.id,
            "deployment_status": model.deployment_status.name,
            "is_published": model.is_published,
            "uid": model.uid,
            "username": model.user.username,
        }
        for model in mm.getByTid(tid)
    ]
    return util.json_encode(identifiers)
@bottle.put("/tasks/toggle_owner/<tid:int>/<username>")
@_auth.requires_auth
def toggle_owner(credentials, tid, username):
    """Grant ``username`` ownership of task ``tid``, or revoke it if held."""
    ensure_owner_or_admin(tid, credentials["id"])
    um = UserModel()
    target_user = um.getByUsername(username)
    already_owner = any(
        perm.tid == tid and perm.type == "owner"
        for perm in target_user.task_permissions
    )
    if already_owner:
        um.dbs.query(TaskUserPermission).filter(
            db.and_(
                TaskUserPermission.uid == target_user.id,
                TaskUserPermission.type == "owner",
                TaskUserPermission.tid == tid,
            )
        ).delete()
        um.dbs.flush()
        um.dbs.commit()
        logger.info("Removed task owner: " + username)
    else:
        um.dbs.add(TaskUserPermission(uid=target_user.id, type="owner", tid=tid))
        um.dbs.flush()
        um.dbs.commit()
        logger.info("Added task owner: " + username)
    return util.json_encode({"success": "ok"})
@bottle.put("/tasks/update/<tid:int>")
@_auth.requires_auth
def update(credentials, tid):
    """Update a restricted set of fields on task ``tid``.

    Only fields in ``updatable_fields`` may be modified. When
    ``config_yaml`` changes, it must verify and may only differ from
    the stored config in the ``aggregation_metric`` section.
    """
    ensure_owner_or_admin(tid, credentials["id"])
    data = bottle.request.json
    # Fix: the old 403 message listed only a stale subset of these
    # fields. Keeping one tuple and deriving the message from it means
    # the message can never go out of date again.
    updatable_fields = (
        "unpublished_models_in_leaderboard",
        "validate_non_fooling",
        "num_matching_validations",
        "instructions_md",
        "predictions_upload_instructions_md",
        "train_file_upload_instructions_md",
        "hidden",
        "submitable",
        "create_endpoint",
        "build_sqs_queue",
        "eval_sqs_queue",
        "is_decen_task",
        "task_aws_account_id",
        "task_gateway_predict_prefix",
        "config_yaml",
    )
    for field in data:
        if field not in updatable_fields:
            bottle.abort(
                403,
                "Can only modify the following fields: "
                + ", ".join(updatable_fields),
            )
    tm = TaskModel()
    if "config_yaml" in data:
        new_config = yaml.load(data["config_yaml"], yaml.SafeLoader)
        try:
            Task.verify_config(new_config)
        except Exception as ex:
            logger.exception(str(ex))
            bottle.abort(400, str(ex))
        task = tm.get(tid)
        old_config = yaml.load(task.config_yaml, yaml.SafeLoader)
        allowed_fields = ("aggregation_metric",)
        # Ensure that nothing outside allowed_fields changed.
        if {k: v for k, v in new_config.items() if k not in allowed_fields} != {
            k: v for k, v in old_config.items() if k not in allowed_fields
        }:
            bottle.abort(
                400,
                f"You can only modify the {allowed_fields} fields "
                + "of the annotation config",
            )
    tm.update(tid, data)
    return util.json_encode({"success": "ok"})
@bottle.put("/tasks/activate/<tid:int>")
@_auth.requires_auth
def activate(credentials, tid):
    """Activate task ``tid`` with the provided ``config_yaml``.

    Fails when the task is already active or the config does not
    verify. If the config defines no contexts, one empty context is
    created for the current round so annotation can start immediately.
    """
    data = bottle.request.json
    if not util.check_fields(data, ["config_yaml"]):
        bottle.abort(400, "Missing data")
    ensure_owner_or_admin(tid, credentials["id"])
    tm = TaskModel()
    task = tm.get(tid)
    if task.active:
        bottle.abort(
            403,
            """Access denied. Cannot change the config_yaml of an
            already active task.""",
        )
    # Fix: the same YAML document was parsed twice (once to verify, once
    # to inspect "context"); parse once and reuse.
    config = yaml.load(data["config_yaml"], yaml.SafeLoader)
    try:
        Task.verify_config(config)
    except Exception as ex:
        logger.exception(str(ex))
        bottle.abort(400, str(ex))
    tm.update(tid, {"config_yaml": data["config_yaml"], "active": True})
    if len(config.get("context", [])) == 0:
        # If there is no context in the config, then add an empty context.
        # The task owner should not need to do this, because we already know
        # that the context will be empty.
        rm = RoundModel()
        round = rm.getByTidAndRid(tid, task.cur_round)
        context = Context(
            r_realid=round.id,
            context_json=util.json_encode({}),
            metadata_json=util.json_encode({}),
            tag=None,
        )
        rm.dbs.add(context)
        rm.dbs.flush()
        rm.dbs.commit()
    return util.json_encode({"success": "ok"})
@bottle.get("/tasks")
def tasks():
    """Return every task together with its rounds."""
    return util.json_encode(TaskModel().listWithRounds())
@bottle.get("/tasks/submitable")
def get_submitable_tasks():
    """Return the tasks that currently accept model submissions."""
    return util.json_encode(TaskModel().listSubmitable())
@bottle.get("/tasks/<task_id_or_code>")
@_auth.turk_endpoint
def get_task(task_id_or_code):
    """Fetch one task (by numeric id or task code) with round and metric metadata."""
    task = TaskModel().getWithRoundAndMetricMetadata(task_id_or_code)
    if not task:
        bottle.abort(404, "Not found")
    return util.json_encode(task)
@bottle.get("/tasks/<tid:int>/<rid:int>")
def get_task_round(tid, rid):
    """Fetch round ``rid`` of task ``tid`` as a dict, or 404 if absent."""
    requested_round = RoundModel().getByTidAndRid(tid, rid)
    if not requested_round:
        bottle.abort(404, "Not found")
    return util.json_encode(requested_round.to_dict())
@bottle.get("/tasks/<tid:int>/users")
def get_user_leaderboard(tid):
"""
Return users and MER based on their examples score based on tasks
:param tid:
:return: Json Object
"""
info = RoundUserExampleInfoModel()
limit, offset = util.get_limit_and_offset_from_request()
try:
query_result, total_count = info.getUserLeaderByTid(
tid=tid, | |
ok_zone = shared_zone_test_context.ok_zone
dummy_zone = shared_zone_test_context.dummy_zone
ok_zone_name = shared_zone_test_context.ok_zone["name"]
dummy_zone_name = shared_zone_test_context.dummy_zone["name"]
dummy_group_name = shared_zone_test_context.dummy_group["name"]
rs_delete_name = generate_record_name()
rs_delete_fqdn = rs_delete_name + f".{ok_zone_name}"
rs_delete_ok = create_recordset(ok_zone, rs_delete_name, "TXT", [{"text": "test"}], 200)
rs_update_name = generate_record_name()
rs_update_fqdn = rs_update_name + f".{ok_zone_name}"
rs_update_ok = create_recordset(ok_zone, rs_update_name, "TXT", [{"text": "test"}], 200)
rs_delete_dummy_name = generate_record_name()
rs_delete_dummy_fqdn = rs_delete_dummy_name + f".{dummy_zone_name}"
rs_delete_dummy = create_recordset(dummy_zone, rs_delete_dummy_name, "TXT", [{"text": "test"}], 200)
rs_update_dummy_name = generate_record_name()
rs_update_dummy_fqdn = rs_update_dummy_name + f".{dummy_zone_name}"
rs_update_dummy = create_recordset(dummy_zone, rs_update_dummy_name, "TXT", [{"text": "test"}], 200)
batch_change_input = {
"comments": "this is optional",
"changes": [
# valid changes
get_change_TXT_json(rs_delete_fqdn, change_type="DeleteRecordSet"),
get_change_TXT_json(rs_update_fqdn, change_type="DeleteRecordSet"),
get_change_TXT_json(rs_update_fqdn, ttl=300),
# input validations failures
get_change_TXT_json(f"invalid-name$.{ok_zone_name}", change_type="DeleteRecordSet"),
get_change_TXT_json(f"invalid-ttl.{ok_zone_name}", ttl=29, text="bad-ttl"),
# zone discovery failure
get_change_TXT_json("no.zone.at.all.", change_type="DeleteRecordSet"),
# context validation failures
get_change_TXT_json(f"delete-nonexistent.{ok_zone_name}", change_type="DeleteRecordSet"),
get_change_TXT_json(f"update-nonexistent.{ok_zone_name}", change_type="DeleteRecordSet"),
get_change_TXT_json(f"update-nonexistent.{ok_zone_name}", text="test"),
get_change_TXT_json(rs_delete_dummy_fqdn, change_type="DeleteRecordSet"),
get_change_TXT_json(rs_update_dummy_fqdn, text="test"),
get_change_TXT_json(rs_update_dummy_fqdn, change_type="DeleteRecordSet")
]
}
to_create = [rs_delete_ok, rs_update_ok, rs_delete_dummy, rs_update_dummy]
to_delete = []
try:
for rs in to_create:
if rs["zoneId"] == dummy_zone["id"]:
create_client = dummy_client
else:
create_client = ok_client
create_rs = create_client.create_recordset(rs, status=202)
to_delete.append(create_client.wait_until_recordset_change_status(create_rs, "Complete"))
# Confirm that record set doesn't already exist
ok_client.get_recordset(ok_zone["id"], "delete-nonexistent", status=404)
response = ok_client.create_batch_change(batch_change_input, status=400)
# successful changes
assert_successful_change_in_error_response(response[0], input_name=rs_delete_fqdn, record_type="TXT", record_data=None, change_type="DeleteRecordSet")
assert_successful_change_in_error_response(response[1], input_name=rs_update_fqdn, record_type="TXT", record_data=None, change_type="DeleteRecordSet")
assert_successful_change_in_error_response(response[2], ttl=300, input_name=rs_update_fqdn, record_type="TXT", record_data="test")
# input validations failures: invalid input name, reverse zone error, invalid ttl
assert_failed_change_in_error_response(response[3], input_name=f"invalid-name$.{ok_zone_name}", record_type="TXT", record_data="test", change_type="DeleteRecordSet",
error_messages=[f'Invalid domain name: "invalid-name$.{ok_zone_name}", valid domain names must be '
f'letters, numbers, underscores, and hyphens, joined by dots, and terminated with a dot.'])
assert_failed_change_in_error_response(response[4], input_name=f"invalid-ttl.{ok_zone_name}", ttl=29, record_type="TXT", record_data="bad-ttl",
error_messages=['Invalid TTL: "29", must be a number between 30 and 2147483647.'])
# zone discovery failure
assert_failed_change_in_error_response(response[5], input_name="no.zone.at.all.", record_type="TXT", record_data=None, change_type="DeleteRecordSet",
error_messages=[
"Zone Discovery Failed: zone for \"no.zone.at.all.\" does not exist in VinylDNS. "
"If zone exists, then it must be connected to in VinylDNS."])
# context validation failures: record does not exist, not authorized
assert_failed_change_in_error_response(response[6], input_name=f"delete-nonexistent.{ok_zone_name}", record_type="TXT", record_data=None, change_type="DeleteRecordSet",
error_messages=[f"Record \"delete-nonexistent.{ok_zone_name}\" Does Not Exist: cannot delete a record that does not exist."])
assert_failed_change_in_error_response(response[7], input_name=f"update-nonexistent.{ok_zone_name}", record_type="TXT", record_data=None, change_type="DeleteRecordSet",
error_messages=[f"Record \"update-nonexistent.{ok_zone_name}\" Does Not Exist: cannot delete a record that does not exist."])
assert_successful_change_in_error_response(response[8], input_name=f"update-nonexistent.{ok_zone_name}", record_type="TXT", record_data="test")
assert_failed_change_in_error_response(response[9], input_name=rs_delete_dummy_fqdn, record_type="TXT", record_data=None, change_type="DeleteRecordSet",
error_messages=[f"User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes."])
assert_failed_change_in_error_response(response[10], input_name=rs_update_dummy_fqdn, record_type="TXT", record_data="test",
error_messages=[f"User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes."])
assert_failed_change_in_error_response(response[11], input_name=rs_update_dummy_fqdn, record_type="TXT", record_data=None, change_type="DeleteRecordSet",
error_messages=[f"User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes."])
finally:
# Clean up updates
dummy_deletes = [rs for rs in to_delete if rs["zone"]["id"] == dummy_zone["id"]]
ok_deletes = [rs for rs in to_delete if rs["zone"]["id"] != dummy_zone["id"]]
clear_recordset_list(dummy_deletes, dummy_client)
clear_recordset_list(ok_deletes, ok_client)
def test_mx_recordtype_add_checks(shared_zone_test_context):
    """
    Test all add validations performed on MX records submitted in batch changes
    """
    client = shared_zone_test_context.ok_vinyldns_client
    ok_zone_name = shared_zone_test_context.ok_zone["name"]
    dummy_zone_name = shared_zone_test_context.dummy_zone["name"]
    dummy_group_name = shared_zone_test_context.dummy_group["name"]
    ip4_zone_name = shared_zone_test_context.classless_base_zone["name"]
    # Pre-existing records used to provoke "already exists" and CNAME
    # conflict validations below.
    existing_mx_name = generate_record_name()
    existing_mx_fqdn = f"{existing_mx_name}.{ok_zone_name}"
    existing_mx = create_recordset(shared_zone_test_context.ok_zone, existing_mx_name, "MX", [{"preference": 1, "exchange": "foo.bar."}], 100)
    existing_cname_name = generate_record_name()
    existing_cname_fqdn = f"{existing_cname_name}.{ok_zone_name}"
    existing_cname = create_recordset(shared_zone_test_context.ok_zone, existing_cname_name, "CNAME", [{"cname": "test."}], 100)
    good_record_fqdn = generate_record_name(ok_zone_name)
    batch_change_input = {
        "changes": [
            # valid change
            get_change_MX_json(good_record_fqdn),
            # input validation failures
            get_change_MX_json(f"bad-ttl-and-invalid-name$.{ok_zone_name}", ttl=29),
            get_change_MX_json(f"bad-exchange.{ok_zone_name}", exchange="foo$.bar."),
            get_change_MX_json(f"mx.{ip4_zone_name}"),
            # zone discovery failures
            get_change_MX_json(f"no.subzone.{ok_zone_name}"),
            get_change_MX_json("no.zone.at.all."),
            # context validation failures
            get_change_CNAME_json(f"cname-duplicate.{ok_zone_name}"),
            get_change_MX_json(f"cname-duplicate.{ok_zone_name}"),
            get_change_MX_json(existing_mx_fqdn),
            get_change_MX_json(existing_cname_fqdn),
            get_change_MX_json(f"user-add-unauthorized.{dummy_zone_name}")
        ]
    }
    to_create = [existing_mx, existing_cname]
    to_delete = []
    try:
        for create_json in to_create:
            create_result = client.create_recordset(create_json, status=202)
            to_delete.append(client.wait_until_recordset_change_status(create_result, "Complete"))
        # The whole batch is rejected (400) because some changes fail
        # validation; per-change results come back in submission order.
        response = client.create_batch_change(batch_change_input, status=400)
        # successful changes
        assert_successful_change_in_error_response(response[0], input_name=good_record_fqdn, record_type="MX", record_data={"preference": 1, "exchange": "foo.bar."})
        # ttl, domain name, record data
        assert_failed_change_in_error_response(response[1], input_name=f"bad-ttl-and-invalid-name$.{ok_zone_name}", ttl=29, record_type="MX",
                                               record_data={"preference": 1, "exchange": "foo.bar."},
                                               error_messages=['Invalid TTL: "29", must be a number between 30 and 2147483647.',
                                                               f'Invalid domain name: "bad-ttl-and-invalid-name$.{ok_zone_name}", '
                                                               "valid domain names must be letters, numbers, underscores, and hyphens, joined by dots, and terminated with a dot."])
        assert_failed_change_in_error_response(response[2], input_name=f"bad-exchange.{ok_zone_name}", record_type="MX",
                                               record_data={"preference": 1, "exchange": "foo$.bar."},
                                               error_messages=['Invalid domain name: "foo$.bar.", valid domain names must be letters, numbers, underscores, and hyphens, '
                                                               'joined by dots, and terminated with a dot.'])
        assert_failed_change_in_error_response(response[3], input_name=f"mx.{ip4_zone_name}", record_type="MX",
                                               record_data={"preference": 1, "exchange": "foo.bar."},
                                               error_messages=[f'Invalid Record Type In Reverse Zone: record with name "mx.{ip4_zone_name}" and type "MX" is not allowed in a reverse zone.'])
        # zone discovery failures
        assert_failed_change_in_error_response(response[4], input_name=f"no.subzone.{ok_zone_name}", record_type="MX",
                                               record_data={"preference": 1, "exchange": "foo.bar."},
                                               error_messages=[f'Zone Discovery Failed: zone for "no.subzone.{ok_zone_name}" does not exist in VinylDNS. '
                                                               f'If zone exists, then it must be connected to in VinylDNS.'])
        assert_failed_change_in_error_response(response[5], input_name="no.zone.at.all.", record_type="MX",
                                               record_data={"preference": 1, "exchange": "foo.bar."},
                                               error_messages=['Zone Discovery Failed: zone for "no.zone.at.all." does not exist in VinylDNS. '
                                                               'If zone exists, then it must be connected to in VinylDNS.'])
        # context validations: cname duplicate
        # NOTE(review): response[7] (the MX add sharing the name
        # "cname-duplicate.<zone>") is never asserted — confirm whether an
        # assertion for it is missing here.
        assert_failed_change_in_error_response(response[6], input_name=f"cname-duplicate.{ok_zone_name}", record_type="CNAME",
                                               record_data="test.com.",
                                               error_messages=[f"Record Name \"cname-duplicate.{ok_zone_name}\" Not Unique In Batch Change: "
                                                               f"cannot have multiple \"CNAME\" records with the same name."])
        # context validations: conflicting recordsets, unauthorized error
        assert_failed_change_in_error_response(response[8], input_name=existing_mx_fqdn, record_type="MX",
                                               record_data={"preference": 1, "exchange": "foo.bar."},
                                               error_messages=[f"Record \"{existing_mx_fqdn}\" Already Exists: cannot add an existing record; to update it, "
                                                               f"issue a DeleteRecordSet then an Add."])
        assert_failed_change_in_error_response(response[9], input_name=existing_cname_fqdn, record_type="MX",
                                               record_data={"preference": 1, "exchange": "foo.bar."},
                                               error_messages=["CNAME Conflict: CNAME record names must be unique. "
                                                               f"Existing record with name \"{existing_cname_fqdn}\" and type \"CNAME\" conflicts with this record."])
        assert_failed_change_in_error_response(response[10], input_name=f"user-add-unauthorized.{dummy_zone_name}", record_type="MX",
                                               record_data={"preference": 1, "exchange": "foo.bar."},
                                               error_messages=[f"User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes."])
    finally:
        clear_recordset_list(to_delete, client)
def test_mx_recordtype_update_delete_checks(shared_zone_test_context):
"""
Test all update and delete validations performed on MX records submitted in batch changes
"""
ok_client = shared_zone_test_context.ok_vinyldns_client
dummy_client = shared_zone_test_context.dummy_vinyldns_client
ok_zone = shared_zone_test_context.ok_zone
dummy_zone = shared_zone_test_context.dummy_zone
dummy_zone_name = shared_zone_test_context.dummy_zone["name"]
dummy_group_name = shared_zone_test_context.dummy_group["name"]
ok_zone_name = shared_zone_test_context.ok_zone["name"]
ip4_zone_name = shared_zone_test_context.classless_base_zone["name"]
rs_delete_name = generate_record_name()
rs_delete_fqdn = rs_delete_name + f".{ok_zone_name}"
rs_delete_ok = create_recordset(ok_zone, rs_delete_name, "MX", [{"preference": 1, "exchange": "foo.bar."}], 200)
rs_update_name = generate_record_name()
rs_update_fqdn = rs_update_name + f".{ok_zone_name}"
rs_update_ok = create_recordset(ok_zone, rs_update_name, "MX", [{"preference": 1, "exchange": "foo.bar."}], 200)
rs_delete_dummy_name = generate_record_name()
rs_delete_dummy_fqdn = rs_delete_dummy_name + f".{dummy_zone_name}"
rs_delete_dummy = create_recordset(dummy_zone, rs_delete_dummy_name, "MX", [{"preference": 1, "exchange": "foo.bar."}], 200)
rs_update_dummy_name = generate_record_name()
rs_update_dummy_fqdn = rs_update_dummy_name + f".{dummy_zone_name}"
rs_update_dummy = create_recordset(dummy_zone, rs_update_dummy_name, "MX", [{"preference": 1, "exchange": "foo.bar."}], 200)
batch_change_input = {
"comments": "this is optional",
"changes": [
# valid changes
get_change_MX_json(rs_delete_fqdn, change_type="DeleteRecordSet"),
get_change_MX_json(rs_update_fqdn, change_type="DeleteRecordSet"),
get_change_MX_json(rs_update_fqdn, ttl=300),
# input validations failures
get_change_MX_json(f"invalid-name$.{ok_zone_name}", change_type="DeleteRecordSet"),
get_change_MX_json(f"delete.{ok_zone_name}", ttl=29),
get_change_MX_json(f"bad-exchange.{ok_zone_name}", exchange="foo$.bar."),
get_change_MX_json(f"mx.{ip4_zone_name}"),
# zone discovery failures
get_change_MX_json("no.zone.at.all.", change_type="DeleteRecordSet"),
# context validation failures
get_change_MX_json(f"delete-nonexistent.{ok_zone_name}", change_type="DeleteRecordSet"),
get_change_MX_json(f"update-nonexistent.{ok_zone_name}", change_type="DeleteRecordSet"),
get_change_MX_json(f"update-nonexistent.{ok_zone_name}", preference=1000, exchange="foo.bar."),
get_change_MX_json(rs_delete_dummy_fqdn, change_type="DeleteRecordSet"),
get_change_MX_json(rs_update_dummy_fqdn, preference=1000, exchange="foo.bar."),
get_change_MX_json(rs_update_dummy_fqdn, change_type="DeleteRecordSet")
]
}
to_create = [rs_delete_ok, rs_update_ok, rs_delete_dummy, rs_update_dummy]
to_delete = []
try:
for rs in to_create:
if rs["zoneId"] == dummy_zone["id"]:
create_client = dummy_client
else:
create_client = ok_client
create_rs = create_client.create_recordset(rs, status=202)
to_delete.append(create_client.wait_until_recordset_change_status(create_rs, "Complete"))
# Confirm that record set doesn't already exist
ok_client.get_recordset(ok_zone["id"], "delete-nonexistent", status=404)
response = ok_client.create_batch_change(batch_change_input, status=400)
# successful changes
assert_successful_change_in_error_response(response[0], input_name=rs_delete_fqdn, record_type="MX", record_data=None, change_type="DeleteRecordSet")
assert_successful_change_in_error_response(response[1], input_name=rs_update_fqdn, record_type="MX", record_data=None, change_type="DeleteRecordSet")
assert_successful_change_in_error_response(response[2], ttl=300, input_name=rs_update_fqdn, record_type="MX", record_data={"preference": 1, "exchange": "foo.bar."})
# input validations failures: invalid input name, reverse zone error, invalid ttl
assert_failed_change_in_error_response(response[3], input_name=f"invalid-name$.{ok_zone_name}", record_type="MX", record_data={"preference": 1, "exchange": "foo.bar."},
change_type="DeleteRecordSet",
error_messages=[f'Invalid domain name: "invalid-name$.{ok_zone_name}", valid domain names must be letters, '
f'numbers, underscores, and hyphens, joined by dots, and terminated with a dot.'])
assert_failed_change_in_error_response(response[4], input_name=f"delete.{ok_zone_name}", ttl=29, record_type="MX",
record_data={"preference": 1, "exchange": "foo.bar."},
error_messages=['Invalid TTL: "29", must be a number between 30 and 2147483647.'])
assert_failed_change_in_error_response(response[5], input_name=f"bad-exchange.{ok_zone_name}", record_type="MX",
record_data={"preference": 1, "exchange": "foo$.bar."},
error_messages=['Invalid domain name: "foo$.bar.", valid domain names must be letters, numbers, '
'underscores, and hyphens, joined by dots, and terminated with a dot.'])
assert_failed_change_in_error_response(response[6], input_name=f"mx.{ip4_zone_name}", record_type="MX",
record_data={"preference": 1, "exchange": "foo.bar."},
error_messages=[f'Invalid Record Type In Reverse Zone: record with name "mx.{ip4_zone_name}" '
f'and type "MX" is not allowed in a reverse zone.'])
# zone discovery failure
assert_failed_change_in_error_response(response[7], input_name="no.zone.at.all.", record_type="MX",
record_data=None, change_type="DeleteRecordSet",
error_messages=["Zone Discovery Failed: zone for \"no.zone.at.all.\" does not exist in VinylDNS. "
"If zone exists, then it must be connected to in VinylDNS."])
# context validation failures: record does not exist, not authorized
assert_failed_change_in_error_response(response[8], input_name=f"delete-nonexistent.{ok_zone_name}", record_type="MX",
record_data=None, change_type="DeleteRecordSet",
error_messages=[f"Record \"delete-nonexistent.{ok_zone_name}\" Does Not Exist: cannot delete a record that does not exist."])
assert_failed_change_in_error_response(response[9], input_name=f"update-nonexistent.{ok_zone_name}", record_type="MX",
record_data=None, change_type="DeleteRecordSet",
error_messages=[f"Record \"update-nonexistent.{ok_zone_name}\" Does Not Exist: cannot delete a record that does not exist."])
assert_successful_change_in_error_response(response[10], input_name=f"update-nonexistent.{ok_zone_name}", record_type="MX",
record_data={"preference": 1000, "exchange": "foo.bar."})
assert_failed_change_in_error_response(response[11], | |
from .BinanceTrRequests import sendRequest,sendRequestWithoutAuthorization
from .UserModel import UserModel as UM
from .Apihelpers import date_to_milliseconds
import json
import sys
import traceback
import os
class ApiService:
"""
ApiServiceClass Object
:param apiKey(str): Given account's api key.
:param apiSecret(str): Given account's api secret key.
"""
def __init__(self, apiKey, apiSecret):
self.apiKey = apiKey
self.apiSecret = apiSecret
self.__constants()
def __constants(self):
current_dir = os.path.dirname(__file__)
target_dir = os.path.abspath(os.path.join(current_dir, "constants.json"))
with open(target_dir) as json_file:
self.__constant = json.load(json_file)
self.options = UM(self.apiKey, self.apiSecret)
def testConnectivity(self):
"""
Tests connectivity with server.
:returns: (json) Return requested value from api.
"""
try:
result = sendRequestWithoutAuthorization("/open/v1/common/time", None, True)
return result.json()
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def getSymbol(self):
"""
Fetchs market data.
:returns: (json) Return requested value from api.
"""
try:
result = sendRequestWithoutAuthorization("open/v1/common/symbols", None, True)
return result.json()
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def getOrderBook(self, symbol, limit=100):
"""
Get order book from account.
:param symbol(str): Coin name with parity. ex. BTC_TRY.
:param limit(int): Give the limit to see how many order will be shown.
:returns: (json) Return requested value from api.
"""
try:
params = {"symbol": symbol, "limit": str(limit)}
result = sendRequest("GET", "/open/v1/market/depth", params, True)
return result
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def getRecentTrade(self, symbol, limit=500):
"""
Get recent trades of given parity from market.
:param symbol(str): Coin name with parity. ex. BTC_TRY
:param limit(int): Number of recent trades to be returned.
:returns: (json) Return requested value from api.
"""
try:
params = {"symbol": symbol, "limit": str(limit)}
result = sendRequestWithoutAuthorization("/trades", params)
return result.json()
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def getAggregateTrades(self, symbol, startDate=None, endDate=None, limit=50):
"""
Get recent trades of given parity from market in given dates.
:param symbol(str): Coin name with parity. ex. BTC_TRY
:param startDate(str): Start date. ex.(5 min ago, 4 hour ago)
:param endDate(str): End date. ex.(5 min ago, 4 hour ago)
:param limit(int): Number of recent trades to be returned.
:returns: (json) Return requested value from api.
"""
try:
timestamp = self.testConnectivity()["timestamp"]
params = {"symbol": symbol, "limit": str(limit)}
start_ts = None
end_ts = None
# if startDate:
# start_ts = date_to_milliseconds(startDate)
# end_ts = None
# if endDate:
# end_ts = date_to_milliseconds(endDate)
#
# if start_ts and end_ts:
params["startTime"] = startDate
params["endTime"] = endDate
print(params)
result = sendRequestWithoutAuthorization("/aggTrades", params)
return result.json()
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def getKline(self, symbol, interval, startDate=None, endDate=None, limit=500):
"""
Get market data of given parity from market in given dates.
:param symbol(str): Coin name with parity. ex. BTC_TRY
:param startDate(str): Start date. ex.(5 min ago, 4 hour ago)
:param endDate(str): End date. ex.(5 min ago, 4 hour ago)
:param limit(int): Number of recent trades to be returned.
:returns: (json) Return requested value from api.
"""
try:
params = {"symbol": symbol, "limit": str(limit), "interval": interval}
start_ts = None
end_ts = None
#
# if startDate:
# start_ts = date_to_milliseconds(startDate)
# end_ts = None
# if endDate:
# end_ts = date_to_milliseconds(endDate)
#
if endDate and startDate:
params["startTime"] = startDate
params["endTime"] = endDate
result = sendRequestWithoutAuthorization("/klines", params)
return result.json()
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def getAccountInformation(self):
"""
Get account information.
:returns: (json) Return requested value from api.
"""
try:
result = sendRequest("GET", "/open/v1/account/spot", self.options)
return result
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def getAssetInformation(self, assetName):
"""
Get asset information from account of given token.
:param assetName(str): Coin name to fetch information.
:returns: (json) Return requested value from api.
"""
try:
params = {"asset": assetName}
result = sendRequest("GET", "/open/v1/account/spot/asset", self.options, params)
return result
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def getOrderById(self, orderID):
"""
Get order book by Id.
:param orderID(str): Unique orderid taken from BinanceTr.
:returns: (json) Return requested value from api.
"""
try:
params = {"orderID": orderID}
result = sendRequest("GET", "/open/v1/orders/default", self.options, params)
return result
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def getAllOpenOrders(self, symbol, limit=500):
"""
Get all open orders from account.
:param symbol(str): Coin name with parity. ex. BTC_TRY.
:param limit(int): Give the limit to see how many order will be shown.
:returns: (json) Return requested value from api.
"""
try:
params = {"symbol": symbol, "limit": str(limit), "type": self.__constant["AllOrders"]["Open"]}
result = sendRequest("GET", "/open/v1/orders", self.options, params)
return result
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def getAllOrders(self, symbol, limit=500):
"""
Get all orders from account.
:param symbol(str): Coin name with parity. ex. BTC_TRY.
:param limit(int): Give the limit to see how many order will be shown.
:returns: (json) Return requested value from api.
"""
try:
params = {"symbol": symbol, "limit": str(limit)}
result = sendRequest("GET", "/open/v1/orders", self.options, params)
return result
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def getAllOpenBuyOrders(self, symbol, limit=500):
"""
Get all open buy orders from account.
:param symbol(str): Coin name with parity. ex. BTC_TRY.
:param limit(int): Give the limit to see how many order will be shown.
:returns: (json) Return requested value from api.
"""
try:
params = {"symbol": symbol, "limit": str(limit), "type": self.__constant["AllOrders"]["Open"],
"side": self.__constant["OrderSide"]["BUY"]}
result = sendRequest("GET", "/open/v1/orders", self.options, params)
return result
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def getAllOpenSellOrders(self, symbol, limit=500):
"""
Get all open sell orders from account.
:param symbol(str): Coin name with parity. ex. BTC_TRY.
:param limit(int): Give the limit to see how many order will be shown.
:returns: (json) Return requested value from api.
"""
try:
params = {"symbol": symbol, "limit": str(limit), "type": self.__constant["AllOrders"]["Open"],
"side": self.__constant["OrderSide"]["SELL"]}
result = sendRequest("GET", "/open/v1/orders", self.options, params)
return result
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def postNewLimitOrder(self, symbol, side, origQuoteQuantity, price):
"""
Post new limit order from account.
:param symbol(str): Coin name with parity. ex. BTC_TRY.
:param side(str): BUY or SELL, usage with uppercase "BUY" "SELL".
:param origQuoteQuantity(float): Quantity of coin for the order.
:param price(int, float): Price count.
:returns: (json) Return requested value from api.
"""
try:
params = {"symbol": symbol, "side": self.__constant["OrderSide"][side],
"type": self.__constant["OrderTypes"]["Limit"], "quantity": str(origQuoteQuantity),
"price": str(price)}
result = sendRequest("POST", "/open/v1/orders", self.options, params)
return result
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
    def postBuyMarketOrder(self, symbol, origQuantity):
        """
        Post buy market order from account.
        :param symbol(str): Coin name with parity. ex. BTC_TRY.
        :param origQuantity(float): Order amount, sent as "quoteOrderQty" —
            presumably denominated in the quote asset (e.g. TRY for BTC_TRY);
            confirm against the API docs.
        :returns: (json) Return requested value from api; None when the
            request fails (the traceback is printed instead of raised).
        """
        try:
            params = {"symbol": symbol, "side": self.__constant["OrderSide"]["BUY"],
                      "type": self.__constant["OrderTypes"]["Market"], "quoteOrderQty": str(origQuantity)}
            result = sendRequest("POST", "/open/v1/orders", self.options, params)
            return result
        except Exception:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
            print(errorDetails)
def postSellMarketOrder(self, symbol, origQuoteQuantity):
"""
Post sell market order from account.
:param symbol(str): Coin name with parity. ex. BTC_TRY.
:param origQuoteQuantity(float): Quantity of coin for the order.
:returns: (json) Return requested value from api.
"""
try:
params = {"symbol": symbol, "side": self.__constant["OrderSide"]["SELL"],
"type": self.__constant["OrderTypes"]["Market"], "quantity": str(origQuoteQuantity)}
result = sendRequest("POST", "/open/v1/orders", self.options, params)
return result
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def postStopLimitOrder(self, symbol, side, origQuoteQuantity, limitPrice, stopPrice):
"""
Post stop limit orders from account.
:param symbol(str): Coin name with parity. ex. BTC_TRY.
:param side(str): BUY or SELL, usage with uppercase "BUY" "SELL".
:param origQuoteQuantity(float): Quantity of coin for the order.
:param limitPrice(int, float): Limit price count.
:param stopPrice(int, float): Stop price count.
:returns: (json) Return requested value from api.
"""
try:
params = {"symbol": symbol, "side": self.__constant["OrderSide"][side],
"type": self.__constant["OrderTypes"]["Limit"], "quantity": str(origQuoteQuantity),
"price": str(limitPrice), "stopPrice": str(stopPrice)}
result = sendRequest("POST", "/open/v1/orders", self.options, params)
return result
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
errorDetails = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
print(errorDetails)
def cancelOrderById(self, orderID):
"""
Cancellation order by Id.
:param orderID(str): Unique orderid taken from BinanceTr.
:returns: (json) Return requested | |
= new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfELPCOfLocateExtPC2d_ChangeValue,None,Extrema_SeqPCOfPCFOfEPCOfELPCOfLocateExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfELPCOfLocateExtPC2d.Remove = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfELPCOfLocateExtPC2d_Remove,None,Extrema_SeqPCOfPCFOfEPCOfELPCOfLocateExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfELPCOfLocateExtPC2d_swigregister = _Extrema.Extrema_SeqPCOfPCFOfEPCOfELPCOfLocateExtPC2d_swigregister
Extrema_SeqPCOfPCFOfEPCOfELPCOfLocateExtPC2d_swigregister(Extrema_SeqPCOfPCFOfEPCOfELPCOfLocateExtPC2d)
class Extrema_SeqPCOfPCFOfEPCOfExtPC(OCC.TCollection.TCollection_BaseSequence):
    """Auto-generated SWIG proxy for the OCCT sequence type
    ``Extrema_SeqPCOfPCFOfEPCOfExtPC`` (element type: ``Extrema_POnCurv``).

    Every method forwards directly to the compiled ``_Extrema`` extension
    module; do not edit this wrapper by hand — regenerate it from the SWIG
    interface instead.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :rtype: None
        """
        _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_swiginit(self,_Extrema.new_Extrema_SeqPCOfPCFOfEPCOfExtPC(*args))
    def Clear(self, *args):
        """
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Clear(self, *args)
    def Assign(self, *args):
        """
        :param Other:
        :type Other: Extrema_SeqPCOfPCFOfEPCOfExtPC &
        :rtype: Extrema_SeqPCOfPCFOfEPCOfExtPC
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Assign(self, *args)
    def Set(self, *args):
        """
        :param Other:
        :type Other: Extrema_SeqPCOfPCFOfEPCOfExtPC &
        :rtype: Extrema_SeqPCOfPCFOfEPCOfExtPC
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Set(self, *args)
    def Append(self, *args):
        """
        :param T:
        :type T: Extrema_POnCurv &
        :rtype: None
        :param S:
        :type S: Extrema_SeqPCOfPCFOfEPCOfExtPC &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Append(self, *args)
    def Prepend(self, *args):
        """
        :param T:
        :type T: Extrema_POnCurv &
        :rtype: None
        :param S:
        :type S: Extrema_SeqPCOfPCFOfEPCOfExtPC &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Prepend(self, *args)
    def InsertBefore(self, *args):
        """
        :param Index:
        :type Index: int
        :param T:
        :type T: Extrema_POnCurv &
        :rtype: None
        :param Index:
        :type Index: int
        :param S:
        :type S: Extrema_SeqPCOfPCFOfEPCOfExtPC &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_InsertBefore(self, *args)
    def InsertAfter(self, *args):
        """
        :param Index:
        :type Index: int
        :param T:
        :type T: Extrema_POnCurv &
        :rtype: None
        :param Index:
        :type Index: int
        :param S:
        :type S: Extrema_SeqPCOfPCFOfEPCOfExtPC &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_InsertAfter(self, *args)
    def First(self, *args):
        """
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_First(self, *args)
    def Last(self, *args):
        """
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Last(self, *args)
    def Split(self, *args):
        """
        :param Index:
        :type Index: int
        :param Sub:
        :type Sub: Extrema_SeqPCOfPCFOfEPCOfExtPC &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Split(self, *args)
    def Value(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Value(self, *args)
    def SetValue(self, *args):
        """
        :param Index:
        :type Index: int
        :param I:
        :type I: Extrema_POnCurv &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_SetValue(self, *args)
    def ChangeValue(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_ChangeValue(self, *args)
    def Remove(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: None
        :param FromIndex:
        :type FromIndex: int
        :param ToIndex:
        :type ToIndex: int
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Remove(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_SeqPCOfPCFOfEPCOfExtPC
# Rebind the proxy methods to the raw C functions and register the class with
# the SWIG runtime (generated boilerplate).
Extrema_SeqPCOfPCFOfEPCOfExtPC.Clear = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Clear,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.Assign = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Assign,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.Set = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Set,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.Append = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Append,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.Prepend = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Prepend,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.InsertBefore = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_InsertBefore,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.InsertAfter = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_InsertAfter,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.First = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_First,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.Last = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Last,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.Split = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Split,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.Value = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Value,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.SetValue = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_SetValue,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.ChangeValue = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_ChangeValue,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC.Remove = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_Remove,None,Extrema_SeqPCOfPCFOfEPCOfExtPC)
Extrema_SeqPCOfPCFOfEPCOfExtPC_swigregister = _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC_swigregister
Extrema_SeqPCOfPCFOfEPCOfExtPC_swigregister(Extrema_SeqPCOfPCFOfEPCOfExtPC)
class Extrema_SeqPCOfPCFOfEPCOfExtPC2d(OCC.TCollection.TCollection_BaseSequence):
    """Auto-generated SWIG proxy for the OCCT sequence type
    ``Extrema_SeqPCOfPCFOfEPCOfExtPC2d`` (element type: ``Extrema_POnCurv2d``).

    Every method forwards directly to the compiled ``_Extrema`` extension
    module; do not edit this wrapper by hand — regenerate it from the SWIG
    interface instead.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :rtype: None
        """
        _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_swiginit(self,_Extrema.new_Extrema_SeqPCOfPCFOfEPCOfExtPC2d(*args))
    def Clear(self, *args):
        """
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Clear(self, *args)
    def Assign(self, *args):
        """
        :param Other:
        :type Other: Extrema_SeqPCOfPCFOfEPCOfExtPC2d &
        :rtype: Extrema_SeqPCOfPCFOfEPCOfExtPC2d
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Assign(self, *args)
    def Set(self, *args):
        """
        :param Other:
        :type Other: Extrema_SeqPCOfPCFOfEPCOfExtPC2d &
        :rtype: Extrema_SeqPCOfPCFOfEPCOfExtPC2d
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Set(self, *args)
    def Append(self, *args):
        """
        :param T:
        :type T: Extrema_POnCurv2d &
        :rtype: None
        :param S:
        :type S: Extrema_SeqPCOfPCFOfEPCOfExtPC2d &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Append(self, *args)
    def Prepend(self, *args):
        """
        :param T:
        :type T: Extrema_POnCurv2d &
        :rtype: None
        :param S:
        :type S: Extrema_SeqPCOfPCFOfEPCOfExtPC2d &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Prepend(self, *args)
    def InsertBefore(self, *args):
        """
        :param Index:
        :type Index: int
        :param T:
        :type T: Extrema_POnCurv2d &
        :rtype: None
        :param Index:
        :type Index: int
        :param S:
        :type S: Extrema_SeqPCOfPCFOfEPCOfExtPC2d &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_InsertBefore(self, *args)
    def InsertAfter(self, *args):
        """
        :param Index:
        :type Index: int
        :param T:
        :type T: Extrema_POnCurv2d &
        :rtype: None
        :param Index:
        :type Index: int
        :param S:
        :type S: Extrema_SeqPCOfPCFOfEPCOfExtPC2d &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_InsertAfter(self, *args)
    def First(self, *args):
        """
        :rtype: Extrema_POnCurv2d
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_First(self, *args)
    def Last(self, *args):
        """
        :rtype: Extrema_POnCurv2d
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Last(self, *args)
    def Split(self, *args):
        """
        :param Index:
        :type Index: int
        :param Sub:
        :type Sub: Extrema_SeqPCOfPCFOfEPCOfExtPC2d &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Split(self, *args)
    def Value(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: Extrema_POnCurv2d
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Value(self, *args)
    def SetValue(self, *args):
        """
        :param Index:
        :type Index: int
        :param I:
        :type I: Extrema_POnCurv2d &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_SetValue(self, *args)
    def ChangeValue(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: Extrema_POnCurv2d
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_ChangeValue(self, *args)
    def Remove(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: None
        :param FromIndex:
        :type FromIndex: int
        :param ToIndex:
        :type ToIndex: int
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Remove(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_SeqPCOfPCFOfEPCOfExtPC2d
# Rebind the proxy methods to the raw C functions and register the class with
# the SWIG runtime (generated boilerplate).
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.Clear = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Clear,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.Assign = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Assign,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.Set = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Set,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.Append = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Append,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.Prepend = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Prepend,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.InsertBefore = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_InsertBefore,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.InsertAfter = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_InsertAfter,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.First = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_First,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.Last = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Last,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.Split = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Split,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.Value = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Value,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.SetValue = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_SetValue,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.ChangeValue = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_ChangeValue,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d.Remove = new_instancemethod(_Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_Remove,None,Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
Extrema_SeqPCOfPCFOfEPCOfExtPC2d_swigregister = _Extrema.Extrema_SeqPCOfPCFOfEPCOfExtPC2d_swigregister
Extrema_SeqPCOfPCFOfEPCOfExtPC2d_swigregister(Extrema_SeqPCOfPCFOfEPCOfExtPC2d)
class Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC(OCC.TCollection.TCollection_BaseSequence):
    """Auto-generated SWIG proxy for the OCCT sequence type
    ``Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC`` (element type:
    ``Extrema_POnCurv``).

    Every method forwards directly to the compiled ``_Extrema`` extension
    module; do not edit this wrapper by hand — regenerate it from the SWIG
    interface instead.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :rtype: None
        """
        _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_swiginit(self,_Extrema.new_Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC(*args))
    def Clear(self, *args):
        """
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Clear(self, *args)
    def Assign(self, *args):
        """
        :param Other:
        :type Other: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC &
        :rtype: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Assign(self, *args)
    def Set(self, *args):
        """
        :param Other:
        :type Other: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC &
        :rtype: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Set(self, *args)
    def Append(self, *args):
        """
        :param T:
        :type T: Extrema_POnCurv &
        :rtype: None
        :param S:
        :type S: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Append(self, *args)
    def Prepend(self, *args):
        """
        :param T:
        :type T: Extrema_POnCurv &
        :rtype: None
        :param S:
        :type S: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Prepend(self, *args)
    def InsertBefore(self, *args):
        """
        :param Index:
        :type Index: int
        :param T:
        :type T: Extrema_POnCurv &
        :rtype: None
        :param Index:
        :type Index: int
        :param S:
        :type S: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_InsertBefore(self, *args)
    def InsertAfter(self, *args):
        """
        :param Index:
        :type Index: int
        :param T:
        :type T: Extrema_POnCurv &
        :rtype: None
        :param Index:
        :type Index: int
        :param S:
        :type S: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_InsertAfter(self, *args)
    def First(self, *args):
        """
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_First(self, *args)
    def Last(self, *args):
        """
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Last(self, *args)
    def Split(self, *args):
        """
        :param Index:
        :type Index: int
        :param Sub:
        :type Sub: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Split(self, *args)
    def Value(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Value(self, *args)
    def SetValue(self, *args):
        """
        :param Index:
        :type Index: int
        :param I:
        :type I: Extrema_POnCurv &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_SetValue(self, *args)
    def ChangeValue(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: Extrema_POnCurv
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_ChangeValue(self, *args)
    def Remove(self, *args):
        """
        :param Index:
        :type Index: int
        :rtype: None
        :param FromIndex:
        :type FromIndex: int
        :param ToIndex:
        :type ToIndex: int
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Remove(self, *args)
    __swig_destroy__ = _Extrema.delete_Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC
# Rebind the proxy methods to the raw C functions and register the class with
# the SWIG runtime (generated boilerplate).
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.Clear = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Clear,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.Assign = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Assign,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.Set = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Set,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.Append = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Append,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.Prepend = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Prepend,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.InsertBefore = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_InsertBefore,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.InsertAfter = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_InsertAfter,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.First = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_First,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.Last = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Last,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.Split = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Split,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.Value = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Value,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.SetValue = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_SetValue,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.ChangeValue = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_ChangeValue,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC.Remove = new_instancemethod(_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_Remove,None,Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_swigregister = _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_swigregister
Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC_swigregister(Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC)
class Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d(OCC.TCollection.TCollection_BaseSequence):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_swiginit(self,_Extrema.new_Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d(*args))
def Clear(self, *args):
"""
:rtype: None
"""
return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_Clear(self, *args)
def Assign(self, *args):
"""
:param Other:
:type Other: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d &
:rtype: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d
"""
return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_Assign(self, *args)
def Set(self, *args):
"""
:param Other:
:type Other: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d &
:rtype: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d
"""
return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_Set(self, *args)
def Append(self, *args):
"""
:param T:
:type T: Extrema_POnCurv2d &
:rtype: None
:param S:
:type S: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d &
:rtype: None
"""
return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_Append(self, *args)
def Prepend(self, *args):
"""
:param T:
:type T: Extrema_POnCurv2d &
:rtype: None
:param S:
:type S: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d &
:rtype: None
"""
return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_Prepend(self, *args)
def InsertBefore(self, *args):
"""
:param Index:
:type Index: int
:param T:
:type T: Extrema_POnCurv2d &
:rtype: None
:param Index:
:type Index: int
:param S:
:type S: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d &
:rtype: None
"""
return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_InsertBefore(self, *args)
def InsertAfter(self, *args):
"""
:param Index:
:type Index: int
:param T:
:type T: Extrema_POnCurv2d &
:rtype: None
:param Index:
:type Index: int
:param S:
:type S: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d &
:rtype: None
"""
return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_InsertAfter(self, *args)
def First(self, *args):
"""
:rtype: Extrema_POnCurv2d
"""
return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_First(self, *args)
def Last(self, *args):
"""
:rtype: Extrema_POnCurv2d
"""
return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_Last(self, *args)
def Split(self, *args):
"""
:param Index:
:type Index: int
:param Sub:
:type Sub: Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d &
:rtype: None
"""
return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_Split(self, *args)
def Value(self, *args):
"""
:param Index:
:type Index: int
:rtype: Extrema_POnCurv2d
"""
return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_Value(self, *args)
    def SetValue(self, *args):
        """
        Replace the element at position Index with I (SWIG-generated
        wrapper).

        :param Index:
        :type Index: int
        :param I:
        :type I: Extrema_POnCurv2d &
        :rtype: None
        """
        return _Extrema.Extrema_SeqPCOfPCLocFOfLocEPCOfLocateExtPC2d_SetValue(self, *args)
def ChangeValue(self, *args):
"""
:param Index:
:type Index: int
| |
acct cont objc objc objc obj obj
# obj
self.app.memcache.store = {}
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
# negative tests
# invalid x-copy-from path
req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c'})
self.app.update_request(req)
self.app.memcache.store = {}
resp = controller.PUT(req)
self.assertEquals(resp.status_int // 100, 4) # client error
# server error
req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
self.app.update_request(req)
proxy_server.http_connect = \
fake_http_connect(200, 200, 503, 503, 503)
# acct cont objc objc objc
self.app.memcache.store = {}
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 503)
# not found
req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
self.app.update_request(req)
proxy_server.http_connect = \
fake_http_connect(200, 200, 404, 404, 404)
# acct cont objc objc objc
self.app.memcache.store = {}
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 404)
# some missing containers
req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
self.app.update_request(req)
proxy_server.http_connect = \
fake_http_connect(200, 200, 404, 404, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
self.app.memcache.store = {}
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 201)
# test object meta data
req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Object-Meta-Ours': 'okay'})
self.app.update_request(req)
proxy_server.http_connect = \
fake_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
self.app.memcache.store = {}
resp = controller.PUT(req)
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers.get('x-object-meta-test'),
'testing')
self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay')
    def test_COPY(self):
        # Exercises ObjectController.COPY: successful copies (Destination
        # with and without a leading slash, multi-segment source object
        # names), an invalid Destination, backend server errors, a missing
        # source, partial container failures, and metadata handling.
        # fake_http_connect() supplies one canned status per backend
        # request, in order; the comments under each call map statuses to
        # the backend requests they answer.
        with save_globals():
            controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
            # First PUT an object so there is a source to copy
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
                                headers={'Content-Length': '0'})
            req.account = 'a'
            proxy_server.http_connect = \
                fake_http_connect(200, 200, 201, 201, 201)
            #                 acct cont obj  obj  obj
            resp = controller.PUT(req)
            self.assertEquals(resp.status_int, 201)
            # Basic COPY: Destination given without a leading slash
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Destination': 'c/o'})
            req.account = 'a'
            proxy_server.http_connect = \
                fake_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201,
                                  201)
            #                 acct cont acct cont objc objc objc obj  obj
            #                 obj
            self.app.memcache.store = {}
            resp = controller.COPY(req)
            self.assertEquals(resp.status_int, 201)
            self.assertEquals(resp.headers['x-copied-from'], 'c/o')
            # COPY where the source object name has multiple path segments
            req = Request.blank('/a/c/o/o2',
                                environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Destination': 'c/o'})
            req.account = 'a'
            controller.object_name = 'o/o2'
            proxy_server.http_connect = \
                fake_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201,
                                  201)
            #                 acct cont acct cont objc objc objc obj  obj
            #                 obj
            self.app.memcache.store = {}
            resp = controller.COPY(req)
            self.assertEquals(resp.status_int, 201)
            self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
            # COPY with a leading slash on Destination
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Destination': '/c/o'})
            req.account = 'a'
            controller.object_name = 'o'
            proxy_server.http_connect = \
                fake_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201,
                                  201)
            #                 acct cont acct cont objc objc objc obj  obj
            #                 obj
            self.app.memcache.store = {}
            resp = controller.COPY(req)
            self.assertEquals(resp.status_int, 201)
            self.assertEquals(resp.headers['x-copied-from'], 'c/o')
            # Leading slash plus a multi-segment source object name
            req = Request.blank('/a/c/o/o2',
                                environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Destination': '/c/o'})
            req.account = 'a'
            controller.object_name = 'o/o2'
            proxy_server.http_connect = \
                fake_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201,
                                  201)
            #                 acct cont acct cont objc objc objc obj  obj
            #                 obj
            self.app.memcache.store = {}
            resp = controller.COPY(req)
            self.assertEquals(resp.status_int, 201)
            self.assertEquals(resp.headers['x-copied-from'], 'c/o/o2')
            # Destination without a container/object separator:
            # 412 Precondition Failed
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Destination': 'c_o'})
            req.account = 'a'
            controller.object_name = 'o'
            proxy_server.http_connect = \
                fake_http_connect(200, 200)
            #                 acct cont
            self.app.memcache.store = {}
            resp = controller.COPY(req)
            self.assertEquals(resp.status_int, 412)
            # All object servers fail the source GET: server error
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Destination': '/c/o'})
            req.account = 'a'
            controller.object_name = 'o'
            proxy_server.http_connect = \
                fake_http_connect(200, 200, 503, 503, 503)
            #                 acct cont objc objc objc
            self.app.memcache.store = {}
            resp = controller.COPY(req)
            self.assertEquals(resp.status_int, 503)
            # Source object not found on any object server
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Destination': '/c/o'})
            req.account = 'a'
            controller.object_name = 'o'
            proxy_server.http_connect = \
                fake_http_connect(200, 200, 404, 404, 404)
            #                 acct cont objc objc objc
            self.app.memcache.store = {}
            resp = controller.COPY(req)
            self.assertEquals(resp.status_int, 404)
            # Some object servers 404 but one succeeds: copy still works
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Destination': '/c/o'})
            req.account = 'a'
            controller.object_name = 'o'
            proxy_server.http_connect = \
                fake_http_connect(200, 200, 404, 404, 200, 201, 201, 201)
            #                 acct cont objc objc objc obj  obj  obj
            self.app.memcache.store = {}
            resp = controller.COPY(req)
            self.assertEquals(resp.status_int, 201)
            # Metadata: new metadata on the COPY request is merged with the
            # source's ('x-object-meta-test' is presumably supplied by
            # fake_http_connect's canned response headers -- confirm there)
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Destination': '/c/o',
                                         'X-Object-Meta-Ours': 'okay'})
            req.account = 'a'
            controller.object_name = 'o'
            proxy_server.http_connect = \
                fake_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
            #                 acct cont objc objc objc obj  obj  obj
            self.app.memcache.store = {}
            resp = controller.COPY(req)
            self.assertEquals(resp.status_int, 201)
            self.assertEquals(resp.headers.get('x-object-meta-test'),
                              'testing')
            self.assertEquals(resp.headers.get('x-object-meta-ours'), 'okay')
    def test_COPY_newest(self):
        # Verifies that COPY reads from the source replica with the newest
        # timestamp: the three object-server GETs report timestamps
        # '1', '3', '2', so the copy must carry last-modified '3'
        with save_globals():
            controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Destination': '/c/o'})
            req.account = 'a'
            controller.object_name = 'o'
            proxy_server.http_connect = \
                fake_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
                    timestamps=('1', '1', '1', '3', '2', '4', '4', '4'))
            #                 acct cont objc objc objc obj  obj  obj
            self.app.memcache.store = {}
            resp = controller.COPY(req)
            self.assertEquals(resp.status_int, 201)
            self.assertEquals(resp.headers['x-copied-from-last-modified'],
                              '3')
    def test_chunked_put(self):
        # Exercises PUT with a chunked (Transfer-Encoding) request body,
        # including enforcement of MAX_FILE_SIZE for chunked uploads
        class ChunkedFile():
            # Minimal file-like object that serves `bytes` total 'a'
            # characters, then raises StopIteration on further reads
            def __init__(self, bytes):
                self.bytes = bytes
                self.read_bytes = 0
            @property
            def bytes_left(self):
                # Bytes still available to read
                return self.bytes - self.read_bytes
            def read(self, amt=None):
                if self.read_bytes >= self.bytes:
                    # NOTE(review): raising StopIteration from read() is a
                    # dated idiom; it signals exhaustion to the proxy's
                    # chunked-transfer reader here
                    raise StopIteration()
                if not amt:
                    amt = self.bytes_left
                data = 'a' * min(amt, self.bytes_left)
                self.read_bytes += len(data)
                return data
        with save_globals():
            proxy_server.http_connect = fake_http_connect(201, 201, 201, 201)
            controller = proxy_server.ObjectController(self.app, 'account',
                'container', 'object')
            # NOTE(review): REQUEST_METHOD says 'COPY' but controller.PUT()
            # is invoked directly; the environ method is not dispatched on
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Transfer-Encoding': 'chunked',
                                         'Content-Type': 'foo/bar'})
            req.body_file = ChunkedFile(10)
            self.app.memcache.store = {}
            self.app.update_request(req)
            res = controller.PUT(req)
            self.assertEquals(res.status_int // 100, 2) # success
            # test 413 entity too large: with MAX_FILE_SIZE forced down to
            # 10, an 11-byte chunked body must be rejected
            from swift.proxy import server
            proxy_server.http_connect = fake_http_connect(201, 201, 201, 201)
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Transfer-Encoding': 'chunked',
                                         'Content-Type': 'foo/bar'})
            req.body_file = ChunkedFile(11)
            self.app.memcache.store = {}
            self.app.update_request(req)
            try:
                server.MAX_FILE_SIZE = 10
                res = controller.PUT(req)
                self.assertEquals(res.status_int, 413)
            finally:
                # Always restore the module-level limit for other tests
                server.MAX_FILE_SIZE = MAX_FILE_SIZE
def test_chunked_put_bad_version(self):
# Check bad version
(prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \
_test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v0 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEquals(headers[:len(exp)], exp)
def test_chunked_put_bad_path(self):
# Check bad path
(prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \
_test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET invalid HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEquals(headers[:len(exp)], exp)
def test_chunked_put_bad_utf8(self):
# Check invalid utf-8
(prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \
_test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a%80 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: <PASSWORD>'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEquals(headers[:len(exp)], exp)
def test_chunked_put_bad_path_no_controller(self):
# Check bad path, no controller
(prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \
_test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: <PASSWORD>'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEquals(headers[:len(exp)], exp)
def test_chunked_put_bad_method(self):
# Check bad method
(prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \
_test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('LICK /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: <PASSWORD>'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 405'
self.assertEquals(headers[:len(exp)], exp)
def test_chunked_put_unhandled_exception(self):
# Check unhandled exception
(prosrv, acc1srv, acc2srv, con2srv, con2srv, obj1srv, obj2srv) = \
_test_servers
(prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \
_test_sockets
orig_update_request = prosrv.update_request
def broken_update_request(*args, **kwargs):
raise Exception('fake: this should be printed')
prosrv.update_request = broken_update_request
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: <PASSWORD>'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 500'
self.assertEquals(headers[:len(exp)], exp)
prosrv.update_request = orig_update_request
def test_chunked_put_head_account(self):
# Head account, just a double check and really is here to test
# the part Application.log_request that 'enforces' a
# content_length on the response.
(prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \
_test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: <PASSWORD>'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 204'
self.assertEquals(headers[:len(exp)], exp)
self.assert_('\r\nContent-Length: 0\r\n' in headers)
def test_client_ip_logging(self):
# test that the client ip field in the log gets populated with the
# ip instead of being blank
(prosrv, acc1srv, acc2srv, con2srv, con2srv, obj1srv, obj2srv) = \
_test_servers
(prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \
_test_sockets
class Logger(object):
def info(self, msg):
self.msg = msg
orig_logger, orig_access_logger = prosrv.logger, prosrv.access_logger
prosrv.logger = prosrv.access_logger = Logger()
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write(
'GET /v1/a?format=json HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: <PASSWORD>'
'Content-Length: 0\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEquals(headers[:len(exp)], exp)
exp = '127.0.0.1 127.0.0.1'
self.assert_(exp in prosrv.logger.msg)
def test_chunked_put_logging(self):
# GET account with a query string to test that
# Application.log_request logs the query string. Also, throws
# in a test | |
'Britt', 'Britta', 'Brittaney', 'Brittani',
'Brittanie', 'Brittany', 'Brittnay', 'Brittnee', 'Brittney', 'Brittni',
'Brittnie', 'Brittny', 'Brook', 'Brooke', 'Brooklyn', 'Brooklynn',
'Bryana', 'Bryanna', 'Brylee', 'Bryn', 'Brynlee', 'Brynn', 'Buelah',
'Buena', 'Buffy', 'Bula', 'Bulah', 'Buna', 'Burnice', 'Byrd', 'Byrdie',
'Caddie', 'Cadence', 'Cailyn', 'Caitlin', 'Caitlyn', 'Caitlynn',
'Caldonia', 'Caleigh', 'Cali', 'Calista', 'Calla', 'Calleigh', 'Callie',
'Cambria', 'Cameron', 'Cami', 'Camila', 'Camilla', 'Camille', 'Camisha',
'Cammie', 'Campbell', 'Camryn', 'Candace', 'Candi', 'Candice',
'Candida', 'Candis', 'Candy', 'Candyce', 'Cannie', 'Capitola', 'Cappie',
'Caprice', 'Cara', 'Caren', 'Carey', 'Cari', 'Carie', 'Carin', 'Carina',
'Carisa', 'Carissa', 'Carla', 'Carlee', 'Carleen', 'Carleigh',
'Carlene', 'Carley', 'Carli', 'Carlie', 'Carlota', 'Carlotta', 'Carly',
'Carlyn', 'Carma', 'Carmel', 'Carmela', 'Carmelita', 'Carmella',
'Carmen', 'Caro', 'Carol', 'Carolann', 'Carole', 'Carolee', 'Carolina',
'Caroline', 'Carolyn', 'Carolyne', 'Carolynn', 'Caron', 'Carra',
'Carri', 'Carrie', 'Carrol', 'Carroll', 'Carry', 'Carson', 'Cary',
'Caryl', 'Caryn', 'Casandra', 'Casey', 'Casie', 'Cassandra', 'Cassidy',
'Cassie', 'Cassondra', 'Catalina', 'Catharine', 'Catherine', 'Cathern',
'Cathey', 'Cathi', 'Cathie', 'Cathleen', 'Cathrine', 'Cathryn', 'Cathy',
'Catina', 'Catrina', 'Caydence', 'Cayla', 'Caylee', 'Cecelia', 'Cecile',
'Cecilia', 'Cecily', 'Ceil', 'Celena', 'Celesta', 'Celeste', 'Celestia',
'Celestine', 'Celia', 'Celie', 'Celina', 'Celine', 'Cena', 'Ceola',
'Chaka', 'Chana', 'Chanda', 'Chandler', 'Chandra', 'Chanel', 'Chanelle',
'Chaney', 'Chanie', 'Channie', 'Channing', 'Chantal', 'Chante',
'Chantel', 'Chantelle', 'Charissa', 'Charisse', 'Charity', 'Charla',
'Charlee', 'Charleen', 'Charlene', 'Charley', 'Charlie', 'Charline',
'Charlize', 'Charlotta', 'Charlotte', 'Charlottie', 'Charlsie',
'Charmaine', 'Charolette', 'Chase', 'Chasity', 'Chastity', 'Chaya',
'Chelsea', 'Chelsey', 'Chelsi', 'Chelsie', 'Chelsy', 'Cher', 'Cherelle',
'Cheri', 'Cherie', 'Cherilyn', 'Cherise', 'Cherish', 'Cherrelle',
'Cherri', 'Cherrie', 'Cherry', 'Cherryl', 'Cheryl', 'Cheryle',
'Cheryll', 'Chessie', 'Chestina', 'Cheyanne', 'Cheyenne', 'Chimere',
'China', 'Chiquita', 'Chloe', 'Chloie', 'Chris', 'Chrissie', 'Chrissy',
'Christa', 'Christal', 'Christeen', 'Christel', 'Christen', 'Christena',
'Christene', 'Christi', 'Christian', 'Christiana', 'Christie',
'Christin', 'Christina', 'Christine', 'Christy', 'Chrystal', 'Chyna',
'Chynna', 'Ciara', 'Ciarra', 'Cicely', 'Cielo', 'Ciera', 'Cierra',
'Ciji', 'Cilla', 'Cinda', 'Cindi', 'Cindy', 'Cinnamon', 'Cinthia',
'Citlali', 'Citlalli', 'Clair', 'Claire', 'Clara', 'Clarabelle',
'Clare', 'Claribel', 'Clarice', 'Clarinda', 'Clarine', 'Clarisa',
'Clarissa', 'Classie', 'Claudette', 'Claudia', 'Claudie', 'Claudine',
'Cleda', 'Clella', 'Clem', 'Clemence', 'Clementina', 'Clementine',
'Clemie', 'Clemma', 'Clemmie', 'Cleo', 'Cleola', 'Cleone', 'Cleora',
'Cleta', 'Cleva', 'Clevie', 'Cliffie', 'Cloe', 'Clora', 'Clotilda',
'Clotilde', 'Clyda', 'Clydie', 'Clytie', 'Coleen', 'Coletta', 'Colette',
'Colleen', 'Collette', 'Columbia', 'Concepcion', 'Concetta', 'Concha',
'Connie', 'Constance', 'Consuela', 'Consuelo', 'Contina', 'Cora',
'Coraima', 'Coral', 'Coralie', 'Corda', 'Cordelia', 'Cordella',
'Cordia', 'Cordie', 'Corean', 'Corene', 'Coretta', 'Corey', 'Cori',
'Corie', 'Corina', 'Corine', 'Corinna', 'Corinne', 'Corliss',
'Cornelia', 'Cornie', 'Corrie', 'Corrina', 'Corrine', 'Cortney', 'Cory',
'Courtney', 'Creola', 'Cressie', 'Crete', 'Crissie', 'Crissy', 'Crista',
'Cristal', 'Cristen', 'Cristi', 'Cristin', 'Cristina', 'Cristine',
'Cristy', 'Cruz', 'Crysta', 'Crystal', 'Cuba', 'Cydney', 'Cyndi',
'Cyntha', 'Cynthia', 'Dafne', 'Dagmar', 'Dagny', 'Dahlia', 'Daija',
'Daijah', 'Daisey', 'Daisha', 'Daisie', 'Daisy', 'Daisye', 'Daja',
'Dakota', 'Dale', 'Dalia', 'Dallas', 'Damaris', 'Dana', 'Danae',
'Daneen', 'Danelle', 'Danette', 'Dani', 'Dania', 'Danica', 'Daniela',
'Daniele', 'Daniella', 'Danielle', 'Danika', 'Danita', 'Danna',
'Dannie', 'Dannielle', 'Danyel', 'Danyell', 'Danyelle', 'Daphne',
'Dara', 'Darby', 'Darci', 'Darcie', 'Darcy', 'Daria', 'Darian',
'Dariana', 'Darla', 'Darleen', 'Darlene', 'Darline', 'Darlyne', 'Dasia',
'Davina', 'Dawn', 'Dawna', 'Dawne', 'Dayami', 'Dayana', 'Dayanara',
'Dayle', 'Dayna', 'Dayse', 'Deana', 'Deandra', 'Deann', 'Deanna',
'Deanne', 'Deasia', 'Deb', 'Debbi', 'Debbie', 'Debbra', 'Debby',
'Debera', 'Debi', 'Debora', 'Deborah', 'Deborrah', 'Debra', 'Debrah',
'Debroah', 'Dedra', 'Dee', 'Deeann', 'Deedee', 'Deena', 'Deetta',
'Deidra', 'Deidre', 'Deirdre', 'Deja', 'Dejah', 'Delaney', 'Delcie',
'Delfina', 'Delia', 'Deliah', 'Delila', 'Delilah', 'Delina', 'Delinda',
'Delisa', 'Dell', 'Della', 'Dellar', 'Delle', 'Dellia', 'Dellie',
'Delma', 'Delois', 'Delora', 'Delores', 'Deloris', 'Delpha', 'Delphia',
'Delphine', 'Delsie', 'Delta', 'Dema', 'Demetra', 'Demetria', 'Demi',
'Dena', 'Deneen', 'Denese', 'Denice', 'Denine', 'Denise', 'Denisha',
'Denisse', 'Denita', 'Dennie', 'Desirae', 'Desiree', 'Dessa', 'Dessie',
'Destany', 'Destinee', 'Destiney', 'Destini', 'Destiny', 'Devan',
'Devin', 'Devon', 'Devyn', 'Dewey', 'Deyanira', 'Dezzie', 'Diamond',
'Dian', 'Diana', 'Diandra', 'Diane', 'Diann', 'Dianna', 'Dianne',
'Dicie', 'Dicy', 'Dillie', 'Dimple', 'Dina', 'Dinah', 'Dione', 'Dionne',
'Dixie', 'Diya', 'Djuana', 'Djuna', 'Docia', 'Dola', 'Dollie', 'Dolly',
'Dollye', 'Dolores', 'Doloris', 'Domenica', 'Dominga', 'Dominique',
'Dominque', 'Domonique', 'Dona', 'Donia', 'Donie', 'Donita', 'Donna',
'Donnie', 'Dora', 'Dorathea', 'Dorathy', 'Dorcas', 'Doreen', 'Dorene',
'Doretha', 'Doretta', 'Dori', 'Dorinda', 'Dorine', 'Doris', 'Dorla',
'Dorotha', 'Dorothea', 'Dorothy', 'Dorris', 'Dortha', 'Dorthea',
'Dorthey', 'Dorthy', 'Dosha', 'Doshia', 'Doshie', 'Dosia', 'Dossie',
'Dot', 'Dottie', 'Dotty', 'Dove', 'Dovie', 'Drema', 'Drew', 'Drucilla',
'Drusilla', 'Dulce', 'Dulcie', 'Dusty', 'Dwan', 'Dyan', 'Dylan',
'Earlean', 'Earlene', 'Earlie', 'Earline', 'Earnestine', 'Eartha',
'Easter', 'Eathel', 'Ebba', 'Eboni', 'Ebony', 'Echo', 'Eda', 'Eddie',
'Eden', 'Edie', 'Edith', 'Edla', 'Edmonia', 'Edna', 'Ednah', 'Edra',
'Edrie', 'Edris', 'Edwina', 'Edyth', 'Edythe', 'Effa', 'Effie',
'Eileen', 'Eithel', 'Ela', 'Elaina', 'Elaine', 'Elana', 'Elayne',
'Elba', 'Elberta', 'Elda', 'Eldora', 'Eleanor', 'Eleanora', 'Eleanore',
'Elease', 'Electa', 'Elena', 'Elenor', 'Elenora', 'Elenore', 'Eleonora',
'Eleonore', 'Elfie', 'Elfreda', 'Elfrieda', 'Elgie', 'Elia', 'Eliana',
'Elianna', 'Elida', 'Elinor', 'Elinore', 'Elisa', 'Elisabeth', 'Elise',
'Elisha', 'Elissa', 'Eliza', 'Elizabet', 'Elizabeth', 'Elizbeth',
'Elizebeth', 'Ella', 'Ellamae', 'Ellar', 'Elle', 'Ellen', 'Eller',
'Elliana', 'Ellie', 'Ellyn', 'Elma', 'Elmina', 'Elmira', 'Elmire',
'Elmyra', 'Elna', 'Elnora', 'Elodie', 'Elois', 'Eloisa', 'Eloise',
'Elouise', 'Elsa', 'Else', 'Elsie', 'Elta', 'Elva', 'Elvera', 'Elvia',
'Elvie', 'Elvina', 'Elvira', 'Elwanda', 'Elyse', 'Elyssa', 'Elza',
'Elzada', 'Ema', 'Emaline', 'Ember', 'Emelia', 'Emelie', 'Emeline',
'Emely', 'Emerald', 'Emerson', 'Emery', 'Emilee', 'Emilia', 'Emilie',
'Emily', 'Emma', 'Emmalee', 'Emmaline', 'Emmer', 'Emmie', 'Emmy',
'Emogene', 'Ena', 'Enid', 'Enola', 'Enriqueta', 'Eola', 'Eppie',
'Epsie', 'Era', 'Erica', 'Ericka', 'Erie', 'Erika', 'Erin', 'Eris',
'Erla', 'Erlene', 'Erlinda', 'Erline', 'Erma', 'Ermina', 'Ermine',
'Erna', 'Ernestina', 'Ernestine', 'Erykah', 'Eryn', 'Esmeralda',
'Esperanza', 'Essa', 'Essence', 'Essie', 'Esta', 'Estefani',
'Estefania', 'Estefany', 'Estela', 'Estell', 'Estella', 'Estelle',
'Ester', 'Esther', 'Estie', 'Estrella', 'Etha', 'Ethel', 'Ethelene',
'Ethelyn', 'Ether', 'Ethie', 'Ethyl', 'Ethyle', 'Etna', 'Etta', 'Etter',
'Ettie', 'Eudora', 'Eugenia', 'Eugenie', 'Eula', 'Eulah', 'Eulalia',
'Eulalie', 'Euna', 'Eunice', 'Euphemia', 'Eura', 'Eva', 'Evalena',
'Evaline', 'Evalyn', 'Evangelina', 'Evangeline', 'Eve', 'Evelena',
'Evelin', 'Evelina', 'Eveline', 'Evelyn', 'Evelyne', 'Evelynn', 'Ever',
'Evette', 'Evia', 'Evie', 'Evita', 'Evon', 'Evonne', 'Exa', 'Exie',
'Fabiola', 'Fae', 'Fairy', 'Faith', 'Fallon', 'Falon', 'Fannie',
'Fanny', 'Fannye', 'Farah', 'Farrah', 'Fatima', 'Fawn', 'Fay', 'Faye',
'Felecia', 'Felice', 'Felicia', 'Felicie', 'Felicitas', 'Felicity',
'Felipa', 'Felisha', 'Fern', 'Fernanda', 'Ferne', 'Fidelia', 'Filomena',
'Finley', 'Fiona', 'Flavia', 'Fleda', 'Fleeta', 'Fleta', 'Flo',
'Flonnie', 'Flor', 'Flora', 'Florance', 'Florence', 'Florene',
'Floretta', 'Florida', 'Florie', 'Florine', 'Florrie', 'Flossie',
'Floy', 'Fonda', 'Forest', 'Fran', 'Franc', 'Frances', 'Francesca',
'Francies', 'Francina', 'Francine', 'Francis', 'Francisca',
'Francisquita', 'Frankie', 'Freda', 'Freddie', 'Frederica',
'Fredericka', 'Freeda', 'Freida', 'Frida', 'Frieda', 'Frona', 'Fronia',
'Fronie', 'Fronnie', 'Fumiko', 'Gabriela', 'Gabriella', 'Gabrielle',
'Gail', 'Gale', 'Galilea', 'Garnet', 'Garnett', 'Gay', 'Gaye', 'Gayla',
'Gayle', 'Gaylene', 'Gaynell', 'Gearldine', 'Gemma', 'Gena', 'Gene',
'Genesis', 'Geneva', 'Genevieve', 'Genevra', 'Genie', 'Gennie',
'Genoveva', 'Georganna', 'Georgeann', 'Georgeanna', 'Georgene',
'Georgetta', 'Georgette', 'Georgia', 'Georgiana', 'Georgiann',
'Georgianna', 'Georgie', 'Georgina', 'Georgine', 'Geraldine', 'Geralyn',
'Gerda', 'Geri', 'Germaine', 'Gerri', 'Gerry', 'Gertha', 'Gertie',
'Gertrude', 'Gia', 'Giada', 'Giana', 'Gianna', 'Gidget', 'Gigi',
'Gilda', 'Gillian', 'Gillie', 'Gina', 'Ginger', 'Ginny', 'Giovanna',
'Girtha', 'Gisele', 'Giselle', 'Gisselle', 'Giuliana', 'Gladis',
'Gladyce', 'Gladys', 'Glenda', 'Glendora', 'Glenn', 'Glenna', 'Glennie',
'Glennis', 'Glinda', 'Gloria', 'Glynda', 'Glynis', 'Golda', 'Golden',
'Goldia', 'Goldie', 'Grace', 'Gracelyn', 'Gracia', 'Gracie', 'Graciela',
'Grayce', 'Grecia', 'Gregoria', 'Greta', 'Gretchen', 'Gretta', 'Grisel',
'Griselda', 'Guadalupe', 'Gunda', 'Gussie', 'Gusta', 'Gustie', 'Gwen',
'Gwenda', 'Gwendolyn', 'Gwyn', 'Gwyneth', 'Hadassah', 'Hadley',
'Hailee', 'Hailey', 'Hailie', 'Haleigh', 'Haley', 'Hali', 'Halie',
'Halle', 'Halley', 'Hallie', 'Hana', 'Hanna', 'Hannah', 'Harlene',
'Harley', 'Harlow', 'Harmony', 'Harper', 'Harriet', 'Harriett',
'Harriette', 'Haruko', 'Hasel', 'Hassie', 'Hattie', 'Haven', 'Hayden',
'Haylee', 'Hayleigh', 'Hayley', 'Haylie', 'Hazel', 'Hazelle', 'Hazle',
'Heather', 'Heaven', 'Hedwig', 'Hedy', 'Heidi', 'Heidy', 'Helaine',
'Helen', 'Helena', 'Helene', 'Helga', 'Hellen', 'Helma', 'Helyn',
'Hennie', 'Henretta', 'Henrietta', 'Henriette', 'Herlinda', 'Herma',
'Hermina', 'Hermine', 'Herminia', 'Hertha', 'Hessie', 'Hester',
'Hettie', 'Hetty', 'Hilah', 'Hilary', 'Hilda', 'Hildegard',
'Hildegarde', 'Hildred', 'Hildur', 'Hillary', 'Hilma', 'Holli',
'Hollie', 'Hollis', 'Holly', 'Honora', 'Hope', 'Hortencia', 'Hortense',
'Hortensia', 'Hulda', 'Huldah', 'Hunter', 'Ica', 'Icey', 'Icie', 'Icy',
'Ida', 'Idabelle', 'Idamae', 'Idell', 'Idella', 'Iesha', 'Ieshia',
'Ila', 'Ilah', 'Ilda', 'Ilene', 'Iliana', 'Illa', 'Ilma', 'Ilo',
'Ilona', 'Ima', 'Imani', 'Imelda', 'Imo', 'Imogene', 'Ina', 'India',
'Indiana', 'Inell', 'Ines', 'Inez', 'Infant', 'Inga', 'Ingeborg',
'Inger', 'Ingrid', 'Iola', 'Iona', 'Ione', 'Ira', 'Ireland', 'Irena',
'Irene', 'Iridian', 'Irine', 'Iris', 'Irma', 'Irva', 'Isa', 'Isabel',
'Isabela', 'Isabell', 'Isabella', 'Isabelle', 'Isadora', 'Isamar',
'Isis', 'Isla', 'Isobel', 'Itzel', 'Iva', 'Ivah', 'Ivana', 'Ivanna',
'Ivette', 'Ivey', 'Ivie', 'Ivonne', 'Ivory', 'Ivy', 'Iyana', 'Iyanna',
'Iza', 'Izabella', 'Izabelle', 'Izetta', 'Izola', 'Izora', 'Jacalyn',
'Jacey', 'Jackeline', 'Jacki', 'Jackie', 'Jacklyn', 'Jaclyn', 'Jacque',
'Jacquelin', 'Jacqueline', 'Jacquelyn', 'Jacquline', 'Jacqulyn', 'Jada',
'Jade', 'Jaden', 'Jadyn', 'Jaeda', 'Jaelyn', 'Jaelynn', 'Jaida',
'Jaiden', 'Jaidyn', 'Jailene', 'Jailyn', 'Jaime', 'Jaimee', 'Jakayla',
'Jaleesa', 'Jalisa', 'Jalissa', 'Jaliyah', 'Jalyn', 'Jalynn', 'Jamey',
'Jami', 'Jamie', 'Jamila', 'Jamiya', 'Jammie', 'Jamya', 'Jan', 'Jana',
'Janae', 'Janay', 'Jane', 'Janeen', 'Janel', 'Janell', 'Janelle',
'Janene', 'Janessa', 'Janet', 'Janette', 'Janey', 'Janiah', 'Janice',
'Janie', 'Janine', 'Janis', 'Janiya', 'Janiyah', 'Jann', 'Janna',
'Jannette', 'Jannie', 'January', 'Janyce', 'Jaquelin', 'Jaqueline',
'Jaslene', 'Jaslyn', 'Jasmin', 'Jasmine', 'Jasmyn', 'Jasmyne',
'Jaunita', 'Jaycee', 'Jaycie', 'Jayda', 'Jayde', 'Jayden', 'Jaye',
'Jayla', 'Jaylah', 'Jaylee', 'Jayleen', 'Jaylen', 'Jaylene', 'Jaylin',
'Jaylyn', 'Jaylynn', 'Jayme', 'Jayne', 'Jazlene', 'Jazlyn', 'Jazlynn',
'Jazmin', 'Jazmine', 'Jazmyn', 'Jazmyne', 'Jean', 'Jeana', 'Jeane',
'Jeanetta', 'Jeanette', 'Jeanie', 'Jeanine', 'Jeanmarie', 'Jeanna',
'Jeanne', 'Jeannette', 'Jeannie', 'Jeannine', 'Jeffie', 'Jemima',
'Jena', 'Jenelle', 'Jenifer', 'Jenilee', 'Jenna', 'Jennette', 'Jenni',
'Jennie', 'Jennifer', 'Jenniffer', 'Jenny', 'Jensen', 'Jeraldine',
'Jeri', 'Jerica', 'Jerilyn', 'Jerilynn', 'Jerri', 'Jerrica', 'Jerrie',
'Jerrilyn', | |
'''
Various statistical functions. Note that many have an `add_intercept` argument
and that this is True by default, which means the X matrix will have a column
of ones added for the calculation of the intercept. If you are already using
a design/ model matrix with an intercept term, be sure to set
`add_intercept = False`.
'''
from collections import Counter
import math
import numpy as np
# Named sets of summary statistics, keyed by use case; each entry maps a
# statistic label to a callable taking a single NumPy array argument
summary_statistics = {
    'boxplot': {
        'nanmin': np.nanmin,
        'nanmax': np.nanmax,
        'nanmedian': np.nanmedian,
        # 25th percentile (first quartile) of the non-NaN values
        'q1': lambda x: np.percentile(x[~np.isnan(x)], 25),
        # NOTE(review): despite the key name, this computes the 75th
        # percentile (third quartile, conventionally "q3"), not the
        # median ("q2"); the key is kept as-is for existing callers
        'q2': lambda x: np.percentile(x[~np.isnan(x)], 75)
    }
}
def detrend(y, x = None, fill = False):
    '''
    Detrends a 1D series (linearly). Equivalently, returns the residuals
    of an OLS regression where the (transpose of the) design matrix is:

        1 1 1 1 ... 1
        0 1 2 3 ... N

    This removes the linear (straight line) component, e.g., of a time
    series with equal-size time steps.

    Parameters
    ----------
    y : numpy.ndarray
        The 1D series to detrend
    x : numpy.ndarray
        (Optional) The linear series describing the trend; usually a series
        of consecutive integers, e.g.: 1, 2, 3, ...
    fill : bool
        (Optional) True to fill NaNs with mean value (Default: False)

    Returns
    -------
    numpy.ndarray
        The detrended y values
    '''
    assert y.ndim == 1, 'Series to detrend must be a 1D vector'
    n, m = (y.size, 1)
    if x is not None and x.ndim == 2:
        n, m = x.shape
    # Return all NaNs if the input array is all NaNs; np.full() is used
    # because np.repeat(scalar, n, axis = 0) passes an axis for a 0-d
    # input, which raises an error on newer NumPy versions
    if np.all(np.isnan(y)):
        return np.full((n,), np.nan)
    nan_mask = np.isnan(y)
    # Optionally, fill NaNs with the mean of the non-NaN values
    if fill:
        y = y.copy()
        y[nan_mask] = np.nanmean(y)
    x = np.arange(0, n) if x is None else x
    beta = ols(x, y, add_intercept = True)
    # Fitted (trend) values; prepend a column of ones for the intercept
    yhat = np.hstack(
        (np.ones((n,)).reshape((n, 1)), x.reshape((n, m)))) @ beta
    if fill:
        # Restore NaNs at their original positions
        return np.where(nan_mask, np.nan, np.subtract(y, yhat))
    return np.subtract(y, yhat)
def entropy(seq, base = 2):
    '''
    Returns the Shannon entropy of an input sequence of elements. Default
    units are bits but other units can be returned by changing the base.
    All calculations are with base-2 logarithms; change in base is done
    through multiplying by the constant factor `log_b(a)` to change from
    base `a` to base `b`. Adapted from [1].

    NOTE: An estimate of the upper limit or "optimal" entropy for base `b`
    with `N` possible symbols can be obtained:

        math.log(n, b)

    1. http://rosettacode.org/wiki/Entropy#Python

    Parameters
    ----------
    seq : list or tuple or str
        Sequence of elements over which entropy is calculated
    base : int or float
        Base of the output units, e.g., 2 for "bits," e (2.718...) for "nats,"
        and 10 for "bans" (Default: 2)

    Returns
    -------
    float
    '''
    # Trivial cases: empty sequence or all symbols the same; set() works
    # for strings too, whereas the previous np.equal(seq[0], seq) check
    # compared the first character to the whole string (not elementwise)
    # for str input, missing the trivial case and returning -0.0
    if len(set(seq)) <= 1:
        return 0.0 # Avoid a "-0.0" return
    p, lns = Counter(seq), float(len(seq))
    e = -sum( count / lns * np.log2(count / lns) for count in p.values())
    return e if base == 2 else e * math.log(2, base)
def harmonic_ols(x, y, period = 12):
    r'''
    Returns OLS estimates for harmonic series of X, i.e., the matrix `A` of
    the equation `y = Ax + b` is a linear combination of sines and cosines.
    The design matrix also contains the (linear) `x` term itself, so a
    linear trend is fit alongside the harmonic components:
    $$
    y_{i,t} = \alpha_i + \beta_{i,0}\, x_{i,t} +
    \beta_{i,1}\, \mathrm{cos}\left(\frac{2\pi}{T}x_{i,t}\right) +
    \beta_{i,2}\,\mathrm{sin}\left(\frac{2\pi}{T}x_{i,t}\right) + \varepsilon_{i,t}
    $$

    Parameters
    ----------
    x : numpy.ndarray
        The independent variable, must be 1D
    y : numpy.ndarray
        The dependent variable, must be 1D
    period : int or float
        The period T of the harmonic, in the same units as `x`
        (Default: 12, e.g., monthly steps with an annual cycle)

    Returns
    -------
    numpy.ndarray
        The solution to the least-squares problem
    '''
    assert x.ndim == 1, 'Array x must be 1D'
    x0 = x.reshape((x.shape[0], 1))
    # Create initial design/ model matrix (without intercept), as this is
    # augmented by ols()
    a = ((2 * np.pi) / period) # Harmonic (angular frequency) coefficient
    xm = np.hstack((x0, np.cos(a * x0), np.sin(a * x0)))
    return ols(xm, y, add_intercept = True)
def linear_constraint(xmin, xmax, form = None):
    '''
    Returns a linear ramp function, for deriving a value on [0, 1] from
    an input value `x`:

        if x >= xmax:
            return 1
        if x <= xmin:
            return 0
        return (x - xmin) / (xmax - xmin)

    Parameters
    ----------
    xmin : int or float
        Lower bound of the linear ramp function
    xmax : int or float
        Upper bound of the linear ramp function
    form : str
        Type of ramp function: "reversed" decreases as x increases;
        "binary" returns xmax when x == 1; default (None) is increasing
        as x increases.

    Returns
    -------
    function
    '''
    # Every (xmin, xmax) pair must satisfy the bound, hence np.all()
    # rather than np.any(), which would accept array bounds where only
    # one pair is valid; "binary" form is exempt because xmin/xmax are
    # the two output states, not bounds
    assert form == 'binary' or np.all(xmax >= xmin),\
        'xmax must be greater than/ equal to xmin'
    if form == 'reversed':
        # Decreasing ramp: 1 at/below xmin, 0 at/above xmax
        return lambda x: np.where(x >= xmax, 0,
            np.where(x < xmin, 1, 1 - np.divide(
                np.subtract(x, xmin), xmax - xmin)))
    if form == 'binary':
        # Two-state output: xmax where x == 1, xmin everywhere else
        return lambda x: np.where(x == 1, xmax, xmin)
    # Default: increasing ramp: 0 at/below xmin, 1 at/above xmax
    return lambda x: np.where(x >= xmax, 1,
        np.where(x < xmin, 0,
            np.divide(np.subtract(x, xmin), xmax - xmin)))
def ols(x, y, add_intercept = True, use_qr_decomp = True):
    '''
    Returns ordinary least squares (OLS) estimates for X. If X is univariate
    (1D), returns the slope of the line between Y and X as well as the
    "y-intercept" or the intersection of this line with the vertical axis.

    Parameters
    ----------
    x : numpy.ndarray
        The independent variable(s); should N x M where M is the number of
        variables
    y : numpy.ndarray
        The dependent variable, must be 1D
    add_intercept : bool
        True to add a y-intercept term (Default: True)
    use_qr_decomp : bool
        True to use QR decomposition to obtain solution (Default: True)

    Returns
    -------
    numpy.ndarray
        The solution to the least-squares problem

    Raises
    ------
    ValueError
        If there are more model parameters than samples (rank deficient)
    '''
    assert y.ndim == 1, 'Array y must be 1D'
    n = x.shape[0] # Num. of samples
    m = 1 if x.ndim == 1 else x.shape[1] # Num. of covariates
    # Create design/ model matrix
    xm = x.reshape((n, m)) # Without y-intercept term, unless...
    if add_intercept:
        xm = np.hstack((np.ones((n, 1)), xm))
    if xm.shape[1] > n:
        raise ValueError('System of equations is rank deficient')
    # Generally better to use QR decomposition to obtain \hat{\beta};
    # solving the system directly with np.linalg.solve() is faster and
    # more numerically stable than forming an explicit matrix inverse
    if use_qr_decomp:
        q, r = np.linalg.qr(xm)
        fit = np.linalg.solve(r, q.T @ y) # R beta = Q'y
    else:
        fit = np.linalg.solve(xm.T @ xm, xm.T @ y) # Normal equations
    return fit
def ols_variance(x, y, beta = None, add_intercept = True):
    r'''
    Returns the unbiased estimate of the OLS model variance:

    $$
    SSE / (n - p)
    $$

    Where $SSE = (y - X\beta )' (y - X\beta )$ is the sum of squared errors
    of prediction, equivalent to the residual sum of squares (RSS); n is the
    number of samples and p the number of model parameters.

    Parameters
    ----------
    x : numpy.ndarray
        The independent variable(s); should be N x M where M is the number of
        variables
    y : numpy.ndarray
        The dependent variable, must be 1D
    beta : numpy.ndarray
        (Optional) Coefficient estimates, an M-dimensional vector
    add_intercept : bool
        True to add a y-intercept term (Default: True)

    Returns
    -------
    float
        The unbiased model variance estimate, SSE / (n - p)
    '''
    num_samples = x.shape[0]
    # One parameter per covariate, plus one more if an intercept is fitted
    num_params = (1 if x.ndim == 1 else x.shape[1]) + (1 if add_intercept else 0)
    sse = sum_of_squares(x, y, beta, add_intercept, which = 'sse')
    return sse / (num_samples - num_params)
def rmsd(x1, x2, n = None, weights = None):
r'''
Returns the root mean-squared deviation (RMSD) between two continuously
varying random quantities:
$$
RMSD(\hat{x}, x) = \sqrt{n^{-1}\sum_i^N (\hat{x}_i - x_i)^2}
$$
Where `N` (or `n`) is the number of samples (e.g., model cells or time
steps).
NOTE: `NoData` should be filled with `np.nan` prior to calling this
function; it is assumed that both vectors have the same missingness.
Parameters
----------
x1 : numpy.ndarray
A 1D or 2D numeric vector
x2 : numpy.ndarray
A 1D or 2D numeric vector
n : int
(Optional) The number of samples, for normalizing; if not provided,
calculated as the number of non-NaN samples
weights : numpy.ndarray
Weights array of a shape that can be broadcast to match both x1 and x2
Returns
-------
float
'''
assert isinstance(n, int) or | |
"""
Class that plays the Reinforcement Learning agent
"""
# !/usr/bin/python
import csv
import pprint
import threading
import numpy as np
import json
import random
import pathlib
from datetime import datetime
import time
import copy
from time import sleep
import logging
import sys
from formatter_for_output import format_console_output
from plotter.plot_output_data import PlotOutputData
from learning.run_output_Q_parameters import RunOutputQParameters
from request_builder.builder import build_command
from device_communication.client import operate_on_bulb, operate_on_bulb_json
from state_machine.state_machine_yeelight import compute_reward_from_states, compute_next_state_from_props, get_states, \
get_optimal_policy, get_optimal_path
from config import FrameworkConfiguration
class ReinforcementLearningAlgorithm(object):
def __init__(self, discovery_report, thread_id):
self.discovery_report = discovery_report
self.total_episodes = FrameworkConfiguration.total_episodes
self.max_steps = FrameworkConfiguration.max_steps
self.epsilon = FrameworkConfiguration.epsilon
self.alpha = FrameworkConfiguration.alpha
self.gamma = FrameworkConfiguration.gamma
self.decay_episode = FrameworkConfiguration.decay_episode
self.decay_value = FrameworkConfiguration.decay_value
self.show_graphs = FrameworkConfiguration.show_graphs
self.follow_policy = FrameworkConfiguration.follow_policy
self.seconds_to_wait = FrameworkConfiguration.seconds_to_wait
self.follow_partial_policy = FrameworkConfiguration.follow_partial_policy
self.follow_policy_every_tot_episodes = FrameworkConfiguration.follow_policy_every_tot_episodes
self.num_actions_to_use = FrameworkConfiguration.num_actions_to_use
self.algorithm = FrameworkConfiguration.algorithm
# lambda is needed only in case of sarsa(lambda) or Q(lambda) algorithms
if self.algorithm == 'sarsa_lambda' or self.algorithm == 'qlearning_lambda':
self.lam = FrameworkConfiguration.lam
if FrameworkConfiguration.date_old_matrix != 'YY_mm_dd_HH_MM_SS':
self.use_old_matrix = FrameworkConfiguration.use_old_matrix # in sarsa lambda also E is needed
self.date_old_matrix = FrameworkConfiguration.date_old_matrix # I should check it is in a correct format
else:
self.use_old_matrix = False
self.current_date = datetime.now()
if thread_id:
self.thread_id = thread_id
self.id_for_output = '%Y_%m_%d_%H_%M_%S' + '_' + str(self.thread_id)
self.storage_reward = 0 # temporary storage variable
def choose_action(self, state, q_matrix):
"""
Function to choose the next action, same for all algorithms
"""
# Here I should choose the method
if np.random.uniform(0, 1) < self.epsilon:
# print("\t\tSelect the action randomly")
action = random.randint(0, self.num_actions_to_use - 1) # don't use the first one
else:
# Select maximum, if multiple values select randomly
# print("\t\tSelect maximum")
# choose random action between the max ones
action = np.random.choice(np.where(q_matrix[state, :] == q_matrix[state, :].max())[0])
# The action then should be converted when used into a json_string returned by builder_yeelight
# action is an index
return action
def update_sarsa(self, state, state_2, reward, action, action_2, q_matrix):
"""
SARSA function to learn the Q-value
"""
predict = q_matrix[state, action]
target = reward + self.gamma * q_matrix[state_2, action_2]
q_matrix[state, action] = q_matrix[state, action] + self.alpha * (target - predict)
def update_sarsa_lambda(self, state, state_2, reward, action, action_2, len_states, len_actions, q_matrix,
e_matrix):
"""
SARSA(lambda) function to update the Q-value matrix and the Eligibility matrix
"""
predict = q_matrix[state, action]
target = reward + self.gamma * q_matrix[state_2, action_2]
delta = target - predict
e_matrix[state, action] = e_matrix[state, action] + 1
# For all s, a
for s in range(len_states):
for a in range(len_actions):
q_matrix[s, a] = q_matrix[s, a] + self.alpha * delta * e_matrix[s, a]
e_matrix[s, a] = self.gamma * self.lam * e_matrix[s, a]
def update_qlearning_lambda(self, state, state_2, reward, action, action_2, len_states, len_actions, q_matrix,
e_matrix):
"""
Q-learning(lambda) (Watkins's Q(lambda) algorithm) function to update the Q-value matrix and the Eligibility matrix
"""
predict = q_matrix[state, action]
maxQ = np.amax(q_matrix[state_2, :]) # Find maximum value for the new state Q(s', a*)
maxIndex = np.argmax(q_matrix[state_2, :]) # Find index of the maximum value a*
target = reward + self.gamma * maxQ
delta = target - predict
e_matrix[state, action] = e_matrix[state, action] + 1
# For all s, a
for s in range(len_states):
for a in range(len_actions):
q_matrix[s, a] = q_matrix[s, a] + self.alpha * delta * e_matrix[s, a]
if action_2 == maxIndex:
e_matrix[s, a] = self.gamma * self.lam * e_matrix[s, a]
else:
e_matrix[s, a] = 0
def update_qlearning(self, state, state_2, reward, action, q_matrix):
"""
# Q-learning function to learn the Q-value
"""
predict = q_matrix[state, action]
maxQ = np.amax(q_matrix[state_2, :]) # Find maximum value for the new state
target = reward + self.gamma * maxQ
q_matrix[state, action] = q_matrix[state, action] + self.alpha * (target - predict)
def initialize_log_files(self, output_directory, log_directory):
"""
Get log filenames and build non-existing directories
"""
log_dir = FrameworkConfiguration.directory + output_directory + '/' + log_directory
pathlib.Path(log_dir + '/').mkdir(parents=True, exist_ok=True) # for Python > 3.5 YY_mm_dd_HH_MM_SS'
log_filename = self.current_date.strftime(log_dir + '/' + 'log_' + self.id_for_output + '.log')
log_date_filename = FrameworkConfiguration.directory + output_directory + '/log_date.log'
return log_filename, log_date_filename
def initialize_output_q_params_files(self, output_directory, q_params_directory):
"""
Get output filenames for saving Q and parameters and build non-existing directories
"""
output_Q_params_dir = FrameworkConfiguration.directory + output_directory + '/' + q_params_directory
pathlib.Path(output_Q_params_dir + '/').mkdir(parents=True, exist_ok=True) # for Python > 3.5
output_Q_filename = self.current_date.strftime(
output_Q_params_dir + '/' + 'output_Q_' + self.id_for_output + '.csv')
output_parameters_filename = self.current_date.strftime(
output_Q_params_dir + '/' + 'output_parameters_' + self.id_for_output + '.csv')
output_E_filename = ''
if self.algorithm == 'sarsa_lambda' or self.algorithm == 'qlearning_lambda':
output_E_filename = self.current_date.strftime(
output_Q_params_dir + '/' + 'output_E_' + self.id_for_output + '.csv')
return output_Q_filename, output_parameters_filename, output_E_filename
def initialize_output_csv_files(self, output_directory, output_csv_directory):
"""
Get output filenames for saving all episodes result and build non-existing directories
"""
output_dir = FrameworkConfiguration.directory + output_directory + '/' + output_csv_directory
pathlib.Path(output_dir + '/').mkdir(parents=True, exist_ok=True) # for Python > 3.5
output_filename = self.current_date.strftime(
output_dir + '/' + 'output_' + self.algorithm + '_' + self.id_for_output + '.csv')
partial_output_filename = self.current_date.strftime(
output_dir + '/' + 'partial_output_' + self.algorithm + '_' + self.id_for_output + '.csv')
return output_filename, partial_output_filename
def write_date_id_to_log(self, log_date_filename):
"""
Write the identifier of files (date) and corresponding algorithm to log_date.log file
"""
with open(log_date_filename, mode='a') as output_file:
output_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONE)
output_writer.writerow([self.current_date.strftime(self.id_for_output), self.algorithm])
def write_params_to_output_file(self, output_parameters_filename, optimal_policy, optimal_path):
"""
Write all parameters of the algorithm to output file
"""
with open(output_parameters_filename, mode='w') as output_file:
output_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONE)
output_writer.writerow(['algorithm_used', self.algorithm])
output_writer.writerow(['epsilon', self.epsilon])
output_writer.writerow(['max_steps', self.max_steps])
output_writer.writerow(['total_episodes', self.total_episodes])
output_writer.writerow(['alpha', self.alpha])
output_writer.writerow(['num_actions_to_use', self.num_actions_to_use])
output_writer.writerow(['gamma', self.gamma])
output_writer.writerow(['decay_episode', self.decay_episode])
output_writer.writerow(['decay_value', self.decay_value])
output_writer.writerow(['seconds_to_wait', self.seconds_to_wait])
output_writer.writerow(['optimal_policy', "-".join(str(act) for act in optimal_policy)])
output_writer.writerow(['optimal_path', "-".join(str(pat) for pat in optimal_path)])
output_writer.writerow(['path', FrameworkConfiguration.path])
output_writer.writerow(['protocol', self.discovery_report['protocol']])
if self.algorithm == 'sarsa_lambda' or self.algorithm == 'qlearning_lambda':
output_writer.writerow(['lambda', self.lam])
def retrieve_old_q_matrix(self, output_directory, q_params_directory, len_states, len_actions, empty_matrix):
"""
Retrieve old save Q matrix
"""
file_Q = 'output_Q_' + self.date_old_matrix + '.csv'
try:
output_Q_params_dir = FrameworkConfiguration.directory + output_directory + '/' + q_params_directory
tmp_matrix = np.genfromtxt(output_Q_params_dir + '/' + file_Q, delimiter=',', dtype=np.float32)
Q_tmp = tmp_matrix[1:, 1:]
Q = copy.deepcopy(Q_tmp)
except Exception as e:
logging.warning("Wrong file format: " + str(e))
logging.warning("Using an empty Q matrix instead of the old one.")
return empty_matrix
# Check the format of the matrix is correct
if len_states != len(Q) or len_actions != len(Q[0]) or np.isnan(np.sum(Q)):
logging.warning("Wrong file format: wrong Q dimensions or nan values present")
logging.warning("Using an empty Q matrix instead of the old one.")
return empty_matrix
return Q
    def retrieve_old_e_matrix(self, output_directory, q_params_directory, len_states, len_actions, empty_matrix):
        """
        Retrieve an old saved E (eligibility-trace) matrix.

        Falls back to ``empty_matrix`` when the file cannot be read, has the
        wrong dimensions, or contains NaN values.
        """
        file_E = 'output_E_' + self.date_old_matrix + '.csv'
        try:
            output_Q_params_dir = FrameworkConfiguration.directory + output_directory + '/' + q_params_directory
            tmp_matrix = np.genfromtxt(output_Q_params_dir + '/' + file_E, delimiter=',', dtype=np.float32)
            # Drop the first row/column (presumably header labels written at save time — confirm)
            E_tmp = tmp_matrix[1:, 1:]
            E = copy.deepcopy(E_tmp)
        except Exception as e:
            logging.warning("Wrong file format: " + str(e))
            logging.warning("Using an empty E matrix instead of the old one.")
            return empty_matrix
        # Check the format of the matrix is correct
        if len_states != len(E) or len_actions != len(E[0]) or np.isnan(np.sum(E)):
            logging.warning("Wrong file format: wrong E dimensions or nan values present")
            logging.warning("Using an empty E matrix instead of the old one.")
            return empty_matrix
        return E
def write_headers_to_output_files(self, output_filename, partial_output_filename):
"""
Write headers to output csv files
"""
with open(output_filename, mode='w') as output_file:
output_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
output_writer.writerow(['Episodes', 'Reward', 'CumReward', 'Timesteps'])
if self.follow_partial_policy:
with open(partial_output_filename, mode='w') as partial_output_file:
output_writer = csv.writer(partial_output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
output_writer.writerow(
['CurrentEpisode', 'Timesteps', 'ObtainedReward', 'Time', 'PolicySelected', 'StatesPassed'])
def set_initial_state(self):
"""
Set device to starting state (e.g. power off)
"""
num_actions = 0
if FrameworkConfiguration.path == 3:
# Special initial configuration for visual checks on the bulb
# ONLY FOR PATH 3
operate_on_bulb("set_power", str("\"on\", \"sudden\", 0"), self.discovery_report, self.discovery_report['protocol'])
num_actions += 1
sleep(self.seconds_to_wait)
operate_on_bulb("set_rgb", str("255" + ", \"sudden\", 500"), self.discovery_report, self.discovery_report['protocol'])
num_actions += 1
sleep(self.seconds_to_wait)
elif FrameworkConfiguration.path == 4:
# Special initial configuration for for path 4, starting to power on
# ONLY FOR PATH 4
if FrameworkConfiguration.DEBUG:
logging.debug("\t\tREQUEST: Setting power on")
operate_on_bulb("set_power", str("\"on\", \"sudden\", 0"), self.discovery_report,
self.discovery_report['protocol'])
num_actions += 1
sleep(self.seconds_to_wait)
return num_actions
# Turn off the lamp
if FrameworkConfiguration.DEBUG:
logging.debug("\t\tREQUEST: Setting power off")
operate_on_bulb("set_power", str("\"off\", \"sudden\", 0"), self.discovery_report, self.discovery_report['protocol'])
num_actions += 1
return num_actions
def write_log_file(self, log_filename, t, tmp_reward, state1, state2, action1, action2):
"""
Write data | |
str, List[kv.Event]]
class LeaseGrantRequest(Message):
    """Typed wrapper for the ``rpc_pb2.LeaseGrantRequest`` protobuf message."""
    pb_cls = rpc_pb2.LeaseGrantRequest
    __slots__ = ['TTL', 'ID']
    def __init__(
        self,
        TTL: int = 0,
        ID: int = 0
    ):
        self.TTL = TTL
        self.ID = ID
    @classmethod
    def get_slot_types(cls):
        # Slot types, parallel to __slots__ order.
        return [int, int]
class LeaseGrantResponse(Message):
    """Typed wrapper for the ``rpc_pb2.LeaseGrantResponse`` protobuf message."""
    pb_cls = rpc_pb2.LeaseGrantResponse
    __slots__ = ['header', 'ID', 'TTL', 'error']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None,
        ID: int = 0,
        TTL: int = 0,
        error: str = ''
    ):
        self.header = header
        self.ID = ID
        self.TTL = TTL
        self.error = error
    @classmethod
    def get_slot_types(cls):
        # Slot types, parallel to __slots__ order.
        return [ResponseHeader, int, int, str]
class LeaseRevokeRequest(Message):
    """Typed wrapper for the ``rpc_pb2.LeaseRevokeRequest`` protobuf message."""
    pb_cls = rpc_pb2.LeaseRevokeRequest
    __slots__ = ['ID']
    def __init__(
        self,
        ID: int = 0
    ):
        self.ID = ID
    @classmethod
    def get_slot_types(cls):
        return [int]
class LeaseRevokeResponse(Message):
    """Typed wrapper for the ``rpc_pb2.LeaseRevokeResponse`` protobuf message."""
    pb_cls = rpc_pb2.LeaseRevokeResponse
    __slots__ = ['header']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None
    ):
        self.header = header
    @classmethod
    def get_slot_types(cls):
        return [ResponseHeader]
class LeaseKeepAliveRequest(Message):
    """Typed wrapper for the ``rpc_pb2.LeaseKeepAliveRequest`` protobuf message."""
    pb_cls = rpc_pb2.LeaseKeepAliveRequest
    __slots__ = ['ID']
    def __init__(
        self,
        ID: int = 0
    ):
        self.ID = ID
    @classmethod
    def get_slot_types(cls):
        return [int]
class LeaseKeepAliveResponse(Message):
    """Typed wrapper for the ``rpc_pb2.LeaseKeepAliveResponse`` protobuf message."""
    pb_cls = rpc_pb2.LeaseKeepAliveResponse
    __slots__ = ['header', 'ID', 'TTL']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None,
        ID: int = 0,
        TTL: int = 0
    ):
        self.header = header
        self.ID = ID
        self.TTL = TTL
    @classmethod
    def get_slot_types(cls):
        # Slot types, parallel to __slots__ order.
        return [ResponseHeader, int, int]
class LeaseTimeToLiveRequest(Message):
    """Typed wrapper for the ``rpc_pb2.LeaseTimeToLiveRequest`` protobuf message."""
    pb_cls = rpc_pb2.LeaseTimeToLiveRequest
    __slots__ = ['ID', 'keys']
    def __init__(
        self,
        ID: int = 0,
        keys: bool = False
    ):
        self.ID = ID
        self.keys = keys
    @classmethod
    def get_slot_types(cls):
        return [int, bool]
class LeaseTimeToLiveResponse(Message):
    """Typed wrapper for the ``rpc_pb2.LeaseTimeToLiveResponse`` protobuf message."""
    pb_cls = rpc_pb2.LeaseTimeToLiveResponse
    __slots__ = ['header', 'ID', 'TTL', 'grantedTTL', 'keys']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None,
        ID: int = 0,
        TTL: int = 0,
        grantedTTL: int = 0,
        keys: Optional[List[bytes]] = None
    ):
        self.header = header
        self.ID = ID
        self.TTL = TTL
        self.grantedTTL = grantedTTL
        # None default avoids a shared mutable default list
        self.keys = [] if keys is None else keys
    @classmethod
    def get_slot_types(cls):
        # Slot types, parallel to __slots__ order.
        return [ResponseHeader, int, int, int, List[bytes]]
class LeaseLeasesRequest(Message):
    """Typed wrapper for the (field-less) ``rpc_pb2.LeaseLeasesRequest`` protobuf message."""
    pb_cls = rpc_pb2.LeaseLeasesRequest
    __slots__ = []
    def __init__(
        self
    ):
        pass
    @classmethod
    def get_slot_types(cls):
        return []
class LeaseStatus(Message):
    """Typed wrapper for the ``rpc_pb2.LeaseStatus`` protobuf message."""
    pb_cls = rpc_pb2.LeaseStatus
    __slots__ = ['ID']
    def __init__(
        self,
        ID: int = 0
    ):
        self.ID = ID
    @classmethod
    def get_slot_types(cls):
        return [int]
class LeaseLeasesResponse(Message):
    """Typed wrapper for the ``rpc_pb2.LeaseLeasesResponse`` protobuf message."""
    pb_cls = rpc_pb2.LeaseLeasesResponse
    __slots__ = ['header', 'leases']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None,
        leases: Optional[List['LeaseStatus']] = None
    ):
        self.header = header
        # None default avoids a shared mutable default list
        self.leases = [] if leases is None else leases
    @classmethod
    def get_slot_types(cls):
        return [ResponseHeader, List[LeaseStatus]]
class Member(Message):
    """Typed wrapper for the ``rpc_pb2.Member`` protobuf message."""
    pb_cls = rpc_pb2.Member
    __slots__ = ['ID', 'name', 'peerURLs', 'clientURLs']
    def __init__(
        self,
        ID: int = 0,
        name: str = '',
        peerURLs: Optional[List[str]] = None,
        clientURLs: Optional[List[str]] = None
    ):
        self.ID = ID
        self.name = name
        # None defaults avoid shared mutable default lists
        self.peerURLs = [] if peerURLs is None else peerURLs
        self.clientURLs = [] if clientURLs is None else clientURLs
    @classmethod
    def get_slot_types(cls):
        # Slot types, parallel to __slots__ order.
        return [int, str, List[str], List[str]]
class MemberAddRequest(Message):
    """Typed wrapper for the ``rpc_pb2.MemberAddRequest`` protobuf message."""
    pb_cls = rpc_pb2.MemberAddRequest
    __slots__ = ['peerURLs']
    def __init__(
        self,
        peerURLs: Optional[List[str]] = None
    ):
        # None default avoids a shared mutable default list
        self.peerURLs = [] if peerURLs is None else peerURLs
    @classmethod
    def get_slot_types(cls):
        return [List[str]]
class MemberAddResponse(Message):
    """Typed wrapper for the ``rpc_pb2.MemberAddResponse`` protobuf message."""
    pb_cls = rpc_pb2.MemberAddResponse
    __slots__ = ['header', 'member', 'members']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None,
        member: Optional['Member'] = None,
        members: Optional[List['Member']] = None
    ):
        self.header = header
        self.member = member
        # None default avoids a shared mutable default list
        self.members = [] if members is None else members
    @classmethod
    def get_slot_types(cls):
        # Slot types, parallel to __slots__ order.
        return [ResponseHeader, Member, List[Member]]
class MemberRemoveRequest(Message):
    """Typed wrapper for the ``rpc_pb2.MemberRemoveRequest`` protobuf message."""
    pb_cls = rpc_pb2.MemberRemoveRequest
    __slots__ = ['ID']
    def __init__(
        self,
        ID: int = 0
    ):
        self.ID = ID
    @classmethod
    def get_slot_types(cls):
        return [int]
class MemberRemoveResponse(Message):
    """Typed wrapper for the ``rpc_pb2.MemberRemoveResponse`` protobuf message."""
    pb_cls = rpc_pb2.MemberRemoveResponse
    __slots__ = ['header', 'members']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None,
        members: Optional[List['Member']] = None
    ):
        self.header = header
        # None default avoids a shared mutable default list
        self.members = [] if members is None else members
    @classmethod
    def get_slot_types(cls):
        return [ResponseHeader, List[Member]]
class MemberUpdateRequest(Message):
    """Typed wrapper for the ``rpc_pb2.MemberUpdateRequest`` protobuf message."""
    pb_cls = rpc_pb2.MemberUpdateRequest
    __slots__ = ['ID', 'peerURLs']
    def __init__(
        self,
        ID: int = 0,
        peerURLs: Optional[List[str]] = None
    ):
        self.ID = ID
        # None default avoids a shared mutable default list
        self.peerURLs = [] if peerURLs is None else peerURLs
    @classmethod
    def get_slot_types(cls):
        return [int, List[str]]
class MemberUpdateResponse(Message):
    """Typed wrapper for the ``rpc_pb2.MemberUpdateResponse`` protobuf message."""
    pb_cls = rpc_pb2.MemberUpdateResponse
    __slots__ = ['header', 'members']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None,
        members: Optional[List['Member']] = None
    ):
        self.header = header
        # None default avoids a shared mutable default list
        self.members = [] if members is None else members
    @classmethod
    def get_slot_types(cls):
        return [ResponseHeader, List[Member]]
class MemberListRequest(Message):
    """Typed wrapper for the (field-less) ``rpc_pb2.MemberListRequest`` protobuf message."""
    pb_cls = rpc_pb2.MemberListRequest
    __slots__ = []
    def __init__(
        self
    ):
        pass
    @classmethod
    def get_slot_types(cls):
        return []
class MemberListResponse(Message):
    """Typed wrapper for the ``rpc_pb2.MemberListResponse`` protobuf message."""
    pb_cls = rpc_pb2.MemberListResponse
    __slots__ = ['header', 'members']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None,
        members: Optional[List['Member']] = None
    ):
        self.header = header
        # None default avoids a shared mutable default list
        self.members = [] if members is None else members
    @classmethod
    def get_slot_types(cls):
        return [ResponseHeader, List[Member]]
class DefragmentRequest(Message):
    """Typed wrapper for the (field-less) ``rpc_pb2.DefragmentRequest`` protobuf message."""
    pb_cls = rpc_pb2.DefragmentRequest
    __slots__ = []
    def __init__(
        self
    ):
        pass
    @classmethod
    def get_slot_types(cls):
        return []
class DefragmentResponse(Message):
    """Typed wrapper for the ``rpc_pb2.DefragmentResponse`` protobuf message."""
    pb_cls = rpc_pb2.DefragmentResponse
    __slots__ = ['header']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None
    ):
        self.header = header
    @classmethod
    def get_slot_types(cls):
        return [ResponseHeader]
class MoveLeaderRequest(Message):
    """Typed wrapper for the ``rpc_pb2.MoveLeaderRequest`` protobuf message."""
    pb_cls = rpc_pb2.MoveLeaderRequest
    __slots__ = ['targetID']
    def __init__(
        self,
        targetID: int = 0
    ):
        self.targetID = targetID
    @classmethod
    def get_slot_types(cls):
        return [int]
class MoveLeaderResponse(Message):
    """Typed wrapper for the ``rpc_pb2.MoveLeaderResponse`` protobuf message."""
    pb_cls = rpc_pb2.MoveLeaderResponse
    __slots__ = ['header']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None
    ):
        self.header = header
    @classmethod
    def get_slot_types(cls):
        return [ResponseHeader]
class AlarmRequest(Message):
    """Typed wrapper for the ``rpc_pb2.AlarmRequest`` protobuf message."""
    pb_cls = rpc_pb2.AlarmRequest
    __slots__ = ['action', 'memberID', 'alarm']
    class AlarmAction(Enum):
        # Mirrors the protobuf AlarmRequest.AlarmAction enum values.
        GET = 0
        ACTIVATE = 1
        DEACTIVATE = 2
    def __init__(
        self,
        action: 'AlarmAction' = AlarmAction.GET,
        memberID: int = 0,
        alarm: 'AlarmType' = AlarmType.NONE
    ):
        self.action = action
        self.memberID = memberID
        self.alarm = alarm
    @classmethod
    def get_slot_types(cls):
        # Slot types, parallel to __slots__ order.
        return [cls.AlarmAction, int, AlarmType]
class AlarmMember(Message):
    """Typed wrapper for the ``rpc_pb2.AlarmMember`` protobuf message."""
    pb_cls = rpc_pb2.AlarmMember
    __slots__ = ['memberID', 'alarm']
    def __init__(
        self,
        memberID: int = 0,
        alarm: 'AlarmType' = AlarmType.NONE
    ):
        self.memberID = memberID
        self.alarm = alarm
    @classmethod
    def get_slot_types(cls):
        return [int, AlarmType]
class AlarmResponse(Message):
    """Typed wrapper for the ``rpc_pb2.AlarmResponse`` protobuf message."""
    pb_cls = rpc_pb2.AlarmResponse
    __slots__ = ['header', 'alarms']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None,
        alarms: Optional[List['AlarmMember']] = None
    ):
        self.header = header
        # None default avoids a shared mutable default list
        self.alarms = [] if alarms is None else alarms
    @classmethod
    def get_slot_types(cls):
        return [ResponseHeader, List[AlarmMember]]
class StatusRequest(Message):
    """Typed wrapper for the (field-less) ``rpc_pb2.StatusRequest`` protobuf message."""
    pb_cls = rpc_pb2.StatusRequest
    __slots__ = []
    def __init__(
        self
    ):
        pass
    @classmethod
    def get_slot_types(cls):
        return []
class StatusResponse(Message):
    """Typed wrapper for the ``rpc_pb2.StatusResponse`` protobuf message."""
    pb_cls = rpc_pb2.StatusResponse
    __slots__ = ['header', 'version', 'dbSize', 'leader', 'raftIndex', 'raftTerm', 'raftAppliedIndex', 'errors', 'dbSizeInUse']
    def __init__(
        self,
        header: Optional['ResponseHeader'] = None,
        version: str = '',
        dbSize: int = 0,
        leader: int = 0,
        raftIndex: int = 0,
        raftTerm: int = 0,
        raftAppliedIndex: int = 0,
        errors: Optional[List[str]] = None,
        dbSizeInUse: int = 0
    ):
        self.header = header
        self.version = version
        self.dbSize = dbSize
        self.leader = leader
        self.raftIndex = raftIndex
        self.raftTerm = raftTerm
        self.raftAppliedIndex = raftAppliedIndex
        # None default avoids a shared mutable default list
        self.errors = [] if errors is None else errors
        self.dbSizeInUse = dbSizeInUse
    @classmethod
    def get_slot_types(cls):
        # Slot types, parallel to __slots__ order.
        return [ResponseHeader, str, int, int, int, int, int, List[str], int]
class AuthEnableRequest(Message):
    """Typed wrapper for the (field-less) ``rpc_pb2.AuthEnableRequest`` protobuf message."""
    pb_cls = rpc_pb2.AuthEnableRequest
    __slots__ = []
    def __init__(
        self
    ):
        pass
    @classmethod
    def get_slot_types(cls):
        return []
class AuthDisableRequest(Message):
    """Typed wrapper for the (field-less) ``rpc_pb2.AuthDisableRequest`` protobuf message."""
    pb_cls = rpc_pb2.AuthDisableRequest
    __slots__ = []
    def __init__(
        self
    ):
        pass
    @classmethod
    def get_slot_types(cls):
        return []
class AuthenticateRequest(Message):
    """Typed wrapper for the ``rpc_pb2.AuthenticateRequest`` protobuf message."""
    pb_cls = rpc_pb2.AuthenticateRequest
    __slots__ = ['name', 'password']
    def __init__(
        self,
        name: str = '',
        password: str = ''
    ):
        self.name = name
        self.password = password
    @classmethod
    def get_slot_types(cls):
        return [str, str]
class AuthUserAddRequest(Message):
    """Typed wrapper for the ``rpc_pb2.AuthUserAddRequest`` protobuf message."""
    pb_cls = rpc_pb2.AuthUserAddRequest
    __slots__ = ['name', 'password']
    def __init__(
        self,
        name: str = '',
        password: str = ''
    ):
        self.name = name
        self.password = password
    @classmethod
    def get_slot_types(cls):
        return [str, str]
class AuthUserGetRequest(Message):
    """Typed wrapper for the ``rpc_pb2.AuthUserGetRequest`` protobuf message."""
    pb_cls = rpc_pb2.AuthUserGetRequest
    __slots__ = ['name']
    def __init__(
        self,
        name: str = ''
    ):
        self.name = name
    @classmethod
    def get_slot_types(cls):
        return [str]
class AuthUserDeleteRequest(Message):
    """Typed wrapper for the ``rpc_pb2.AuthUserDeleteRequest`` protobuf message."""
    pb_cls = rpc_pb2.AuthUserDeleteRequest
    __slots__ = ['name']
    def __init__(
        self,
        name: str = ''
    ):
        self.name = name
    @classmethod
    def get_slot_types(cls):
        return [str]
class AuthUserChangePasswordRequest(Message):
    """Typed wrapper for the ``rpc_pb2.AuthUserChangePasswordRequest`` protobuf message."""
    pb_cls = rpc_pb2.AuthUserChangePasswordRequest
    __slots__ = ['name', 'password']
    def __init__(
        self,
        name: str = '',
        password: str = ''
    ):
        self.name = name
        self.password = password
    @classmethod
    def get_slot_types(cls):
        return [str, str]
class AuthUserGrantRoleRequest(Message):
    """Typed wrapper for the ``rpc_pb2.AuthUserGrantRoleRequest`` protobuf message."""
    pb_cls = rpc_pb2.AuthUserGrantRoleRequest
    __slots__ = ['user', 'role']
    def __init__(
        self,
        user: str = '',
        role: str = ''
    ):
        self.user = user
        self.role = role
    @classmethod
    def get_slot_types(cls):
        return [str, str]
class AuthUserRevokeRoleRequest(Message):
    """Typed wrapper for the ``rpc_pb2.AuthUserRevokeRoleRequest`` protobuf message."""
    pb_cls = rpc_pb2.AuthUserRevokeRoleRequest
    __slots__ = ['name', 'role']
    def __init__(
        self,
        name: str = '',
        role: str = ''
    ):
        self.name = name
        self.role = role
    @classmethod
    def get_slot_types(cls):
        return [str, str]
class AuthRoleAddRequest(Message):
pb_cls = rpc_pb2.AuthRoleAddRequest
__slots__ = ['name']
def __init__(
self,
name: str = ''
):
| |
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2018 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Find and display part hookups."""
import os
import copy
import json
from argparse import Namespace
from astropy.time import Time
from . import mc, cm_utils, cm_transfer, cm_sysdef, cm_dossier, cm_active
class Hookup(object):
"""
Class to find and display the signal path hookup information.
Hookup traces parts and connections through the signal path (as defined
by the connections in cm_sysdef).
Parameters
----------
session : session object or None
If None, it will start a new session on the database.
"""
hookup_list_to_cache = cm_sysdef.hera_zone_prefixes
hookup_cache_file = os.path.expanduser('~/.hera_mc/hookup_cache_3.json')
    def __init__(self, session=None):
        """
        Initialize the Hookup object.

        Parameters
        ----------
        session : session object or None
            If None, a new session on the database is started.
        """
        if session is None:  # pragma: no cover
            db = mc.connect_to_mc_db(None)
            self.session = db.sessionmaker()
        else:
            self.session = session
        # Cache of part types (populated as hookups are assembled — presumably
        # keyed per part; confirm against show_hookup's table_entry_row use).
        self.part_type_cache = {}
        self.cached_hookup_dict = None
        self.sysdef = cm_sysdef.Sysdef()
        # Filled in per-query by get_hookup_from_db.
        self.active = None
    def get_hookup_from_db(self, hpn, pol, at_date, exact_match=False, hookup_type=None):
        """
        Get the hookup dict from the database for the supplied match parameters.
        This gets called by the get_hookup wrapper if the database needs to be
        read (for instance, to generate a cache file, or search for parts
        different than those keyed on in the cache file.) It will look over
        all active revisions.
        Parameters
        ----------
        hpn : str, list
            List/string of input hera part number(s) (whole or 'startswith')
            If string
                - 'default' uses default station prefixes in cm_sysdef
                - otherwise converts as csv-list
            If element of list is of format '.xxx:a/b/c' it finds the appropriate
            method as cm_sysdef.Sysdef.xxx([a, b, c])
        pol : str
            A port polarization to follow, or 'all',  ('e', 'n', 'all')
        at_date : str, int
            Date for query.  Anything intelligible to cm_utils.get_astropytime
        exact_match : bool
            If False, will only check the first characters in each hpn entry.  E.g. 'HH1'
            would allow 'HH1', 'HH10', 'HH123', etc
        hookup_type : str or None
            Type of hookup to use (current observing system is 'parts_hera').
            If 'None' it will determine which system it thinks it is based on
            the part-type.  The order in which it checks is specified in cm_sysdef.
            Only change if you know you want a different system (like 'parts_paper').
        Returns
        -------
        dict
            Hookup dossier dictionary as defined in cm_dossier
        """
        # Reset at_date
        at_date = cm_utils.get_astropytime(at_date)
        self.at_date = at_date
        # Snapshot active parts/connections; at_date=None here presumably
        # reuses the date already stored on ActiveData — confirm in cm_active.
        self.active = cm_active.ActiveData(self.session, at_date=at_date)
        self.active.load_parts(at_date=None)
        self.active.load_connections(at_date=None)
        hpn, exact_match = self._proc_hpnlist(hpn, exact_match)
        parts = self._cull_dict(hpn, self.active.parts, exact_match)
        hookup_dict = {}
        for k, part in parts.items():
            self.hookup_type = self.sysdef.find_hookup_type(
                part_type=part.hptype, hookup_type=hookup_type)
            if part.hptype in self.sysdef.redirect_part_types[self.hookup_type]:
                # Redirected part types expand into underlying parts, hooked up
                # via a recursive call; exact_match=True because the redirect
                # list is already an explicit set of part numbers.
                redirect_parts = self.sysdef.handle_redirect_part_types(part, self.active)
                redirect_hookup_dict = self.get_hookup_from_db(
                    hpn=redirect_parts, pol=pol, at_date=self.at_date,
                    exact_match=True, hookup_type=self.hookup_type)
                for rhdk, vhd in redirect_hookup_dict.items():
                    hookup_dict[rhdk] = vhd
                redirect_hookup_dict = None
                continue
            self.sysdef.setup(part=part, pol=pol, hookup_type=self.hookup_type)
            hookup_dict[k] = cm_dossier.HookupEntry(entry_key=k, sysdef=self.sysdef)
            # Trace the signal chain for every port-polarization key.
            for port_pol in self.sysdef.ppkeys:
                hookup_dict[k].hookup[port_pol] = self._follow_hookup_stream(
                    part=part.hpn, rev=part.hpn_rev, port_pol=port_pol)
                part_types_found = self._get_part_types_found(hookup_dict[k].hookup[port_pol])
                hookup_dict[k].get_hookup_type_and_column_headers(port_pol, part_types_found)
                hookup_dict[k].add_timing_and_fully_connected(port_pol)
        return hookup_dict
def get_hookup(self, hpn, pol='all', at_date='now', exact_match=False,
use_cache=False, hookup_type='parts_hera'):
"""
Return the hookup to the supplied part/pol in the form of a dictionary.
It will return all active revisions at_date from either the database or
the cache file if use_cache == True and the part number keys agree. This
is a wrapper for get_hookup_from_db to allow for the cache file.
Parameters
----------
hpn : str, list
List/string of input hera part number(s) (whole or 'startswith')
If string
- 'cache' returns the entire cache file
- 'default' uses default station prefixes in cm_sysdef
- otherwise converts as csv-list
If element of list is of format '.xxx:a/b/c' it finds the appropriate
method as cm_sysdef.Sysdef.xxx([a, b, c])
pol : str
A port polarization to follow, or 'all', ('e', 'n', 'all') Default is 'all'.
at_date : str, int
Date for query. Anything intelligible to cm_utils.get_astropytime. Default
is 'now'
exact_match : bool
If False, will only check the first characters in each hpn entry. E.g. 'HH1'
would allow 'HH1', 'HH10', 'HH123', etc.
use_cache : bool
Flag to force the cache to be read, if present and keys agree.
This is largely deprecated, but kept for archival possibilities
for the future.
hookup_type : str or None
Type of hookup to use. Default is 'parts_hera'.
If 'None' it will determine which system it thinks it is based on
the part-type. The order in which it checks is specified in cm_sysdef.
Only change if you know you want a different system (like 'parts_paper').
Returns
-------
dict
Hookup dossier dictionary as defined in cm_dossier.py
"""
at_date = cm_utils.get_astropytime(at_date)
self.at_date = at_date
self.hookup_type = hookup_type
if isinstance(hpn, str) and hpn.lower() == 'cache':
self.read_hookup_cache_from_file()
return self.cached_hookup_dict
if use_cache:
hpn, exact_match = self._proc_hpnlist(hpn, exact_match)
if self._requested_list_OK_for_cache(hpn):
self.read_hookup_cache_from_file()
return self._cull_dict(hpn, self.cached_hookup_dict, exact_match)
return self.get_hookup_from_db(hpn=hpn, pol=pol, at_date=at_date,
exact_match=exact_match, hookup_type=hookup_type)
def show_hookup(self, hookup_dict, cols_to_show='all', state='full', ports=False, revs=False,
                sortby=None, filename=None, output_format='table'):
    """
    Build a printable table of the supplied hookup dossiers.

    Parameters
    ----------
    hookup_dict : dict
        Hookup dictionary generated in self.get_hookup
    cols_to_show : list, str
        Columns to include in the hookup listing ('all' for every column).
    state : str
        'full' shows only fully connected hookups; 'all' shows everything.
    ports : bool
        Include port information in each entry.
    revs : bool
        Include revision information in each entry.
    sortby : list, str or None
        Columns to sort the listed hookup; None sorts by the keys.
    filename : str or None
        If given, the table is also written to this file; None writes nothing.
    output_format : str
        One of 'html', 'csv' or 'table'.

    Returns
    -------
    str
        The formatted table (None if nothing matched the requested state).
    """
    display_opts = {'ports': ports, 'revs': revs}
    columns = self._make_header_row(hookup_dict, cols_to_show)
    state_lc = state.lower()
    rows = []
    for key in self._sort_hookup_display(sortby, hookup_dict, def_sort_order='NRP'):
        dossier = hookup_dict[key]
        for pol in cm_utils.put_keys_in_order(dossier.hookup.keys(), sort_order='PNR'):
            if not len(dossier.hookup[pol]):
                continue
            # A row qualifies if everything is requested, or it is fully connected.
            keep = (state_lc == 'all'
                    or (state_lc == 'full' and dossier.fully_connected[pol]))
            if keep:
                rows.append(dossier.table_entry_row(pol, columns, self.part_type_cache,
                                                   display_opts))
    if not rows:
        print("None found for {} (show-state is {})".format(
            cm_utils.get_time_for_display(self.at_date), state))
        return
    table = cm_utils.general_table_handler(columns, rows, output_format)
    if filename is not None:
        with open(filename, 'w') as fp:
            print(table, file=fp)
    return table
# ##################################### Notes ############################################
def get_notes(self, hookup_dict, state='all', return_dict=False):
    """
    Collect the active info-log notes attached to the parts of a hookup.

    Parameters
    ----------
    hookup_dict : dict
        Hookup dictionary generated in self.get_hookup
    state : str
        'full' restricts to fully connected polarizations; 'all' includes every one.
    return_dict : bool
        If True each note is a dict with 'note' and 'ref' keys; otherwise just
        the note text.

    Returns
    -------
    dict
        Notes keyed by hookup key, then part key, then posting gpstime.
    """
    # Lazily load the active info records for the working date.
    if self.active is None:
        self.active = cm_active.ActiveData(self.session, at_date=self.at_date)
    if self.active.info is None:
        self.active.load_info(self.at_date)
    known_parts = set(self.active.info.keys())
    hu_notes = {}
    for hkey, dossier in hookup_dict.items():
        # Gather every part key appearing in the qualifying connections.
        part_keys = set()
        for pol, connections in dossier.hookup.items():
            qualifies = (state == 'all'
                         or (state == 'full' and dossier.fully_connected[pol]))
            if not qualifies:
                continue
            for conn in connections:
                part_keys.add(cm_utils.make_part_key(conn.upstream_part,
                                                     conn.up_part_rev))
                part_keys.add(cm_utils.make_part_key(conn.downstream_part,
                                                     conn.down_part_rev))
        hu_notes[hkey] = {}
        for pkey in part_keys:
            if pkey not in known_parts:
                continue
            part_notes = {}
            for entry in self.active.info[pkey]:
                text = entry.comment.replace('\\n', '\n')
                if return_dict:
                    part_notes[entry.posting_gpstime] = {"note": text,
                                                         "ref": entry.reference}
                else:
                    part_notes[entry.posting_gpstime] = text
            hu_notes[hkey][pkey] = part_notes
    return hu_notes
def show_notes(self, hookup_dict, state='all'):
    """
    Format the hookup part notes as a printable string.

    Parameters
    ----------
    hookup_dict : dict
        Hookup dictionary generated in self.get_hookup
    state : str
        'full' restricts to fully connected hookups; 'all' includes everything.

    Returns
    -------
    str
        One "---<key>---" section per hookup key with its dated notes.
    """
    notes = self.get_notes(hookup_dict=hookup_dict, state=state, return_dict=True)
    sections = []
    for hkey in cm_utils.put_keys_in_order(list(notes.keys()), sort_order='NPR'):
        ordered_parts = cm_utils.put_keys_in_order(list(notes[hkey].keys()),
                                                   sort_order='PNR')
        # List the hookup key's own notes first, if present.
        if hkey in ordered_parts:
            ordered_parts.remove(hkey)
            ordered_parts.insert(0, hkey)
        body = ''
        for pkey in ordered_parts:
            for gtime in sorted(notes[hkey][pkey].keys()):
                display_time = cm_utils.get_time_for_display(gtime)
                record = notes[hkey][pkey][gtime]
                body += "\t{} ({}) {} ({})\n".format(pkey, display_time,
                                                     record['note'], record['ref'])
        if body:
            sections.append("---{}---\n{}\n".format(hkey, body))
    return ''.join(sections)
# ################################ Internal methods ######################################
def _cull_dict(self, hpn, search_dict, exact_match):
"""
Determine the complete appropriate set of parts to use within search_dict.
Based on the | |
for name in [label_0, label_1]:
legend_label_dict[name] = name
label_size = 14
assert binning_features.shape[0] == softmaxes.shape[0], 'Error: binning_features must have same length as softmaxes'
#bin by whatever feature
if isinstance(bins, int):
_,bins = np.histogram(binning_features, bins=bins)
bins = bins[0:-1]
bin_assignments = np.digitize(binning_features, bins)
bin_data = []
for bin_idx in range(len(bins)):
bin_num = bin_idx + 1 #these are one-indexed for some reason
this_bin_idxs = np.where(bin_assignments==bin_num)[0]
bin_data.append({'softmaxes':softmaxes[this_bin_idxs], 'labels' : labels[this_bin_idxs], 'n' : this_bin_idxs.shape[0]})
#compute efficiency, thresholds, purity per bin
bin_metrics = []
for bin_idx, data in enumerate(bin_data):
(softmaxes_0,softmaxes_1),(labels_0,labels_1) = separate_particles([data['softmaxes'],data['labels']],data['labels'],index_dict,desired_labels=[label_0,label_1])
fps, tps, thresholds = binary_clf_curve(np.concatenate((labels_0,labels_1)),np.concatenate((softmaxes_0,softmaxes_1))[:,index_dict[label_0]],
pos_label=index_dict[label_0])
fns = tps[-1] - tps
tns = fps[-1] - fps
efficiencies = tps/(tps + fns)
operating_point_idx = (np.abs(efficiencies - efficiency)).argmin()
if metric == 'purity': performance = tps/(tps + fps)
elif metric == 'rejection': performance = tns / (tns + fps)
elif metric == 'inverse fpr': performance = np.where(fps != 0, (fps +tns) / fps, fps+tns)
bin_metrics.append((efficiencies[operating_point_idx], performance[operating_point_idx], np.sqrt(tns[operating_point_idx])/(tns[operating_point_idx] + fps[operating_point_idx])))
bin_metrics = np.array(bin_metrics)
bin_centers = [(bins[i+1] - bins[i])/2 + bins[i] for i in range(0,len(bins)-1)]
bin_centers.append((np.max(binning_features) - bins[-1])/2 + bins[-1])
if metric == 'purity':
metric_name = '{}-{} Signal Purity'.format(label_0,label_1)
elif metric=='rejection': metric_name = '{} Rejection Fraction'.format(legend_label_dict[label_1])
else: metric_name = '{} Rejection'.format(legend_label_dict[label_1])
title = '{} \n vs {} At Bin {} Signal Efficiency {}{}'.format(metric_name, binning_label, legend_label_dict[label_0], efficiency,title_note)
if ax is None:
fig = plt.figure(figsize=(12,6))
plt.errorbar(bin_centers,bin_metrics[:,1],yerr=bin_metrics[:,2],fmt=marker,color=color,ecolor='k',elinewidth=0.5,capsize=4,capthick=1,alpha=0.5, linewidth=2)
plt.ylabel(metric_name, fontsize=label_size)
plt.xlabel(binning_label, fontsize=label_size)
plt.title(title)
else:
ax.errorbar(bin_centers,bin_metrics[:,1],yerr=bin_metrics[:,2],fmt=marker,color=color,ecolor='k',elinewidth=0.5,capsize=4,capthick=1,alpha=0.5, linewidth=2)
ax.set_ylabel(metric_name, fontsize=label_size)
ax.set_xlabel(binning_label, fontsize=label_size)
ax.set_title(title)
if metric=='inverse fpr': ax.set_yscale('log')
def plot_fitqun_binned_performance(scores, labels, true_momentum, reconstructed_momentum, fpr_fixed_point, index_dict, recons_mom_bin_size=50, true_mom_bins=20,
                     ax=None,marker='o',color='k',title_note='',metric='efficiency',yrange=None):
    '''
    Purpose: Re-create official FiTQun plots: e-signal efficiency (or mu mis-ID
    rate) versus true momentum, at a fixed false-positive rate per
    reconstructed-momentum bin.

    Args:
        scores                ... network scores for each class
        labels                ... 1d array of labels
        true_momentum         ... 1d array of event true momentum
        reconstructed_momentum ... 1d array of FQ reconstructed momentum
        fpr_fixed_point       ... fixed false-positive rate for FQ recons. mom. bins
        index_dict            ... dictionary with 'e', 'mu' keys pointing to corresponding integer labels
        recons_mom_bin_size   ... size of reconstructed mom. bin
        true_mom_bins         ... number of true momentum bins
        ax                    ... axis to plot on
        marker                ... marker for plot
        color                 ... curve color
        title_note            ... string to append to title
        metric                ... 'efficiency' will give signal efficiency, any other will give FPR
        yrange                ... range for the y axis
    Returns:
        (true_momentum, thresholds_per_event) ... concatenated e/mu true-momentum
        array and the per-event score threshold that was applied.
    '''
    label_size = 14
    # Keep only e and mu events (drops gamma); each array is returned split
    # per particle and then re-concatenated in e,mu order.
    scores, labels, true_momentum, reconstructed_momentum = separate_particles([scores, labels, true_momentum, reconstructed_momentum],labels,index_dict,desired_labels=['e','mu'])
    scores = np.concatenate(scores)
    labels = np.concatenate(labels)
    true_momentum = np.concatenate(true_momentum)
    reconstructed_momentum = np.concatenate(reconstructed_momentum)
    # Bin by reconstructed momentum; dropping the last edge makes the top
    # bin open-ended under np.digitize.
    bins = [0. + recons_mom_bin_size * i for i in range(math.ceil(np.max(reconstructed_momentum)/recons_mom_bin_size))]
    bins = bins[0:-1]
    recons_mom_bin_assignments = np.digitize(reconstructed_momentum, bins)
    # NOTE: [[]]*n aliases a single list, but every slot is reassigned in the
    # loop below, so no shared-mutation issue arises.
    recons_mom_bin_idxs_list = [[]]*len(bins)
    for bin_idx in range(len(bins)):
        bin_num = bin_idx + 1 #these are one-indexed for some reason
        recons_mom_bin_idxs_list[bin_idx] = np.where(recons_mom_bin_assignments==bin_num)[0]
    # Compute the score threshold giving the fixed FPR per reconstructed
    # momentum bin; events in empty bins keep the default threshold of 1.0.
    thresholds_per_event = np.ones_like(labels, dtype=float)
    for bin_idx, bin_idxs in enumerate(recons_mom_bin_idxs_list):
        if bin_idxs.shape[0] > 0:
            fps, tps, thresholds = binary_clf_curve(labels[bin_idxs],scores[bin_idxs],
                                            pos_label=index_dict['e'])
            fns = tps[-1] - tps
            tns = fps[-1] - fps
            fprs = fps/(fps + tns)
            # Pick the threshold whose FPR is closest to the requested fixed point.
            operating_point_idx = (np.abs(fprs - fpr_fixed_point)).argmin()
            thresholds_per_event[bin_idxs] = thresholds[operating_point_idx]
    # Bin by true momentum ('ns' is only used by the commented-out bar plot below).
    ns,bins = np.histogram(true_momentum, bins=true_mom_bins, range=(200., np.max(true_momentum)) if metric=='mu fpr' else (0,1000))
    bins = bins[0:-1]
    true_mom_bin_assignments = np.digitize(true_momentum, bins)
    true_mom_bin_idxs_list = [[]]*len(bins)
    for bin_idx in range(len(bins)):
        bin_num = bin_idx + 1 #these are one-indexed for some reason
        true_mom_bin_idxs_list[bin_idx]=np.where(true_mom_bin_assignments==bin_num)[0]
    # Find the metric for each true-momentum bin by applying each event's
    # per-bin threshold to its score.
    bin_metrics=[]
    for bin_idxs in true_mom_bin_idxs_list:
        pred_pos_idxs = np.where(scores[bin_idxs] - thresholds_per_event[bin_idxs] > 0)[0]
        pred_neg_idxs = np.where(scores[bin_idxs] - thresholds_per_event[bin_idxs] < 0)[0]
        fp = np.where(labels[bin_idxs[pred_pos_idxs]] == index_dict['mu'] )[0].shape[0]
        tp = np.where(labels[bin_idxs[pred_pos_idxs]] == index_dict['e'] )[0].shape[0]
        fn = np.where(labels[bin_idxs[pred_neg_idxs]] == index_dict['e'] )[0].shape[0]
        tn = np.where(labels[bin_idxs[pred_neg_idxs]] == index_dict['mu'] )[0].shape[0]
        if metric=='efficiency':
            bin_metrics.append(tp/(tp+fn))
        else:
            bin_metrics.append(fp/(fp + tn))
    # Plot metrics at bin centers; the final center is extrapolated from the
    # last edge to the data maximum (or to 1000 for the efficiency range).
    bin_centers = [(bins[i+1] - bins[i])/2 + bins[i] for i in range(0,len(bins)-1)]
    bin_centers.append((np.max(true_momentum) - bins[-1])/2 + bins[-1] if metric=='mu fpr' else (1000 - bins[-1])/2 + bins[-1])
    metric_name = 'e- Signal Efficiency' if metric== 'efficiency' else '\u03BC- Mis-ID Rate'
    title = '{} \n vs True Momentum At Reconstructed Momentum Bin \u03BC- Mis-ID Rate of {}%{}'.format(metric_name, fpr_fixed_point*100, title_note)
    if ax is None:
        fig = plt.figure(figsize=(12,6))
        plt.errorbar(bin_centers,bin_metrics,yerr=np.zeros_like(bin_metrics),fmt=marker,color=color,ecolor='k',elinewidth=0.5,capsize=4,capthick=1,alpha=0.5, linewidth=2)
        plt.ylabel(metric_name)
        plt.xlabel("True Momentum (MeV/c)", fontsize=label_size)
        if yrange is not None: plt.ylim(yrange)
        plt.title(title)
    else:
        # NOTE(review): on a supplied axis only the first 50 bins are drawn —
        # confirm this truncation is intentional.
        ax.errorbar(bin_centers[:50],bin_metrics[:50],yerr=np.zeros_like(bin_metrics[:50]),fmt=marker,color=color,ecolor='k',elinewidth=0.5,capsize=4,capthick=1,alpha=0.5, linewidth=2)
        # Twin axis created for the (currently commented-out) event-count bars.
        nax = ax.twinx()
        # nax.bar(bin_centers,ns,fill=False,width=bins[3]-bins[2])
        ax.set_ylabel(metric_name)
        ax.set_xlabel("True Momentum (MeV/c)", fontsize=label_size)
        if yrange is not None: ax.set_ylim(yrange)
        ax.set_title(title)
    return true_momentum, thresholds_per_event
def plot_response(softmaxes, labels, particle_names, index_dict,linestyle=None,bins=None,fig=None,axes=None,legend_locs=None,fitqun=False,
                    extra_panes=[], xlim=None,label_size=14, legend_label_dict=None):
    '''
    Plots classifier softmax outputs for each particle type.

    Args:
        softmaxes           ... 2d array with first dimension n_samples
        labels              ... 1d array of particle labels to use in every output plot, or list of 4 lists of particle names to use in each respectively
        particle_names      ... list of string names of particle types to plot. All must be keys in 'index_dict'
        index_dict          ... dictionary of particle labels, with string particle name keys and values corresponding to
                                values taken by 'labels'
        linestyle           ... NOTE(review): indexed per integer label when plotting,
                                so a list/dict appears to be required despite the
                                None default — confirm with callers.
        bins                ... optional, number of bins for histogram
        fig, axes           ... optional, figure and axes on which to do plotting (use to build into bigger grid)
        legend_locs         ... list of 4 strings for positioning the legends
        fitqun              ... designate if the given scores are from fitqun
        extra_panes         ... list of lists of particle names whose probabilities are summed into extra panes
        xlim                ... limit the x-axis
        label_size          ... font size
        legend_label_dict   ... dictionary of display symbols for each string label, to use for displaying pretty characters
    author: <NAME>
    June 2020
    '''
    # Default to identity display names when no pretty-label map is supplied.
    if legend_label_dict is None:
        legend_label_dict={}
        for name in particle_names:
            legend_label_dict[name] = name
    legend_size=label_size
    num_panes = softmaxes.shape[1]+len(extra_panes)
    if axes is None:
        fig,axes = plt.subplots(1,num_panes,figsize=(5*num_panes,5)) if not fitqun else plt.subplots(1,1,figsize=(7,7))
    # Invert index_dict: integer label -> particle name string.
    label_dict = {value:key for key, value in index_dict.items()}
    # Split the softmax rows per particle type.
    softmaxes_list = separate_particles([softmaxes], labels, index_dict, [name for name in index_dict.keys()])[0]
    # A flat list of names is replicated so each pane has its own name list.
    if isinstance(particle_names[0],str):
        particle_names = [particle_names for _ in range(num_panes)]
    if fitqun:
        # fitqun mode: a single pane of the second score column (nLL difference).
        ax = axes
        density = False
        for i in [index_dict[particle_name] for particle_name in particle_names[1]]:
            # Capture the bin edges from the first histogram so every particle
            # shares identical binning on subsequent iterations.
            _,bins,_ = ax.hist(softmaxes_list[i][:,1],
                        label=legend_label_dict[label_dict[i]],range=xlim,
                        alpha=0.7,histtype=u'step',bins=bins,density=density,
                        linestyle=linestyle[i],linewidth=2)
        ax.legend(loc=legend_locs[0] if legend_locs is not None else 'best', fontsize=legend_size)
        ax.set_xlabel('e-muon nLL Difference')
        ax.set_ylabel('Normalized Density' if density else 'N Events', fontsize=label_size)
    else:
        # One pane per softmax output column.
        for output_idx,ax in enumerate(axes[:softmaxes.shape[1]]):
            for i in [index_dict[particle_name] for particle_name in particle_names[output_idx]]:
                ax.hist(softmaxes_list[i][:,output_idx],
                        label=f"{legend_label_dict[label_dict[i]]} Events",
                        alpha=0.7,histtype=u'step',bins=bins,density=True,
                        linestyle=linestyle[i],linewidth=2)
            ax.legend(loc=legend_locs[output_idx] if legend_locs is not None else 'best', fontsize=legend_size)
            ax.set_xlabel('P({})'.format(legend_label_dict[label_dict[output_idx]]), fontsize=label_size)
            ax.set_ylabel('Normalized Density', fontsize=label_size)
            ax.set_yscale('log')
        # Default to the last pane; reassigned inside the extra-panes loop below.
        ax = axes[-1]
        # Extra panes show the summed probability of several particle types.
        for n, extra_pane_particle_names in enumerate(extra_panes):
            pane_idx = softmaxes.shape[1]+n
            ax=axes[pane_idx]
            for i in [index_dict[particle_name] for particle_name in particle_names[pane_idx]]:
                ax.hist(reduce(lambda x,y : x+y, [softmaxes_list[i][:,index_dict[pname]] for pname in extra_pane_particle_names]),
                        label=legend_label_dict[particle_names[-1][i]],
                        alpha=0.7,histtype=u'step',bins=bins,density=True,
                        linestyle=linestyle[i],linewidth=2)
            ax.legend(loc=legend_locs[-1] if legend_locs is not None else 'best', fontsize=legend_size)
            # NOTE(review): x-label hard-codes the gamma+e pane — confirm this
            # matches the extra_panes actually passed.
            ax.set_xlabel('P({}) + P({})'.format(legend_label_dict['gamma'],legend_label_dict['e']), fontsize=label_size)
            ax.set_ylabel('Normalized Density', fontsize=label_size)
            ax.set_yscale('log')
    plt.tight_layout()
    return fig
def rms(arr):
    '''
    Return the root-mean-square of the values in arr.

    Args:
        arr ... non-empty 1d sequence of numbers

    Raises:
        ZeroDivisionError if arr is empty (same as the previous
        reduce-based implementation).

    author: <NAME>
    June 2020
    '''
    # Idiomatic sum() over a generator replaces the former
    # functools.reduce(lambda a, x: a + x * x, arr, 0); accumulation is
    # still left-to-right, so results are bit-identical.
    return math.sqrt(sum(x * x for x in arr) / len(arr))
def plot_binned_response(softmaxes, labels, particle_names, binning_features, binning_label,efficiency, bins, p_bins, index_dict, extra_panes=None, log_scales=[], legend_label_dict=None, wrap_size=35):
'''
Plot softmax response, binned in a feature of the event.
Args:
softmaxes ... 2d array of softmax output, shape (nsamples, noutputs)
labels ... 1d array of labels, length n_samples
particle_names ... string particle names for which to plot the response, must be keys of index_dict
binning_features ... 1d array of feature to use in binning, length n_samples
binning_label ... string, name of binning feature to use in title and x-axis label
efficiency ... bin signal efficiency to fix
bins ... number of bins to use in feature histogram
p_bins ... number of bins to use in probability density histogram
index_dict ... dictionary of particle labels, must have all of particle_names as keys, pointing to values taken by 'labels'
extra_panes ... list of lists of particle names to combine into an "extra output" e.g. [["e", "gamma"]] adds the P(e-)+P(gamma) pane
log_scales ... indices of axes.flatten() to which to apply log color scaling
legend_label_dict ... dictionary of display symbols for each string label, to use for displaying pretty characters
wrap_size ... width of box to wrap title into
author: <NAME>
June 2020
'''
if legend_label_dict is None:
legend_label_dict = {}
for | |
"DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 21, 24+8, "HT.Y", ["FLZ050","FLZ151"]),
("Fcst", "Hazards", "DISCRETE", 24+8, 24+17, "EH.W", ["FLZ050","FLZ151"]),
("Fcst", "Hazards", "DISCRETE", 24+17, 48+9, "HT.Y", ["FLZ050","FLZ151"]),
],
"checkStrings": [
"WWUS72 KTBW 062104",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"404 PM EST Mon Dec 6 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-151-070515-",
"/O.NEW.KTBW.EH.W.0001.101207T0800Z-101207T1700Z/",
"/O.NEW.KTBW.HT.Y.0003.101207T1700Z-101208T0900Z/",
"/O.EXT.KTBW.HT.Y.0002.000000T0000Z-101207T0800Z/",
"Pinellas-Coastal Hillsborough-",
"404 PM EST Mon Dec 6 2010",
"...HEAT ADVISORY NOW IN EFFECT UNTIL 3 AM EST TUESDAY...",
"...EXCESSIVE HEAT WARNING IN EFFECT FROM 3 AM TO NOON EST TUESDAY...",
"...HEAT ADVISORY IN EFFECT FROM NOON TUESDAY TO 4 AM EST WEDNESDAY...",
# "The National Weather Service in Tampa Bay Ruskin has issued an Excessive Heat Warning, which is in effect from 3 AM to noon EST Tuesday. A Heat Advisory HAS ALSO BEEN ISSUED. THIS Heat Advisory is in effect from noon Tuesday to 4 AM EST Wednesday. The Heat Advisory is now in effect until 3 AM EST Tuesday.",
# "|* SEGMENT TEXT GOES HERE *|.",
"A Heat Advisory means that a period of hot temperatures is expected. The combination of hot temperatures and high humidity will combine to create a situation in which heat illnesses are possible. Drink plenty of fluids, stay in an air-conditioned room, stay out of the sun, and check up on relatives and neighbors.",
"An Excessive Heat Warning means that a prolonged period of dangerously hot temperatures will occur. The combination of hot temperatures and high humidity will combine to create a DANGEROUS SITUATION in which heat illnesses are likely. Drink plenty of fluids, stay in an air-conditioned room, stay out of the sun, and check up on relatives and neighbors.",
"$$",
],
},
{
"commentary": "Step4-delete first HT.Y, leaving EH.W and HT.Y",
"name": "ETNReuse_2e",
"drtTime": "20101206_2107",
"gridsStartTime": "20101206_0000",
"cmdLineVars": None,
"comboFlag": 0,
"combinations": None,
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24+8, 24+17, "EH.W", ["FLZ050","FLZ151"]),
("Fcst", "Hazards", "DISCRETE", 24+17, 48+9, "HT.Y", ["FLZ050","FLZ151"]),
],
"checkStrings": [
"WWUS72 KTBW 062107",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"407 PM EST Mon Dec 6 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-151-070515-",
"/O.CAN.KTBW.HT.Y.0002.000000T0000Z-101207T0800Z/",
"/O.CON.KTBW.EH.W.0001.101207T0800Z-101207T1700Z/",
"/O.CON.KTBW.HT.Y.0003.101207T1700Z-101208T0900Z/",
"Pinellas-Coastal Hillsborough-",
"407 PM EST Mon Dec 6 2010",
"...EXCESSIVE HEAT WARNING REMAINS IN EFFECT FROM 3 AM TO NOON EST TUESDAY...",
"...HEAT ADVISORY REMAINS IN EFFECT FROM NOON TUESDAY TO 4 AM EST WEDNESDAY...",
"...HEAT ADVISORY IS CANCELLED...",
# "The National Weather Service in Tampa Bay Ruskin has cancelled the Heat Advisory. An Excessive Heat Warning remains in effect from 3 AM to noon EST Tuesday. A Heat Advisory remains in effect from noon Tuesday to 4 AM EST Wednesday. ",
# "|*|* SEGMENT TEXT GOES HERE *|.*|",
"An Excessive Heat Warning means that a prolonged period of dangerously hot temperatures will occur. The combination of hot temperatures and high humidity will combine to create a DANGEROUS SITUATION in which heat illnesses are likely. Drink plenty of fluids, stay in an air-conditioned room, stay out of the sun, and check up on relatives and neighbors.",
"A Heat Advisory means that a period of hot temperatures is expected. The combination of hot temperatures and high humidity will combine to create a situation in which heat illnesses are possible. Drink plenty of fluids, stay in an air-conditioned room, stay out of the sun, and check up on relatives and neighbors.",
"$$",
],
},
{
"commentary": "Step5-create HT.Y again in 1st slot",
"name": "ETNReuse_2f",
"drtTime": "20101206_2113",
"gridsStartTime": "20101206_0000",
"cmdLineVars": None,
"comboFlag": 0,
"combinations": None,
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 23, 24+8, "HT.Y", ["FLZ050","FLZ151"]),
("Fcst", "Hazards", "DISCRETE", 24+8, 24+17, "EH.W", ["FLZ050","FLZ151"]),
("Fcst", "Hazards", "DISCRETE", 24+17, 48+9, "HT.Y", ["FLZ050","FLZ151"]),
],
"checkStrings": [
"WWUS72 KTBW 062113",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"413 PM EST Mon Dec 6 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-151-070515-",
"/O.NEW.KTBW.HT.Y.0004.101206T2300Z-101207T0800Z/",
"/O.CON.KTBW.EH.W.0001.101207T0800Z-101207T1700Z/",
"/O.CON.KTBW.HT.Y.0003.101207T1700Z-101208T0900Z/",
"Pinellas-Coastal Hillsborough-",
"413 PM EST Mon Dec 6 2010",
"...HEAT ADVISORY IN EFFECT UNTIL 3 AM EST TUESDAY...",
"...EXCESSIVE HEAT WARNING REMAINS IN EFFECT FROM 3 AM TO NOON EST TUESDAY...",
"...HEAT ADVISORY REMAINS IN EFFECT FROM NOON TUESDAY TO 4 AM EST WEDNESDAY...",
# "The National Weather Service in Tampa Bay Ruskin has issued a Heat Advisory, which is in effect until 3 AM EST Tuesday. An Excessive Heat Warning remains in effect from 3 AM to noon EST Tuesday. A Heat Advisory remains in effect from noon Tuesday to 4 AM EST Wednesday. ",
# "|*|* SEGMENT TEXT GOES HERE *|.*|",
"An Excessive Heat Warning means that a prolonged period of dangerously hot temperatures will occur. The combination of hot temperatures and high humidity will combine to create a DANGEROUS SITUATION in which heat illnesses are likely. Drink plenty of fluids, stay in an air-conditioned room, stay out of the sun, and check up on relatives and neighbors.",
"A Heat Advisory means that a period of hot temperatures is expected. The combination of hot temperatures and high humidity will combine to create a situation in which heat illnesses are possible. Drink plenty of fluids, stay in an air-conditioned room, stay out of the sun, and check up on relatives and neighbors.",
"$$",
],
},
{
"commentary": "Step6-remove first HT.Y",
"name": "ETNReuse_2g",
"drtTime": "20101206_2115",
"gridsStartTime": "20101206_0000",
"cmdLineVars": None,
"comboFlag": 0,
"combinations": None,
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24+8, 24+17, "EH.W", ["FLZ050","FLZ151"]),
("Fcst", "Hazards", "DISCRETE", 24+17, 48+9, "HT.Y", ["FLZ050","FLZ151"]),
],
"checkStrings": [
"WWUS72 KTBW 062115",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"415 PM EST Mon Dec 6 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-151-070515-",
"/O.CAN.KTBW.HT.Y.0004.101206T2300Z-101207T0800Z/",
"/O.CON.KTBW.EH.W.0001.101207T0800Z-101207T1700Z/",
"/O.CON.KTBW.HT.Y.0003.101207T1700Z-101208T0900Z/",
"Pinellas-Coastal Hillsborough-",
"415 PM EST Mon Dec 6 2010",
"...EXCESSIVE HEAT WARNING REMAINS IN EFFECT FROM 3 AM TO NOON EST TUESDAY...",
"...HEAT ADVISORY REMAINS IN EFFECT FROM NOON TUESDAY TO 4 AM EST WEDNESDAY...",
"...HEAT ADVISORY IS CANCELLED...",
# "The National Weather Service in Tampa Bay Ruskin has cancelled the Heat Advisory. An Excessive Heat Warning remains in effect from 3 AM to noon EST Tuesday. A Heat Advisory remains in effect from noon Tuesday to 4 AM EST Wednesday. ",
# "|*|*|* SEGMENT TEXT GOES HERE *|.*|*|",
"An Excessive Heat Warning means that a prolonged period of dangerously hot temperatures will occur. The combination of hot temperatures and high humidity will combine to create a DANGEROUS SITUATION in which heat illnesses are likely. Drink plenty of fluids, stay in an air-conditioned room, stay out of the sun, and check up on relatives and neighbors.",
"A Heat Advisory means that a period of hot temperatures is expected. The combination of hot temperatures and high humidity will combine to create a situation in which heat illnesses are possible. Drink plenty of fluids, stay in an air-conditioned room, stay out of the sun, and check up on relatives and neighbors.",
"$$",
],
},
{
"commentary": "Step7-put back in part of HT.Y in 1st slot",
"name": "ETNReuse_2h",
"drtTime": "20101206_2134",
"gridsStartTime": "20101206_0000",
"cmdLineVars": None,
"comboFlag": 0,
"combinations": None,
"productType": "Hazard_NPW_Local",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 21, 24+1, "HT.Y", ["FLZ050","FLZ151"]),
("Fcst", "Hazards", "DISCRETE", 24+8, 24+17, "EH.W", ["FLZ050","FLZ151"]),
("Fcst", "Hazards", "DISCRETE", 24+17, 48+9, "HT.Y", ["FLZ050","FLZ151"]),
],
"checkStrings": [
"WWUS72 KTBW 062134",
"NPWTBW",
"URGENT - WEATHER MESSAGE",
"National Weather Service Tampa Bay Ruskin FL",
"434 PM EST Mon Dec 6 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-151-070545-",
"/O.NEW.KTBW.HT.Y.0005.101206T2134Z-101207T0100Z/",
"/O.CON.KTBW.EH.W.0001.101207T0800Z-101207T1700Z/",
"/O.CON.KTBW.HT.Y.0003.101207T1700Z-101208T0900Z/",
"Pinellas-Coastal Hillsborough-",
"434 PM EST Mon Dec 6 2010",
"...HEAT ADVISORY IN EFFECT UNTIL 8 PM EST THIS EVENING...",
"...EXCESSIVE HEAT WARNING REMAINS IN EFFECT FROM 3 AM TO NOON EST TUESDAY...",
"...HEAT ADVISORY REMAINS IN EFFECT FROM NOON TUESDAY TO 4 AM EST WEDNESDAY...",
# | |
(1,2,3), Nic))
# Wrong type, s/b tuple of 625 ints
self.assertRaises(TypeError, self.gen.setstate, (2, ('a',)*625, Nic))
# Last element s/b an int also
self.assertRaises(TypeError, self.gen.setstate, (2, (0,)*624+('a',), Nic))
# Last element s/b between 0 oraz 624
przy self.assertRaises((ValueError, OverflowError)):
self.gen.setstate((2, (1,)*624+(625,), Nic))
przy self.assertRaises((ValueError, OverflowError)):
self.gen.setstate((2, (1,)*624+(-1,), Nic))
# Little trick to make "tuple(x % (2**32) dla x w internalstate)"
# podnieś ValueError. I cannot think of a simple way to achieve this, so
# I am opting dla using a generator jako the middle argument of setstate
# which attempts to cast a NaN to integer.
state_values = self.gen.getstate()[1]
state_values = list(state_values)
state_values[-1] = float('nan')
state = (int(x) dla x w state_values)
self.assertRaises(TypeError, self.gen.setstate, (2, state, Nic))
def test_referenceImplementation(self):
    # Compare the Python implementation with results from the original
    # C reference code.  Create 2000 53-bit precision random floats and
    # compare only the last ten entries to show that the independent
    # implementations are tracking.  Here is the C main() function needed
    # to create the list of expected random numbers:
    #    void main(void){
    #         int i;
    #         unsigned long init[4]={61731, 24903, 614, 42143}, length=4;
    #         init_by_array(init, length);
    #         for (i=0; i<2000; i++) {
    #           printf("%.15f ", genrand_res53());
    #           if (i%5==4) printf("\n");
    #         }
    #       }
    expected = [0.45839803073713259,
                0.86057815201978782,
                0.92848331726782152,
                0.35932681119782461,
                0.081823493762449573,
                0.14332226470169329,
                0.084297823823520024,
                0.53814864671831453,
                0.089215024911993401,
                0.78486196105372907]
    # Seed value packs the four 32-bit init words of the C driver above.
    self.gen.seed(61731 + (24903<<32) + (614<<64) + (42143<<96))
    actual = self.randomlist(2000)[-10:]
    dla a, e w zip(actual, expected):
        self.assertAlmostEqual(a,e,places=14)
def test_strong_reference_implementation(self):
    # Like test_referenceImplementation, but checks for exact bit-level
    # equality.  This should pass on any box where a C double contains
    # at least 53 bits of precision (the underlying algorithm suffers
    # no rounding errors -- all results are exact).
    z math zaimportuj ldexp
    expected = [0x0eab3258d2231f,
                0x1b89db315277a5,
                0x1db622a5518016,
                0x0b7f9af0d575bf,
                0x029e4c4db82240,
                0x04961892f5d673,
                0x02b291598e4589,
                0x11388382c15694,
                0x02dad977c9e1fe,
                0x191d96d4d334c6]
    self.gen.seed(61731 + (24903<<32) + (614<<64) + (42143<<96))
    actual = self.randomlist(2000)[-10:]
    dla a, e w zip(actual, expected):
        # ldexp(a, 53) recovers the exact 53-bit integer behind each float.
        self.assertEqual(int(ldexp(a, 53)), e)
def test_long_seed(self):
    # This is most interesting to run in debug mode, just to make sure
    # nothing blows up.  Under the covers, a dynamically resized array
    # is allocated, consuming space proportional to the number of bits
    # in the seed.  Unfortunately, that's a quadratic-time algorithm,
    # so don't make this horribly big.
    seed = (1 << (10000 * 8)) - 1  # about 10K bytes
    self.gen.seed(seed)
def test_53_bits_per_float(self):
    # This should pass whenever a C double has 53 bits of precision:
    # OR-ing 100 scaled draws should activate every one of the 53 bits.
    span = 2 ** 53
    cum = 0
    dla i w range(100):
        cum |= int(self.gen.random() * span)
    self.assertEqual(cum, span-1)
def test_bigrand(self):
    # The randrange routine should build up the required number of bits
    # in stages so that all bit positions are active.
    span = 2 ** 500
    cum = 0
    dla i w range(100):
        r = self.gen.randrange(span)
        self.assertPrawda(0 <= r < span)
        cum |= r
    # After 100 draws every one of the 500 bits should have been set.
    self.assertEqual(cum, span-1)
def test_bigrand_ranges(self):
    # Exercise randrange(start, stop) with wide ranges of assorted bit sizes,
    # checking that the result always lies within [start, stop).
    dla i w [40,80, 160, 200, 211, 250, 375, 512, 550]:
        start = self.gen.randrange(2 ** (i-2))
        stop = self.gen.randrange(2 ** i)
        jeżeli stop <= start:
            kontynuuj
        self.assertPrawda(start <= self.gen.randrange(start, stop) < stop)
def test_rangelimits(self):
    # Over 100 draws, randrange should hit every value of each tiny range,
    # including ranges straddling negative values and 2**60 boundaries.
    dla start, stop w [(-2,0), (-(2**60)-2,-(2**60)), (2**60,2**60+2)]:
        self.assertEqual(set(range(start,stop)),
                         set([self.gen.randrange(start,stop) dla i w range(100)]))
def test_genrandbits(self):
    # Verify cross-platform repeatability of getrandbits() for a fixed seed.
    self.gen.seed(1234567)
    self.assertEqual(self.gen.getrandbits(100),
                     97904845777343510404718956115)
    # Verify ranges: k requested bits always fall in [0, 2**k).
    dla k w range(1, 1000):
        self.assertPrawda(0 <= self.gen.getrandbits(k) < 2**k)
    # Verify all bit positions become active when OR-ing 100 draws.
    getbits = self.gen.getrandbits
    dla span w [1, 2, 3, 4, 31, 32, 32, 52, 53, 54, 119, 127, 128, 129]:
        cum = 0
        dla i w range(100):
            cum |= getbits(span)
        self.assertEqual(cum, 2**span-1)
    # Verify argument checking: wrong arity, wrong type, non-positive counts.
    self.assertRaises(TypeError, self.gen.getrandbits)
    self.assertRaises(TypeError, self.gen.getrandbits, 'a')
    self.assertRaises(TypeError, self.gen.getrandbits, 1, 2)
    self.assertRaises(ValueError, self.gen.getrandbits, 0)
    self.assertRaises(ValueError, self.gen.getrandbits, -1)
def test_randbelow_logic(self, _log=log, int=int):
# check bitcount transition points: 2**i oraz 2**(i+1)-1
# show that: k = int(1.001 + _log(n, 2))
# jest equal to albo one greater than the number of bits w n
dla i w range(1, 1000):
n = 1 << i # check an exact power of two
numbits = i+1
k = int(1.00001 + _log(n, 2))
self.assertEqual(k, numbits)
self.assertEqual(n, 2**(k-1))
n += n - 1 # check 1 below the next power of two
k = int(1.00001 + _log(n, 2))
self.assertIn(k, [numbits, numbits+1])
self.assertPrawda(2**k > n > 2**(k-2))
n -= n >> 15 # check a little farther below the next power of two
k = int(1.00001 + _log(n, 2))
self.assertEqual(k, numbits) # note the stronger assertion
self.assertPrawda(2**k > n > 2**(k-1)) # note the stronger assertion
@unittest.mock.patch('random.Random.random')
def test_randbelow_overriden_random(self, random_mock):
# Random._randbelow() can only use random() when the built-in one
# has been overridden but no new getrandbits() method was supplied.
random_mock.side_effect = random.SystemRandom().random
maxsize = 1<<random.BPF
przy warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
# Population range too large (n >= maxsize)
self.gen._randbelow(maxsize+1, maxsize = maxsize)
self.gen._randbelow(5640, maxsize = maxsize)
# This might be going too far to test a single line, but because of our
# noble aim of achieving 100% test coverage we need to write a case w
# which the following line w Random._randbelow() gets executed:
#
# rem = maxsize % n
# limit = (maxsize - rem) / maxsize
# r = random()
# dopóki r >= limit:
# r = random() # <== *This line* <==<
#
# Therefore, to guarantee that the dopóki loop jest executed at least
# once, we need to mock random() so that it returns a number greater
# than 'limit' the first time it gets called.
n = 42
epsilon = 0.01
limit = (maxsize - (maxsize % n)) / maxsize
random_mock.side_effect = [limit + epsilon, limit - epsilon]
self.gen._randbelow(n, maxsize = maxsize)
def test_randrange_bug_1590891(self):
start = 1000000000000
stop = -100000000000000000000
step = -200
x = self.gen.randrange(start, stop, step)
self.assertPrawda(stop < x <= start)
self.assertEqual((x+stop)%step, 0)
def gamma(z, sqrt2pi=(2.0*pi)**0.5):
    """Gamma function via the Lanczos approximation (g=7, 9 coefficients).

    Reference implementation used by the distribution moment tests below.
    """
    # Reflection formula maps arguments in the left half-plane (z < 0.5)
    # onto the right half-plane, where the Lanczos series converges.
    if z < 0.5:
        return pi / sin(pi*z) / gamma(1.0-z)
    # Lanczos approximation with g=7
    az = z + (7.0 - 0.5)
    return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
        0.9999999999995183,
        676.5203681218835 / z,
        -1259.139216722289 / (z+1.0),
        771.3234287757674 / (z+2.0),
        -176.6150291498386 / (z+3.0),
        12.50734324009056 / (z+4.0),
        -0.1385710331296526 / (z+5.0),
        0.9934937113930748e-05 / (z+6.0),
        0.1659470187408462e-06 / (z+7.0),
    ])
class TestDistributions(unittest.TestCase):
    def test_zeroinputs(self):
        """Verify that distributions can handle a series of zero inputs."""
        g = random.Random()
        # 50 normal variates followed by five exact zeros; each call below
        # pops from a fresh copy so every distribution sees the zeros.
        x = [g.random() for i in range(50)] + [0.0]*5
        g.random = x[:].pop; g.uniform(1,10)
        g.random = x[:].pop; g.paretovariate(1.0)
        g.random = x[:].pop; g.expovariate(1.0)
        g.random = x[:].pop; g.weibullvariate(1.0, 1.0)
        g.random = x[:].pop; g.vonmisesvariate(1.0, 1.0)
        g.random = x[:].pop; g.normalvariate(0.0, 1.0)
        g.random = x[:].pop; g.gauss(0.0, 1.0)
        g.random = x[:].pop; g.lognormvariate(0.0, 1.0)
        g.random = x[:].pop; g.vonmisesvariate(0.0, 1.0)
        g.random = x[:].pop; g.gammavariate(0.01, 1.0)
        g.random = x[:].pop; g.gammavariate(1.0, 1.0)
        g.random = x[:].pop; g.gammavariate(200.0, 1.0)
        g.random = x[:].pop; g.betavariate(3.0, 3.0)
        g.random = x[:].pop; g.triangular(0.0, 1.0, 1.0/3.0)
def test_avg_std(self):
# Use integration to test distribution average oraz standard deviation.
# Only works dla distributions which do nie consume variates w pairs
g = random.Random()
N = 5000
x = [i/float(N) dla i w range(1,N)]
dla variate, args, mu, sigmasqrd w [
(g.uniform, (1.0,10.0), (10.0+1.0)/2, (10.0-1.0)**2/12),
(g.triangular, (0.0, 1.0, 1.0/3.0), 4.0/9.0, 7.0/9.0/18.0),
(g.expovariate, (1.5,), 1/1.5, 1/1.5**2),
(g.vonmisesvariate, (1.23, 0), pi, pi**2/3),
(g.paretovariate, (5.0,), 5.0/(5.0-1),
5.0/((5.0-1)**2*(5.0-2))),
(g.weibullvariate, (1.0, 3.0), gamma(1+1/3.0),
gamma(1+2/3.0)-gamma(1+1/3.0)**2) ]:
g.random = x[:].pop
y = []
dla i w range(len(x)):
spróbuj:
y.append(variate(*args))
wyjąwszy IndexError:
dalej
s1 = s2 = 0
dla e w y:
s1 += e
s2 += (e - mu) ** 2
N = len(y)
self.assertAlmostEqual(s1/N, mu, places=2,
msg='%s%r' % (variate.__name__, args))
self.assertAlmostEqual(s2/(N-1), sigmasqrd, places=2,
msg='%s%r' % (variate.__name__, args))
def test_constant(self):
g = random.Random()
N = 100
dla variate, args, expected w [
(g.uniform, (10.0, 10.0), 10.0),
(g.triangular, (10.0, 10.0), 10.0),
(g.triangular, (10.0, 10.0, 10.0), 10.0),
(g.expovariate, (float('inf'),), 0.0),
(g.vonmisesvariate, (3.0, float('inf')), 3.0),
(g.gauss, (10.0, 0.0), 10.0),
(g.lognormvariate, (0.0, 0.0), 1.0),
(g.lognormvariate, (-float('inf'), 0.0), 0.0),
(g.normalvariate, (10.0, 0.0), 10.0),
(g.paretovariate, (float('inf'),), 1.0),
(g.weibullvariate, (10.0, float('inf')), 10.0),
(g.weibullvariate, (0.0, 10.0), 0.0),
]:
dla i w range(N):
self.assertEqual(variate(*args), | |
        # Add legend
if legend:
h = mpl.patches.Patch(facecolor='skyblue')
x = mpl.patches.Patch(facecolor='g', alpha=0.0)
dx = mpl.patches.Patch(facecolor='g', alpha=0.0)
tri = mpl.patches.Patch(facecolor='white', alpha=0.0)
ax.legend(
[h, x, dx, tri],
['Histogram D(NNI)', 'D(X): %i' % D.max(), 'X: %.3f' % bins[np.argmax(D)],
'TriIndex: %.3f' % tri_index],
loc=0
)
# Show plot
if show:
plt.show()
# Output
args = (fig, tri_index,)
names = ('tri_histogram', 'tri_index',)
# If histogram should not be plotted
else:
D, bins = _get_histogram(nn, figsize=figsize, binsize=binsize, legend=legend, plot=plot)
# Compute Triangular index: number of nn intervals / maximum value of the distribution
tri_index = nn.size / D.max()
# Output
args = (tri_index, )
names = ('tri_index', )
return utils.ReturnTuple(args, names)
def _get_histogram(nn=None, plot=True, figsize=None, binsize=None, legend=True):
"""Prepares NNI histogram data for all geometrical functions.
Parameters
----------
nn : array
NN intervals in [ms] or [s].
plot : bool
If True, creates histogram plot using matplotlib, else uses numpy (data only, no plot).
figsize : array, optional
Matplotlib figure size (width, height) (default: (6, 6)).
binsize : int, float
Bin size of the histogram bins.
legend : bool
If True, highlights D(X) marker to the plot to be added to the legends (default=True).
Returns
-------
fig : matplotlib figure object
Figure of the histogram plot (only if input parameter 'plot' is True).
vals : array
Histogram distribution values.
bins : array
Histogram bins.
Raises
------
TypeError
If no input data provided for 'nn'.
TypeError
If no input data provided for 'binsize'.
Notes
-----
.. 'figsize' has only effect if 'plot' is also True.
.. 'legend' has only effect if 'plot' is also True.
"""
# Check input data & confirm numpy
if nn is None:
raise TypeError("No input data provided for 'nn'.")
else:
nn = np.asarray(nn)
if binsize is None:
raise TypeError("No input data provided for 'binsize'")
# Create bins array
bins = np.arange(0, np.max(nn) + binsize, binsize)
# Get histogram plot and data
if plot:
# Check figsize
if figsize is None:
figsize = (6, 6)
# Prepare plot figure
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
vals, bins, patches = ax.hist(nn, bins, density=False, align='left', facecolor='skyblue', edgecolor='black')
bins = bins[:-1]
# Highlight bin of the histograms maximum value with a different color and prepare legend
if legend:
ax.vlines(bins[np.argmax(vals)], 0, (vals.max() * 1.1),
linestyles='--', color='g', linewidth=0.6)
pos = (bins[np.argmax(vals)], vals.max() * 1.11)
ax.annotate('D(X)', xy=pos, xytext=pos, ha='center', color='g')
# Configure figure and plot
ax.axis([nn.min() - (3 * binsize), nn.max() + (3 * binsize), 0, vals.max() * 1.15])
ax.set_xlabel('NNI Bins [ms]')
ax.set_ylabel('D(NNI) [-]')
ax.set_title('NNI Histogram')
return fig, ax, vals, bins
else:
vals, bins = np.histogram(nn, bins, density=False)
return vals, bins[:-1]
def geometrical_parameters(nni=None, rpeaks=None, binsize=7.8125, plot=True, show=True, figsize=None, legend=True):
    """Creates NNI histogram with specified binsize (default: 7.8125ms) and computes geometrical parameters (triangular
    index, TINN, N, and M).

    References: [Electrophysiology1996]
    Docs: https://pyhrv.readthedocs.io/en/latest/_pages/api/time.html#geometrical-parameters-function-geometrical-parameters

    Parameters
    ----------
    nni : array
        NN intervals in [ms] or [s].
    rpeaks : array
        R-peak times in [ms] or [s].
    binsize : int, float
        Bin size of the histogram bins (default: 7.8125ms).
    plot : bool
        If True, creates histogram plot using matplotlib, else uses numpy (data only, no plot).
    show : bool, optional
        If true, shows histogram (default: True).
    figsize : array, optional
        Matplotlib figure size (width, height) (default: (6, 6)).
    legend : bool, optional
        If True, adds legend to the histogram (default: True).

    Returns (biosppy.utils.ReturnTuple Object)
    ------------------------------------------
    [key : format]
        Description.
    nni_histogram : matplotlib figure object
        Histogram figure (only if input parameter 'plot' is True).
    tri_index : float
        Triangular index.
    tinn_n : float
        N value of the TINN computation.
    tinn_m : float
        M value of the TINN computation.
    tinn : float
        TINN value.

    Raises
    ------
    TypeError (via 'check_input()')
        If no input data for 'rpeaks' or 'nni' provided.

    Notes
    -----
    .. Default bin size set to recommended bin size of 1/128 (with 128Hz being the minimum recommended sampling
       frequency) as recommended by the HRV guidelines.
       BUG FIX: the default used to be 7.815 which contradicted this note, the
       guideline value (1000/128 = 7.8125ms) and time_domain()'s default.
    .. 'show' has only effect if 'plot' is also True.
    .. 'legend' has only effect if 'plot' is also True.
    .. 'figsize' has only effect if 'plot' is also True.
    """
    # Check input
    nn = tools.check_input(nni, rpeaks)

    # Get Histogram data & plot (optional)
    if plot:
        fig, ax, D, bins = _get_histogram(nn, figsize=figsize, binsize=binsize, legend=legend, plot=plot)
    else:
        fig = None

    # Get TINN values without plot figure
    tinn_vals = tinn(nni=nn, rpeaks=rpeaks, binsize=binsize, show=False, legend=False, figsize=figsize, plot=False)

    # Get triangular index without plot figure
    trindex = triangular_index(nni=nn, rpeaks=rpeaks, binsize=binsize, show=False, legend=False, plot=False)['tri_index']

    # Histogram plot & settings
    if plot:
        # Plot triangular interpolation
        N, M = tinn_vals['tinn_n'], tinn_vals['tinn_m']
        ax.plot([N, bins[np.argmax(D)]], [0, D.max()], 'r--', linewidth=0.8)
        ax.plot([bins[np.argmax(D)], M], [D.max(), 0], 'r--', linewidth=0.8)

        # Add Legend
        if legend:
            l1 = mpl.patches.Patch(facecolor='skyblue', label='Histogram D(NNI)')
            l2 = mpl.lines.Line2D([0, 0], [0, 0], linestyle='--', linewidth=0.8, color='r', label='Tri. Interpol.')
            l3 = mpl.patches.Patch(facecolor='g', alpha=0.0, label='D(X): %i' % D.max())
            l4 = mpl.patches.Patch(facecolor='g', alpha=0.0, label='X: %.3f$ms$' % bins[np.argmax(D)])
            l5 = mpl.patches.Patch(facecolor='white', alpha=0.0, label='N: %.3f$ms$' % tinn_vals['tinn_n'])
            l6 = mpl.patches.Patch(facecolor='white', alpha=0.0, label='M: %.3fms' % tinn_vals['tinn_m'])
            l7 = mpl.patches.Patch(facecolor='white', alpha=0.0, label='TINN: %.3fms' % tinn_vals['tinn'])
            l8 = mpl.patches.Patch(facecolor='white', alpha=0.0, label='Tri. Index: %.3f' % trindex)
            ax.legend(handles=[l1, l2, l3, l4, l5, l6, l7, l8], loc=0, ncol=1)

        # Show plot
        if show:
            plt.show()

    # Output
    args = (fig, tinn_vals['tinn_n'], tinn_vals['tinn_m'], tinn_vals['tinn'], trindex)
    names = ('nni_histogram', 'tinn_n', 'tinn_m', 'tinn', 'tri_index')
    return utils.ReturnTuple(args, names)
def time_domain(nni=None,
                rpeaks=None,
                signal=None,
                sampling_rate=1000.,
                threshold=None,
                plot=True,
                show=False,
                binsize=7.8125,
                figsize=None,
                legend=True):
    """Computes all time domain parameters of the HRV time domain module and returns them in a ReturnTuple object.

    Parameters
    ----------
    nni : array
        NN intervals in [ms] or [s].
    rpeaks : array
        R-peak times in [ms] or [s].
    signal : array
        ECG signal.
    sampling_rate : int, float, optional
        Sampling rate used for the ECG acquisition in [Hz] (default: 1000.).
    threshold : int, optional
        Custom threshold in [ms] for the NNXX and pNNXX parameters (default: None).
    plot : bool
        If True, creates histogram plot using matplotlib, else uses numpy (data only, no plot) - (geometrical params).
    show : bool, optional
        If True, shows the histogram plot (default: False) - (geometrical params).
    binsize : int, float
        Bin size in [ms] of the histogram bins - (geometrical params).
    figsize : array, optional
        Matplotlib figure size for the histogram (width, height) (default: (6, 6)) - (geometrical params).
        (Previously documented but not accepted; now passed through.)
    legend : bool, optional
        If True, highlights D(X) marker to the plot to be added to the legends (default=True) - (geometrical params).
        (Previously documented but not accepted; now passed through.)

    Returns
    -------
    results : biosppy.utils.ReturnTuple object
        All time domain results (see list and keys below)

    Returned Parameters
    -------------------
    .. NNI parameters (# of NNI, mean, min, max) in [count] and [ms] (keys: 'nni_counter', 'nni_mean', 'nni_min',
       'nni_max')
    .. NNI differences (mean, min, max, standard deviation) in [ms] (keys: 'nni_diff_mean', 'nni_diff_min',
       'nn_diff_max')
    .. HR parameters (mean, min, max, standard deviation) in [BPM] (keys: 'hr_mean', 'hr_min', 'hr_max', 'hr_std')
    .. SDNN in [ms] (key: 'sdnn')
    .. SDNN index in [ms] (key: 'sdnn_index')
    .. SDANN in [ms] (key: 'sdann')
    .. RMSSD in [ms] (key: 'rmssd')
    .. SDSD in [ms] (key: 'sdsd')
    .. nn50 in [count] & pNN50 in [%] (keys: 'nn50', 'pnn50')
    .. nn20 in [count] & pNN20 in [%] (keys: 'nn20', 'pnn20')
    .. nnXX (XX = custom threshold) if specified (keys: 'nnXX', 'pnnXX')
    .. Triangular Index [-] (key: 'tri_index')
    .. TINN in [ms] (key: 'tinn', 'tinn_n', 'tinn_m')
    .. NNI histogram (key: 'nni_histogram')

    Notes
    -----
    .. Results are stored in a biosppy.utils.ReturnTuple object and need to be accessed with the respective keys as
       done with dictionaries (see list of parameters and keys above).
    .. Only one type of input data is required (signal, nni, or rpeaks).
    .. Input data will be prioritized in the following order: 1. signal, 2. nni, 3. rpeaks.
    .. SDNN Index and SDANN are segment-based parameters; see 'sdnn_index()' and 'sdann()' for how the NNI series
       is split into segments.
    .. Default bin size set to recommended bin size of 1/128 (with 128Hz being the minimum recommended sampling
       frequency) as recommended by the HRV guidelines.
    .. 'show' has only effect if 'plot' is also True.
    .. 'legend' has only effect if 'plot' is also True.
    .. 'figsize' has only effect if 'plot' is also True.

    Raises
    ------
    TypeError
        If no input data for 'nni', 'rpeaks', and 'signal' provided.
    """
    # Check input: an ECG signal takes precedence; otherwise NNI/R-peaks.
    if signal is not None:
        rpeaks = ecg(signal=signal, sampling_rate=sampling_rate, show=False)[2]
    elif nni is None and rpeaks is None:
        raise TypeError('No input data provided. Please specify input data.')

    # Get NNI series
    nn = tools.check_input(nni, rpeaks)

    # Call time domain functions & wrap results in a single biosspy.utils.ReturnTuple object
    results = nni_parameters(nn)
    results = tools.join_tuples(results, hr_parameters(nn))
    results = tools.join_tuples(results, nni_differences_parameters(nn))
    results = tools.join_tuples(results, sdnn(nn))
    results = tools.join_tuples(results, sdnn_index(nn))
    results = tools.join_tuples(results, sdann(nn))
    results = tools.join_tuples(results, rmssd(nn))
    results = tools.join_tuples(results, sdsd(nn))
    results = tools.join_tuples(results, nn50(nn))
    results = tools.join_tuples(results, nn20(nn))

    # Compute custom threshold if required
    if threshold is not None:
        results = tools.join_tuples(results, nnXX(nn, threshold=int(threshold)))

    # Compute geometrical parameters (figsize/legend are now forwarded)
    results = tools.join_tuples(results, geometrical_parameters(
        nn, plot=plot, show=show, binsize=binsize, figsize=figsize, legend=legend))

    # Output
    return results
if __name__ == "__main__":
    """
    Example Script - HRV Time Domain Analysis
    """
    # Load sample NNI series
    nni = np.load('./files/SampleNNISeries.npy')
    # Time Domain results
    print("=========================")
    print("TIME DOMAIN Results")
    print("=========================")
    # Heart rate statistics (mean/min/max/std in beats per minute)
    hr_ = hr_parameters(nni)
    print("HR Results")
    print("> Mean HR: %f [bpm]" % hr_['hr_mean'])
    print("> Min HR: %f [bpm]" % hr_['hr_min'])
    print("> Max HR: %f [bpm]" % hr_['hr_max'])
    print("> Std. Dev. HR: %f [bpm]" % hr_['hr_std'])
    # Basic NNI statistics
    nni_para_ = nni_parameters(nni)
    print("NN Results")
    print("> Mean NN: %f [ms]" % nni_para_['nni_mean'])
    print("> Min NN: %f [ms]" % nni_para_['nni_min'])
    print("> Max NN: %f [ms]" % nni_para_['nni_max'])
    # Successive NNI difference statistics
    nni_diff_ = nni_differences_parameters(nni)
    print("∆NN Results")
    print("> Mean ∆NN: %f [ms]" % nni_diff_['nni_diff_mean'])
    print("> Min ∆NN: %f [ms]" % nni_diff_['nni_diff_min'])
    print("> Max ∆NN: %f [ms]" % nni_diff_['nni_diff_max'])
    # Variability parameters
    print("SDNN: %f [ms]" % sdnn(nni)['sdnn'])
    print("SDNN Index: %f [ms]" % sdnn_index(nni)['sdnn_index'])
    print("SDANN: %f [ms]" % sdann(nni)['sdann'])
    print("RMMSD: %f [ms]" % rmssd(nni)['rmssd'])
    print("SDSD: %f [ms]" % sdsd(nni)['sdsd'])
    print("NN50: %i [-]" % nn50(nni)['nn50'])
    print("pNN50: %f [%%]" % nn50(nni)['pnn50'])
    print("NN20: %i [-]" % nn20(nni)['nn20'])
    print("pNN20: %f [%%]" % nn20(nni)['pnn20'])
    # Compute geometrical parameters (with histogram plot shown)
    print("=== Geometrical Parameters")
    geo = geometrical_parameters(nni, plot=True, show=True)
    print("Triangular Index: %f [-]" % geo['tri_index'])
    print("TINN: %f [ms]" % geo['tinn'])
    print("> N: %f [ms]" % geo['tinn_n'])
    print("> M: %f [ms]" % geo['tinn_m'])
    # Alternatively use the individual geometrical parameter functions
    geo = triangular_index(nni, plot=False)
    geo = tinn(nni, plot=False)
# | |
msg="Domain %s succesfully refreshed !" % module.params['domain'])
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
if module.params['domain'] and module.params['ip']:
if module.check_mode:
module.exit_json(changed=True, msg="DNS succesfully %s on %s - (dry run mode)" % (module.params['state'], module.params['name']))
try:
check = ovhclient.get('/domain/zone/%s/record' % module.params['domain'],
fieldType=u'A',
subDomain=module.params['name'])
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
if module.params['state'] == 'present':
if not check:
try:
result = ovhclient.post('/domain/zone/%s/record' % module.params['domain'],
fieldType=u'A',
subDomain=module.params['name'],
target=module.params['ip'])
module.exit_json(changed=True, contents=result)
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
else:
module.exit_json(changed=False, msg="%s is already registered in domain %s" % (module.params['name'], module.params['domain']))
elif module.params['state'] == 'modified':
if check:
try:
for ind in check:
resultpost = ovhclient.put('/domain/zone/%s/record/%s' % (module.params['domain'], ind),
subDomain=module.params['name'],
target=module.params['ip'])
msg += '{ "fieldType": "A", "id": "%s", "subDomain": "%s", "target": "%s", "zone": "%s" } ' % (ind, module.params['name'], module.params['ip'], module.params['domain'])
module.exit_json(changed=True, msg=msg)
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
else:
module.fail_json(changed=False, msg="The target %s doesn't exist in domain %s" % (module.params['name'], module.params['domain']))
elif module.params['state'] == 'absent':
if check:
try:
for ind in check:
resultpost = ovhclient.delete('/domain/zone/%s/record/%s' % (module.params['domain'], ind))
module.exit_json(changed=True, msg="Target %s succesfully deleted from domain %s" % (module.params['name'], module.params['domain']))
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
else:
module.exit_json(changed=False, msg="Target %s doesn't exist on domain %s" % (module.params['name'], module.params['domain']))
else:
if not module.params['domain']:
module.fail_json(changed=False, msg="Please give a domain to add your target")
if not module.params['ip']:
module.fail_json(changed=False, msg="Please give an IP to add your target")
def changeVRACK(ovhclient, module):
    """Add or remove a dedicated server from an OVH vRack.

    Uses module.params: 'vrack' (vRack service name, required), 'name'
    (server service name) and 'state' ('present' to attach, 'absent' to
    detach). Exits the Ansible module via exit_json()/fail_json() on every
    path; this function never returns normally.
    """
    if module.params['vrack']:
        # Check mode: report what would be done without calling the API.
        if module.check_mode:
            module.exit_json(changed=True, msg="%s succesfully %s on %s - (dry run mode)" % (module.params['name'], module.params['state'],module.params['vrack']))
        if module.params['state'] == 'present':
            try:
                # There is no easy way to know if the server is on an old or new network generation.
                # So we need to call this new route to ask for virtualNetworkInterface, and if the answer is empty, it's on a old generation.
                # The /vrack/%s/allowedServices route used previously has availability and scaling problems.
                result = ovhclient.get('/dedicated/server/%s/virtualNetworkInterface' % module.params['name'], mode='vrack')
            except APIError as apiError:
                module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
            # XXX: In a near future, OVH will add the possibility to add multiple interfaces to the same VRACK or another one
            # This code may break at this moment because each server will have a list of dedicatedServerInterface
            # New generation
            if len(result):
                try:
                    is_already_registered = ovhclient.get('/vrack/%s/dedicatedServerInterfaceDetails' % module.params['vrack'])
                except APIError as apiError:
                    module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
                # Idempotency: exit unchanged if this server is already attached.
                for new_server in is_already_registered:
                    if new_server['dedicatedServer'] == module.params['name']:
                        module.exit_json(changed=False, msg="%s is already registered on %s" % (module.params['name'], module.params['vrack']))
                try:
                    # NOTE(review): joining all interface IDs into one string only
                    # works while servers expose a single interface (see XXX above).
                    serverInterface="".join(result)
                    result2 = ovhclient.post('/vrack/%s/dedicatedServerInterface' % module.params['vrack'],
                            dedicatedServerInterface=serverInterface)
                    module.exit_json(changed=True, contents=result2)
                except APIError as apiError:
                    module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
            # Old generation
            else:
                try:
                    is_already_registered = ovhclient.get('/vrack/%s/dedicatedServer' % module.params['vrack'])
                except APIError as apiError:
                    module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
                # Idempotency: exit unchanged if this server is already attached.
                for old_server in is_already_registered:
                    if old_server == module.params['name']:
                        module.exit_json(changed=False, msg="%s is already registered on %s" % (module.params['name'], module.params['vrack']))
                try:
                    result2 = ovhclient.post('/vrack/%s/dedicatedServer' % module.params['vrack'],
                            dedicatedServer=module.params['name'])
                    module.exit_json(changed=True, contents=result2)
                except APIError as apiError:
                    module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
        elif module.params['state'] == 'absent':
            # Query both generations; the server is removed from whichever
            # listing contains it.
            try:
                result_new = ovhclient.get('/vrack/%s/dedicatedServerInterfaceDetails' % module.params['vrack'])
                result_old = ovhclient.get('/vrack/%s/dedicatedServer' % module.params['vrack'])
            except APIError as apiError:
                module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
            for new_server in result_new:
                if new_server['dedicatedServer'] == module.params['name']:
                    try:
                        result = ovhclient.delete('/vrack/%s/dedicatedServerInterface/%s' % (module.params['vrack'], new_server['dedicatedServerInterface']))
                        module.exit_json(changed=True, contents=result)
                    except APIError as apiError:
                        module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
            for old_server in result_old:
                if old_server == module.params['name']:
                    try:
                        result = ovhclient.delete('/vrack/%s/dedicatedServer/%s' % (module.params['vrack'], module.params['name']))
                        module.exit_json(changed=True, contents=result)
                    except APIError as apiError:
                        module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
            # Server was found in neither generation's listing.
            module.exit_json(changed=False, msg="No %s in %s" % (module.params['name'], module.params['vrack']))
        else:
            module.exit_json(changed=False, msg="Vrack service only uses present/absent state")
    else:
        module.fail_json(changed=False, msg="Please give a vrack name to add/remove your server")
def generateTemplate(ovhclient, module):
    """Create or delete a custom OVH installation template.

    Reads a YAML template description from module.params['template'] and,
    depending on module.params['state']:
      - 'present': creates the template, its partition scheme, optional
        hardware RAID and all partitions on the OVH API;
      - 'absent': deletes the template.
    Exits the Ansible module via exit_json()/fail_json() on every path;
    this function never returns normally.
    """
    if module.params['template']:
        if module.check_mode:
            module.exit_json(changed=True, msg="%s succesfully %s on ovh API - (dry run mode)" % (module.params['template'], module.params['state']))
        src = module.params['template']
        # BUG FIX: yaml.load() without an explicit Loader is unsafe (arbitrary
        # object construction) and deprecated in PyYAML >= 5.1; the template
        # file only needs plain YAML, so safe_load() is sufficient.
        with open(src, 'r') as stream:
            conf = yaml.safe_load(stream)
        # (The old copy loop used dict.iteritems(), which no longer exists on
        # Python 3; safe_load() already yields a plain dict, so no copy needed.)
        if module.params['state'] == 'present':
            # Create the base template.
            try:
                result = ovhclient.post('/me/installationTemplate', baseTemplateName=conf['baseTemplateName'], defaultLanguage=conf['defaultLanguage'], name=conf['templateName'])
            except APIError as apiError:
                module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
            # Apply customization options (hostname, post-install script, SSH key...).
            template_payload = {
                'customization': {
                    "customHostname": conf['customHostname'],
                    "postInstallationScriptLink": conf['postInstallationScriptLink'],
                    "postInstallationScriptReturn": conf['postInstallationScriptReturn'],
                    "sshKeyName": conf['sshKeyName'],
                    "useDistributionKernel": conf['useDistributionKernel'],
                },
                'defaultLanguage': conf['defaultLanguage'],
                'templateName': conf['templateName'],
            }
            try:
                result = ovhclient.put('/me/installationTemplate/%s' % conf['templateName'], **template_payload)
            except APIError as apiError:
                module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
            # Create the partition scheme attached to the template.
            try:
                result = ovhclient.post('/me/installationTemplate/%s/partitionScheme' % conf['templateName'], name=conf['partitionScheme'], priority=conf['partitionSchemePriority'])
            except APIError as apiError:
                module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
            if conf['isHardwareRaid']:
                result = ovhclient.get('/dedicated/server/%s/install/hardwareRaidProfile' % module.params['name'])
                if len(result['controllers']) == 1:
                    # XXX: Only works with a server who has one controller. All the disks in this controller are taken to form one raid
                    # In the future, some of our servers could have more than one controller, so we will have to adapt this code
                    disks = result['controllers'][0]['disks'][0]['names']
                    try:
                        result = ovhclient.post('/me/installationTemplate/%s/partitionScheme/%s/hardwareRaid' % (conf['templateName'], conf['partitionScheme']),
                                                disks=disks,
                                                mode=conf['raidMode'],
                                                name=conf['partitionScheme'],
                                                step=1)
                    except APIError as apiError:
                        module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
                else:
                    # BUG FIX: the old message contained a literal, never-formatted
                    # "{0}" placeholder; no API error is involved on this path.
                    module.fail_json(changed=False, msg="Code can't handle more than one controller when using Hardware Raid setups")
            # Create each partition; entries are string-encoded dicts in the YAML.
            for raw_partition in conf['partition']:
                partition = ast.literal_eval(raw_partition)
                try:
                    if 'raid' in partition:
                        ovhclient.post('/me/installationTemplate/%s/partitionScheme/%s/partition' % (conf['templateName'], conf['partitionScheme']),
                                       filesystem=partition['filesystem'],
                                       mountpoint=partition['mountpoint'],
                                       raid=partition['raid'],
                                       size=partition['size'],
                                       step=partition['step'],
                                       type=partition['type'])
                    else:
                        ovhclient.post('/me/installationTemplate/%s/partitionScheme/%s/partition' % (conf['templateName'], conf['partitionScheme']),
                                       filesystem=partition['filesystem'],
                                       mountpoint=partition['mountpoint'],
                                       size=partition['size'],
                                       step=partition['step'],
                                       type=partition['type'])
                except APIError as apiError:
                    module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
            module.exit_json(changed=True, msg="Template %s succesfully created" % conf['templateName'])
        elif module.params['state'] == 'absent':
            try:
                ovhclient.delete('/me/installationTemplate/%s' % conf['templateName'])
            except APIError as apiError:
                module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
            module.exit_json(changed=True, msg="Template %s succesfully deleted" % conf['templateName'])
        else:
            module.fail_json(changed=False, msg="State %s not supported. Only present/absent" % module.params['state'])
    else:
        module.fail_json(changed=False, msg="No template parameter given")
def changeBootDedicated(ovhclient, module):
    """Set a dedicated server's boot mode ('harddisk' or 'rescue') and reboot.

    If the server already boots in the requested mode, a reboot is only
    triggered when 'force_reboot' is set. Exits the Ansible module on every
    path; this function never returns normally.
    """
    # OVH netboot IDs for the supported boot modes.
    bootid = { 'harddisk':1, 'rescue':1122 }
    if module.check_mode:
        module.exit_json(changed=True, msg="%s is now set to boot on %s. Reboot in progress... - (dry run mode)" % (module.params['name'], module.params['boot']))
    try:
        check = ovhclient.get('/dedicated/server/%s' % module.params['name'])
    except APIError as apiError:
        module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
    if bootid[module.params['boot']] != check['bootId']:
        # Boot mode differs: update it and reboot so it takes effect.
        try:
            ovhclient.put('/dedicated/server/%s' % module.params['name'],
                    bootId=bootid[module.params['boot']])
            ovhclient.post('/dedicated/server/%s/reboot' % module.params['name'])
            module.exit_json(changed=True, msg="%s is now set to boot on %s. Reboot in progress..." % (module.params['name'], module.params['boot']))
        except APIError as apiError:
            module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
    else:
        # BUG FIX: 'force_reboot' is declared type='bool' in the argument spec,
        # so comparing it to the strings 'yes'/'true' could never be True and
        # the forced reboot was dead code. Test its truth value directly.
        if module.params['force_reboot']:
            try:
                ovhclient.post('/dedicated/server/%s/reboot' % module.params['name'])
            except APIError as apiError:
                module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
            module.exit_json(changed=True, msg="%s already configured for boot on %s. Reboot in progress..." % (module.params['name'], module.params['boot']))
        module.exit_json(changed=False, msg="%s already configured for boot on %s" % (module.params['name'], module.params['boot']))
def listDedicated(ovhclient, module):
    """List dedicated servers as 'reverse=serviceName' strings and exit.

    Exits the Ansible module with the list in 'objects'; never returns.
    """
    customlist = []
    try:
        result = ovhclient.get('/dedicated/server')
    except APIError as apiError:
        module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
    try:
        # One extra API call per server to resolve its reverse DNS name.
        for i in result:
            test = ovhclient.get('/dedicated/server/%s' % i)
            customlist.append('%s=%s' % (test['reverse'], i))
    except APIError as apiError:
        module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
    # BUG FIX: the kwarg was misspelled 'changedFalse'; Ansible reads 'changed'.
    module.exit_json(changed=False, objects=customlist)
def listOVHInstallationTemplates(ovhclient, module):
    """List OVH-provided installation templates, excluding 'tmp-mgr' ones.

    Exits the Ansible module with the list in 'objects'; never returns.
    """
    try:
        result = ovhclient.get('/dedicated/installationTemplate')
    except APIError as apiError:
        module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
    # Filtering is local; no API call can fail here, so no try/except needed.
    customlist = [i for i in result if 'tmp-mgr' not in i]
    # BUG FIX: the kwarg was misspelled 'changedFalse'; Ansible reads 'changed'.
    module.exit_json(changed=False, objects=customlist)
def listTemplates(ovhclient, module):
    """List the account's custom installation templates, excluding 'tmp-mgr' ones.

    Exits the Ansible module with the list in 'objects'; never returns.
    """
    try:
        result = ovhclient.get('/me/installationTemplate')
    except APIError as apiError:
        module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
    # Filtering is local; no API call can fail here, so no try/except needed.
    customlist = [i for i in result if 'tmp-mgr' not in i]
    # BUG FIX: the kwarg was misspelled 'changedFalse'; Ansible reads 'changed'.
    module.exit_json(changed=False, objects=customlist)
def main():
module = AnsibleModule(
argument_spec = dict(
endpoint = dict(required=False, default=None),
application_key = dict(required=False, default=None),
application_secret = dict(required=False, default=None),
consumer_key = dict(required=False, default=None),
state = dict(default='present', choices=['present', 'absent', 'modified']),
name = dict(required=True),
service = dict(choices=['boot', 'dns', 'vrack', 'reverse', 'monitoring', 'install', 'status', 'list', 'template', 'terminate'], required=True),
domain = dict(required=False, default=None),
ip = dict(required=False, default=None),
vrack = dict(required=False, default=None),
boot = dict(default='harddisk', choices=['harddisk', 'rescue']),
force_reboot = dict(required=False, type='bool', default=False),
template = dict(required=False, default=None),
hostname = dict(required=False, default=None),
ssh_key_name = dict(required=False, default=None),
use_distrib_kernel = dict(required=False, type='bool', default=False)
),
supports_check_mode=True
)
if not HAS_OVH:
module.fail_json(msg='OVH Api wrapper not installed')
credentials = ['endpoint', 'application_key', 'application_secret', 'consumer_key']
credentials_in_parameters = [cred in module.params for cred in credentials]
try:
if all(credentials_in_parameters):
client = ovh.Client(**{credential: module.params[credential] for credential | |
# Repository: metal-stack/metal-python
# coding: utf-8
"""
metal-api
API to manage and control plane resources like machines, switches, operating system images, machine sizes, networks, IP addresses and more # noqa: E501
OpenAPI spec version: v0.15.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1MachineResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger type string used by the generated
    # serializer/deserializer machinery.
    swagger_types = {
        'allocation': 'V1MachineAllocation',
        'bios': 'V1MachineBIOS',
        'changed': 'datetime',
        'created': 'datetime',
        'description': 'str',
        'events': 'V1MachineRecentProvisioningEvents',
        'hardware': 'V1MachineHardware',
        'id': 'str',
        'ledstate': 'V1ChassisIdentifyLEDState',
        'liveliness': 'str',
        'name': 'str',
        'partition': 'V1PartitionResponse',
        'rackid': 'str',
        'size': 'V1SizeResponse',
        'state': 'V1MachineState',
        'tags': 'list[str]'
    }

    # Attribute name -> JSON key in the API payload.  For this model the
    # Python attribute names and the JSON keys are identical.
    attribute_map = {
        'allocation': 'allocation',
        'bios': 'bios',
        'changed': 'changed',
        'created': 'created',
        'description': 'description',
        'events': 'events',
        'hardware': 'hardware',
        'id': 'id',
        'ledstate': 'ledstate',
        'liveliness': 'liveliness',
        'name': 'name',
        'partition': 'partition',
        'rackid': 'rackid',
        'size': 'size',
        'state': 'state',
        'tags': 'tags'
    }
def __init__(self, allocation=None, bios=None, changed=None, created=None, description=None, events=None, hardware=None, id=None, ledstate=None, liveliness=None, name=None, partition=None, rackid=None, size=None, state=None, tags=None): # noqa: E501
"""V1MachineResponse - a model defined in Swagger""" # noqa: E501
self._allocation = None
self._bios = None
self._changed = None
self._created = None
self._description = None
self._events = None
self._hardware = None
self._id = None
self._ledstate = None
self._liveliness = None
self._name = None
self._partition = None
self._rackid = None
self._size = None
self._state = None
self._tags = None
self.discriminator = None
if allocation is not None:
self.allocation = allocation
self.bios = bios
if changed is not None:
self.changed = changed
if created is not None:
self.created = created
if description is not None:
self.description = description
self.events = events
self.hardware = hardware
self.id = id
self.ledstate = ledstate
self.liveliness = liveliness
if name is not None:
self.name = name
if partition is not None:
self.partition = partition
if rackid is not None:
self.rackid = rackid
if size is not None:
self.size = size
self.state = state
self.tags = tags
@property
def allocation(self):
"""Gets the allocation of this V1MachineResponse. # noqa: E501
the allocation data of an allocated machine # noqa: E501
:return: The allocation of this V1MachineResponse. # noqa: E501
:rtype: V1MachineAllocation
"""
return self._allocation
@allocation.setter
def allocation(self, allocation):
"""Sets the allocation of this V1MachineResponse.
the allocation data of an allocated machine # noqa: E501
:param allocation: The allocation of this V1MachineResponse. # noqa: E501
:type: V1MachineAllocation
"""
self._allocation = allocation
@property
def bios(self):
"""Gets the bios of this V1MachineResponse. # noqa: E501
bios information of this machine # noqa: E501
:return: The bios of this V1MachineResponse. # noqa: E501
:rtype: V1MachineBIOS
"""
return self._bios
@bios.setter
def bios(self, bios):
"""Sets the bios of this V1MachineResponse.
bios information of this machine # noqa: E501
:param bios: The bios of this V1MachineResponse. # noqa: E501
:type: V1MachineBIOS
"""
if bios is None:
raise ValueError("Invalid value for `bios`, must not be `None`") # noqa: E501
self._bios = bios
@property
def changed(self):
"""Gets the changed of this V1MachineResponse. # noqa: E501
the last changed timestamp of this entity # noqa: E501
:return: The changed of this V1MachineResponse. # noqa: E501
:rtype: datetime
"""
return self._changed
@changed.setter
def changed(self, changed):
"""Sets the changed of this V1MachineResponse.
the last changed timestamp of this entity # noqa: E501
:param changed: The changed of this V1MachineResponse. # noqa: E501
:type: datetime
"""
self._changed = changed
@property
def created(self):
"""Gets the created of this V1MachineResponse. # noqa: E501
the creation time of this entity # noqa: E501
:return: The created of this V1MachineResponse. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this V1MachineResponse.
the creation time of this entity # noqa: E501
:param created: The created of this V1MachineResponse. # noqa: E501
:type: datetime
"""
self._created = created
@property
def description(self):
"""Gets the description of this V1MachineResponse. # noqa: E501
a description for this entity # noqa: E501
:return: The description of this V1MachineResponse. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1MachineResponse.
a description for this entity # noqa: E501
:param description: The description of this V1MachineResponse. # noqa: E501
:type: str
"""
self._description = description
@property
def events(self):
"""Gets the events of this V1MachineResponse. # noqa: E501
recent events of this machine during provisioning # noqa: E501
:return: The events of this V1MachineResponse. # noqa: E501
:rtype: V1MachineRecentProvisioningEvents
"""
return self._events
@events.setter
def events(self, events):
"""Sets the events of this V1MachineResponse.
recent events of this machine during provisioning # noqa: E501
:param events: The events of this V1MachineResponse. # noqa: E501
:type: V1MachineRecentProvisioningEvents
"""
if events is None:
raise ValueError("Invalid value for `events`, must not be `None`") # noqa: E501
self._events = events
@property
def hardware(self):
"""Gets the hardware of this V1MachineResponse. # noqa: E501
the hardware of this machine # noqa: E501
:return: The hardware of this V1MachineResponse. # noqa: E501
:rtype: V1MachineHardware
"""
return self._hardware
@hardware.setter
def hardware(self, hardware):
"""Sets the hardware of this V1MachineResponse.
the hardware of this machine # noqa: E501
:param hardware: The hardware of this V1MachineResponse. # noqa: E501
:type: V1MachineHardware
"""
if hardware is None:
raise ValueError("Invalid value for `hardware`, must not be `None`") # noqa: E501
self._hardware = hardware
@property
def id(self):
"""Gets the id of this V1MachineResponse. # noqa: E501
the unique ID of this entity # noqa: E501
:return: The id of this V1MachineResponse. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this V1MachineResponse.
the unique ID of this entity # noqa: E501
:param id: The id of this V1MachineResponse. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def ledstate(self):
"""Gets the ledstate of this V1MachineResponse. # noqa: E501
the state of this chassis identify LED # noqa: E501
:return: The ledstate of this V1MachineResponse. # noqa: E501
:rtype: V1ChassisIdentifyLEDState
"""
return self._ledstate
@ledstate.setter
def ledstate(self, ledstate):
"""Sets the ledstate of this V1MachineResponse.
the state of this chassis identify LED # noqa: E501
:param ledstate: The ledstate of this V1MachineResponse. # noqa: E501
:type: V1ChassisIdentifyLEDState
"""
if ledstate is None:
raise ValueError("Invalid value for `ledstate`, must not be `None`") # noqa: E501
self._ledstate = ledstate
@property
def liveliness(self):
"""Gets the liveliness of this V1MachineResponse. # noqa: E501
the liveliness of this machine # noqa: E501
:return: The liveliness of this V1MachineResponse. # noqa: E501
:rtype: str
"""
return self._liveliness
@liveliness.setter
def liveliness(self, liveliness):
"""Sets the liveliness of this V1MachineResponse.
the liveliness of this machine # noqa: E501
:param liveliness: The liveliness of this V1MachineResponse. # noqa: E501
:type: str
"""
if liveliness is None:
raise ValueError("Invalid value for `liveliness`, must not be `None`") # noqa: E501
self._liveliness = liveliness
@property
def name(self):
"""Gets the name of this V1MachineResponse. # noqa: E501
a readable name for this entity # noqa: E501
:return: The name of this V1MachineResponse. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1MachineResponse.
a readable name for this entity # noqa: E501
:param name: The name of this V1MachineResponse. # noqa: E501
:type: str
"""
self._name = name
@property
def partition(self):
"""Gets the partition of this V1MachineResponse. # noqa: E501
the partition assigned to this machine # noqa: E501
:return: The partition of this V1MachineResponse. # noqa: E501
:rtype: V1PartitionResponse
"""
return self._partition
@partition.setter
def partition(self, partition):
"""Sets the partition of this V1MachineResponse.
the partition assigned to this machine # noqa: E501
:param partition: The partition of this V1MachineResponse. # noqa: E501
:type: V1PartitionResponse
"""
self._partition = partition
@property
def rackid(self):
"""Gets the rackid of this V1MachineResponse. # | |
<path\x0a \
d=\x22M 0,1\
6 H 16 V 0 H 0 Z\
\x22\x0a id=\x22p\
ath235\x22 />\x0a <\
/clipPath>\x0a </d\
efs>\x0a <metadata\
\x0a id=\x22metada\
ta5\x22>\x0a <rdf:R\
DF>\x0a <cc:Wo\
rk\x0a rdf:\
about=\x22\x22>\x0a \
<dc:format>ima\
ge/svg+xml</dc:f\
ormat>\x0a <\
dc:type\x0a \
rdf:resource=\
\x22http://purl.org\
/dc/dcmitype/Sti\
llImage\x22 />\x0a \
<dc:title></\
dc:title>\x0a \
</cc:Work>\x0a <\
/rdf:RDF>\x0a </me\
tadata>\x0a <g\x0a \
style=\x22display\
:none\x22\x0a tran\
sform=\x22matrix(0.\
8671849,0,0,0.86\
71849,-4.8131908\
,-2.502956)\x22\x0a \
id=\x22layer7\x22>\x0a \
<path\x0a \
id=\x22path1385\x22\x0a \
d=\x22M 6.3144\
531,3.1914062 5.\
8554688,3.650390\
6 7.53125,5.3281\
25 5.8554688,7.0\
039062 6.3144531\
,7.4628906 7.992\
1875,5.7851562 9\
.6679688,7.46289\
06 10.126953,7.0\
039062 8.4492188\
,5.328125 10.126\
953,3.6503906 9.\
6679688,3.191406\
2 7.9921875,4.86\
71875 Z\x22\x0a \
style=\x22color:#00\
0000;font-style:\
normal;font-vari\
ant:normal;font-\
weight:normal;fo\
nt-stretch:norma\
l;font-size:medi\
um;line-height:n\
ormal;font-famil\
y:sans-serif;fon\
t-variant-ligatu\
res:normal;font-\
variant-position\
:normal;font-var\
iant-caps:normal\
;font-variant-nu\
meric:normal;fon\
t-variant-altern\
ates:normal;font\
-variant-east-as\
ian:normal;font-\
feature-settings\
:normal;font-var\
iation-settings:\
normal;text-inde\
nt:0;text-align:\
start;text-decor\
ation:none;text-\
decoration-line:\
none;text-decora\
tion-style:solid\
;text-decoration\
-color:#000000;l\
etter-spacing:no\
rmal;word-spacin\
g:normal;text-tr\
ansform:none;wri\
ting-mode:lr-tb;\
direction:ltr;te\
xt-orientation:m\
ixed;dominant-ba\
seline:auto;base\
line-shift:basel\
ine;text-anchor:\
start;white-spac\
e:normal;shape-p\
adding:0;shape-m\
argin:0;inline-s\
ize:0;clip-rule:\
nonzero;display:\
inline;overflow:\
visible;visibili\
ty:visible;opaci\
ty:1;isolation:a\
uto;mix-blend-mo\
de:normal;color-\
interpolation:sR\
GB;color-interpo\
lation-filters:l\
inearRGB;solid-c\
olor:#000000;sol\
id-opacity:1;vec\
tor-effect:none;\
fill:#bdbdbd;fil\
l-opacity:1;fill\
-rule:nonzero;st\
roke:none;stroke\
-width:0.648425;\
stroke-linecap:b\
utt;stroke-linej\
oin:round;stroke\
-miterlimit:10;s\
troke-dasharray:\
none;stroke-dash\
offset:0;stroke-\
opacity:1;color-\
rendering:auto;i\
mage-rendering:a\
uto;shape-render\
ing:auto;text-re\
ndering:auto;ena\
ble-background:a\
ccumulate;stop-c\
olor:#000000;sto\
p-opacity:1\x22 />\x0a\
</g>\x0a <g\x0a \
style=\x22display:\
none\x22\x0a trans\
form=\x22matrix(0.2\
8257645,-0.03062\
801,0.03062801,0\
.28257645,0.0403\
8206,0.29828767)\
\x22\x0a id=\x22layer\
6\x22>\x0a <path\x0a \
d=\x22m 4.6433\
796,6.9992927 c \
0.2355252,0.2887\
782 0.5676425,0.\
4699275 0.935677\
,0.5080141 L 5.4\
360775,8.8889283\
C 4.7000085,8.8\
127551 4.035465,\
8.4534405 3.5644\
145,7.8758841 2.\
6282817,6.721388\
9 2.8161412,4.90\
60834 3.96875,3.\
96875 L 4.846542\
,5.0458286 C 4.2\
689855,5.5168791\
4.1762049,6.423\
1427 4.6433796,6\
.9992927 M 1.574\
3107,5.6952456 C\
1.3375562,7.983\
0278 3.005131,10\
.035586 5.292913\
2,10.27234 7.579\
7007,10.508992 9\
.6322587,8.84141\
71 9.8690132,6.5\
536349 10.105768\
,4.2658527 8.438\
1929,2.2132946 6\
.1514054,1.97664\
31 3.8636232,1.7\
398887 1.8110652\
,3.4074634 1.574\
3107,5.6952456 M\
11.251629,6.696\
7169 C 11.119562\
,7.9729015 10.55\
9542,9.1002435 9\
.7345668,9.96089\
51 L 12.458867,1\
3.314138 11.3799\
02,14.19073 8.65\
56018,10.837487 \
C 7.6442508,11.4\
68735 6.425124,1\
1.785927 5.14993\
4,11.653962 2.10\
02209,11.338358 \
-0.12380647,8.60\
0882 0.19169453,\
5.5521635 0.5071\
9563,2.5034451 3\
.2446714,0.27941\
76 6.2943845,0.5\
950216 9.3431029\
,0.9105227 11.56\
713,3.6479985 11\
.251629,6.696716\
9\x22\x0a style=\
\x22fill:#bdbdbd;fi\
ll-opacity:1;fil\
l-rule:nonzero;s\
troke:none\x22\x0a \
id=\x22path245\x22 \
/>\x0a </g>\x0a <g\x0a \
style=\x22displ\
ay:none\x22\x0a tr\
ansform=\x22transla\
te(-2.1166667,-2\
.1166667)\x22\x0a \
id=\x22layer4\x22>\x0a \
<path\x0a id\
=\x22path247\x22\x0a \
style=\x22display\
:inline;fill:#bd\
bdbd;fill-opacit\
y:1;fill-rule:no\
nzero;stroke:non\
e;stroke-width:0\
.284936\x22\x0a \
d=\x22m 3.3072917,5\
.159375 h 2.7781\
25 v 0.4630209 h\
-2.778125 z m 0\
,-1.1576946 h 2.\
778125 V 4.46470\
13 H 3.3072917 Z\
M 6.0854167,3.3\
072917 V 2.84427\
08 h -0.2778125 \
-2.2225 -0.27239\
87 v 0.4630209 h\
0.2723987 2.222\
5 z M 2.3812501,\
5.159375 H 2.844\
2708 V 5.6223959\
H 2.3812501 Z m\
0,-1.1576946 H \
2.8442708 V 4.46\
47013 H 2.381250\
1 Z m 0,-1.15740\
96 H 2.8442708 V\
3.3072917 H 2.3\
812501 Z\x22 />\x0a <\
/g>\x0a <g\x0a st\
yle=\x22display:inl\
ine\x22\x0a transf\
orm=\x22translate(-\
2.1166667,-2.116\
6667)\x22\x0a id=\x22\
layer3\x22>\x0a <pa\
th\x0a id=\x22pa\
th271\x22\x0a st\
yle=\x22display:inl\
ine;fill:#bdbdbd\
;fill-opacity:1;\
fill-rule:nonzer\
o;stroke:none;st\
roke-width:0.284\
936\x22\x0a d=\x22M\
5.3730769,4.910\
0561 H 6.0854167\
V 5.6223959 H 5\
.3730769 Z m 0,-\
1.0328927 H 6.08\
54167 V 4.589503\
3 H 5.3730769 Z \
m 0,-1.0328926 H\
6.0854167 V 3.5\
566105 H 5.37307\
69 Z M 4.3758013\
,4.9100561 H 5.0\
881411 V 5.62239\
59 H 4.3758013 Z\
m 0,-1.0328927 \
H 5.0881411 V 4.\
5895033 H 4.3758\
013 Z m 0,-1.032\
8926 H 5.0881411\
V 3.5566105 H 4\
.3758013 Z M 3.3\
785256,4.9100561\
H 4.0908654 V 5\
.6223959 H 3.378\
5256 Z m 0,-1.03\
28927 H 4.090865\
4 V 4.5895033 H \
3.3785256 Z m 0,\
-1.0328926 H 4.0\
908654 V 3.55661\
05 H 3.3785256 Z\
M 2.3812501,4.9\
100561 H 3.09358\
98 V 5.6223959 H\
2.3812501 Z m 0\
,-1.0328927 H 3.\
0935898 V 4.5895\
033 H 2.3812501 \
Z m 0,-1.0328926\
H 3.0935898 V 3\
.5566105 H 2.381\
2501 Z\x22 />\x0a </g\
>\x0a <g\x0a styl\
e=\x22display:none\x22\
\x0a id=\x22layer2\
\x22>\x0a <path\x0a \
id=\x22path197\x22\
\x0a style=\x22d\
isplay:inline;fi\
ll:#bdbdbd;fill-\
opacity:1;fill-r\
ule:nonzero;stro\
ke:none;stroke-w\
idth:0.284936\x22\x0a \
d=\x22M 0.264\
58337,3.0427083 \
H 3.96875 V 3.50\
57292 H 0.264583\
37 Z m 0,-1.1576\
946 H 3.96875 V \
2.3480346 H 0.26\
458337 Z m 0,-1.\
15740958 H 3.968\
75 V 1.190625 H \
0.26458337 Z\x22 />\
\x0a </g>\x0a</svg>\x0a\
\x00\x00\x18\x00\
<\
?xml version=\x221.\
0\x22 encoding=\x22UTF\
-8\x22 standalone=\x22\
no\x22?>\x0a<svg\x0a xm\
lns:dc=\x22http://p\
url.org/dc/eleme\
nts/1.1/\x22\x0a xml\
ns:cc=\x22http://cr\
eativecommons.or\
g/ns#\x22\x0a xmlns:\
rdf=\x22http://www.\
w3.org/1999/02/2\
2-rdf-syntax-ns#\
\x22\x0a xmlns:svg=\x22\
http://www.w3.or\
g/2000/svg\x22\x0a x\
mlns=\x22http://www\
.w3.org/2000/svg\
\x22\x0a id=\x22svg8\x22\x0a \
version=\x221.1\x22\x0a\
viewBox=\x220 0 \
4.2333333 4.2333\
333\x22\x0a height=\x22\
16\x22\x0a width=\x2216\
\x22>\x0a <defs\x0a \
id=\x22defs2\x22>\x0a \
<clipPath\x0a \
clipPathUnits=\x22\
userSpaceOnUse\x22\x0a\
id=\x22clipP\
ath209\x22>\x0a <\
path\x0a d=\
\x22M 0,16 H 16 V 0\
H 0 Z\x22\x0a \
id=\x22path207\x22 />\
\x0a </clipPath>\
\x0a <clipPath\x0a \
clipPathUn\
its=\x22userSpaceOn\
Use\x22\x0a id=\x22\
clipPath225\x22>\x0a \
<path\x0a \
d=\x22M 0,16 H 1\
6 V 0 H 0 Z\x22\x0a \
id=\x22path22\
3\x22 />\x0a </clip\
Path>\x0a <clipP\
ath\x0a clipP\
athUnits=\x22userSp\
aceOnUse\x22\x0a \
id=\x22clipPath237\
\x22>\x0a <path\x0a \
d=\x22M 0,1\
6 H 16 V 0 H 0 Z\
\x22\x0a id=\x22p\
ath235\x22 />\x0a <\
/clipPath>\x0a </d\
efs>\x0a <metadata\
\x0a id=\x22metada\
ta5\x22>\x0a <rdf:R\
DF>\x0a <cc:Wo\
rk\x0a rdf:\
about=\x22\x22>\x0a \
<dc:format>ima\
ge/svg+xml</dc:f\
ormat>\x0a <\
dc:type\x0a \
rdf:resource=\
\x22http://purl.org\
/dc/dcmitype/Sti\
llImage\x22 />\x0a \
<dc:title></\
dc:title>\x0a \
</cc:Work>\x0a <\
/rdf:RDF>\x0a </me\
tadata>\x0a <g\x0a \
style=\x22display\
:none\x22\x0a tran\
sform=\x22matrix(0.\
8671849,0,0,0.86\
71849,-4.8131908\
,-2.502956)\x22\x0a \
id=\x22layer7\x22>\x0a \
<path\x0a \
id=\x22path1385\x22\x0a \
d=\x22M 6.3144\
531,3.1914062 5.\
8554688,3.650390\
6 7.53125,5.3281\
25 5.8554688,7.0\
039062 6.3144531\
,7.4628906 7.992\
1875,5.7851562 9\
.6679688,7.46289\
06 10.126953,7.0\
039062 8.4492188\
,5.328125 10.126\
953,3.6503906 9.\
6679688,3.191406\
2 7.9921875,4.86\
71875 Z\x22\x0a \
style=\x22color:#00\
0000;font-style:\
normal;font-vari\
ant:normal;font-\
weight:normal;fo\
nt-stretch:norma\
l;font-size:medi\
um;line-height:n\
ormal;font-famil\
y:sans-serif;fon\
t-variant-ligatu\
res:normal;font-\
variant-position\
:normal;font-var\
iant-caps:normal\
;font-variant-nu\
meric:normal;fon\
t-variant-altern\
ates:normal;font\
-variant-east-as\
ian:normal;font-\
feature-settings\
:normal;font-var\
iation-settings:\
normal;text-inde\
nt:0;text-align:\
start;text-decor\
ation:none;text-\
decoration-line:\
none;text-decora\
tion-style:solid\
;text-decoration\
-color:#000000;l\
etter-spacing:no\
rmal;word-spacin\
g:normal;text-tr\
ansform:none;wri\
ting-mode:lr-tb;\
direction:ltr;te\
xt-orientation:m\
ixed;dominant-ba\
seline:auto;base\
line-shift:basel\
ine;text-anchor:\
start;white-spac\
e:normal;shape-p\
adding:0;shape-m\
argin:0;inline-s\
ize:0;clip-rule:\
nonzero;display:\
inline;overflow:\
visible;visibili\
ty:visible;opaci\
ty:1;isolation:a\
uto;mix-blend-mo\
de:normal;color-\
interpolation:sR\
GB;color-interpo\
lation-filters:l\
inearRGB;solid-c\
olor:#000000;sol\
id-opacity:1;vec\
tor-effect:none;\
fill:#bdbdbd;fil\
l-opacity:1;fill\
-rule:nonzero;st\
roke:none;stroke\
-width:0.648425;\
stroke-linecap:b\
utt;stroke-linej\
oin:round;stroke\
-miterlimit:10;s\
troke-dasharray:\
none;stroke-dash\
offset:0;stroke-\
opacity:1;color-\
rendering:auto;i\
mage-rendering:a\
uto;shape-render\
ing:auto;text-re\
ndering:auto;ena\
ble-background:a\
ccumulate;stop-c\
olor:#000000;sto\
p-opacity:1\x22 />\x0a\
</g>\x0a <g\x0a \
style=\x22display:\
none\x22\x0a trans\
form=\x22matrix(0.2\
8257645,-0.03062\
801,0.03062801,0\
.28257645,0.0403\
8206,0.29828767)\
\x22\x0a id=\x22layer\
6\x22>\x0a <path\x0a \
d=\x22m 4.6433\
796,6.9992927 c \
0.2355252,0.2887\
782 0.5676425,0.\
4699275 0.935677\
,0.5080141 L 5.4\
360775,8.8889283\
C 4.7000085,8.8\
127551 4.035465,\
8.4534405 3.5644\
145,7.8758841 2.\
6282817,6.721388\
9 2.8161412,4.90\
60834 3.96875,3.\
96875 L 4.846542\
,5.0458286 C 4.2\
689855,5.5168791\
4.1762049,6.423\
1427 4.6433796,6\
.9992927 M 1.574\
3107,5.6952456 C\
1.3375562,7.983\
0278 3.005131,10\
.035586 5.292913\
2,10.27234 7.579\
7007,10.508992 9\
.6322587,8.84141\
71 9.8690132,6.5\
536349 10.105768\
,4.2658527 8.438\
1929,2.2132946 6\
.1514054,1.97664\
31 3.8636232,1.7\
398887 1.8110652\
,3.4074634 1.574\
3107,5.6952456 M\
11.251629,6.696\
7169 C 11.119562\
,7.9729015 10.55\
9542,9.1002435 9\
.7345668,9.96089\
51 L 12.458867,1\
3.314138 11.3799\
02,14.19073 8.65\
56018,10.837487 \
C 7.6442508,11.4\
68735 6.425124,1\
1.785927 5.14993\
4,11.653962 2.10\
02209,11.338358 \
-0.12380647,8.60\
0882 0.19169453,\
5.5521635 0.5071\
9563,2.5034451 3\
.2446714,0.27941\
76 6.2943845,0.5\
950216 9.3431029\
,0.9105227 11.56\
713,3.6479985 11\
.251629,6.696716\
9\x22\x0a style=\
\x22fill:#bdbdbd;fi\
ll-opacity:1;fil\
l-rule:nonzero;s\
troke:none\x22\x0a \
id=\x22path245\x22 \
/>\x0a </g>\x0a <g\x0a \
style=\x22displ\
ay:none\x22\x0a tr\
ansform=\x22transla\
te(-2.1166667,-2\
.1166667)\x22\x0a \
id=\x22layer4\x22>\x0a \
<path\x0a id\
=\x22path247\x22\x0a \
style=\x22display\
:inline;fill:#bd\
bdbd;fill-opacit\
y:1;fill-rule:no\
nzero;stroke:non\
e;stroke-width:0\
.284936\x22\x0a \
d=\x22m 3.3072917,5\
.159375 h 2.7781\
25 v 0.4630209 h\
-2.778125 z m 0\
,-1.1576946 h 2.\
778125 V 4.46470\
13 H 3.3072917 Z\
M 6.0854167,3.3\
072917 V 2.84427\
08 h -0.2778125 \
-2.2225 -0.27239\
87 v 0.4630209 h\
0.2723987 2.222\
5 z M 2.3812501,\
5.159375 H 2.844\
2708 V 5.6223959\
H 2.3812501 Z m\
0,-1.1576946 H \
2.8442708 V 4.46\
47013 H 2.381250\
1 Z m 0,-1.15740\
96 H 2.8442708 V\
3.3072917 H 2.3\
812501 Z\x22 />\x0a <\
/g>\x0a <g\x0a st\
yle=\x22display:non\
e\x22\x0a transfor\
m=\x22translate(-2.\
1166667,-2.11666\
67)\x22\x0a id=\x22la\
yer3\x22>\x0a <path\
\x0a id=\x22path\
271\x22\x0a styl\
e=\x22display:inlin\
e;fill:#bdbdbd;f\
ill-opacity:1;fi\
ll-rule:nonzero;\
stroke:none;stro\
ke-width:0.28493\
6\x22\x0a d=\x22M 5\
.3730769,4.91005\
61 H 6.0854167 V\
5.6223959 H 5.3\
730769 Z m 0,-1.\
0328927 H 6.0854\
167 V 4.5895033 \
H 5.3730769 Z m \
0,-1.0328926 H 6\
.0854167 V 3.556\
6105 H 5.3730769\
Z M 4.3758013,4\
.9100561 H 5.088\
1411 V 5.6223959\
H 4.3758013 Z m\
0,-1.0328927 H \
5.0881411 V 4.58\
95033 H 4.375801\
3 Z m 0,-1.03289\
26 H 5.0881411 V\
3.5566105 H 4.3\
758013 Z M 3.378\
5256,4.9100561 H\
4.0908654 V 5.6\
223959 H 3.37852\
56 Z m 0,-1.0328\
927 H 4.0908654 \
V 4.5895033 H 3.\
3785256 Z m 0,-1\
.0328926 H 4.090\
8654 V 3.5566105\
H 3.3785256 Z M\
2.3812501,4.910\
0561 H 3.0935898\
V 5.6223959 H 2\
.3812501 Z m 0,-\
1.0328927 H 3.09\
35898 V 4.589503\
3 H 2.3812501 Z \
m 0,-1.0328926 H\
3.0935898 V 3.5\
566105 H 2.38125\
01 Z\x22 />\x0a </g>\x0a\
<g\x0a style=\
\x22display:inline\x22\
\x0a id=\x22layer2\
\x22>\x0a <path\x0a \
id=\x22path197\x22\
\x0a style=\x22d\
isplay:inline;fi\
ll:#bdbdbd;fill-\
opacity:1;fill-r\
ule:nonzero;stro\
ke:none;stroke-w\
idth:0.284936\x22\x0a \
d=\x22M 0.264\
58337,3.0427083 \
H 3.96875 V 3.50\
57292 H 0.264583\
37 Z m 0,-1.1576\
946 H 3.96875 V \
2.3480346 H 0.26\
458337 Z m 0,-1.\
15740958 H 3.968\
75 V 1.190625 H \
0.26458337 Z\x22 />\
\x0a </g>\x0a</svg>\x0a\
\x00\x00 ~\
<\
?xml version=\x221.\
0\x22 encoding=\x22UTF\
-8\x22 standalone=\x22\
no\x22?>\x0a<svg\x0a xm\
lns:dc=\x22http://p\
url.org/dc/eleme\
nts/1.1/\x22\x0a xml\
ns:cc=\x22http://cr\
eativecommons.or\
g/ns#\x22\x0a xmlns:\
rdf=\x22http://www.\
w3.org/1999/02/2\
2-rdf-syntax-ns#\
\x22\x0a xmlns:svg=\x22\
http://www.w3.or\
g/2000/svg\x22\x0a x\
mlns=\x22http://www\
.w3.org/2000/svg\
\x22\x0a id=\x22svg8\x22\x0a \
version=\x221.1\x22\x0a\
viewBox=\x220 0 \
8.4666667 8.4666\
667\x22\x0a height=\x22\
32\x22\x0a width=\x2232\
\x22>\x0a <defs\x0a \
id=\x22defs2\x22>\x0a \
<clipPath\x0a \
clipPathUnits=\x22\
userSpaceOnUse\x22\x0a\
id=\x22clipP\
ath203\x22>\x0a <\
path\x0a d=\
\x22M 0,32 H 32 V 0\
H 0 Z\x22\x0a \
id=\x22path201\x22 />\
\x0a </clipPath>\
\x0a <clipPath\x0a \
clipPathUn\
its=\x22userSpaceOn\
Use\x22\x0a id=\x22\
clipPath219\x22>\x0a \
<path\x0a \
d=\x22M 0,32 H 3\
2 V 0 H 0 Z\x22\x0a \
id=\x22path21\
7\x22 />\x0a </clip\
Path>\x0a <clipP\
ath\x0a clipP\
athUnits=\x22userSp\
aceOnUse\x22\x0a \
id=\x22clipPath231\
\x22>\x0a <path\x0a \
d=\x22M 0,3\
2 H 32 V 0 H 0 Z\
\x22\x0a id=\x22p\
ath229\x22 />\x0a <\
/clipPath>\x0a <\
clipPath\x0a \
clipPathUnits=\x22u\
serSpaceOnUse\x22\x0a \
id=\x22clipPa\
th255\x22>\x0a <p\
ath\x0a d=\x22\
M 0,32 H 32 V 0 \
H 0 Z\x22\x0a \
id=\x22path253\x22 />\x0a\
</clipPath>\x0a\
</defs>\x0a <met\
adata\x0a id=\x22m\
etadata5\x22>\x0a <\
rdf:RDF>\x0a <\
cc:Work\x0a \
rdf:about=\x22\x22>\x0a \
<dc:forma\
t>image/svg+xml<\
/dc:format>\x0a \
<dc:type\x0a \
rdf:reso\
urce=\x22http://pur\
l.org/dc/dcmityp\
e/StillImage\x22 />\
\x0a <dc:tit\
le></dc:title>\x0a \
</cc:Work>\x0a\
</rdf:RDF>\x0a \
</metadata>\x0a <\
g\x0a transform\
=\x22matrix(0.75001\
608,0,0,0.750016\
08,1.0582653,1.0\
555027)\x22\x0a st\
yle=\x22display:inl\
ine\x22\x0a id=\x22la\
yer1\x22>\x0a <g\x0a \
transform=\x22\
matrix(0.3527777\
7,0,0,-0.3527777\
7,-1.4110757,9.8\
815066)\x22\x0a \
id=\x22g197\x22\x0a \
style=\x22display:\
inline\x22>\x0a <\
g\x0a clip-\
path=\x22url(#clipP\
ath203)\x22\x0a \
id=\x22g199\x22>\x0a \
<g\x0a \
transform=\x22tr\
anslate(29.8226,\
5.9184)\x22\x0a \
id=\x22g205\x22>\x0a \
<path\x0a \
id=\x22\
path207\x22\x0a \
style=\x22fil\
l:#bdbdbd;fill-o\
pacity:1;fill-ru\
le:nonzero;strok\
e:none\x22\x0a \
d=\x22m 0,0 -1\
8.083,17.5 c -0.\
229,0.22 -0.592,\
0.217 -0.817,-0.\
007 l -1.75,-1.7\
5 c -0.224,-0.22\
5 -0.227,-0.588 \
-0.007,-0.817 l \
17.5,-18.083 c 0\
.11,-0.113 0.261\
,-0.177 0.419,-0\
.177 1.61,0 2.91\
5,1.305 2.915,2.\
915 C 0.177,-0.2\
61 0.113,-0.11 0\
,0\x22 />\x0a <\
/g>\x0a <g\x0a \
transf\
orm=\x22translate(1\
7.7502,29.4158)\x22\
\x0a id=\x22\
g209\x22>\x0a \
<path\x0a \
id=\x22path211\x22\
\x0a st\
yle=\x22fill:#5285a\
6;fill-opacity:1\
;fill-rule:nonze\
ro;stroke:none\x22\x0a\
d=\x22\
m 0,0 c -3.892,0\
.011 -7.626,-1.5\
36 -10.37,-4.296\
l -1.709,-1.708\
c -0.11,-0.109 \
-0.171,-0.258 -0\
.171,-0.413 0,-0\
.966 -0.783,-1.7\
5 -1.75,-1.75 -0\
.155,10e-4 -0.30\
3,-0.06 -0.413,-\
0.17 l -1.166,-1\
.166 c -0.228,-0\
.228 -0.228,-0.5\
97 0,-0.825 l 2.\
333,-2.334 c 0.2\
28,-0.227 0.597,\
-0.227 0.825,0 l\
1.166,1.167 c 0\
.11,0.109 0.172,\
0.257 0.172,0.41\
2 0,0.966 0.783,\
1.75 1.75,1.75 0\
.155,0 0.303,0.0\
61 0.413,0.17 l \
2.853,2.863 c 1.\
952,1.951 4.09,3\
.704 6.385,5.235\
0.267,0.181 0.3\
35,0.544 0.154,0\
.81 C 0.365,-0.0\
98 0.189,-0.003 \
0,0\x22 />\x0a \
</g>\x0a </g>\x0a\
</g>\x0a </g>\x0a\
<g\x0a style=\
\x22display:none\x22\x0a \
id=\x22layer5\x22>\
\x0a <g\x0a t\
ransform=\x22matrix\
(0.27613091,0,0,\
-0.27613091,-0.1\
8472717,8.655196\
1)\x22\x0a id=\x22g\
213\x22\x0a styl\
e=\x22display:inlin\
e\x22>\x0a <g\x0a \
clip-path=\
\x22url(#clipPath21\
9)\x22\x0a id=\
\x22g215\x22>\x0a \
<g\x0a tr\
ansform=\x22transla\
te(11.6355,8.797\
1)\x22\x0a i\
d=\x22g221\x22>\x0a \
<path\x0a \
id=\x22path2\
23\x22\x0a \
style=\x22fill:#52\
85a6;fill-opacit\
y:1;fill-rule:no\
nzero;stroke:non\
e\x22\x0a \
d=\x22m 0,0 10.565,\
10.565 c 2.664,-\
1.101 5.717,0.16\
6 6.818,2.83 0.4\
68,1.132 0.523,2\
.392 0.156,3.559\
l -3.271,-3.271\
-3.423,3.423 3.\
271,3.271 C 11.3\
66,21.242 8.435,\
19.712 7.571,16.\
962 7.204,15.794\
7.259,14.534 7.\
727,13.403 L -2.\
838,2.838 C -5.2\
61,3.698 -7.922,\
2.43 -8.782,0.00\
7 -9.12,-0.945 -\
9.139,-1.981 -8.\
836,-2.945 l 2.9\
19,2.92 3.054,-3\
.054 -2.919,-2.9\
2 c 2.453,-0.77 \
5.066,0.594 5.83\
6,3.047 C 0.357,\
-1.988 0.338,-0.\
952 0,0\x22 />\x0a \
</g>\x0a <\
/g>\x0a </g>\x0a <\
/g>\x0a <g\x0a st\
yle=\x22display:non\
e\x22\x0a id=\x22laye\
r4\x22>\x0a <g\x0a \
transform=\x22ma\
trix(0.26460626,\
0,0,-0.26460626,\
-4.7264532e-4,8.\
3085907)\x22\x0a \
id=\x22g249\x22\x0a \
style=\x22display\
:inline\x22>\x0a \
<g\x0a clip\
-path=\x22url(#clip\
Path255)\x22\x0a \
id=\x22g251\x22>\x0a \
<g\x0a \
transform=\x22t\
ranslate(21.6563\
,24.8375)\x22\x0a \
id=\x22g257\x22>\
\x0a <path\
\x0a id\
=\x22path259\x22\x0a \
style=\x22f\
ill:#bdbdbd;fill\
-opacity:1;fill-\
rule:nonzero;str\
oke:none\x22\x0a \
d=\x22M 0,0 \
H -2.844 V -2.81\
3 H -9.719 V -6.\
25 c 0,-0.345 0.\
28,-0.625 0.625,\
-0.625 v -2.219 \
c 0,-1.294 1.05,\
-2.344 2.344,-2.\
344 h 2.188 c 1.\
294,0 2.343,1.05\
2.343,2.344 v 1\
.282 h -2.799 c \
-0.252,0 -0.47,0\
.193 -0.482,0.44\
6 -0.012,0.269 0\
.202,0.491 0.469\
,0.491 H 0 c 1.2\
94,0 2.344,1.049\
2.344,2.344 v 2\
.187 C 2.344,-1.\
049 1.294,0 0,0\x22\
/>\x0a </g>\
\x0a <g\x0a \
transform\
=\x22translate(13.8\
125,21.4)\x22\x0a \
id=\x22g261\x22>\
\x0a <path\
\x0a id\
=\x22path263\x22\x0a \
style=\x22f\
ill:#5285a6;fill\
-opacity:1;fill-\
rule:nonzero;str\
oke:none\x22\x0a \
d=\x22m 0,0 \
h 4.375 c 0.69,0\
1.25,0.56 1.25,\
1.25 V 5.656 C 5\
.625,6.949 4.574\
,8 3.281,8 H 1.0\
94 C -0.199,8 -1\
.25,6.949 -1.25,\
5.656 V 4.375 h \
2.812 c 0.259,0 \
0.469,-0.21 0.46\
9,-0.469 0,-0.25\
9 -0.21,-0.468 -\
0.469,-0.468 h -\
5.031 c -1.292,0\
-2.343,-1.052 -\
2.343,-2.344 v -\
2.188 c 0,-1.292\
1.051,-2.344 2.\
343,-2.344 H -1.\
25 V -1.25 C -1.\
25,-0.56 -0.69,0\
0,0\x22 />\x0a \
</g>\x0a <g\
\x0a tran\
sform=\x22translate\
(15.375,27.4937)\
\x22\x0a id=\
\x22g265\x22>\x0a \
<path\x0a \
id=\x22path267\
\x22\x0a s\
tyle=\x22fill:#3232\
32;fill-opacity:\
1;fill-rule:nonz\
ero;stroke:none\x22\
\x0a d=\
\x22m 0,0 c 0,-0.25\
9 -0.21,-0.469 -\
0.469,-0.469 -0.\
259,0 -0.468,0.2\
1 -0.468,0.469 0\
,0.259 0.209,0.4\
69 0.468,0.469 C\
-0.21,0.469 0,0\
.259 0,0\x22 />\x0a \
</g>\x0a \
<g\x0a \
transform=\x22trans\
late(17.5625,15.\
3062)\x22\x0a \
id=\x22g269\x22>\x0a \
<path\x0a \
id=\x22pa\
th271\x22\x0a \
style=\x22fill:\
#323232;fill-opa\
city:1;fill-rule\
:nonzero;stroke:\
none\x22\x0a \
d=\x22m 0,0 c 0,\
-0.259 -0.21,-0.\
469 -0.469,-0.46\
9 -0.259,0 -0.46\
8,0.21 -0.468,0.\
469 0,0.259 0.20\
9,0.469 0.468,0.\
469 C -0.21,0.46\
9 0,0.259 0,0\x22 /\
>\x0a </g>\x0a \
</g>\x0a </\
g>\x0a </g>\x0a <g\x0a \
style=\x22displ\
ay:none\x22\x0a tr\
ansform=\x22matrix(\
0.78385119,0,0,0\
.78385119,0.9151\
1292,0.92384553)\
\x22\x0a | |
# Repository: MITLLRacecar/racecar-parth-kocheta — file: labs/final/grandprix_copy.py
"""
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
Grand Prix 2021
"""
########################################################################################
# Imports
########################################################################################
import sys
import cv2 as cv
import numpy as np
from simple_pid import PID
sys.path.insert(0, "../../library")
import racecar_core
import racecar_utils as rc_utils
from racecar_utils import ARMarker
from enum import IntEnum
########################################################################################
# Global variables
########################################################################################
# The single racecar object used by every function in this module.
rc = racecar_core.create_racecar()
### LINE FOLLOWING ###
# Color triples below appear to be (lower bound, upper bound, label);
# presumably HSV ranges consumed by rc_utils contour search — TODO confirm.
BLUE_CONE = ((100,150,150), (120,255,255),"BLUE")
# NOTE(review): lower hue 170 > upper hue 10 — presumably a wrap-around
# red range; verify rc_utils handles wrapped hue ranges.
RED_CONE = ((170, 150, 150), (10, 255, 255), "RED")
BLUE = ((88, 245, 199), (108, 255, 255), "BLUE")
RED = ((0, 50, 50), (20, 255, 255), "RED")
ORANGE = ((7, 172, 78), (27, 192, 158), "ORANGE")
GREEN = ((40, 60, 60), (90, 255, 255), "GREEN")
WHITE = ((90, 20, 200), (115, 60, 255), "WHITE")
ORANGEMARKER = ((7, 172, 78), (27, 192, 158), "ORANGE")# used via potential_colors_markers
ORANGELINE = ((5, 245, 215), (25, 255, 255), "ORANGE")
PURPLEMARKER = ((121, 192, 109), (141, 212, 189), "PURPLE")# used via potential_colors_markers
PURPLELINE = ((125, 245, 215), (145, 255, 255), "PURPLE")
### LIDAR WINDOW ###
# Angle windows in degrees (start, end) for sampling lidar sectors.
FRONT_WINDOW = (-10, 10)
RIGHT_FRONT_WINDOW = (40, 50)
LEFT_FRONT_WINDOW = (310, 320)
LEFT_WINDOW_LIDAR = (-135, -45)
RIGHT_WINDOW_LIDAR = (45, 135)
FRONT_WINDOW_LIDAR = (-10, 10)
BACK_WINDOW_LIDAR = (170, 190)
### CROPPED IMAGE ###
# ((top-left row, col), (bottom-right row, col)) crop keeping the floor region.
CROP_FLOOR = ((360, 0), (rc.camera.get_height(), rc.camera.get_width()))
# Color sets handed to the detectors for lines, AR markers, and marker lines.
potential_colors = [BLUE, RED, GREEN]
potential_colors_markers = [PURPLEMARKER, ORANGEMARKER]
potential_colors_lines = [PURPLELINE, ORANGELINE]
# Drive command state shared across frames.
speed = 0
angle = 0
# Per-challenge completion flags.
wall_follow_end = False
final_jump_end = False
elevator_end = False
canyon_end = False
orange_pillar_end = False
cone_end = False
rightLine = 0
time = 0.0
# Camera values.
contour_center = None
contour_area = 0
cur_color = None
contour_distance = 0.0
cone_counter = 0
prev_color = None
# Contours smaller than this are ignored.
MIN_CONTOUR_AREA = 10
currentColor = None
counter = 0
finalJump = False
arColor = None
# Mode flags and line-following bookkeeping.
wallfollow = False
linefollow = False
order = (GREEN, RED, BLUE)
turnright= False
turnleft = False
firstLoop = True
firstTurn = True
cur_side = "START"
foundLines = False
straight = False
########################################################################################
# Functions
########################################################################################
### State Machine ###
class State(IntEnum):
    """Driving modes for the state machine dispatched in update()."""
    greenLine = 0
    wallFollow = 1
    purpleLine = 2
    elevator = 3
    cone = 4
    train = 5
    orangePlate = 6
    jump = 7
    orangePillar = 8
    nothing = 9
    greenLineFast = 10
    greenLineSlow = 11
# Current state; the car starts by following the green line at high speed.
curState = State.greenLineFast
### Initialization ###
def start():
    """Run once when the start button is pressed: begin from a standstill."""
    rc.drive.stop()
def update():
    """
    After start() is run, this function is run every frame until the back button
    is pressed.

    Each frame: read the camera/depth images, use the closest AR marker (when
    near enough) to switch the state machine, dispatch to the active state's
    handler, run any pending scripted maneuvers, and push the resulting
    speed/angle to the car.
    """
    global curState, arColorGlobal, counter, wallfollow, orange_pillar_end, speed, ar_marker
    color_image = rc.camera.get_color_image()
    depth_image = rc.camera.get_depth_image()
    markers = rc_utils.get_ar_markers(color_image)
    ar_marker = None
    # Check to see if there are any markers detected and grab the closest marker.
    if len(markers) > 0:
        ar_marker = markers[0]
        ar_marker.detect_colors(color_image, potential_colors_markers)
        arColor = ar_marker.get_color()
        arColorGlobal = arColor
    # Get the car's distance from the marker via the depth image.
    if ar_marker is not None:
        corners = ar_marker.get_corners()
        # NOTE(review): corners appear to be (row, col) pairs, so "centerX" is
        # a row index and "centerY" a column index -- confirm against rc_utils.
        centerX = (corners[0][0] + corners[3][0]) // 2
        centerY = (corners[0][1] + corners[3][1]) // 2
        marker_distance = depth_image[centerX][centerY]
    else:
        marker_distance = None
    # State transitions: react only to markers with a valid (non-zero) depth.
    if ar_marker is not None and marker_distance != 0.0:
        if marker_distance < 80:
            if ar_marker.get_id() == 0:
                if wall_follow_end is False:
                    curState = State.wallFollow
                elif wall_follow_end is True:
                    curState = State.nothing
                    wallfollow = True
            elif ar_marker.get_id() == 5:
                curState = State.greenLineFast
            elif ar_marker.get_id() == 6:
                # BUG FIX: was the redundant double assignment
                # "curState = curState = State.greenLine".
                curState = State.greenLine
            elif ar_marker.get_id() == 8:
                curState = State.greenLineFast
            if ar_marker.get_id() == 3:
                curState = State.elevator
            elif ar_marker.get_id() == 7:
                curState = State.greenLineFast
        if marker_distance < 60 and ar_marker.get_id() == 4:
            curState = State.greenLineSlow
        elif ar_marker.get_id() == 1:
            speed = 0.5
            # NOTE(review): newCanyon(arColorGlobal) below relies on
            # arColorGlobal having been set by a previously seen marker;
            # confirm purpleLine can never be entered before any marker.
            curState = State.purpleLine
            if canyon_end == True:
                curState = State.greenLine
    # Dispatch to the handler for the current state.
    if curState == State.greenLineFast:
        followLineFast()
    elif curState == State.wallFollow:
        wallFollow()
        if wall_follow_end is True:
            curState = State.greenLine
    elif curState == State.elevator:
        parkInElevator()
        if elevator_end is True:
            curState = State.greenLine
    elif curState == State.cone:
        cone()
        if cone_end is True:
            curState = State.greenLine
    elif curState == State.greenLineSlow:
        followLineSlow()
    elif curState == State.purpleLine:
        newCanyon(arColorGlobal)
        speed = 0.75
        if canyon_end == True:
            curState = State.greenLine
    elif curState == State.greenLine:
        followLine()
    elif curState == State.jump:
        curState = State.greenLineFast
    # Scripted maneuvers toggled by the orange-pillar / wall logic.
    if wallfollow == True:
        orangeColumns()
    if turnright == True:
        turn_right()
    if turnleft == True:
        turn_left()
    if straight == True:
        straight2()
    if orange_pillar_end is True:
        curState = State.greenLine
        orange_pillar_end = False
    print(curState)
    rc.drive.set_speed_angle(speed, angle)
def update_contour(color):
    """Update the contour_center/contour_area globals from the current frame.

    Crops the frame to the floor region, finds the largest contour of the
    given (lower HSV, upper HSV, label) color, and draws it for debugging.
    Both globals are cleared when no frame or no large-enough contour exists.
    """
    global contour_center
    global contour_area
    frame = rc.camera.get_color_image()
    if frame is None:
        contour_center = None
        contour_area = 0
        return
    frame = rc_utils.crop(frame, CROP_FLOOR[0], CROP_FLOOR[1])
    found = rc_utils.find_contours(frame, color[0], color[1])
    largest = rc_utils.get_largest_contour(found, 1000)
    if largest is None:
        contour_center = None
        contour_area = 0
        return
    contour_center = rc_utils.get_contour_center(largest)
    contour_area = rc_utils.get_contour_area(largest)
    rc_utils.draw_contour(frame, largest)
    rc_utils.draw_circle(frame, contour_center)
def _follow_green_line(target_speed):
    """Steer toward the largest green contour and drive at target_speed.

    Shared implementation for the three followLine* variants, which differed
    only in the speed constant. Updates the speed/angle globals; when no green
    contour is visible, both are left unchanged (matching the originals).
    """
    global speed
    global angle
    update_contour(GREEN)
    imgX = rc.camera.get_width()
    if contour_center is not None:
        # Map the contour's column (0..width) onto a full-range steering angle.
        angle = rc_utils.remap_range(contour_center[1], 0, imgX, -1, 1)
        speed = target_speed
def followLineSlow():
    """Follow the green line at low speed."""
    _follow_green_line(1.1)
def followLine():
    """Follow the green line at medium speed."""
    _follow_green_line(1.35)
def followLineFast():
    """Follow the green line at high speed."""
    _follow_green_line(2.4)
### Wall Follow ###
def wallFollow():
    """Center the car between walls using lidar; exit when the green line
    reappears with no AR marker in view.

    Steering is proportional to the difference between the right-front and
    left-front lidar distances (clamped to 50 cm) whenever something is within
    170 cm ahead; otherwise the car drives straight. Seeing the green contour
    while no marker is visible marks the section finished (wall_follow_end).
    """
    global speed
    global angle
    global wall_follow_end
    global contour_center
    update_contour(GREEN)
    scan = rc.lidar.get_samples()
    color_image = rc.camera.get_color_image()
    depth_image = rc.camera.get_depth_image()
    # Closest points in the right-front, left-front and front angle windows.
    rf_angle, rf_dist = rc_utils.get_lidar_closest_point(scan, RIGHT_FRONT_WINDOW)
    lf_angle, lf_dist = rc_utils.get_lidar_closest_point(scan, LEFT_FRONT_WINDOW)
    front_angle, front_dist = rc_utils.get_lidar_closest_point(scan, FRONT_WINDOW)
    markers = rc_utils.get_ar_markers(color_image)
    ar_marker: ARMarker = None
    # Check to see if there are any markers detected and grab the closest marker.
    if len(markers) > 0:
        ar_marker = markers[0]
    if front_dist < 170:
        # Turn toward the side with more room, scaled by the distance gap.
        if rf_dist > lf_dist:
            dif_dist_r = rc_utils.clamp(rf_dist - lf_dist, 0, 50)
            angle = rc_utils.remap_range(dif_dist_r, 0, 50, 0, 1)
        elif lf_dist > rf_dist:
            dif_dist_l = rc_utils.clamp(lf_dist - rf_dist, 0, 50)
            angle = rc_utils.remap_range(dif_dist_l, 0, 50, 0, -1)
    if contour_center is not None and ar_marker is None:
        # Green line visible and no marker: the wall section is complete.
        print("end")
        angle = 0
        speed = 1
        wall_follow_end = True
    else:
        angle = 0
        speed = 1.2
### Elevator Parking ###
def parkInElevator():
    """Approach the elevator AR marker and stop/go based on its color.

    A far-away marker (>200 depth units) or a blue/orange marker means it is
    safe to drive; a red marker means wait. Seeing a marker whose id is not 3
    sets elevator_end so update() can return to line following.
    """
    global speed, angle, curState, elevator_end
    blue = ((90, 100, 100), (120, 255, 255), "blue")
    red = ((170, 100, 100), (10, 255, 255), "red")
    orange = ((7, 172, 78), (27, 192, 158), "orange")
    potential_colors = [blue, red, orange]
    color_image = rc.camera.get_color_image()
    depth_image = rc.camera.get_depth_image()
    markers = rc_utils.get_ar_markers(color_image)
    update_contour(GREEN)
    angle = 0
    if len(markers) > 0:
        marker = markers[0]
        corners = marker.get_corners()
        centerX = (corners[0][0] + corners[3][0]) //2
        centerY = (corners[0][1] + corners[3][1]) //2
        # Steer toward the marker's horizontal position in the frame.
        angle = rc_utils.remap_range(centerY, 0, rc.camera.get_width(), -1, 1)
        angle = rc_utils.clamp(angle, -1, 1)
        marker_distance = depth_image[centerX][centerY]
        marker.detect_colors(color_image, potential_colors)
        # BUG FIX: the orange check previously compared the bound method itself
        # ("marker.get_color == \"orange\"") instead of calling it, so the
        # orange branch could never trigger.
        if marker_distance > 200 or marker.get_color() == "blue" or marker.get_color() == "orange":
            speed = 1
        elif marker.get_color() == "red":
            speed = 0
        if marker.get_id() != 3:
            elevator_end = True
######################################################################################################################################
def turn_right():
    """Scripted right arc: steer right for ~1.35 s, then resume wall logic."""
    global counter, angle, speed, wallfollow, turnright, cur_side
    if counter >= 1.35:
        # Maneuver finished: hand control back and record which side we took.
        turnright = False
        wallfollow = True
        cur_side = "RIGHT"
        return
    counter += rc.get_delta_time()
    angle = 0.55
    speed = 1
def turn_left():
    """Scripted left arc: steer left for ~1.35 s, then resume wall logic."""
    global counter, angle, wallfollow, speed, cur_side, turnleft
    if counter >= 1.35:
        # Maneuver finished: hand control back and record which side we took.
        turnleft = False
        wallfollow = True
        cur_side = "LEFT"
        return
    counter += rc.get_delta_time()
    angle = -0.55
    speed = 1
def straight2():
    """Scripted straight segment: drive straight for ~2.4 s, then resume."""
    global counter, angle, wallfollow, speed, straight
    if counter >= 2.4:
        # Maneuver finished: hand control back to the wall-follow logic.
        straight = False
        wallfollow = True
        return
    counter += rc.get_delta_time()
    angle = 0
    speed = 1
#####################################################################################################################################################################
def update_contour_canyon(colorList, image, crop = True):
    """Update contour_center/contour_area/currentColor from the given image.

    Tries each color in colorList in order and keeps the first one that yields
    any contours; currentColor is set to that color's label when a big-enough
    contour is found. NOTE(review): if colorList is empty, "contours" is never
    bound and the get_largest_contour call raises NameError -- confirm callers
    always pass a non-empty list.
    """
    global contour_center
    global contour_area
    global currentColor
    global canyon_end
    if image is None:
        contour_center = None
        contour_area = 0
    else:
        #print("in update cont")
        if crop:
            image = rc_utils.crop(image, CROP_FLOOR[0], CROP_FLOOR[1])
        #rc.display.show_color_image(image)
        # Stop at the first color that produces any contours; "color" keeps its
        # last loop value and is read again below (Python leaks the loop var).
        for color in colorList:
            contours = rc_utils.find_contours(image, color[0], color[1])
            if len(contours) != 0:
                break
        contour = rc_utils.get_largest_contour(contours, MIN_CONTOUR_AREA)
        if contour is not None:
            #print("found cont")
            contour_center = rc_utils.get_contour_center(contour)
            contour_area = rc_utils.get_contour_area(contour)
            rc_utils.draw_contour(image, contour)
            rc_utils.draw_circle(image, contour_center)
            currentColor = color[2]
           # print(f"Current color: {currentColor}")
        else:
            contour_center = None
            contour_area = 0
    #rc.display.show_color_image(image)
def newCanyon(arColor):
global speed
global angle
global rightLine
global image, defaultUncroppedImage, foundLines
global canyon_end
global contour_center
global contour_area
#print(order)
speed = -3
image = rc.camera.get_color_image()
defaultUncroppedImage = image
#update_contour_canyon([WHITE], defaultUncroppedImage, False)
#rc.display.show_color_image(image)
if | |
import logging
import os
import requests
import shutil
import tempfile
import unittest
from opusfilter import ConfigurationError
from opusfilter.filters import *
from opusfilter.util import file_download
class TestLengthFilter(unittest.TestCase):
    """Tests for LengthFilter with word, character and mixed length units."""

    def _check(self, testfilter, cases, expected):
        # Pair every score with the filter's accept decision and compare to
        # the expected (score, accept) tuples.
        outcomes = [(score, testfilter.accept(score)) for score in testfilter.score(cases)]
        for outcome, wanted in zip(outcomes, expected):
            self.assertSequenceEqual(outcome, wanted)

    def test_words(self):
        self._check(LengthFilter(2, 3, 'word'),
                    [['a'], ['a dog'], ['a dog went out']],
                    [([1], False), ([2], True), ([4], False)])

    def test_chars(self):
        self._check(LengthFilter(4, 10, 'character'),
                    [['a'], ['a dog'], ['a dog went out']],
                    [([1], False), ([5], True), ([14], False)])

    def test_chars_bilingual(self):
        self._check(LengthFilter([6, 8], [18, 15], 'char'),
                    [['table', 'pyödällä'], ['kitchen table', 'keittiöpöydällä'],
                     ['on the kitchen table', 'keittiöpöydällä']],
                    [([5, 8], False), ([13, 15], True), ([20, 15], False)])

    def test_mixed_bilingual(self):
        self._check(LengthFilter([2, 8], [4, 15], ['word', 'char']),
                    [['table', 'pyödällä'], ['kitchen table', 'keittiöpöydällä'],
                     ['on the kitchen table', 'keittiöpöydällä']],
                    [([1, 8], False), ([2, 15], True), ([4, 15], True)])
class TestLengthRatioFilter(unittest.TestCase):
    """Tests for LengthRatioFilter with char and mixed length units."""

    def _check(self, testfilter, cases, expected):
        outcomes = [(score, testfilter.accept(score)) for score in testfilter.score(cases)]
        for outcome, wanted in zip(outcomes, expected):
            self.assertSequenceEqual(outcome, wanted)

    def test_chars_bilingual(self):
        cases = [['table', 'keittiöpyödällä'], ['kitchen table', 'keittiöpöydällä'],
                 ['on the kitchen table', 'keittiöpöydällä']]
        # Expected score is the longer/shorter character-length ratio per pair.
        self._check(LengthRatioFilter(2, 'char'), cases,
                    [(len(cases[0][1]) / len(cases[0][0]), False),
                     (len(cases[1][1]) / len(cases[1][0]), True),
                     (len(cases[2][0]) / len(cases[2][1]), True)])

    def test_mixed_bilingual(self):
        self._check(LengthRatioFilter(2, ['word', 'char']),
                    [['table', '桌'], ['table', '厨房的桌子'], ['kitchen table', '厨房的桌子'],
                     ['on the kitchen table', '在厨房的桌子上']],
                    [(1, True), (5, False), (2.5, False), (7 / 4, True)])
class TestLongWordFilter(unittest.TestCase):
    """Tests for LongWordFilter (longest word length per segment)."""

    def test_bilingual(self):
        testfilter = LongWordFilter(10)
        cases = [['a bbbbb bbbbbbb', 'c d'], ['aa bbbbbbbbbb', 'c dd e'],
                 ['a bbb aa', 'c ddddddddddd ee'], ['', '']]
        expected = [([7, 1], True), ([10, 2], False), ([3, 11], False), ([0, 0], True)]
        for score, wanted in zip(testfilter.score(cases), expected):
            self.assertSequenceEqual((score, testfilter.accept(score)), wanted)
class TestHtmlTagFilter(unittest.TestCase):
    """Tests for HtmlTagFilter: per-segment flags are True when a tag is found."""

    def _score(self, testfilter, cases):
        # (score, accept) outcome for every case.
        return [(score, testfilter.accept(score)) for score in testfilter.score(cases)]

    def _check(self, outcomes, expected):
        for outcome, wanted in zip(outcomes, expected):
            self.assertSequenceEqual(outcome, wanted)

    def test_bilingual(self):
        cases = [['aaa bee', 'cee dee'], ['aa <br> bee', 'cee deee'],
                 ['<p>aaa bee</p>', '<p>cee dee</p>'], ['', '']]
        self._check(self._score(HtmlTagFilter(), cases),
                    [([False, False], True), ([True, False], False),
                     ([True, True], False), ([False, False], True)])

    def test_broken(self):
        cases = [['<aaa bee', 'cee dee'], ['aa br> bee', 'cee deee'],
                 ['<p aaa bee</p', 'p>cee dee /p>'], ['', ''],
                 ['<![ foo', 'foo']]
        outcomes = self._score(HtmlTagFilter(), cases)
        # Keep the diagnostic output from the original test.
        logging.warning(outcomes)
        self._check(outcomes,
                    [([False, False], True), ([False, False], True),
                     ([False, False], True), ([False, False], True),
                     ([True, False], False)])
class TestRegExpFilter(unittest.TestCase):
    """Tests for RegExpFilter with accept_match=False and accept_match=True."""

    def _check(self, testfilter, cases, expected):
        outcomes = [(score, testfilter.accept(score)) for score in testfilter.score(cases)]
        for outcome, wanted in zip(outcomes, expected):
            self.assertSequenceEqual(outcome, wanted)

    def test_bilingual(self):
        # 'a^' can never match, so only the first regexp is effective.
        self._check(RegExpFilter(regexps=['[0-9]', 'a^'], accept_match=False),
                    [['aaa', 'bbbb'], ['123', 'bbb'], ['hi123!!!', 'hey...'], ['', '']],
                    [([False, False], True), ([True, False], False),
                     ([True, False], False), ([False, False], True)])

    def test_accept_match(self):
        self._check(RegExpFilter(regexps='^[ a-z]*$', accept_match=True),
                    [['aaa'], ['123'], ['hey...'], ['']],
                    [([True], True), ([False], False), ([False], False), ([True], True)])
class TestAlphabetRatioFilter(unittest.TestCase):
    """Tests for AlphabetRatioFilter with and without whitespace exclusion."""

    def _check(self, testfilter, cases, expected):
        outcomes = [(score, testfilter.accept(score)) for score in testfilter.score(cases)]
        for outcome, wanted in zip(outcomes, expected):
            self.assertSequenceEqual(outcome, wanted)

    def test_bilingual(self):
        self._check(AlphabetRatioFilter(threshold=[0.5, 0.5]),
                    [['aaa', 'bbbb'], ['123', 'bbb'], ['hi!!!', 'hey...'], [' a  ', 'b '], ['', '']],
                    [([1, 1], True), ([0, 1], False), ([0.4, 0.5], False),
                     ([0.25, 0.5], False), ([1, 1], True)])

    def test_exclude_whitespace(self):
        self._check(AlphabetRatioFilter(threshold=0.5, exclude_whitespace=True),
                    [['a aa'], ['123 '], ['hi  !!!'], ['   ']],
                    [([1], True), ([0], False), ([0.4], False), ([1], True)])
class TestLongestCommonSubstringFilter(unittest.TestCase):
    """Tests for LongestCommonSubstringFilter on 2- and 3-language tuples."""

    bi_inputs = [('abcd', 'abcd'), ('abcd', 'efgh'), ('abcd', 'cdgh'), ('abcd', ''), ('', ''),
                 ('abcd', 'bc'), ('abcd', 'ab abcd cd'), ('abcd ', ' abcd'), ('ab cd', 'a bc d')]
    tri_inputs = [('abcd', 'abcd', 'abcd'), ('abcd', 'abcd', 'efgh'), ('abcd', '', ''), ('', '', ''),
                  ('abcd', 'abc', 'bc'), ('abcd', 'xbcd', 'xabx')]

    def _check(self, testfilter, inputs, expected):
        outcomes = [(score, testfilter.accept(score)) for score in testfilter.score(inputs)]
        for outcome, wanted in zip(outcomes, expected):
            self.assertSequenceEqual(outcome, wanted)

    def test_bilingual(self):
        self._check(LongestCommonSubstringFilter(threshold=0.8, require_all=True),
                    self.bi_inputs,
                    [([1], False), ([0], True), ([0.5], True), ([0], True), ([0], True),
                     ([1], False), ([1], False), ([0.8], False), ([0.2], True)])

    def test_trilingual(self):
        self._check(LongestCommonSubstringFilter(threshold=0.75, require_all=True),
                    self.tri_inputs,
                    [([1, 1, 1], False), ([1, 0, 0], False), ([0, 0, 0], True), ([0, 0, 0], True),
                     ([1, 1, 1], False), ([0.75, 0.5, 0.25], False)])

    def test_trilingual_any(self):
        self._check(LongestCommonSubstringFilter(threshold=0.75, require_all=False),
                    self.tri_inputs,
                    [([1, 1, 1], False), ([1, 0, 0], True), ([0, 0, 0], True), ([0, 0, 0], True),
                     ([1, 1, 1], False), ([0.75, 0.5, 0.25], True)])
class TestSimilarityFilter(unittest.TestCase):
    """Tests for SimilarityFilter at character and word granularity."""
    # Bilingual pairs shared by the bilingual test methods.
    bi_inputs = [
        ('abcd', 'abcd'),
        ('abcd', 'efgh'),
        ('abcd', 'ABCD'),
        ('big hat.', 'big hat.'),
        ('big hat.', 'Big Hat.'),
        ('big hat.', 'pig cat.'),
        ('big hat.', 'hat big.'),
    ]
    # Trilingual tuples shared by the trilingual test methods.
    tri_inputs = [
        ('abcd', 'abcd', 'abcd'),
        ('abcd', 'abcd', 'efgh'),
        ('abcd', 'efgh', 'ijkl')
    ]
    def test_bilingual(self):
        testfilter = SimilarityFilter(threshold=0.7)
        expected = [([1], False), ([0], True), ([0.0], True),
                    ([1], False), ([0.75], False), ([0.75], False), ([0.25], True)]
        results = [(x, testfilter.accept(x)) for x in testfilter.score(self.bi_inputs)]
        for result, correct in zip(results, expected):
            self.assertSequenceEqual(result, correct)
    def test_bilingual_lowercase(self):
        testfilter = SimilarityFilter(threshold=0.7, lowercase=True)
        expected = [([1], False), ([0], True), ([1.0], False),
                    ([1], False), ([1], False), ([0.75], False), ([0.25], True)]
        results = [(x, testfilter.accept(x)) for x in testfilter.score(self.bi_inputs)]
        for result, correct in zip(results, expected):
            self.assertSequenceEqual(result, correct)
    def test_bilingual_word(self):
        testfilter = SimilarityFilter(threshold=0.7, unit='word')
        expected = [([1], False), ([0], True), ([0], True),
                    ([1], False), ([0], True), ([0], True), ([0], True)]
        results = [(x, testfilter.accept(x)) for x in testfilter.score(self.bi_inputs)]
        for result, correct in zip(results, expected):
            self.assertSequenceEqual(result, correct)
    def test_bilingual_word_lowercase(self):
        testfilter = SimilarityFilter(threshold=0.7, unit='word', lowercase=True)
        expected = [([1], False), ([0], True), ([1], False),
                    ([1], False), ([1], False), ([0], True), ([0], True)]
        results = [(x, testfilter.accept(x)) for x in testfilter.score(self.bi_inputs)]
        for result, correct in zip(results, expected):
            self.assertSequenceEqual(result, correct)
    def test_trilingual(self):
        # NOTE(review): this instantiates LongestCommonSubstringFilter inside
        # TestSimilarityFilter -- likely a copy-paste from the class above;
        # confirm whether SimilarityFilter was intended here.
        testfilter = LongestCommonSubstringFilter(threshold=0.7, require_all=True)
        expected = [([1, 1, 1], False), ([1, 0, 0], False), ([0, 0, 0], True)]
        results = [(x, testfilter.accept(x)) for x in testfilter.score(self.tri_inputs)]
        for result, correct in zip(results, expected):
            self.assertSequenceEqual(result, correct)
    def test_trilingual_any(self):
        # NOTE(review): same copy-paste concern as test_trilingual above.
        testfilter = LongestCommonSubstringFilter(threshold=0.7, require_all=False)
        expected = [([1, 1, 1], False), ([1, 0, 0], True), ([0, 0, 0], True)]
        results = [(x, testfilter.accept(x)) for x in testfilter.score(self.tri_inputs)]
        for result, correct in zip(results, expected):
            self.assertSequenceEqual(result, correct)
class TestLangIDMethod(unittest.TestCase):
    """Base class providing shared (English, French) pairs for language-ID tests."""
    # "<NAME>" looks like an anonymized placeholder -- TODO confirm fixture data.
    pairs_inputs = [
        ("This sentence is in english", "Je suis une phrase en français"),
        ("<NAME>", "je m'appelle Bernard")
    ]
class TestLangId(TestLangIDMethod):
    """Tests for LanguageIDFilter with the 'langid' backend."""

    def _assert_accepts(self, model, expecteds):
        # Compare the accept() decision for each scored pair to the expectation.
        for pair_score, pair_expected in zip(model.score(self.pairs_inputs), expecteds):
            self.assertEqual(model.accept(pair_score), pair_expected)

    def test_accept(self):
        model = LanguageIDFilter(
            languages=['en', 'fr'], id_method='langid', thresholds=[0.8, 0.99])
        self._assert_accepts(model, [True, False])

    def test_accept_with_set_languages(self):
        # Restricting langid to fr/de makes the English side unidentifiable.
        model = LanguageIDFilter(
            languages=['en', 'fr'], id_method='langid', thresholds=[0.8, 0.99],
            langid_languages=['fr', 'de'])
        self._assert_accepts(model, [False, False])
class TestCLD2(TestLangIDMethod):
    """Tests for LanguageIDFilter with the 'cld2' backend."""

    pairs_inputs = [
        ("This sentence is in english", "Je suis une phrase en français"),
        ("<NAME>", "<NAME>"),
        ("english sentence", "phrase français")
    ]

    def test_accept(self):
        model = LanguageIDFilter(
            languages=['en', 'fr'], id_method='cld2', thresholds=[0.9, 0.9])
        for pair_score, pair_expected in zip(model.score(self.pairs_inputs), [True, False, False]):
            self.assertEqual(model.accept(pair_score), pair_expected)

    def test_accept_with_options(self):
        # bestEffort lets cld2 guess on the short third pair as well.
        model = LanguageIDFilter(
            languages=['en', 'fr'], id_method='cld2', thresholds=[0.9, 0.9],
            cld2_options={'bestEffort': True})
        for pair_score, pair_expected in zip(model.score(self.pairs_inputs), [True, False, True]):
            logging.info('%s %s', pair_score, pair_expected)
            self.assertEqual(model.accept(pair_score), pair_expected)
class TestFasttext(TestLangIDMethod):
fasttext_inputs = ["This sentence is in english", "Je suis une phrase en français"]
model_url = 'https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.ftz'
@classmethod
def setUpClass(self):
self.tempdir = tempfile.mkdtemp()
self.testmodel = os.path.join(self.tempdir, 'model.ftz')
try:
file_download(self.model_url, self.testmodel)
except | |
ERROR: type should be string, got "https://raw.githubusercontent.com/selva86/datasets/master/wwwusage.csv\n# Import data\ndf = pd.read_csv('../data/wwwusage.csv', names=['value'], header=0)\nplt.figure(figsize=(15, 2))\nplt.plot(df)\nplt.title('Original Series')\nplt.xlabel('Time')\nplt.ylabel('Value')\nplt.show()\n\nInitial eyeballing shows that there is a trend for this time series and is non-stationary. Checking using ADF:\n\nresult = adfuller(df.value.dropna())\nprint('ADF Statistic: %f' % result[0])\nprint('p-value: %f' % result[1])\n\nThe null hypothesis of the ADF test is that the time series is non-stationary. So, if the p-value of the test is less than the significance level (0.05) then you reject the null hypothesis and infer that the time series is indeed stationary. For our example, we fail to reject the null hypothesis.\n\nNext we difference our time series and check the results of the ADF test. We will also look at the ACF.\n\nplt.rcParams.update({'figure.figsize':(15,8), 'figure.dpi':120})\n\n# Original Series\nfig, axes = plt.subplots(3, 2, sharex=True)\naxes[0, 0].plot(df.value); axes[0, 0].set_title('Original Series')\nplot_acf(df.value, ax=axes[0, 1])\n\n# 1st Differencing\naxes[1, 0].plot(df.value.diff()); axes[1, 0].set_title('1st Order Differencing')\nplot_acf(df.value.diff().dropna(), ax=axes[1, 1])\n\n# 2nd Differencing\naxes[2, 0].plot(df.value.diff().diff()); axes[2, 0].set_title('2nd Order Differencing')\nplot_acf(df.value.diff().diff().dropna(), ax=axes[2, 1])\n\nplt.show()\n\nprint('ADF Statistic for 1st Order Differencing')\nresult = adfuller(df.value.diff().dropna())\nprint('ADF Statistic: %f' % result[0])\nprint('p-value: %f' % result[1])\nprint('Critical Values:')\nfor key, value in result[4].items():\n print('\\t%s: %.3f' % (key, value))\n\nprint('\\n ADF Statistic for 2nd Order Differencing')\nresult = adfuller(df.value.diff().diff().dropna())\nprint('ADF Statistic: %f' % result[0])\nprint('p-value: %f' % 
result[1])\nprint('Critical Values:')\nfor key, value in result[4].items():\n print('\\t%s: %.3f' % (key, value))\n\nGiven the results of our ACF and ADF, we can see that our time series reachees stationarity after two orders of differencing. However, the ACF of the 2nd order differencing goes into the negative zone fairly quick. This can indicates that the series might have been over differenced. It is now up to us if we want consider the first or second order differencing for our ARIMA models.\n\n#### Finding the order of the AutoRegressive term *p*\n\nAs we have discussed previously, we can look at the PACF plot to determine the lag for our AR terms. Partial autocorrelation can be imagined as the correlation between the series and its lag, after excluding the contributions from the intermediate lags. So, PACF sort of conveys the pure correlation between a lag and the series. That way, we will know if that lag is needed in the AR term or not.\n\n# PACF plot of 1st differenced series\nplt.rcParams.update({'figure.figsize':(15,2.5), 'figure.dpi':120})\n\nfig, axes = plt.subplots(1, 2, sharex=True)\naxes[0].plot(df.value.diff()); axes[0].set_title('1st Differencing')\naxes[1].set(ylim=(0,5))\nplot_pacf(df.value.diff().dropna(), ax=axes[1])\n\nplt.show()\n\nImmediately, we can observe that our PACF returns sigificance at Lag 1 and Lag 2, meaning it crosses the significance limit (blue region). We can also observe significance at higher order terms but note that given the amount of lag that we are testing, it is statistically probable to see random spikes in our PACF and ACF plots. *Although this can also be attributed to Seasonality which will be tackled separately.*\n\nWith this, we can now decide to use $p = 2$ for our ARIMA model.\n\n#### Finding the order of the Moving Average term *q*\n\nSimillar to how we determined $p$, we will now look at the ACF to determine the $q$ terms to be considered for our MA. 
The ACF tells how many MA terms are required to remove any autocorrelation in the stationary series.\n\n# ACF plot of 1st differenced series\nplt.rcParams.update({'figure.figsize':(15,4), 'figure.dpi':120})\n\nfig, axes = plt.subplots(1, 2, sharex=True)\naxes[0].plot(df.value.diff()); axes[0].set_title('1st Differencing')\naxes[1].set(ylim=(0,1))\nplot_acf(df.value.diff().dropna(), ax=axes[1])\n\nplt.show()\n\nOur results for the ACF are not as apparent compared to our PACF. We can observe several ACF terms that are above our significance level. This may be attributed to the fact that our model has weak stationarity. This may also be caused by the fact that our time series is not perfectly MA and is an ARIMA model. For now, let's consider $q = 3$.\n\n## Building the ARIMA model\nNow that we’ve determined the values of p, d and q, we have everything needed to fit the ARIMA model. Let's implement using this dataset first before we move on to a deeper look at the implementation of ARIMA in the next section. Let’s use the ARIMA() implementation in statsmodels package. As computed, we will use ARIMA(2,1,3):\n\nmodel = ARIMA(df.value, order=(2,1,3))\nmodel_fit = model.fit(disp=0)\nprint(model_fit.summary())\n\nThere's quite a bit of information to unpack from the summary. Generally, we are interested in the Akaike’s Information Criterion (AIC), coefficients of our AR and MA terms (coef_), and the p-values of the terms (P>|z|). We need the p-values to be less than 0.05 to be significant, which means that our model failed to reach significance for the AR and MA terms. Let's try to be conservative and use small values for p and d, i.e. ARIMA(1,1,1), as given by the p-values of AR1 and MA1 in our results.\n\nmodel = ARIMA(df.value, order=(1,1,1))\nmodel_fit = model.fit(disp=0)\nprint(model_fit.summary())\n\nImmediately, we can see that the p-values are now <<0.05 for both the AR and MA terms and are highly significant.
We will now check the residuals of our time-series to ensure that we will not see patterns and will have a constant mean and variance.\n\n# Plot residual errors\nresiduals = pd.DataFrame(model_fit.resid)\nfig, ax = plt.subplots(1,2, figsize=(15,2.5))\nresiduals.plot(title=\"Residuals\", ax=ax[0])\nresiduals.plot(kind='kde', title='Density', ax=ax[1])\nplt.show()\n\nThe residuals is a good final check for our ARIMA models. Ideally, the residual errors should be a Gaussian with a zero mean and uniform variance. With this, we can now proceed with fitting our initial time series with our model.\n\n# Actual vs Fitted\nfig, ax = plt.subplots(figsize=(15,2))\nax = df.plot(ax=ax)\nfig = model_fit.plot_predict(85, 100, dynamic=False, ax=ax, plot_insample=False)\nplt.show()\n\nWhen we set dynamic=False the in-sample lagged values are used for prediction. That is, the model gets trained up until the previous value to make the next prediction. We can also call this as a **walk-forward** or **one-step ahead** prediction.\n\nHowever, we should note two things:\n1. We used the entire time series to train our model\n2. Some of the use-cases that we will be tackling will require us to forecast t-steps ahead\n\nWe will be solving the first point in the next section by tackling real-world datasets. The second point can be solved immediately using dynamic=True argument shown below.\n\n# Actual vs Fitted\nfig, ax = plt.subplots(figsize=(15,2))\nax = df.plot(ax=ax)\nfig = model_fit.plot_predict(90, 110, dynamic=True, ax=ax, plot_insample=False)\nplt.show()\n\nNotice that our confidence interval is now increasing as we go farther our given data set since we will be compounding errors given by out forecasted values. Also note that generally, our ARIMA model captured the direction of the trend of our time series but is consistently lower than the actual values. We can correct this by varying the parameters of our ARIMA models and validating using performance metrics. 
\n\n## Implementation and Forecasting using ARIMA\n\n\nGiven the steps discussed in the first three sections of this notebook, we can now create a framework of how we can approach ARIMA models in general. We can use a modified version of the Hyndman-Khandakar algorithm (Hyndman & Khandakar, 2008), which combines unit root tests, minimisation of the AIC and MLE to obtain an ARIMA model. The steps are outlined below: \n\n1. The differencing term d is determined using repeated ADF tests.\n2. The values of p and q are chosen based on the AIC, ACF, and PACF of our differenced time series.\n3. Use a step-wise traversal of our parameter space (+-1) for p, d, and q to find a lower AIC. *We can use insights and heuristics to observe our ACF and PACF to determine if we need to add or reduce our p, d, and q.*\n4. Check the residuals for Gaussian Distribution to establish stationarity.\n\nOr in our case, we will instead use a grid-search algorithm to find automatically configure our ARIMA and find our hyperparameters. We will search values of p, d, and q for combinations (skipping those that fail to converge), and find the combination that results in the best performance on the test set. We use grid search to explore all combinations in a subset of integer values.\n\nSpecifically, we will search all combinations of the following parameters:\n\np: 0 to 4. d: 0 to 3. q: 0 to 4. This is (4 * 3 * 4), or 48, potential runs of the test harness and will take some time to execute.\n\nThe complete worked example with the grid search version of the" | |
False means that AzureML will create a python environment based on the Conda dependencies specification.
"""
return pulumi.get(self, "user_managed_dependencies")
@pulumi.output_type
class ModelEnvironmentDefinitionResponseResponseR(dict):
"""
Settings for a R environment.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "bioConductorPackages":
suggest = "bio_conductor_packages"
elif key == "cranPackages":
suggest = "cran_packages"
elif key == "customUrlPackages":
suggest = "custom_url_packages"
elif key == "gitHubPackages":
suggest = "git_hub_packages"
elif key == "rVersion":
suggest = "r_version"
elif key == "rscriptPath":
suggest = "rscript_path"
elif key == "snapshotDate":
suggest = "snapshot_date"
elif key == "userManaged":
suggest = "user_managed"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ModelEnvironmentDefinitionResponseResponseR. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access, then delegate to the underlying dict lookup.
        ModelEnvironmentDefinitionResponseResponseR.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        # Warn on camelCase access, then delegate to dict.get with the default.
        ModelEnvironmentDefinitionResponseResponseR.__key_warning(key)
        return super().get(key, default)
def __init__(__self__, *,
bio_conductor_packages: Optional[Sequence[str]] = None,
cran_packages: Optional[Sequence['outputs.RCranPackageResponse']] = None,
custom_url_packages: Optional[Sequence[str]] = None,
git_hub_packages: Optional[Sequence['outputs.RGitHubPackageResponseResponse']] = None,
r_version: Optional[str] = None,
rscript_path: Optional[str] = None,
snapshot_date: Optional[str] = None,
user_managed: Optional[bool] = None):
"""
Settings for a R environment.
:param Sequence[str] bio_conductor_packages: The packages from Bioconductor.
:param Sequence['RCranPackageResponse'] cran_packages: The CRAN packages to use.
:param Sequence[str] custom_url_packages: The packages from custom urls.
:param Sequence['RGitHubPackageResponseResponse'] git_hub_packages: The packages directly from GitHub.
:param str r_version: The version of R to be installed
:param str rscript_path: The Rscript path to use if an environment build is not required.
The path specified gets used to call the user script.
:param str snapshot_date: Date of MRAN snapshot to use in YYYY-MM-DD format, e.g. "2019-04-17"
:param bool user_managed: Indicates whether the environment is managed by user or by AzureML.
"""
if bio_conductor_packages is not None:
pulumi.set(__self__, "bio_conductor_packages", bio_conductor_packages)
if cran_packages is not None:
pulumi.set(__self__, "cran_packages", cran_packages)
if custom_url_packages is not None:
pulumi.set(__self__, "custom_url_packages", custom_url_packages)
if git_hub_packages is not None:
pulumi.set(__self__, "git_hub_packages", git_hub_packages)
if r_version is not None:
pulumi.set(__self__, "r_version", r_version)
if rscript_path is not None:
pulumi.set(__self__, "rscript_path", rscript_path)
if snapshot_date is not None:
pulumi.set(__self__, "snapshot_date", snapshot_date)
if user_managed is not None:
pulumi.set(__self__, "user_managed", user_managed)
@property
@pulumi.getter(name="bioConductorPackages")
def bio_conductor_packages(self) -> Optional[Sequence[str]]:
"""
The packages from Bioconductor.
"""
return pulumi.get(self, "bio_conductor_packages")
@property
@pulumi.getter(name="cranPackages")
def cran_packages(self) -> Optional[Sequence['outputs.RCranPackageResponse']]:
"""
The CRAN packages to use.
"""
return pulumi.get(self, "cran_packages")
@property
@pulumi.getter(name="customUrlPackages")
def custom_url_packages(self) -> Optional[Sequence[str]]:
"""
The packages from custom urls.
"""
return pulumi.get(self, "custom_url_packages")
@property
@pulumi.getter(name="gitHubPackages")
def git_hub_packages(self) -> Optional[Sequence['outputs.RGitHubPackageResponseResponse']]:
"""
The packages directly from GitHub.
"""
return pulumi.get(self, "git_hub_packages")
@property
@pulumi.getter(name="rVersion")
def r_version(self) -> Optional[str]:
"""
The version of R to be installed
"""
return pulumi.get(self, "r_version")
@property
@pulumi.getter(name="rscriptPath")
def rscript_path(self) -> Optional[str]:
"""
The Rscript path to use if an environment build is not required.
The path specified gets used to call the user script.
"""
return pulumi.get(self, "rscript_path")
@property
@pulumi.getter(name="snapshotDate")
def snapshot_date(self) -> Optional[str]:
"""
Date of MRAN snapshot to use in YYYY-MM-DD format, e.g. "2019-04-17"
"""
return pulumi.get(self, "snapshot_date")
@property
@pulumi.getter(name="userManaged")
def user_managed(self) -> Optional[bool]:
"""
Indicates whether the environment is managed by user or by AzureML.
"""
return pulumi.get(self, "user_managed")
@pulumi.output_type
class ModelEnvironmentDefinitionResponseResponseSpark(dict):
    """
    The configuration for a Spark environment.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate the camelCase wire key to its snake_case property name.
        suggest = {"precachePackages": "precache_packages"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ModelEnvironmentDefinitionResponseResponseSpark. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access, then defer to plain dict lookup.
        ModelEnvironmentDefinitionResponseResponseSpark.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn on camelCase access, then defer to plain dict lookup.
        ModelEnvironmentDefinitionResponseResponseSpark.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 packages: Optional[Sequence['outputs.SparkMavenPackageResponse']] = None,
                 precache_packages: Optional[bool] = None,
                 repositories: Optional[Sequence[str]] = None):
        """
        The configuration for a Spark environment.
        :param Sequence['SparkMavenPackageResponse'] packages: The Spark packages to use.
        :param bool precache_packages: Whether to precache the packages.
        :param Sequence[str] repositories: The list of spark repositories.
        """
        # Only record the fields that were actually supplied.
        for attr_name, attr_value in (
                ("packages", packages),
                ("precache_packages", precache_packages),
                ("repositories", repositories)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def packages(self) -> Optional[Sequence['outputs.SparkMavenPackageResponse']]:
        """
        The Spark packages to use.
        """
        return pulumi.get(self, "packages")

    @property
    @pulumi.getter(name="precachePackages")
    def precache_packages(self) -> Optional[bool]:
        """
        Whether to precache the packages.
        """
        return pulumi.get(self, "precache_packages")

    @property
    @pulumi.getter
    def repositories(self) -> Optional[Sequence[str]]:
        """
        The list of spark repositories.
        """
        return pulumi.get(self, "repositories")
@pulumi.output_type
class ModelResponse(dict):
"""
An Azure Machine Learning Model.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "mimeType":
suggest = "mime_type"
elif key == "createdTime":
suggest = "created_time"
elif key == "derivedModelIds":
suggest = "derived_model_ids"
elif key == "experimentName":
suggest = "experiment_name"
elif key == "frameworkVersion":
suggest = "framework_version"
elif key == "kvTags":
suggest = "kv_tags"
elif key == "modifiedTime":
suggest = "modified_time"
elif key == "parentModelId":
suggest = "parent_model_id"
elif key == "resourceRequirements":
suggest = "resource_requirements"
elif key == "runId":
suggest = "run_id"
elif key == "sampleInputData":
suggest = "sample_input_data"
elif key == "sampleOutputData":
suggest = "sample_output_data"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ModelResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Warn if a camelCase wire key is used instead of the snake_case
        # property, then fall back to plain dict lookup.
        ModelResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        # Warn if a camelCase wire key is used instead of the snake_case
        # property, then fall back to plain dict lookup with a default.
        ModelResponse.__key_warning(key)
        return super().get(key, default)
def __init__(__self__, *,
mime_type: str,
name: str,
url: str,
created_time: Optional[str] = None,
datasets: Optional[Sequence['outputs.DatasetReferenceResponse']] = None,
derived_model_ids: Optional[Sequence[str]] = None,
description: Optional[str] = None,
experiment_name: Optional[str] = None,
framework: Optional[str] = None,
framework_version: Optional[str] = None,
id: Optional[str] = None,
kv_tags: Optional[Mapping[str, str]] = None,
modified_time: Optional[str] = None,
parent_model_id: Optional[str] = None,
properties: Optional[Mapping[str, str]] = None,
resource_requirements: Optional['outputs.ContainerResourceRequirementsResponse'] = None,
run_id: Optional[str] = None,
sample_input_data: Optional[str] = None,
sample_output_data: Optional[str] = None,
unpack: Optional[bool] = None,
version: Optional[float] = None):
"""
An Azure Machine Learning Model.
:param str mime_type: The MIME type of Model content. For more details about MIME type, please open https://www.iana.org/assignments/media-types/media-types.xhtml
:param str name: The Model name.
:param str url: The URL of the Model. Usually a SAS URL.
:param str created_time: The Model creation time (UTC).
:param Sequence['DatasetReferenceResponse'] datasets: The list of datasets associated with the model.
:param Sequence[str] derived_model_ids: Models derived from this model
:param str description: The Model description text.
:param str experiment_name: The name of the experiment where this model was created.
:param str framework: The Model framework.
:param str framework_version: The Model framework version.
:param str id: The Model Id.
:param Mapping[str, str] kv_tags: The Model tag dictionary. Items are mutable.
:param str modified_time: The Model last modified time (UTC).
:param str parent_model_id: The Parent Model Id.
:param Mapping[str, str] properties: The Model property dictionary. Properties are immutable.
:param 'ContainerResourceRequirementsResponse' resource_requirements: Resource requirements for the model
:param str run_id: The RunId that created this model.
:param str sample_input_data: Sample Input Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId}
:param str sample_output_data: Sample Output Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId}
:param bool unpack: Indicates whether we need to unpack the Model during docker Image creation.
:param float version: The Model version assigned by Model Management Service.
"""
pulumi.set(__self__, "mime_type", mime_type)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "url", url)
if created_time is not None:
pulumi.set(__self__, "created_time", created_time)
if datasets is not None:
pulumi.set(__self__, "datasets", datasets)
if derived_model_ids is not None:
pulumi.set(__self__, "derived_model_ids", derived_model_ids)
if description is not None:
pulumi.set(__self__, "description", description)
if experiment_name is not None:
pulumi.set(__self__, "experiment_name", experiment_name)
if framework is not None:
pulumi.set(__self__, "framework", framework)
if framework_version is not None:
pulumi.set(__self__, "framework_version", framework_version)
if id is not None:
pulumi.set(__self__, "id", id)
if kv_tags is not None:
pulumi.set(__self__, "kv_tags", kv_tags)
if modified_time is not None:
pulumi.set(__self__, "modified_time", modified_time)
if parent_model_id is not None:
pulumi.set(__self__, "parent_model_id", parent_model_id)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if resource_requirements is not None:
pulumi.set(__self__, "resource_requirements", resource_requirements)
if run_id is not None:
pulumi.set(__self__, "run_id", run_id)
if sample_input_data is not None:
pulumi.set(__self__, "sample_input_data", sample_input_data)
if sample_output_data is not None:
pulumi.set(__self__, "sample_output_data", sample_output_data)
if unpack is not None:
pulumi.set(__self__, "unpack", unpack)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="mimeType")
def mime_type(self) -> str:
| |
if fastq_gzipped:
cmd = [['gzip', '-d', '-c', os.path.basename(fastq_path)]]
else:
cmd = [['cat', os.path.basename(fastq_path)]]
cmd.append(['split', '-l', str(chunk_lines),
'--filter=pigz -p {} > $FILE.fq.gz'.format(max(1, int(context.config.fq_split_cores) - 1)),
'-', '{}-chunk.'.format(fastq_name)])
context.runner.call(job, cmd, work_dir = work_dir, tool_name='pigz')
fastq_chunk_ids = []
for chunk_name in sorted(os.listdir(work_dir)):
if chunk_name.endswith('.fq.gz') and chunk_name.startswith('{}-chunk'.format(fastq_name)):
fastq_chunk_ids.append(context.write_intermediate_file(job, os.path.join(work_dir, chunk_name)))
end_time = timeit.default_timer()
run_time = end_time - start_time
RealtimeLogger.info("Split fastq into {} chunks. Process took {} seconds.".format(len(fastq_chunk_ids), run_time))
return fastq_chunk_ids
def run_split_gam_reads(job, context, gam_input_reads, gam_reads_file_id):
    """ split up an input reads file in GAM format

    Downloads the GAM from the job file store, shards it with 'vg chunk'
    into chunks of context.config.reads_per_chunk reads (rounded up to an
    even count so paired interleaved reads stay together), and writes each
    chunk back as an intermediate file.

    Returns the list of chunk file IDs in sorted chunk-name order, so the
    ordering is deterministic and consistent with run_split_fastq and
    run_split_bam_reads.
    """
    RealtimeLogger.info("Starting gam split")
    start_time = timeit.default_timer()
    # Define work directory for docker calls
    work_dir = job.fileStore.getLocalTempDir()
    # We need the sample gam for alignment
    gam_path = os.path.join(work_dir, os.path.basename(gam_input_reads))
    job.fileStore.readGlobalFile(gam_reads_file_id, gam_path)
    # Split up the gam into chunks
    # Make sure chunk size even in case paired interleaved
    chunk_size = context.config.reads_per_chunk
    if chunk_size % 2 != 0:
        chunk_size += 1
    cmd = ['vg', 'chunk', '-a', os.path.basename(gam_path), '--gam-split-size', str(chunk_size),
           '--prefix', 'gam_reads_chunk']
    context.runner.call(job, cmd, work_dir = work_dir)
    gam_chunk_ids = []
    # Sort the directory listing so chunk IDs are returned in a
    # deterministic order (os.listdir order is arbitrary); the fastq and
    # bam splitters already do this.
    for chunk_name in sorted(os.listdir(work_dir)):
        if chunk_name.endswith('.gam') and chunk_name.startswith('gam_reads_chunk'):
            gam_chunk_ids.append(context.write_intermediate_file(job, os.path.join(work_dir, chunk_name)))
    end_time = timeit.default_timer()
    run_time = end_time - start_time
    RealtimeLogger.info("Split gam into {} chunks. Process took {} seconds.".format(len(gam_chunk_ids), run_time))
    return gam_chunk_ids
def run_split_bam_reads(job, context, bam_input_reads, bam_reads_file_id):
    """ split up an input reads file in BAM format

    Downloads the BAM from the job file store, shards it into chunks of an
    even number of reads (so interleaved pairs are never separated), writes
    each chunk back as an intermediate file, and returns the chunk file IDs
    in sorted chunk-name order.
    """
    RealtimeLogger.info("Starting bam split")
    start_time = timeit.default_timer()
    # Scratch directory that is also used for docker calls.
    work_dir = job.fileStore.getLocalTempDir()
    # Fetch the reads we need to split.
    bam_path = os.path.join(work_dir, os.path.basename(bam_input_reads))
    job.fileStore.readGlobalFile(bam_reads_file_id, bam_path)
    # Round the chunk size up to an even number in case the reads are
    # paired and interleaved.
    chunk_size = context.config.reads_per_chunk
    chunk_size += chunk_size % 2
    # Each read occupies exactly one line of samtools text output.
    chunk_lines = chunk_size
    view_threads = max(1, int(context.config.fq_split_cores) - 1)
    header_source = os.path.basename(bam_path)
    # Each shard re-attaches the original header and is compressed back to
    # BAM by the split --filter command.
    filter_arg = (f"--filter=bash -c 'cat <(samtools view -H {header_source}) <(cat -)"
                  f" | samtools view -O BAM --threads {view_threads} -"
                  " > $FILE.bam'")
    # Stream the BAM as SAM text and shard it with split.
    cmd = [['samtools', 'view', os.path.basename(bam_path)],
           ['split', '-l', str(chunk_lines), filter_arg, '-', 'bam_reads_chunk.']]
    context.runner.call(job, cmd, work_dir = work_dir)
    # Collect the shards in deterministic (sorted) order.
    bam_chunk_ids = [context.write_intermediate_file(job, os.path.join(work_dir, chunk_name))
                     for chunk_name in sorted(os.listdir(work_dir))
                     if chunk_name.startswith('bam_reads_chunk') and chunk_name.endswith('.bam')]
    run_time = timeit.default_timer() - start_time
    RealtimeLogger.info("Split bam into {} chunks. Process took {} seconds.".format(len(bam_chunk_ids), run_time))
    return bam_chunk_ids
def run_whole_alignment(job, context, fastq, gam_input_reads, bam_input_reads, sample_name, interleaved, mapper,
                        indexes, reads_chunk_ids,
                        bam_output=False, surject=False, gbwt_penalty=None, validate=False):
    """
    align all fastq chunks in parallel

    Takes a dict from index type to index file ID. Some indexes are extra and
    specifying them will change mapping behavior.

    Returns a list of per-contig GAMs, the total alignment runtime, and a list
    of per-contig BAM file IDs (which is only nonempty when surject is true).

    Note: the values returned are Toil promises (.rv()), resolved when the
    child jobs scheduled here finish.
    """
    # this will be a list of lists.
    # gam_chunk_file_ids[i][j], will correspond to the jth path (from id_ranges)
    # for the ith gam chunk (generated from fastq shard i)
    gam_chunk_file_ids = []
    # One runtime promise per chunk alignment job.
    gam_chunk_running_times = []
    # depending on bam_output and surject options, we can make bam_output too
    bam_chunk_file_ids = []
    # to encapsulate everything under this job
    child_job = Job()
    job.addChild(child_job)
    # zip(*reads_chunk_ids) pairs up the i-th chunk from each input file
    # (e.g. both ends of paired fastqs) into one alignment job's inputs.
    for chunk_id, chunk_filename_ids in enumerate(zip(*reads_chunk_ids)):
        #Run graph alignment on each fastq chunk
        chunk_alignment_job = child_job.addChildJobFn(run_chunk_alignment, context, gam_input_reads, bam_input_reads,
                                                      sample_name,
                                                      interleaved, mapper, chunk_filename_ids, chunk_id,
                                                      indexes,
                                                      bam_output=bam_output,
                                                      gbwt_penalty=gbwt_penalty,
                                                      validate=validate,
                                                      cores=context.config.alignment_cores, memory=context.config.alignment_mem,
                                                      disk=context.config.alignment_disk)
        # rv(0) is the chunk's alignment output (GAM or BAM depending on
        # bam_output); rv(1) is its runtime.
        if not bam_output:
            gam_chunk_file_ids.append(chunk_alignment_job.rv(0))
        else:
            bam_chunk_file_ids.append(chunk_alignment_job.rv(0))
        gam_chunk_running_times.append(chunk_alignment_job.rv(1))
    if not bam_output:
        # Merge the per-chunk GAMs into per-contig GAMs after all
        # alignment children complete.
        merge_gams_job = child_job.addFollowOnJobFn(run_merge_gams, context, sample_name, indexes.get('id_ranges'), gam_chunk_file_ids,
                                                    gam_chunk_running_times,
                                                    cores=context.config.misc_cores,
                                                    memory=context.config.misc_mem, disk=context.config.misc_disk)
        gam_chrom_ids = merge_gams_job.rv(0)
        gam_chunk_time = merge_gams_job.rv(1)
        bam_chrom_ids = []
    else:
        gam_chrom_ids = []
        gam_chunk_time = None
        merge_bams_job = child_job.addFollowOnJobFn(run_merge_bams, context, sample_name, bam_chunk_file_ids)
        bam_chrom_ids = [merge_bams_job.rv()]
    if surject:
        # Treat paired (two-fastq) input as interleaved for surjection.
        interleaved_surject = interleaved or (fastq and len(fastq) == 2)
        # Reshape the promised chunk lists into the layout run_whole_surject expects.
        zip_job = child_job.addFollowOnJobFn(run_zip_surject_input, context, gam_chunk_file_ids)
        # Prefer a dedicated surjection xg index when one was provided.
        xg_id = indexes['xg-surject'] if 'xg-surject' in indexes else indexes['xg']
        bam_chrom_ids = [zip_job.addFollowOnJobFn(run_whole_surject, context, zip_job.rv(), sample_name + '-surject',
                                                  interleaved_surject, xg_id, []).rv()]
    return gam_chrom_ids, gam_chunk_time, bam_chrom_ids
def run_zip_surject_input(job, context, gam_chunk_file_ids):
    """
    run_whole_surject takes input in different format than what we have above, so we shuffle the
    promised lists around here to avoid a (probably-needed) refactor of the existing interface

    Transposes the list-of-lists: element [i][j] of the input becomes
    element [j][i] of the output (as a list of tuples).
    """
    return [tuple(column) for column in zip(*gam_chunk_file_ids)]
def run_chunk_alignment(job, context, gam_input_reads, bam_input_reads, sample_name, interleaved, mapper,
chunk_filename_ids, chunk_id, indexes,
bam_output=False, gbwt_penalty=None, always_check_population=True, validate=False):
"""
Align a chunk of reads.
Takes a dict from index type to index file ID. Some indexes are extra and
specifying them will change mapping behavior.
"""
RealtimeLogger.info("Starting {} alignment on {} chunk {}".format(mapper, sample_name, chunk_id))
# How long did the alignment take to run, in seconds?
run_time = None
# Define work directory for docker calls
work_dir = job.fileStore.getLocalTempDir()
# Download local input files from the remote storage container
graph_file = os.path.join(work_dir, "graph.vg")
# Work out what index files we need
index_files = {}
index_files['xg'] = graph_file + ".xg"
if mapper == 'map' or mapper == 'mpmap':
index_files['gcsa'] = graph_file + ".gcsa"
index_files['lcp'] = index_files['gcsa'] + ".lcp"
if 'gbwt' in indexes:
# We have a GBWT haplotype index available.
index_files['gbwt'] = graph_file + ".gbwt"
if mapper == 'mpmap':
if 'snarls' in indexes:
# mpmap knows how to use the snarls, and we have them, so we should use them
# Note that passing them will affect mapping, if using multiple
# tracebacks. Since we only run single path mode, if multiple
# tracebacks aren't used, mpmap will ignore the snarls.
index_files['snarls'] = graph_file + ".snarls"
if mapper == 'gaffe':
index_files['minimizer'] = graph_file + ".min"
index_files['distance'] = graph_file + ".dist"
index_files['gbwt'] = graph_file + ".gbwt"
for index_type in list(index_files.keys()):
# Download each index file
job.fileStore.readGlobalFile(indexes[index_type], index_files[index_type])
# We need the sample reads (fastq(s) or gam) for alignment
reads_files = []
reads_ext = 'gam' if gam_input_reads else 'bam' if bam_input_reads else 'fq.gz'
for j, chunk_filename_id in enumerate(chunk_filename_ids):
reads_file = os.path.join(work_dir, 'reads_chunk_{}_{}.{}'.format(chunk_id, j, reads_ext))
job.fileStore.readGlobalFile(chunk_filename_id, reads_file)
reads_files.append(reads_file)
# And a temp file for our aligner output
output_file = os.path.join(work_dir, "{}_{}.gam".format(sample_name, chunk_id))
# Open the file stream for writing
with open(output_file, 'wb') as alignment_file:
# Start the aligner and have it write to the file
# Plan out what to run
vg_parts = []
if mapper == 'mpmap':
vg_parts += ['vg', 'mpmap']
vg_parts += context.config.mpmap_opts
if '-S' not in vg_parts and '--single-path-mode' not in vg_parts:
RealtimeLogger.warning('Adding --single-path-mode to mpmap options as only GAM output supported')
vg_parts += ['--single-path-mode']
elif mapper == 'map':
vg_parts += ['vg', 'map']
vg_parts += context.config.map_opts
elif mapper == 'gaffe':
vg_parts += ['vg', 'gaffe']
vg_parts += context.config.gaffe_opts
else:
raise RuntimeError('Unimplemented mapper "{}"'.format(mapper))
for reads_file in reads_files:
input_flag = '-G' if gam_input_reads else '-b' if bam_input_reads else '-f'
vg_parts += [input_flag, os.path.basename(reads_file)]
vg_parts += ['-t', str(context.config.alignment_cores)]
# Override the -i flag in args with the --interleaved command-line flag
if interleaved is True and '-i' not in vg_parts and '--interleaved' not in vg_parts:
vg_parts += ['-i']
elif interleaved is False and 'i' in vg_parts:
del vg_parts[vg_parts.index('-i')]
if interleaved is False and '--interleaved' in vg_parts:
del vg_parts[vg_parts.index('--interleaved')]
# Override the --surject-to option
if bam_output is True and '--surject-to' not in vg_parts:
vg_parts += ['--surject-to', 'bam']
elif bam_output is False and '--surject-to' in vg_parts:
sidx = vg_parts.index('--surject-to')
del vg_parts[sidx]
del vg_parts[sidx]
# Turn indexes into options
type_to_option = {
'gbwt': '--gbwt-name',
'xg': '-x',
'gcsa': '-g',
'lcp': None,
'distance': '-d',
'minimizer': '-m',
'snarls': '--snarls'
}
for index_type, index_file in list(index_files.items()):
if type_to_option[index_type] is not None:
vg_parts += [type_to_option[index_type], os.path.basename(index_file)]
if 'gbwt' in index_files:
# We may have a GBWT recombination rate/penalty override
if gbwt_penalty is not None:
# We have a recombination penalty value to apply
if '--recombination-penalty' in vg_parts:
# Make sure to strip out the penalty if it is in args already
sidx = vg_parts.index('--recombination-penalty')
del vg_parts[sidx]
del vg_parts[sidx]
# Both map and mpmap take this option
vg_parts += ['--recombination-penalty', str(gbwt_penalty)]
if mapper == 'mpmap' and always_check_population:
# Always try to population-score even unambiguous reads
# mpmap can do this
vg_parts += ['--always-check-population']
RealtimeLogger.info(
"Running VG for {} against {}: {}".format(sample_name, graph_file,
" ".join(vg_parts)))
# Mark when we | |
<gh_stars>1-10
"""The classes used to represent various information about persons on IMDb.
This will contain classes for both information gathered from the datasets provided by IMDb
and information scraped from IMDb web pages. Class names ending with "`Scrape`" are scraped
from the web pages. Otherwise, they are gathered from the datasets.
"""
from ..utils import (
is_float,
is_int,
to_datetime
)
from functools import total_ordering
# Credit job title keys
# Canonical job-title strings used throughout this module to categorize a
# person's credit on a title.
ACTOR = 'actor'
DIRECTOR = 'director'
PRODUCER = 'producer'
WRITER = 'writer'
MUSIC = 'music'
CINEMATOGRAPHY = 'cinematography'
FILM_EDITING = 'film editing'
CASTING = 'casting'
PRODUCTION_DESIGN = 'production design'
ART_DIRECTOR = 'art director'
SET_DECORATION = 'set decoration'
COSTUME_DESIGN = 'costume design'
MAKEUP_DEPARTMENT = 'makeup department'
PRODUCTION_MANAGER = 'production manager'
ASSISTANT_DIRECTOR = 'assistant director'
ART_DEPARTMENT = 'art department'
SOUND_DEPARTMENT = 'sound department'
SPECIAL_EFFECTS = 'special effects'
VISUAL_EFFECTS = 'visual effects'
STUNTS = 'stunts'
CAMERA_AND_ELECTRICAL_DEPARTMENT = 'camera and electrical department'
ANIMATION_DEPARTMENT = 'animation department'
CASTING_DEPARTMENT = 'casting department'
COSTUME_AND_WARDROBE_DEPARTMENT = 'costume and wardrobe department'
EDITORIAL_DEPARTMENT = 'editorial department'
LOCATION_MANAGEMENT = 'location management'
MUSIC_DEPARTMENT = 'music department'
SCRIPT_DEPARTMENT = 'script department'
TRANSPORTATION_DEPARTMENT = 'transportation department'
OTHER_CREW = 'other crew'
ADDITIONAL_CREW = 'additional crew'
THANKS = 'thanks'
# Credit title mappings
# Maps credit-section headings (presumably as they appear on IMDb's
# fullcredits pages -- verify against the scraper) to the canonical
# job-title keys above.
_CREDIT_MAPPINGS = {
    'directed by': DIRECTOR,
    'produced by': PRODUCER,
    'writing credits': WRITER,
    'music by': MUSIC,
    'cinematography by': CINEMATOGRAPHY,
    'film editing by': FILM_EDITING,
    'casting by': CASTING,
    'production design by': PRODUCTION_DESIGN,
    'art direction by': ART_DIRECTOR,
    'set decoration by': SET_DECORATION,
    'costume design by': COSTUME_DESIGN,
    'makeup department': MAKEUP_DEPARTMENT,
    'production management': PRODUCTION_MANAGER,
    'second unit director or assistant director': ASSISTANT_DIRECTOR,
    'art department': ART_DEPARTMENT,
    'sound department': SOUND_DEPARTMENT,
    'special effects by': SPECIAL_EFFECTS,
    'visual effects by': VISUAL_EFFECTS,
    'stunts': STUNTS,
    'camera and electrical department': CAMERA_AND_ELECTRICAL_DEPARTMENT,
    'animation department': ANIMATION_DEPARTMENT,
    'casting department': CASTING_DEPARTMENT,
    'costume and wardrobe department': COSTUME_AND_WARDROBE_DEPARTMENT,
    'editorial department': EDITORIAL_DEPARTMENT,
    'location management': LOCATION_MANAGEMENT,
    'music department': MUSIC_DEPARTMENT,
    'script and continuity department': SCRIPT_DEPARTMENT,
    'transportation department': TRANSPORTATION_DEPARTMENT,
    'other crew': OTHER_CREW,
    'additional crew': ADDITIONAL_CREW,
    'thanks': THANKS,
}
class NameBasics:
    """Class to store the row information from IMDb's "`name.basics.tsv`" dataset.

    Args:
        name_id (:obj:`str`): The person's ID used by IMDb prefixed with `nm`.
        primary_name(:obj:`str`): The person's name.
        birth_year (:obj:`int`): The person's birth year.
        death_year (:obj:`int`): The person's death year, or `None` otherwise.
        primary_professions (:obj:`list` of :obj:`str`): A list of all the person's primary professions.
        known_for_titles (:obj:`list` of :obj:`str`): A list of title IDs for each title the person is known for.
    """

    __slots__ = '_name_id', '_primary_name', '_birth_year', '_death_year', '_primary_professions', '_known_for_titles'

    def __init__(self, name_id, primary_name, birth_year, death_year, primary_professions, known_for_titles):
        self._name_id = name_id
        self._primary_name = primary_name
        # Start from safe defaults; the property setters below validate and
        # convert the raw values.
        self._birth_year = None
        self._death_year = None
        self._primary_professions = []
        self._known_for_titles = []
        self.birth_year = birth_year
        self.death_year = death_year
        self.primary_professions = primary_professions
        self.known_for_titles = known_for_titles

    @property
    def name_id(self):
        return self._name_id

    @property
    def primary_name(self):
        return self._primary_name

    @property
    def birth_year(self):
        return self._birth_year

    @birth_year.setter
    def birth_year(self, raw):
        # Silently ignore values that are not parseable as integers.
        if is_int(raw):
            self._birth_year = int(raw)

    @property
    def death_year(self):
        return self._death_year

    @death_year.setter
    def death_year(self, raw):
        # Silently ignore values that are not parseable as integers.
        if is_int(raw):
            self._death_year = int(raw)

    @property
    def primary_professions(self):
        return self._primary_professions

    @primary_professions.setter
    def primary_professions(self, raw):
        # Keep the default empty list when no value was provided.
        if raw is not None:
            self._primary_professions = raw

    @property
    def known_for_titles(self):
        return self._known_for_titles

    @known_for_titles.setter
    def known_for_titles(self, raw):
        # Keep the default empty list when no value was provided.
        if raw is not None:
            self._known_for_titles = raw

    def __str__(self):
        born = '???' if self._birth_year is None else self._birth_year
        died = '' if self._death_year is None else self._death_year
        return f'{self._primary_name} ({self._name_id}): {born} - {died}'
@total_ordering
class CreditScrape:
    """Object to represent information for each person scraped from IMDb's `fullcredits` page for a title.

    This information is scraped from the `fullcredits` IMDb web page, and will either represent an actor or
    another crew member.

    Args:
        name_id (:obj:`str`): The person's ID used by IMDb prefixed with `nm`.
        title_id (:obj:`str`): The title's ID used by IMDb prefixed with `tt`.
        job_title (:obj:`str`): The job title the person is credited for on the title.
        credit (:obj:`str`): Further credits for the person on the title.
        episode_count (:obj:`int`): How many episodes the person is credited for if a
            TV series, otherwise `None`.
        episode_year_start (:obj:`int`): The year the person began being credited in the
            title if the title is a TV series, otherwise `None`.
        episode_year_end (:obj:`int`): The year the person stopped being credited in the
            title if the title is a TV series, otherwise `None`.
    """

    __slots__ = '_name_id', '_title_id', '_job_title', '_credit', '_episode_count', '_episode_year_start', '_episode_year_end'

    def __init__(self, name_id, title_id, job_title, credit, episode_count, episode_year_start, episode_year_end):
        self._name_id = name_id
        self._title_id = title_id
        self._job_title = job_title
        self._credit = credit
        # Episode fields start unset; the setters below convert valid integers.
        self._episode_count = None
        self._episode_year_start = None
        self._episode_year_end = None
        self.episode_count = episode_count
        self.episode_year_start = episode_year_start
        self.episode_year_end = episode_year_end

    @property
    def name_id(self):
        return self._name_id

    @property
    def title_id(self):
        return self._title_id

    @property
    def job_title(self):
        return self._job_title

    @property
    def credit(self):
        return self._credit

    @property
    def episode_count(self):
        return self._episode_count

    @episode_count.setter
    def episode_count(self, raw):
        # Silently ignore values that are not parseable as integers.
        if is_int(raw):
            self._episode_count = int(raw)

    @property
    def episode_year_start(self):
        return self._episode_year_start

    @episode_year_start.setter
    def episode_year_start(self, raw):
        if is_int(raw):
            self._episode_year_start = int(raw)

    @property
    def episode_year_end(self):
        return self._episode_year_end

    @episode_year_end.setter
    def episode_year_end(self, raw):
        if is_int(raw):
            self._episode_year_end = int(raw)

    def __eq__(self, other):
        mine = (self.name_id, self.title_id, self.job_title, self.credit,
                self.episode_count, self.episode_year_start, self.episode_year_end)
        theirs = (other.name_id, other.title_id, other.job_title, other.credit,
                  other.episode_count, other.episode_year_start, other.episode_year_end)
        return mine == theirs

    def __lt__(self, other):
        # total_ordering derives the remaining comparisons from __lt__/__eq__.
        mine = (self.name_id, self.title_id, self.job_title, self.credit,
                self.episode_count, self.episode_year_start, self.episode_year_end)
        theirs = (other.name_id, other.title_id, other.job_title, other.credit,
                  other.episode_count, other.episode_year_start, other.episode_year_end)
        return mine < theirs

    def __str__(self):
        text = f'{self.name_id}: {self.job_title} in {self.title_id} as {self.credit}'
        if self.episode_count is not None:
            text += f' in {self.episode_count} episodes'
        return text

    def __repr__(self):
        return str(self)
class NameScrape:
    """Specific information on a person scraped from IMDb.

    This information is taken from IMDb's `bio` web page on a person to find detailed information.

    Args:
        name_id (:obj:`str`): The person's ID used by IMDb prefixed with `nm`.
        display_name (:obj:`str`): The name IMDb lists the person having currently. Usually
            how they are well known or credited.
        known_for_titles (:obj:`list` of :obj:`str`): A list of title IDs for each title the person is known for.
        birth_name (:obj:`str`): The name IMDb lists the person born as.
        birth_date (:obj:`datetime`): The date the person was born.
        birth_city (:obj:`str`): The city the person was born in.
        death_date (:obj:`datetime`): The date the person died, or `None` otherwise.
        death_city (:obj:`str`): The city the person died in, or `None` otherwise.
        death_cause (:obj:`str`): The person's cause of death, or `None` otherwise.
        nicknames (:obj:`list` of :obj:`str`): All of the person's nicknames.
        height (:obj:`float`): How tall the person is in meters.
    """

    __slots__ = '_name_id', '_display_name', '_known_for_titles', '_birth_name', '_birth_date', '_birth_city', \
        '_death_date', '_death_city', '_death_cause', '_nicknames', '_height'

    def __init__(self, name_id, display_name, known_for_titles, birth_name, birth_date, birth_city,
                 death_date, death_city, death_cause, nicknames, height):
        # Plain attributes are stored as given.
        self._name_id = name_id
        self._display_name = display_name
        self._known_for_titles = known_for_titles
        self._birth_name = birth_name
        self._birth_city = birth_city
        self._death_city = death_city
        self._death_cause = death_cause
        self._nicknames = nicknames
        # Parsed attributes start unset and are converted by the setters.
        self._birth_date = None
        self._death_date = None
        self._height = None
        self.birth_date = birth_date
        self.death_date = death_date
        self.height = height

    @property
    def name_id(self):
        return self._name_id

    @property
    def display_name(self):
        return self._display_name

    @property
    def known_for_titles(self):
        return self._known_for_titles

    @property
    def birth_name(self):
        return self._birth_name

    @property
    def birth_date(self):
        return self._birth_date

    @birth_date.setter
    def birth_date(self, raw):
        self._birth_date = to_datetime(raw)

    @property
    def birth_city(self):
        return self._birth_city

    @property
    def death_date(self):
        return self._death_date

    @death_date.setter
    def death_date(self, raw):
        self._death_date = to_datetime(raw)

    @property
    def death_city(self):
        return self._death_city

    @property
    def death_cause(self):
        return self._death_cause

    @property
    def nicknames(self):
        return self._nicknames

    @property
    def height(self):
        return self._height

    @height.setter
    def height(self, raw):
        # Silently ignore values that are not parseable as floats.
        if is_float(raw):
            self._height = float(raw)

    def __str__(self):
        died = self.death_date if self.death_date is not None else ''
        return f'{self.display_name} [{self.name_id}] ({self.birth_date} - {died}): {self.height}m'
class NameCreditScrape:
"""Stores credit information from a person's `full filmography` on IMDb.
This information is taken from IMDb's `full filmography` section of a person's
personal web page.
Args:
name_id (:obj:`str`): The person's ID used by IMDb prefixed with `nm`.
title_id (:obj:`str`): The titles's ID used by IMDb prefixed with `tt`.
category (:obj:`str`): The category this credit is listed under in the filmography section.
start_year (:obj:`int`): The year the title released, or the starting year they were credited
for on a TV series.
end_year (:obj:`int`): The year the person stopped being credited on a TV series, or `None` otherwise.
role (:obj:`str`): A string of the role the person is credited for the title, such as character.
title_notes (:obj:`list` of :obj:`str`): A list of further notes for a person's credit on a title.
"""
__slots__ = '_name_id', '_title_id', '_category', '_start_year', '_end_year', '_role', '_title_notes'
def __init__(self, name_id, title_id, category, start_year, end_year, role, title_notes):
self._name_id = name_id
self._title_id = title_id
self._category = category
self._start_year = None
self._end_year = None
self._role = role
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.