input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
import copy,subprocess,os,tempfile,re
import uuid,time,glob,lixtools
import numpy as np
from io import StringIO
import pylab as plt
from dask.distributed import as_completed
def run(cmd, path="", ignoreErrors=True, returnError=False, debug=False):
    """Run an external command and return its decoded stdout.

    Parameters
    ----------
    cmd : list
        The command and its arguments, e.g. ["ls", "-lh"]. The list is
        not modified.
    path : str
        Prefix prepended to the executable name (directory containing the
        binary); this is for the cmd, not the same as cwd.
    ignoreErrors : bool
        If False, print stderr and raise when the command writes to stderr.
    returnError : bool
        If True, return a (stdout, stderr) tuple instead of stdout only.
    debug : bool
        If True, echo stdout and stderr to the console.

    Returns
    -------
    str or (str, str)
        Decoded stdout, or (stdout, stderr) when returnError is True.
    """
    # Build a fresh argument list; the original code mutated the caller's
    # `cmd` in place (cmd[0] = path+cmd[0]), which corrupted reused lists.
    full_cmd = [path + cmd[0], *cmd[1:]]
    p = subprocess.Popen(full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    out, err = out.decode(), err.decode()
    if debug:
        print(out, err)
    if err and not ignoreErrors:
        print(err)
        raise Exception(err)
    if returnError:
        return out, err
    return out
def extract_vals(txt, dtype=float, strip=None, debug=False):
    """Collect every whitespace-separated token of `txt` convertible to `dtype`.

    Parameters
    ----------
    txt : str
        text to scan, e.g. one line of output from an ATSAS tool
    dtype : callable
        conversion applied to each token; tokens that fail are skipped
    strip : str, optional
        substring replaced by a space before tokenizing (e.g. "%" or "+/-")
    debug : bool
        unused; kept for interface compatibility

    Returns
    -------
    list
        converted values, in order of appearance
    """
    if strip is not None:
        txt = txt.replace(strip, " ")
    ret = []
    # split() handles runs of spaces and tabs; split(" ") produced empty tokens
    for ss in txt.split():
        try:
            val = dtype(ss)
        except (ValueError, TypeError):
            # token is not a number of the requested dtype -- skip it;
            # a bare except here would also swallow KeyboardInterrupt
            pass
        else:
            ret.append(val)
    return ret
def atsas_create_temp_file(fn, d1s, skip=0, q_cutoff=0.6):
    """Write the (q, I, err) columns of `d1s` below q_cutoff to text file `fn`,
    dropping the first `skip` points of the selected range."""
    sel = (d1s.qgrid <= q_cutoff)
    columns = [d1s.qgrid[sel][skip:], d1s.data[sel][skip:], d1s.err[sel][skip:]]
    np.savetxt(fn, np.column_stack(columns))
def atsas_autorg(fn, debug=False, path=""):
    """Run ATSAS autorg on data file `fn` and parse Rg/I0/fit-range/quality.

    Parameters
    ----------
    fn : str
        name of the 3-column (q, I, err) data file
    debug : bool
        unused; kept for interface compatibility
    path : str
        directory prefix for the autorg executable

    Returns
    -------
    dict
        {"Rg", "Rg err", "I0", "I0 err", "fit range", "quality"};
        zeroed values (fit range [0, -1]) if autorg output cannot be parsed
    """
    ret = run(["autorg", fn], path).split('\n')
    try:
        # expected stdout layout: line 0 "Rg = x +/- dx", line 1 "I(0) = ...",
        # line 2 "Points n1 to n2", line 3 "Quality: xx%"
        rg, drg = extract_vals(ret[0])
        i0, di0 = extract_vals(ret[1])
        n1, n2 = extract_vals(ret[2], dtype=int)
        qual = extract_vals(ret[3], strip="%")[0]
    except (ValueError, IndexError):
        # ValueError: unpacking found the wrong number of values;
        # IndexError: fewer output lines than expected. A bare except
        # here would also mask KeyboardInterrupt/SystemExit.
        print("Unable to run autorg ...")
        rg, drg = 0, 0
        i0, di0 = 0, 0
        n1, n2 = 0, -1
        qual = 0
    return {"Rg": rg, "Rg err": drg,
            "I0": i0, "I0 err": di0,
            "fit range": [n1, n2],
            "quality": qual}
def atsas_datgnom(fn, rg, first, last=None, fn_out=None, path=""):
    """Run ATSAS datgnom on `fn` and parse Dmax/Rg/quality from its output.

    Parameters
    ----------
    fn : str
        name of the 3-column (q, I, err) input data file
    rg : float
        radius of gyration estimate passed to datgnom (-r)
    first : int
        index of the first data point to use (--first)
    last : int, optional
        index of the last data point to use (--last)
    fn_out : str, optional
        output .out file name; defaults to the input name with a .out suffix
    path : str
        directory prefix for the datgnom executable

    Returns
    -------
    dict
        {"Dmax", "quality", "Rg (q)", "Rg (r)"}; fallback values
        (Dmax=100, others 0) if parsing fails
    """
    if fn_out is None:
        # os.path.splitext keeps the full stem; fn.split('.')[0] truncated
        # names/paths containing extra dots (e.g. "./t.dat" -> "" + ".out")
        fn_out = os.path.splitext(fn)[0] + '.out'
    options = ["-r", str(rg), "-o", fn_out, "--first", str(first)]
    if last is not None:
        options += ["--last", str(last)]
    # datgnom vs datgnom4, slightly different input parameters
    ret = run(["datgnom", *options, fn], path).split("\n")
    try:
        if len(ret) > 1:
            # example stdout results:
            #   dmax:   50.170000000000002 Total:  0.90463013315175844
            #   Guinier:   15.332727107179052 Gnom:   15.332431498444064
            dmax, qual = extract_vals(ret[0])
            rgg, rgp = extract_vals(ret[1])
        else:
            # newer versions of datgnom no longer report Dmax/Rg on stdout;
            # the info is embedded in the .out file header instead, e.g.:
            #   Total Estimate: 0.9546 (a EXCELLENT solution)
            #   Reciprocal space Rg: 0.1471E+02
            #   Reciprocal space I(0): 0.5733E+01
            #   Real space range: 0.0000 to 45.9000
            #   Real space Rg: 0.1470E+02 +- 0.1148E+00
            #   Real space I(0): 0.5733E+01 +- 0.2619E-01
            qual = extract_vals(run(["grep", "Total Estimate", fn_out], path))[0]
            dmax = extract_vals(run(["grep", "Real space range", fn_out], path))[1]
            rgg = extract_vals(run(["grep", "Reciprocal space Rg", fn_out], path))[0]
            rgp = extract_vals(run(["grep", "Real space Rg", fn_out], path))[0]
    except (ValueError, IndexError):
        # unpacking mismatch or grep found nothing; a bare except here
        # would also swallow KeyboardInterrupt/SystemExit
        qual = 0
        dmax = 100
        rgg = 0
        rgp = 0
    return {"Dmax": dmax, "quality": qual,
            "Rg (q)": rgg, "Rg (r)": rgp}
def read_arr_from_strings(lines, cols=(0, 1, 2)):
    """Extract selected numeric columns from a list of text lines.

    Any line containing non-numeric tokens (e.g. table headers) is skipped,
    as are lines shorter than len(cols) characters; this tolerates columns
    that are missing values at the top of a table.
    For P(r) use cols=(0, 1, 2); for I_fit(q) use cols=(0, -1).

    Parameters
    ----------
    lines : sequence of str
        text lines, typically one section of a GNOM .out file
    cols : sequence of int
        indices of the columns to keep (negative indices allowed);
        a tuple default replaces the original mutable-list default
    cols : tuple of int

    Returns
    -------
    numpy.ndarray
        transposed data, shape (len(cols), n_valid_rows), so each
        selected column becomes a row
    """
    rows = []
    for buf in lines:
        if len(buf) < len(cols):  # empty (or far too short) line
            continue
        tb = np.genfromtxt(StringIO(buf))
        if np.isnan(tb).any():  # mixed text and numbers, e.g. "S J EXP ERROR J REG I REG"
            continue
        rows.append([tb[i] for i in cols])
    return np.asarray(rows).T
def read_gnom_out_file(fn, plot_pr=False, ax=None):
    """Parse a GNOM .out file into header text, fitted I(q), and P(r) arrays.

    Parameters
    ----------
    fn : str
        name of the GNOM .out file
    plot_pr : bool
        if True, plot P(r) with error bars
    ax : matplotlib axes, optional
        axes to plot into; a new figure is created when None

    Returns
    -------
    tuple
        (header, q, I_fit, r, P(r), P(r) err)
    """
    # context manager guarantees the file handle is closed (the original
    # open/read/close leaked the handle if read() raised)
    with open(fn, "r") as ff:
        tt = ff.read()
    hdr, t1 = tt.split("#### Experimental Data and Fit ####")
    iq, pr = t1.split("#### Real Space Data ####")
    dq, di = read_arr_from_strings(iq.rstrip().split("\n"), cols=[0, -1])
    dr, dpr, dpre = read_arr_from_strings(pr.rstrip().split("\n"), cols=[0, 1, 2])
    if plot_pr:
        if ax is None:
            plt.figure()
            ax = plt.gca()
        ax.errorbar(dr, dpr, dpre)
    return hdr.rstrip(), dq, di, dr, dpr, dpre
# ALMERGE Automatically merges data collected from two different concentrations or
# extrapolates it to infinite dilution assuming moderate particle interactions.
#
""" version-dependent output
$ datporod --version
datporod, ATSAS 2.8.5 (r11116)
Copyright (c) ATSAS Team, EMBL, Hamburg Outstation 2009-2018
$ datporod --help
Usage: datporod [OPTIONS] [FILE(S)]
Estimation of Porod volume. Output values are Rg, I0, estimated Volume and file name.
Known Arguments:
FILE Data file
Known Options:
--i0=<VALUE> Forward scattering intensity
--rg=<VALUE> Experimental Radius of Gyration
--first=<N> index of the first point to be used (default: 1)
--last=<N> index of the last point to be used (default: s*Rg ~ 7)
-h, --help Print usage information and exit
-v, --version Print version information and exit
Mandatory arguments to long options are mandatory for short options too.
$ datporod --version
datporod, ATSAS 3.0.1 (r12314)
Copyright (c) ATSAS Team, EMBL, Hamburg Outstation 2009-2020
$ datporod --help
Usage: datporod [OPTIONS] [FILE(S)]
Molecular weight from Porod Volume
Output: smax (A^-1), Volume (A^3), file name
Known Arguments:
FILE Data file
Known Options:
--i0=<VALUE> Forward scattering intensity
--rg=<VALUE> Experimental Radius of Gyration
--first=<N> index of the first point to be used (default: 1)
--last=<N> index of the last point to be used (default: s*Rg ~ 7)
-h, --help Print usage information and exit
-v, --version Print version information and exit
Mandatory arguments to long options are mandatory for short options too.
datmow --version
datmow, ATSAS 2.8.5 (r11116)
Copyright (c) ATSAS Team, EMBL, Hamburg Outstation 2014-2018
$ datmow --help
Usage: datmow [OPTIONS] [FILE(S)]
Output: Q', V' (apparent Volume), V (Volume, A^3), MW (Da), file name
Known Arguments:
FILE Data file
Known Options:
--i0=<VALUE> Forward scattering intensity
--rg=<VALUE> Experimental Radius of Gyration
--rho=<VALUE> Average protein density (default: 1.37 g/cm^-3)
-a, --offset=<VALUE> Offset coefficient (default: look up)
-b, --scaling=<VALUE> Scaling coefficient (default: look up)
-h, --help Print usage information and exit
-v, --version Print version information and exit
Mandatory arguments to long options are mandatory for short options too.
$ datmow --version
datmow, ATSAS 3.0.1 (r12314)
Copyright (c) ATSAS Team, EMBL, Hamburg Outstation 2014-2020
$ datmow --help
Usage: datmow [OPTIONS] [FILE(S)]
Molecular weight from Apparent Volume (Fischer et al., 2010).
Output: smax (A^-1), Q', V' (apparent volume), V (Volume, A^3), MW (Da), file name
Known Arguments:
FILE Data file
Known Options:
--i0=<VALUE> Forward scattering intensity
--rg=<VALUE> Experimental Radius of Gyration
--first=<N> index of the first point to be used (default: 1)
-h, --help Print usage information and exit
-v, --version Print version information and exit
Mandatory arguments to long options are mandatory for short options too.
"""
def atsas_dat_tools(fn_out, path=""):
    """Run datporod and datmow on a GNOM .out file and gather volume/MW estimates.

    datporod output: the Rg and I0 used, the computed volume estimate, file name.
    datmow output: Q', V' (apparent volume), V (volume, A^3), MW (Da), file name.
    (datvc is intentionally not run -- it requires q_max above 0.3.)

    Returns a dict keyed by tool name; values fall back to NaN on failure.
    """
    porod_lines = run(["datporod", fn_out], path).split('\n')
    try:
        r_porod = {"vol": extract_vals(porod_lines[0])[-1]}
    except:
        r_porod = {"vol": np.nan}
        print("Unable to get output from datporod ...")
    mow_lines = run(["datmow", fn_out], path).split('\n')
    try:
        Qp, Vp, Vv, mw = extract_vals(mow_lines[0])[-4:]
        r_mow = {"Q": Qp, "app vol": Vp, "vol": Vv, "MW": mw}
    except:
        r_mow = {"Q": np.nan, "app vol": np.nan, "vol": np.nan, "MW": np.nan}
        print("Unable to get output from datmow ...")
    return {"datporod": r_porod,
            "datmow": r_mow}
def gen_atsas_report(d1s, ax=None, fig=None, sn=None, skip=0, q_cutoff=0.6,
plot_full_q_range=False, print_results=True, path=""):
if not os.path.isdir("processed"):
os.mkdir("processed")
if ax is None:
ax = []
if fig is None:
fig = plt.figure(figsize=(9,3))
# rect = l, b, w, h
ax.append(fig.add_axes([0.09, 0.25, 0.25, 0.6]))
ax.append(fig.add_axes([0.41, 0.25, 0.25, 0.6]))
ax.append(fig.add_axes([0.73, 0.25, 0.25, 0.6]))
ax.append(ax[0].twiny())
else:
#ax[0].figure.cla()
for a in ax:
a.clear()
if sn is None:
tfn = "processed/t.dat"
tfn_out = "processed/t.out"
else:
tfn = "processed/t.dat"
tfn_out = f"processed/{sn}.out"
sk0 = skip
qc0 = q_cutoff
if skip<0:
sk0 = 0
if q_cutoff<0:
qc0 = 0.3
atsas_create_temp_file(tfn, d1s, skip=sk0, q_cutoff=qc0)
re_autorg = atsas_autorg(tfn, path=path)
if re_autorg["Rg"]==0: # autorg not successful,py4xs.slnxs might work
re_autorg["I0"],re_autorg["Rg"],re_autorg["fit range"] = d1s.plot_Guinier(no_plot=True)
if skip<0:
sk0 = re_autorg["fit range"][0]
if q_cutoff<0 and re_autorg["Rg"]>0:
qc0 = 15./re_autorg["Rg"]
re_gnom = atsas_datgnom(tfn, re_autorg["Rg"], first=sk0+1,
last=len(d1s.qgrid[d1s.qgrid<=qc0]), | |
<filename>plasmapy/utils/decorators/checks.py
"""
Decorator for checking input/output arguments of functions.
"""
__all__ = [
"check_values",
"check_units",
"check_relativistic",
"CheckBase",
"CheckUnits",
"CheckValues",
]
import collections
import functools
import inspect
import numpy as np
import warnings
from astropy import units as u
from astropy.constants import c
from functools import reduce
from operator import add
from typing import Any, Dict, List, Tuple, Union
from plasmapy.utils.decorators.helpers import preserve_signature
from plasmapy.utils.exceptions import (
PlasmaPyWarning,
RelativityError,
RelativityWarning,
)
try:
    from astropy.units.equivalencies import Equivalency
except ImportError:
    # TODO: remove once we have dependency Astropy >= 3.2.1
    # astropy defined the Equivalency class in v3.2.1
    class Equivalency:
        """Fallback stand-in for `astropy.units.equivalencies.Equivalency`
        on Astropy versions older than 3.2.1; presumably only used for
        isinstance-style checks -- confirm against the rest of the module."""
        pass
class CheckBase:
    """
    Base class for 'Check' decorator classes.
    Parameters
    ----------
    checks_on_return
        specified checks on the return of the wrapped function
    **checks
        specified checks on the input arguments of the wrapped function
    """
    def __init__(self, checks_on_return=None, **checks):
        # fold the return-value check into the same dictionary as the
        # argument checks, under a reserved key
        combined = dict(checks)
        if checks_on_return is not None:
            combined["checks_on_return"] = checks_on_return
        self._checks = combined
    @property
    def checks(self):
        """
        Requested checks on the decorated function's input arguments
        and/or return.
        """
        return self._checks
class CheckValues(CheckBase):
    """
    A decorator class to 'check' -- limit/control -- the values of input and return
    arguments to a function or method.
    Parameters
    ----------
    checks_on_return: Dict[str, bool]
        Specifications for value checks on the return of the function being wrapped.
        (see `check values`_ for valid specifications)
    **checks: Dict[str, Dict[str, bool]]
        Specifications for value checks on the input arguments of the function
        being wrapped. Each keyword argument in `checks` is the name of a function
        argument to be checked and the keyword value contains the value check
        specifications.
    .. _`check values`:
        The value check specifications are defined within a dictionary containing
        the keys defined below. If the dictionary is empty or omitting keys,
        then the default value will be assumed for the missing keys.
        ================ ======= ================================================
        Key              Type    Description
        ================ ======= ================================================
        can_be_negative  `bool`  [DEFAULT `True`] values can be negative
        can_be_complex   `bool`  [DEFAULT `False`] values can be complex numbers
        can_be_inf       `bool`  [DEFAULT `True`] values can be :data:`~numpy.inf`
        can_be_nan       `bool`  [DEFAULT `True`] values can be :data:`~numpy.nan`
        none_shall_pass  `bool`  [DEFAULT `False`] values can be a python `None`
        can_be_zero      `bool`  [DEFAULT `True`] values can be zero
        ================ ======= ================================================
    Notes
    -----
    * Checking of function arguments `*args` and `**kwargs` is not supported.
    Examples
    --------
    .. code-block:: python
        from plasmapy.utils.decorators.checks import CheckValues
        @CheckValues(arg1={'can_be_negative': False, 'can_be_nan': False},
                     arg2={'can_be_inf': False},
                     checks_on_return={'none_shall_pass': True})
        def foo(arg1, arg2):
            return None
        # on a method
        class Foo:
            @CheckValues(arg1={'can_be_negative': False, 'can_be_nan': False},
                         arg2={'can_be_inf': False},
                         checks_on_return={'none_shall_pass': True})
            def bar(self, arg1, arg2):
                return None
    """
    #: Default values for the possible 'check' keys.
    # To add a new check to the class, the following needs to be done:
    #   1. Add a key & default value to the `__check_defaults` dictionary
    #   2. Add a corresponding if-statement to method `_check_value`
    #
    __check_defaults = {
        "can_be_negative": True,
        "can_be_complex": False,
        "can_be_inf": True,
        "can_be_nan": True,
        "none_shall_pass": False,
        "can_be_zero": True,
    }
    def __init__(
        self, checks_on_return: Dict[str, bool] = None, **checks: Dict[str, bool]
    ):
        super().__init__(checks_on_return=checks_on_return, **checks)
    def __call__(self, f):
        """
        Decorate a function.
        Parameters
        ----------
        f
            Function to be wrapped
        Returns
        -------
        function
            wrapped function of `f`
        """
        self.f = f
        wrapped_sign = inspect.signature(f)
        @preserve_signature
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            # map args and kwargs to function parameters
            bound_args = wrapped_sign.bind(*args, **kwargs)
            bound_args.apply_defaults()
            # get checks
            checks = self._get_value_checks(bound_args)
            # check input arguments
            for arg_name in checks:
                # skip check of output/return
                if arg_name == "checks_on_return":
                    continue
                # check argument
                self._check_value(
                    bound_args.arguments[arg_name], arg_name, checks[arg_name]
                )
            # call function
            # NOTE(review): everything is re-passed as keyword arguments;
            # this would fail for positional-only parameters -- confirm none
            # of the decorated functions use them
            _return = f(**bound_args.arguments)
            # check function return
            if "checks_on_return" in checks:
                self._check_value(
                    _return, "checks_on_return", checks["checks_on_return"]
                )
            return _return
        return wrapper
    def _get_value_checks(
        self, bound_args: inspect.BoundArguments
    ) -> Dict[str, Dict[str, bool]]:
        """
        Review :attr:`checks` and function bound arguments to build a complete 'checks'
        dictionary. If a check key is omitted from the argument checks, then a default
        value is assumed (see `check values`_).
        Parameters
        ----------
        bound_args: :class:`inspect.BoundArguments`
            arguments passed into the function being wrapped
        .. code-block:: python
            bound_args = inspect.signature(f).bind(*args, **kwargs)
        Returns
        -------
        Dict[str, Dict[str, bool]]
            A complete 'checks' dictionary for checking function input arguments
            and return.
        """
        # initialize validation dictionary
        out_checks = {}
        # Iterate through function bound arguments + return and build `out_checks:
        #
        # artificially add "return" to parameters
        things_to_check = bound_args.signature.parameters.copy()
        things_to_check["checks_on_return"] = inspect.Parameter(
            "checks_on_return",
            inspect.Parameter.POSITIONAL_ONLY,
            annotation=bound_args.signature.return_annotation,
        )
        for param in things_to_check.values():
            # variable arguments are NOT checked
            # e.g. in foo(x, y, *args, d=None, **kwargs) variable arguments
            #      *args and **kwargs will NOT be checked
            #
            if param.kind in (
                inspect.Parameter.VAR_KEYWORD,
                inspect.Parameter.VAR_POSITIONAL,
            ):
                continue
            # grab the checks dictionary for the desired parameter
            try:
                param_in_checks = self.checks[param.name]
            except KeyError:
                # checks for parameter not specified
                continue
            # build `out_checks`
            # read checks and/or apply defaults values
            out_checks[param.name] = {}
            for v_name, v_default in self.__check_defaults.items():
                try:
                    out_checks[param.name][v_name] = param_in_checks.get(
                        v_name, v_default
                    )
                except AttributeError:
                    # for the case that checks are defined for an argument,
                    # but is NOT a dictionary
                    # (e.g. CheckValues(x=u.cm) ... this scenario could happen
                    # during subclassing)
                    out_checks[param.name][v_name] = v_default
        # Does `self.checks` indicate arguments not used by f?
        if missing_params := list(set(self.checks) - set(out_checks)):
            params_str = ", ".join(missing_params)
            warnings.warn(
                PlasmaPyWarning(
                    f"Expected to value check parameters {params_str} but they "
                    f"are missing from the call to {self.f.__name__}"
                )
            )
        return out_checks
    def _check_value(self, arg, arg_name: str, arg_checks: Dict[str, bool]):
        """
        Perform checks `arg_checks` on function argument `arg`.
        Parameters
        ----------
        arg
            The argument to be checked
        arg_name: str
            The name of the argument to be checked
        arg_checks: Dict[str, bool]
            The requested checks for the argument
        Raises
        ------
        ValueError
            raised if a check fails
        """
        if arg_name == "checks_on_return":
            valueerror_msg = "The return value "
        else:
            valueerror_msg = f"The argument '{arg_name}' "
        valueerror_msg += f"to function {self.f.__name__}() can not contain"
        # check values
        # * 'none_shall_pass' always needs to be checked first so that a
        #   permitted None short-circuits (break) before np.isinf/np.isnan
        #   would choke on a None argument
        ckeys = list(self.__check_defaults.keys())
        ckeys.remove("none_shall_pass")
        ckeys = ("none_shall_pass",) + tuple(ckeys)
        for ckey in ckeys:
            if ckey == "can_be_complex":
                if not arg_checks[ckey] and np.any(np.iscomplexobj(arg)):
                    raise ValueError(f"{valueerror_msg} complex numbers.")
            elif ckey == "can_be_inf":
                if not arg_checks[ckey] and np.any(np.isinf(arg)):
                    raise ValueError(f"{valueerror_msg} infs.")
            elif ckey == "can_be_nan":
                if not arg_checks["can_be_nan"] and np.any(np.isnan(arg)):
                    raise ValueError(f"{valueerror_msg} NaNs.")
            elif ckey == "can_be_negative":
                if not arg_checks[ckey] and np.any(arg < 0):
                    raise ValueError(f"{valueerror_msg} negative numbers.")
            elif ckey == "can_be_zero":
                if not arg_checks[ckey] and np.any(arg == 0):
                    raise ValueError(f"{valueerror_msg} zeros.")
            elif ckey == "none_shall_pass":
                if arg is None and arg_checks[ckey]:
                    break
                elif arg is None:
                    raise ValueError(f"{valueerror_msg} Nones.")
class CheckUnits(CheckBase):
"""
A decorator class to 'check' -- limit/control -- the units of input and return
arguments to a function or method.
Parameters
----------
checks_on_return: list of astropy :mod:`~astropy.units` or dict of unit specifications
Specifications for unit checks on the return of the function being wrapped.
(see `check units`_ for valid specifications)
**checks: list of astropy :mod:`~astropy.units` or dict of unit specifications
Specifications for unit checks on the input arguments of the function
being wrapped. Each keyword argument in `checks` is the name of a function
argument to be checked and the keyword value contains the unit check
specifications.
.. _`check units`:
Unit checks can be defined by passing one of the astropy
:mod:`~astropy.units`, a list of astropy units, or a dictionary containing
the keys defined below. Units can also be defined with function
annotations, but must be consistent with decorator `**checks` arguments if
used concurrently. If a key is omitted, then the default value will be assumed.
====================== ======= ================================================
Key Type Description
====================== ======= ================================================
units list of desired astropy :mod:`~astropy.units`
equivalencies | [DEFAULT `None`] A list of equivalent pairs to
try if
| the units are not directly convertible.
| (see :mod:`~astropy.units.equivalencies`,
and/or `astropy equivalencies`_)
pass_equivalent_units `bool` | [DEFAULT `False`] allow equivalent units
| to pass
====================== ======= ================================================
Notes
-----
* Checking of function arguments `*args` and `**kwargs` is not supported.
* Decorator does NOT perform any unit conversions.
* If it is desired that `None` values do not raise errors or warnings, then
include `None` in the list of units or as a default value for the function
argument.
* | |
== "android.intent.action.MAIN":
x.add( item.getAttributeNS(NS_ANDROID_URI, "name" ) )
for sitem in item.getElementsByTagName( "category" ):
val = sitem.getAttributeNS(NS_ANDROID_URI, "name" )
if val == "android.intent.category.LAUNCHER":
y.add( item.getAttributeNS(NS_ANDROID_URI, "name" ) )
z = x.intersection(y)
if len(z) > 0:
return self.format_value(z.pop())
return None
    def get_activities(self):
        """
        Return the android:name attribute of all activities
        declared in the manifest.
        :rtype: list of str
        """
        return self.get_elements("activity", "name")
    def get_services(self):
        """
        Return the android:name attribute of all services
        declared in the manifest.
        :rtype: list of str
        """
        return self.get_elements("service", "name")
    def get_receivers(self):
        """
        Return the android:name attribute of all broadcast receivers
        declared in the manifest.
        :rtype: list of str
        """
        return self.get_elements("receiver", "name")
    def get_providers(self):
        """
        Return the android:name attribute of all content providers
        declared in the manifest.
        :rtype: list of str
        """
        return self.get_elements("provider", "name")
def get_intent_filters(self, category, name):
d = {}
d["action"] = []
d["category"] = []
for i in self.xml:
for item in self.xml[i].getElementsByTagName(category):
if self.format_value(item.getAttributeNS(NS_ANDROID_URI, "name")) == name:
for sitem in item.getElementsByTagName("intent-filter"):
for ssitem in sitem.getElementsByTagName("action"):
if ssitem.getAttributeNS(NS_ANDROID_URI, "name") not in d["action"]:
d["action"].append(ssitem.getAttributeNS(NS_ANDROID_URI, "name"))
for ssitem in sitem.getElementsByTagName("category"):
if ssitem.getAttributeNS(NS_ANDROID_URI, "name") not in d["category"]:
d["category"].append(ssitem.getAttributeNS(NS_ANDROID_URI, "name"))
if not d["action"]:
del d["action"]
if not d["category"]:
del d["category"]
return d
def get_permissions(self):
"""
Return permissions
:rtype: list of string
"""
return self.permissions
def get_details_permissions(self):
"""
Return permissions with details
:rtype: list of string
"""
l = {}
for i in self.permissions:
perm = i
pos = i.rfind(".")
if pos != -1:
perm = i[pos+1:]
try:
l[ i ] = DVM_PERMISSIONS["MANIFEST_PERMISSION"][ perm ]
except KeyError:
l[ i ] = [ "normal", "Unknown permission from android reference", "Unknown permission from android reference" ]
return l
def get_requested_permissions(self):
"""
Returns all requested permissions.
:rtype: list of strings
"""
return self.permissions
def get_requested_aosp_permissions(self):
'''
Returns requested permissions declared within AOSP project.
:rtype: list of strings
'''
aosp_permissions = []
all_permissions = self.get_requested_permissions()
for perm in all_permissions:
if perm in self.permission_module["AOSP_PERMISSIONS"].keys():
aosp_permissions.append(perm)
return aosp_permissions
def get_requested_aosp_permissions_details(self):
"""
Returns requested aosp permissions with details.
:rtype: dictionary
"""
l = {}
for i in self.permissions:
try:
l[i] = self.permission_module["AOSP_PERMISSIONS"][i]
except KeyError:
continue #if we have not found permission do nothing
return l
def get_requested_third_party_permissions(self):
'''
Returns list of requested permissions not declared within AOSP project.
:rtype: list of strings
'''
third_party_permissions = []
all_permissions = self.get_requested_permissions()
for perm in all_permissions:
if perm not in self.permission_module["AOSP_PERMISSIONS"].keys():
third_party_permissions.append(perm)
return third_party_permissions
def get_declared_permissions(self):
'''
Returns list of the declared permissions.
:rtype: list of strings
'''
return self.declared_permissions.keys()
def get_declared_permissions_details(self):
'''
Returns declared permissions with the details.
:rtype: dict
'''
return self.declared_permissions
    def get_max_sdk_version(self):
        """
        Return the android:maxSdkVersion attribute of the uses-sdk element.
        :rtype: string
        """
        return self.get_element("uses-sdk", "maxSdkVersion")
    def get_min_sdk_version(self):
        """
        Return the android:minSdkVersion attribute of the uses-sdk element.
        :rtype: string
        """
        return self.get_element("uses-sdk", "minSdkVersion")
    def get_target_sdk_version(self):
        """
        Return the android:targetSdkVersion attribute of the uses-sdk element.
        :rtype: string
        """
        return self.get_element( "uses-sdk", "targetSdkVersion" )
    def get_libraries(self):
        """
        Return the android:name attributes for all uses-library elements.
        :rtype: list
        """
        return self.get_elements( "uses-library", "name" )
    def get_certificate(self, filename):
        """
        Return a certificate object by giving the name in the apk file

        Uses the third-party `chilkat` library (imported lazily so the
        rest of the class works without it).

        :param filename: archive path of the certificate file
        :returns: (success, cert) -- success is the boolean result of
                  LoadFromBinary, cert is the chilkat CkCert object
        """
        import chilkat
        cert = chilkat.CkCert()
        f = self.get_file(filename)
        data = chilkat.CkByteData()
        data.append2(f, len(f))
        success = cert.LoadFromBinary(data)
        return success, cert
def new_zip(self, filename, deleted_files=None, new_files={}):
"""
Create a new zip file
:param filename: the output filename of the zip
:param deleted_files: a regex pattern to remove specific file
:param new_files: a dictionnary of new files
:type filename: string
:type deleted_files: None or a string
:type new_files: a dictionnary (key:filename, value:content of the file)
"""
if self.zipmodule == 2:
from androguard.patch import zipfile
zout = zipfile.ZipFile(filename, 'w')
else:
import zipfile
zout = zipfile.ZipFile(filename, 'w')
for item in self.zip.infolist():
if deleted_files != None:
if re.match(deleted_files, item.filename) == None:
if item.filename in new_files:
zout.writestr(item, new_files[item.filename])
else:
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
zout.close()
def get_android_manifest_axml(self):
"""
Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file
:rtype: :class:`AXMLPrinter`
"""
try:
return self.axml["AndroidManifest.xml"]
except KeyError:
return None
def get_android_manifest_xml(self):
"""
Return the xml object which corresponds to the AndroidManifest.xml file
:rtype: object
"""
try:
return self.xml["AndroidManifest.xml"]
except KeyError:
return None
def get_android_resources(self):
"""
Return the :class:`ARSCParser` object which corresponds to the resources.arsc file
:rtype: :class:`ARSCParser`
"""
try:
return self.arsc["resources.arsc"]
except KeyError:
try:
self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
return self.arsc["resources.arsc"]
except KeyError:
return None
def get_signature_name(self):
signature_expr = re.compile("^(META-INF/)(.*)(\.RSA|\.DSA)$")
for i in self.get_files():
if signature_expr.search(i):
return i
return None
def get_signature(self):
signature_expr = re.compile("^(META-INF/)(.*)(\.RSA|\.DSA)$")
for i in self.get_files():
if signature_expr.search(i):
return self.get_file(i)
return None
def show(self):
self.get_files_types()
print ("FILES: ")
for i in self.get_files():
try:
print ("\t", i, self.files[i], "%x" % self.files_crc32[i])
except KeyError:
print ("\t", i, "%x" % self.files_crc32[i])
print ("DECLARED PERMISSIONS:")
declared_permissions = self.get_declared_permissions()
for i in declared_permissions:
print ("\t", i)
print ("REQUESTED PERMISSIONS:")
requested_permissions = self.get_requested_permissions()
for i in requested_permissions:
print ("\t", i)
print ("MAIN ACTIVITY: ", self.get_main_activity())
print ("ACTIVITIES: ")
activities = self.get_activities()
for i in activities:
filters = self.get_intent_filters("activity", i)
print ("\t", i, filters or "")
print ("SERVICES: ")
services = self.get_services()
for i in services:
filters = self.get_intent_filters("service", i)
print ("\t", i, filters or "")
print ("RECEIVERS: ")
receivers = self.get_receivers()
for i in receivers:
filters = self.get_intent_filters("receiver", i)
print ("\t", i, filters or "")
print ("PROVIDERS: ", self.get_providers())
def show_Certificate(cert):
    """Print the issuer and subject distinguished-name fields of a
    chilkat certificate object."""
    fields = ("C", "CN", "DN", "E", "L", "O", "OU", "S")
    for role, label in (("issuer", "Issuer"), ("subject", "Subject")):
        parts = ", ".join("%s=%s" % (f, getattr(cert, role + f)()) for f in fields)
        print ("%s: %s" % (label, parts))
######################################################## AXML FORMAT ########################################################
# Translated from http://code.google.com/p/android4me/source/browse/src/android/content/res/AXmlResourceParser.java
UTF8_FLAG = 0x00000100
CHUNK_STRINGPOOL_TYPE = 0x001C0001
CHUNK_NULL_TYPE = 0x00000000
class StringBlock(object):
def __init__(self, buff):
self.start = buff.get_idx()
self._cache = {}
self.header_size, self.header = self.skipNullPadding(buff)
self.chunkSize = unpack('<i', buff.read(4))[0]
self.stringCount = unpack('<i', buff.read(4))[0]
self.styleOffsetCount = unpack('<i', buff.read(4))[0]
self.flags = unpack('<i', buff.read(4))[0]
self.m_isUTF8 = ((self.flags & UTF8_FLAG) != 0)
self.stringsOffset = unpack('<i', buff.read(4))[0]
self.stylesOffset = unpack('<i', buff.read(4))[0]
self.m_stringOffsets = []
self.m_styleOffsets = []
self.m_strings = []
self.m_styles = []
for i in range(0, self.stringCount):
self.m_stringOffsets.append(unpack('<i', buff.read(4))[0])
for i in range(0, self.styleOffsetCount):
self.m_styleOffsets.append(unpack('<i', buff.read(4))[0])
size = self.chunkSize - self.stringsOffset
if self.stylesOffset != 0:
size = self.stylesOffset - self.stringsOffset
# FIXME
if (size % 4) != 0:
androconf.warning("ooo")
for i in range(0, size):
self.m_strings.append(unpack('=b', buff.read(1))[0])
if self.stylesOffset != 0:
size = self.chunkSize - self.stylesOffset
# FIXME
if (size % 4) != 0:
androconf.warning("ooo")
for i in range(0, size / 4):
self.m_styles.append(unpack('<i', buff.read(4))[0])
def skipNullPadding(self, buff):
def readNext(buff, first_run=True):
header = unpack('<i', buff.read(4))[0]
if header == CHUNK_NULL_TYPE and first_run:
androconf.info("Skipping null padding in StringBlock header")
header = readNext(buff, first_run=False)
elif header != CHUNK_STRINGPOOL_TYPE:
androconf.warning("Invalid StringBlock header")
return header
header = readNext(buff)
return header >> 8, header & 0xFF
    def getString(self, idx):
        """Return the string at pool index `idx`, decoding it on first
        access and caching the result; "" for invalid indices."""
        if idx in self._cache:
            return self._cache[idx]
        if idx < 0 or not self.m_stringOffsets or idx >= len(self.m_stringOffsets):
            return ""
        offset = self.m_stringOffsets[idx]
        if not self.m_isUTF8:
            # UTF-16 entry: a 16-bit length prefix precedes the character data
            length = self.getShort2(self.m_strings, offset)
            offset += 2
            self._cache[idx] = self.decode(self.m_strings, offset, length)
        else:
            # UTF-8 entry: two varint prefixes; the first is skipped and the
            # second is used as the byte length of the encoded data
            offset += self.getVarint(self.m_strings, offset)[1]
            varint = self.getVarint(self.m_strings, offset)
            offset += varint[1]
            length = varint[0]
            self._cache[idx] = self.decode2(self.m_strings, offset, length)
        return self._cache[idx]
    def getStyle(self, idx):
        """Debug helper: print raw style information for `idx`.
        NOTE(review): `idx in self.m_styleOffsets` tests membership among the
        offset *values*, not a bounds check on the index -- confirm intent."""
        print (idx)
        print (idx in self.m_styleOffsets, self.m_styleOffsets[idx])
        print (self.m_styles[0])
    def decode(self, array, offset, length):
        """Decode a UTF-16 string entry of `length` code units starting at
        `offset` in m_strings, stopping early at a double-NUL terminator.
        NOTE(review): `array` is unused -- m_strings is read directly; each
        byte is re-packed and str()-decoded individually, which only works
        cleanly for ASCII-range content -- confirm acceptable."""
        length = length * 2        # code units -> bytes
        length = length + length % 2
        data = ""
        for i in range(0, length):
            t_data = pack("=b", self.m_strings[offset + i])
            data += str(t_data, errors='ignore')
            if data[-2:] == "\x00\x00":
                break
        end_zero = data.find("\x00\x00")
        if end_zero != -1:
            data = data[:end_zero]
        return data
    def decode2(self, array, offset, length):
        """Decode a UTF-8 string entry of `length` bytes starting at `offset`
        in m_strings, one byte at a time.
        NOTE(review): `array` is unused -- m_strings is read directly."""
        data = ""
        for i in range(0, length):
            t_data = pack("=b", self.m_strings[offset + i])
            data += str(t_data, errors='ignore')
        return data
def getVarint(self, array, offset):
val = array[offset]
more = (val & 0x80) != 0
val &= 0x7f
if not more:
return val, 1
return val << 8 | array[offset + 1] & 0xff, 2
def getShort(self, array, offset):
value = array[offset // 4]
if ((offset % 4) // 2) == 0:
return value & 0xFFFF
else:
return value >> 16
def getShort2(self, array, offset):
return (array[offset + 1] & 0xff) << | |
"""CardinalityMatching.py
Find maximum cardinality matchings in general undirected graphs.
<NAME>, UC Irvine, September 6, 2003.
"""
import sys
from UnionFind import UnionFind
from Util import arbitrary_item
def matching(G, initialMatching = None):
    """Find a maximum cardinality matching in a graph G.
    G is represented in modified GvR form: iter(G) lists its vertices;
    iter(G[v]) lists the neighbors of v; w in G[v] tests adjacency.
    For maximal efficiency, G and G[v] should be dictionaries, so
    that adjacency tests take constant time each.
    The output is a dictionary mapping vertices to their matches;
    unmatched vertices are omitted from the dictionary.
    We use Edmonds' blossom-contraction algorithm, as described e.g.
    in Galil's 1986 Computing Surveys paper.
    """
    # Copy initial matching so we can use it nondestructively
    # and augment it greedily to reduce main loop iterations
    # NOTE: the local name `matching` shadows this function; the nested
    # closures below all refer to this dictionary, not to the function.
    matching = greedyMatching(G,initialMatching)

    def augment():
        """Search for a single augmenting path.
        Returns true if the matching size was increased, false otherwise.
        """
        # Data structures for augmenting path search:
        #
        # leader: union-find structure; the leader of a blossom is one
        # of its vertices (not necessarily topmost), and leader[v] always
        # points to the leader of the largest blossom containing v
        #
        # S: dictionary of blossoms at even levels of the structure tree.
        # Dictionary keys are names of blossoms (as returned by the union-find
        # data structure) and values are the structure tree parent of the blossom
        # (a T-node, or the top vertex if the blossom is a root of a structure tree).
        #
        # T: dictionary of vertices at odd levels of the structure tree.
        # Dictionary keys are the vertices; T[x] is a vertex with an unmatched
        # edge to x. To find the parent in the structure tree, use leader[T[x]].
        #
        # unexplored: collection of unexplored vertices within blossoms of S
        #
        # base: if x was originally a T-vertex, but becomes part of a blossom,
        # base[t] will be the pair (v,w) at the base of the blossom, where v and t
        # are on the same side of the blossom and w is on the other side.
        leader = UnionFind()
        S = {}
        T = {}
        unexplored = []
        base = {}

        # Subroutines for augmenting path search.
        # Many of these are called only from one place, but are split out
        # as subroutines to improve modularization and readability.

        def blossom(v,w,a):
            """Create a new blossom from edge v-w with common ancestor a."""

            def findSide(v,w):
                # Walk from v up to the common ancestor a, recording the
                # path; T-nodes absorbed into the blossom become explorable
                # and so are appended to `unexplored`.
                path = [leader[v]]
                b = (v,w) # new base for all T nodes found on the path
                while path[-1] != a:
                    tnode = S[path[-1]]
                    path.append(tnode)
                    base[tnode] = b
                    unexplored.append(tnode)
                    path.append(leader[T[tnode]])
                return path

            a = leader[a] # sanity check
            path1,path2 = findSide(v,w), findSide(w,v)
            leader.union(*path1)
            leader.union(*path2)
            S[leader[a]] = S[a] # update structure tree

        topless = object() # should be unequal to any graph vertex

        def alternatingPath(start, goal = topless):
            """Return sequence of vertices on alternating path from start to goal.
            The goal must be a T node along the path from the start to
            the root of the structure tree. If goal is omitted, we find
            an alternating path to the structure tree root.
            """
            path = []
            while 1:
                while start in T:
                    v, w = base[start]
                    vs = alternatingPath(v, start)
                    vs.reverse()
                    path += vs
                    start = w
                path.append(start)
                if start not in matching:
                    return path # reached top of structure tree, done!
                tnode = matching[start]
                path.append(tnode)
                if tnode == goal:
                    return path # finished recursive subpath
                start = T[tnode]

        def alternate(v):
            """Make v unmatched by alternating the path to the root of its structure tree."""
            path = alternatingPath(v)
            path.reverse()
            # Flip matched/unmatched status of consecutive path edges.
            for i in range(0,len(path)-1,2):
                matching[path[i]] = path[i+1]
                matching[path[i+1]] = path[i]

        def addMatch(v, w):
            """Here with an S-S edge vw connecting vertices in different structure trees.
            Find the corresponding augmenting path and use it to augment the matching.
            """
            alternate(v)
            alternate(w)
            matching[v] = w
            matching[w] = v

        def ss(v,w):
            """Handle detection of an S-S edge in augmenting path search.
            Like augment(), returns true iff the matching size was increased.
            """
            if leader[v] == leader[w]:
                return False # self-loop within blossom, ignore

            # parallel search up two branches of structure tree
            # until we find a common ancestor of v and w
            path1, head1 = {}, v
            path2, head2 = {}, w

            def step(path, head):
                # Advance one step up the structure tree, recording visited
                # nodes in `path` so the other search can detect a crossing.
                head = leader[head]
                parent = leader[S[head]]
                if parent == head:
                    return head # found root of structure tree
                path[head] = parent
                path[parent] = leader[T[parent]]
                return path[parent]

            while 1:
                head1 = step(path1, head1)
                head2 = step(path2, head2)
                if head1 == head2:
                    blossom(v, w, head1)
                    return False
                if leader[S[head1]] == head1 and leader[S[head2]] == head2:
                    addMatch(v, w)
                    return True
                if head1 in path2:
                    blossom(v, w, head1)
                    return False
                if head2 in path1:
                    blossom(v, w, head2)
                    return False

        # Start of main augmenting path search code.
        # Unmatched vertices become roots of structure trees (S-nodes).
        for v in G:
            if v not in matching:
                S[v] = v
                unexplored.append(v)
        current = 0 # index into unexplored, in FIFO order so we get short paths
        while current < len(unexplored):
            v = unexplored[current]
            current += 1
            for w in G[v]:
                if leader[w] in S: # S-S edge: blossom or augmenting path
                    if ss(v,w):
                        return True
                elif w not in T: # previously unexplored node, add as T-node
                    T[w] = v
                    u = matching[w]
                    if leader[u] not in S:
                        S[u] = w # and add its match as an S-node
                        unexplored.append(u)
        return False # ran out of graph without finding an augmenting path

    # augment the matching until it is maximum
    while augment():
        pass
    return matching
def greedyMatching(G, initialMatching=None):
"""Near-linear-time greedy heuristic for creating high-cardinality matching.
If there is any vertex with one unmatched neighbor, we match it.
Otherwise, if there is a vertex with two unmatched neighbors, we contract
it away and store the contraction on a stack for later matching.
If neither of these two cases applies, we match an arbitrary edge.
"""
# Copy initial matching so we can use it nondestructively
matching = {}
if initialMatching:
for x in initialMatching:
matching[x] = initialMatching[x]
# Copy graph to new subgraph of available edges
# Representation: nested dictionary rep->rep->pair
# where the reps are representative vertices for merged clusters
# and the pair is an unmatched original pair of vertices
avail = {}
has_edge = False
for v in G:
if v not in matching:
avail[v] = {}
for w in G[v]:
if w not in matching:
avail[v][w] = (v,w)
has_edge = True
if not avail[v]:
del avail[v]
if not has_edge:
return matching
# make sets of degree one and degree two vertices
deg1 = {v for v in avail if len(avail[v]) == 1}
deg2 = {v for v in avail if len(avail[v]) == 2}
d2edges = []
def updateDegree(v):
"""Cluster degree changed, update sets."""
if v in deg1:
deg1.remove(v)
elif v in deg2:
deg2.remove(v)
if len(avail[v]) == 0:
del avail[v]
elif len(avail[v]) == 1:
deg1.add(v)
elif len(avail[v]) == 2:
deg2.add(v)
def addMatch(v,w):
"""Add edge connecting two given cluster reps, update avail."""
p,q = avail[v][w]
matching[p] = q
matching[q] = p
for x in avail[v].keys():
if x != w:
del avail[x][v]
updateDegree(x)
for x in avail[w].keys():
if x != v:
del avail[x][w]
updateDegree(x)
avail[v] = avail[w] = {}
updateDegree(v)
updateDegree(w)
def contract(v):
"""Handle degree two vertex."""
u,w = avail[v] # find reps for two neighbors
d2edges.extend([avail[v][u],avail[v][w]])
del avail[u][v]
del avail[w][v]
if len(avail[u]) > len(avail[w]):
u,w = w,u # swap to preserve near-linear time bound
for x in avail[u].keys():
del avail[x][u]
if x in avail[w]:
updateDegree(x)
elif x != w:
avail[x][w] = avail[w][x] = avail[u][x]
avail[u] = avail[v] = {}
updateDegree(u)
updateDegree(v)
updateDegree(w)
# loop adding edges or contracting deg2 clusters
while avail:
if deg1:
v = arbitrary_item(deg1)
w = arbitrary_item(avail[v])
addMatch(v,w)
elif deg2:
v = arbitrary_item(deg2)
contract(v)
else:
v = arbitrary_item(avail)
w = arbitrary_item(avail[v])
addMatch(v,w)
# at this point the edges listed in d2edges form | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from abc import ABC, abstractmethod, abstractstaticmethod
from typing import Callable, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torchvision.ops import batched_nms
from combustion.vision import batch_box_target
from .fpn_shared_head import SharedDecoder2d
class BaseFCOSDecoder(nn.Module, ABC):
    r"""Base class for FCOS-style decoders.

    Builds two FPN-shared heads: a classification head with ``num_classes``
    output channels and a regression head with ``num_regressions + 1``
    output channels, where the extra leading channel is a centerness
    prediction. Subclasses implement :meth:`postprocess`.

    Args:
        in_channels: number of input channels at each FPN level
        num_classes: number of classification outputs
        num_regressions: number of regression outputs (excluding centerness)
        num_convs: number of convolutional repeats in each head
        strides: stride of each FPN level
        activation: activation for intermediate repeats in the heads
        reg_activation: activation applied to the scaled regression outputs
        num_groups: number of groups for the heads' group norm layers
        gn_epsilon: epsilon for group norm
        cls_prior: prior probability used to initialize the classification
            bias (RetinaNet-style initialization)
    """

    def __init__(
        self,
        in_channels: int,
        num_classes: int,
        num_regressions: int,
        num_convs: int,
        strides: Tuple[int, ...],
        activation: nn.Module = nn.SiLU(),
        reg_activation: nn.Module = nn.ReLU(),
        num_groups: int = 32,
        gn_epsilon: float = 1e-5,
        cls_prior: float = 0.01,
    ):
        super().__init__()
        self.cls_head = SharedDecoder2d(
            in_channels,
            num_classes,
            num_convs,
            scaled=False,
            strides=strides,
            activation=activation,
            final_activation=nn.Identity(),
            num_groups=num_groups,
            gn_epsilon=gn_epsilon,
        )
        self.reg_head = SharedDecoder2d(
            in_channels,
            num_regressions + 1,
            num_convs,
            scaled=False,
            strides=strides,
            activation=activation,
            final_activation=nn.Identity(),
            num_groups=num_groups,
            gn_epsilon=gn_epsilon,
        )
        self.reg_activation = reg_activation
        self.strides = strides
        # Initialize the classification bias so the initial predicted class
        # probability is ~cls_prior, which stabilizes early training.
        bias_value = float(torch.tensor(cls_prior).logit().item())
        bias: Tensor = self.cls_head.final_conv_pw.bias  # type: ignore
        torch.nn.init.constant_(bias, bias_value)

    def forward(self, fpn: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]:
        """Run both heads over the FPN levels.

        Returns:
            ``(cls, reg, centerness)`` lists, one entry per FPN level.
            Channel 0 of the regression head is split off as centerness;
            the remaining channels are scaled by the level stride and
            passed through ``reg_activation``.
        """
        cls = self.cls_head(fpn)
        reg_out = self.reg_head(fpn)
        centerness = [level[..., 0:1, :, :] for level in reg_out]
        reg = [self.reg_activation(level[..., 1:, :, :] * s) for s, level in zip(self.strides, reg_out)]
        return cls, reg, centerness

    # NOTE: @abstractstaticmethod is deprecated since Python 3.3; the
    # supported spelling is stacking @staticmethod over @abstractmethod.
    @staticmethod
    @abstractmethod
    def postprocess(cls: List[Tensor], reg: List[Tensor], centerness: List[Tensor]) -> Tensor:  # type: ignore
        raise NotImplementedError()

    @staticmethod
    def reduce_heatmaps(
        heatmap: Tuple[Tensor, ...],
        reduction: Callable[[Tensor, Tensor], Tensor] = torch.max,
        mode: str = "nearest",
    ) -> Tensor:
        r"""Helper function that reduces FCOS FPN heatmaps into a single channel heatmap
        suitable for visualization.

        Args:
            heatmap (tuple of :class:`torch.Tensor`):
                FCOS FPN heatmaps to reduce
            reduction:
                Function that should accept two equally sized tensors and reduce them to a
                single output tensor. By default, heatmaps are reduced with :func:`torch.max`.
            mode:
                Interpolation mode used to upsample each level to the size of the
                first (largest) level.
        """
        result = heatmap[0]
        C = result.shape[1]
        # reduce each FPN level (upsampled to the first level's spatial size)
        for i in range(len(heatmap) - 1):
            current_level = F.interpolate(heatmap[i + 1], result.shape[-2:], mode=mode)
            result = reduction(current_level, result)
        # reduce across channels to a 1 channel heatmap
        if C != 1:
            result = torch.amax(result, dim=1, keepdim=True)
        return result
def _postprocess_level(
    stride: int, level_cls: Tensor, level_reg: Tensor, level_centerness: Tensor, threshold: float, from_logits: bool
) -> Tuple[Tensor, Tensor]:
    """Convert one FPN level's heatmaps into candidate boxes.

    Args:
        stride: stride of this FPN level relative to the input image
        level_cls: classification heatmap, shape (N, C, H, W)
        level_reg: regression heatmap; channels hold the (l, t, r, b)
            offsets predicted at each spatial location
        level_centerness: centerness heatmap, shape (N, 1, H, W)
        threshold: minimum raw classification score for a location to
            produce a box
        from_logits: if True, `level_cls` and `level_centerness` are logits
            and are first passed through sigmoid

    Returns:
        Tuple ``(boxes, batch)``: each row of `boxes` is
        (x1, y1, x2, y2, raw_score, scaled_score, class_id), and `batch`
        maps each box row to its batch element.
    """
    if from_logits:
        level_cls = torch.sigmoid(level_cls)
        level_centerness = torch.sigmoid(level_centerness)
    # scale classifications based on centerness (geometric mean of the two);
    # sqrt_ mutates in place but operates on a freshly created product tensor
    scaled_score = (level_cls * level_centerness.expand_as(level_cls)).sqrt_()
    # get indices of positions whose RAW class score exceeds threshold
    positive_locations = (level_cls >= threshold).nonzero()
    # extract coordinates of positive predictions and drop scores for negative predictions
    batch, class_id, y, x = positive_locations.split(1, dim=-1)
    raw_score = level_cls[batch, class_id, y, x]
    scaled_score = scaled_score[batch, class_id, y, x]
    assert not raw_score.isnan().any()
    assert not scaled_score.isnan().any()
    # use stride to compute base coordinates (location centers) within the original image
    # use pred regression to compute l, t, r, b offset
    base = positive_locations[..., -2:].roll(1, -1).float().mul_(stride).add_(stride / 2.0).repeat(1, 2)
    offset = level_reg[batch, :, y, x].view_as(base)
    # negate (l, t) so base + offset yields (x1, y1, x2, y2)
    offset[..., :2].neg_()
    # compute final regressions (clamping to the image bounds is done by the caller)
    coords = base + offset
    # record the boxes and box -> batch mapping
    boxes = torch.cat([coords, raw_score, scaled_score, class_id], dim=-1)
    return boxes, batch
def _apply_nms(
    final_boxes: Tensor, final_batch_idx: Tensor, nms_threshold: float, num_classes: int
) -> Tuple[Tensor, Tensor]:
    """Apply class-and-batch-aware NMS to the candidate boxes.

    Args:
        final_boxes: rows of (x1, y1, x2, y2, raw_score, scaled_score, class_id)
        final_batch_idx: per-row batch element index
        nms_threshold: IoU threshold for suppression
        num_classes: number of classes (used to build a combined batch/class id)

    Returns:
        The filtered ``(final_boxes, final_batch_idx)`` pair.
    """
    # Removed a dead no-op statement that sliced the raw score column
    # (`final_boxes[..., -3, None]`) without binding or using it.
    coords = final_boxes[..., :4]
    scaled_score = final_boxes[..., -2, None]
    class_id = final_boxes[..., -1, None]
    # torchvision NMS can't do batches of images, but it can separate based on
    # class id; create a new "class id" that distinguishes batch and class
    idx = (final_batch_idx * num_classes + class_id.view_as(final_batch_idx)).view(-1).long()
    keep = batched_nms(coords.float(), scaled_score.view(-1), idx, nms_threshold)
    final_boxes = final_boxes[keep, :]
    final_batch_idx = final_batch_idx[keep, :]
    return final_boxes.contiguous(), final_batch_idx.contiguous()
def _apply_pre_nms_limit(
final_boxes: Tensor, final_batch_idx: Tensor, limit: int, batch_size: int
) -> Tuple[Tensor, Tensor]:
# restrict top k boxes prior to NMS to avoid memory explosion
pre_nms_top_k: List[Tensor] = []
pre_nms_top_k_batch: List[Tensor] = []
for i in range(batch_size):
# find indices of top k highest scaled score boxes within the batch
topk = final_boxes[(final_batch_idx == i).view(-1), -2].argsort(descending=True)[:limit]
# use indices to extract boxes from this batch and update final_batch_idx
values = final_boxes[(final_batch_idx == i).view(-1)][topk, :]
pre_nms_top_k.append(values)
values = final_batch_idx[(final_batch_idx == i).view(-1)][topk, :]
pre_nms_top_k_batch.append(values)
final_boxes = torch.cat(pre_nms_top_k)
final_batch_idx = torch.cat(pre_nms_top_k_batch)
return final_boxes, final_batch_idx
class FCOSDecoder(BaseFCOSDecoder):
r"""Decoder for Fully Convolutional One-Stage Object Detector (FCOS) as described
in "FCOS: Fully Convolutional One-Stage Object Detection" (Tian et al., 2019).
FCOS is an anchor-free object detection implementation that predicts
detection, regression, and centerness heatmaps at each FPN level. These predictions
are postprocessed to create a set of anchor boxes.
Args:
in_channels (int):
Number of input channels at each FPN level.
num_classes (int):
Number of classes to detect.
num_convs (int):
Number of convolutional repeats in each decoder head.
strides (tuple of ints, optional):
Strides at each FPN level. By default, assume each FPN level differs
in stride by a factor of 2.
activation (nn.Module):
Activation function for each intermediate repeat in the heads.
num_groups (int):
    Number of groups for the group norm layers in the decoder heads.
gn_epsilon (float):
    Epsilon value for group norm.
Returns:
List of classification, regression, and centerness predictions for each
FPN level.
Shape:
* ``fpn`` - :math:`(N, C, H_i, W_i)` where :math:`i` is the :math:`i`'th FPN level
* Classification - :math:`(N, O, H_i, W_i)` where :math:`O` is the number of classes
* Regression - :math:`(N, 4, H_i, W_i)`
* Centerness - :math:`(N, 1, H_i, W_i)`
"""
def __init__(
    self,
    in_channels: int,
    num_classes: int,
    num_convs: int,
    strides: Tuple[int, ...],
    activation: nn.Module = nn.SiLU(),
    num_groups: int = 32,
    gn_epsilon: float = 1e-5,
):
    """Construct a standard FCOS decoder.

    Fixes the base class's ``num_regressions`` to 4 (the box regression
    channels; see the class docstring's regression shape of
    :math:`(N, 4, H_i, W_i)`) and the regression activation to
    :class:`nn.ReLU`, which keeps the predicted offsets non-negative.
    """
    super().__init__(
        in_channels,
        num_classes,
        4,
        num_convs,
        strides,
        activation,
        nn.ReLU(),
        num_groups,
        gn_epsilon,
    )
@staticmethod
@torch.jit.script
def postprocess(
cls: List[Tensor], # type: ignore
reg: List[Tensor],
centerness: List[Tensor],
strides: List[int],
threshold: float = 0.05,
pad_value: float = -1,
from_logits: bool = False,
nms_threshold: Optional[float] = 0.5,
use_raw_score: bool = False,
max_boxes: Optional[int] = None,
pre_nms_max_boxes: Optional[int] = 1000,
) -> Tensor:
r"""Postprocesses detection, regression, and centerness predictions into a set
of anchor boxes.
Args:
cls (iterable of tensors):
Classification predictions at each FPN level
reg (iterable of tensors):
Regression predictions at each FPN level
centerness (iterable of tensors):
Centerness predictions at each FPN level
strides (tuple of ints):
Strides at each FPN level.
threshold (float):
Detection confidence threshold
from_logits (bool):
If ``True``, assume that ``cls`` and ``centerness`` are logits and not
probabilities.
nms_threshold (float, optional):
Threshold for non-maximal suppression. If ``None``, do not apply NMS.
use_raw_score (bool):
If ``True``, assign scores to boxes based on their predicted classification score.
Otherwise, scores are assigned based on classification and centerness scores.
max_boxes (int, optional):
An optional limit on the maximum number of boxes per image
pre_nms_max_boxes (int, optional):
An optional limit on the maximum number of boxes per image before NMS
Returns:
Predicted boxes in the form :math:`(x_1, y_1, x_2, y_x, score, class)`.
Shape:
* ``cls`` - :math:`(*, C, H_i, W_i)` where :math:`i` is the :math:`i`'th FPN level
* ``reg`` - :math:`(*, 4, H_i, W_i)` where :math:`i` is the :math:`i`'th FPN level
* ``centerness`` - :math:`(*, 1, H_i, W_i)` where :math:`i` is the :math:`i`'th FPN level
* Output - :math:`(*, N, 6)`
"""
torch.autograd.set_grad_enabled(False)
threshold = abs(float(threshold))
nms_threshold = abs(float(nms_threshold)) if nms_threshold is not None else None
assert len(strides) == len(cls)
assert len(strides) == len(reg)
assert len(strides) == len(centerness)
_ = [x * strides[0] for x in cls[0].shape[-2:]]
y_lim, x_lim = _
assert x_lim > 0
assert y_lim > 0
batch_idx, boxes = [], []
batch_size = cls[0].shape[0]
num_classes = cls[0].shape[1]
# iterate over each FPN level
for stride, level_cls, level_reg, level_centerness in zip(strides, cls, reg, centerness):
bbox, batch = _postprocess_level(stride, level_cls, level_reg, level_centerness, threshold, from_logits)
boxes.append(bbox)
batch_idx.append(batch)
# combine boxes across all FPN levels
if len(boxes):
final_boxes = torch.cat(boxes, dim=-2)
final_batch_idx = torch.cat(batch_idx, dim=-2)
del boxes
del batch_idx
# handle case of no boxes across entire batch
else:
return reg[0].new_empty(batch_size, 0, 6)
# restrict to top k boxes before NMS to avoid exploding memory
if pre_nms_max_boxes is not None:
final_boxes, final_batch_idx = _apply_pre_nms_limit(
final_boxes, final_batch_idx, pre_nms_max_boxes, batch_size
)
# ensure boxes are bounded within image area
coords = final_boxes[..., :4]
coords.clamp_min_(0)
coords[..., 2].clamp_max_(x_lim)
coords[..., 3].clamp_max_(y_lim)
# apply NMS to final_boxes
if nms_threshold is not None:
final_boxes, final_batch_idx = _apply_nms(final_boxes, final_batch_idx, nms_threshold, num_classes)
# create final box using raw or centerness adjusted score as specified
coords, raw_score, scaled_score, class_id = final_boxes.split((4, 1, 1, 1), dim=-1)
if use_raw_score:
final_boxes = | |
parameters - nreal
nreal = int(nreal) # cast to int if needed
if nreal <= 0:
if verbose >= 1:
print('SIMUL_3D: nreal <= 0: nothing to do!')
return None
# --- Fill mpds_geosClassicInput structure (C)
mpds_geosClassicInput, flag = fill_mpds_geosClassicInput(
space_dim,
cov_model,
nx, ny, nz,
sx, sy, sz,
ox, oy, oz,
varname,
outputReportFile,
computationMode,
None,
dataPointSet,
mask,
mean,
var,
searchRadiusRelative,
nneighborMax,
searchNeighborhoodSortMode,
nGibbsSamplerPath,
seed,
nreal)
if not flag:
print("ERROR (SIMUL_3D): can not fill input structure!")
return None
# --- Prepare mpds_geosClassicIOutput structure (C)
# Allocate mpds_geosClassicOutput
mpds_geosClassicOutput = geosclassic.malloc_MPDS_GEOSCLASSICOUTPUT()
# Init mpds_geosClassicOutput
geosclassic.MPDSGeosClassicInitGeosClassicOutput(mpds_geosClassicOutput)
# --- Set progress monitor
mpds_progressMonitor = geosclassic.malloc_MPDS_PROGRESSMONITOR()
geosclassic.MPDSInitProgressMonitor(mpds_progressMonitor)
# Set function to update progress monitor:
# according to geosclassic.MPDS_SHOW_PROGRESS_MONITOR set to 4 for compilation of py module
# the function
# mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitorAllOnlyPercentStdout_ptr
# should be used, but the following function can also be used:
# mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitor0_ptr: no output
# mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitorWarningOnlyStdout_ptr: warning only
if verbose == 0:
mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitor0_ptr
elif verbose == 1:
mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitor0_ptr
# mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitorWarningOnlyStdout_ptr
else:
mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitorAllOnlyPercentStdout_ptr
# --- Set number of threads
if nthreads <= 0:
nth = max(os.cpu_count() + nthreads, 1)
else:
nth = nthreads
if verbose >= 1:
print('Geos-Classic running... [VERSION {:s} / BUILD NUMBER {:s} / OpenMP {:d} thread(s)]'.format(geosclassic.MPDS_GEOS_CLASSIC_VERSION_NUMBER, geosclassic.MPDS_GEOS_CLASSIC_BUILD_NUMBER, nth))
sys.stdout.flush()
sys.stdout.flush() # twice!, so that the previous print is flushed before launching GeosClassic...
# --- Launch "GeosClassicSim" (launch C code)
# err = geosclassic.MPDSGeosClassicSim(mpds_geosClassicInput, mpds_geosClassicOutput, mpds_progressMonitor, mpds_updateProgressMonitor )
err = geosclassic.MPDSOMPGeosClassicSim(mpds_geosClassicInput, mpds_geosClassicOutput, mpds_progressMonitor, mpds_updateProgressMonitor, nth)
# Free memory on C side: mpds_geosClassicInput
geosclassic.MPDSGeosClassicFreeGeosClassicInput(mpds_geosClassicInput)
#geosclassic.MPDSFree(mpds_geosClassicInput)
geosclassic.free_MPDS_GEOSCLASSICINPUT(mpds_geosClassicInput)
if err:
err_message = geosclassic.mpds_get_error_message(-err)
err_message = err_message.replace('\n', '')
print(err_message)
geosclassic_output = None
else:
geosclassic_output = geosclassic_output_C2py(mpds_geosClassicOutput, mpds_progressMonitor)
# Free memory on C side: mpds_geosClassicOutput
geosclassic.MPDSGeosClassicFreeGeosClassicOutput(mpds_geosClassicOutput)
#geosclassic.MPDSFree (mpds_geosClassicOutput)
geosclassic.free_MPDS_GEOSCLASSICOUTPUT(mpds_geosClassicOutput)
# Free memory on C side: mpds_progressMonitor
#geosclassic.MPDSFree(mpds_progressMonitor)
geosclassic.free_MPDS_PROGRESSMONITOR(mpds_progressMonitor)
if verbose >= 1 and geosclassic_output:
print('Geos-Classic run complete')
# Show (print) encountered warnings
if verbose >= 1 and geosclassic_output and geosclassic_output['nwarning']:
print('\nWarnings encountered ({} times in all):'.format(geosclassic_output['nwarning']))
for i, warning_message in enumerate(geosclassic_output['warnings']):
print('#{:3d}: {}'.format(i+1, warning_message))
return geosclassic_output
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def estimate1D(
cov_model,
dimension, spacing=1.0, origin=0.0,
method='simple_kriging',
mean=None, var=None,
x=None, v=None,
mask=None,
use_unique_neighborhood=False,
searchRadiusRelative=1.0,
nneighborMax=12,
searchNeighborhoodSortMode=None,
outputReportFile=None,
nthreads=-1, verbose=1):
"""
Computes estimate and standard deviation for 1D grid of simple or ordinary kriging.
:param cov_model: (CovModel1D class) covariance model in 1D, see
definition of the class in module geone.covModel
:param dimension: (int) nx, number of cells
:param spacing: (float) sx, spacing between two adjacent cells
:param origin: (float) ox, origin of the 1D simulation
- used for localizing the conditioning points
:param method: (string) indicates the method used:
- 'simple_kriging':
simulation based on simple kriging
- 'ordinary_kriging':
simulation based on ordinary kriging
:param mean: (None or float or ndarray) mean of the simulation:
- None : mean of hard data values (stationary),
(0 if no hard data)
- float : for stationary mean (set manually)
- ndarray: for non stationary mean, must contain
as many entries as number of grid cells
(reshaped if needed)
For ordinary kriging (method='ordinary_kriging'),
it is used for case with no neighbor
:param var: (None or float or ndarray) variance of the simulation
(for simple kriging only):
- None : variance not modified
(only covariance model is used)
- float : for stationary variance (set manually)
- ndarray: for non stationary variance, must contain
as many entries as number of grid cells
(reshaped if needed)
For ordinary kriging (method='ordinary_kriging'),
this parameter must be None (only covariance model
is used)
:param x: (1-dimensional array or float or None) coordinate of
conditioning points for hard data
:param v: (1-dimensional array or float or None) value
at conditioning points for hard data
(same type as x)
:param mask: (nd-array of ints, or None) if given, mask values
over the SG: 1 for simulated cell / 0 for not simulated
cell (nunber of entries should be equal to the number of
grid cells)
:param use_unique_neighborhood:
(bool) indicating if a unique neighborhood is used
- True: all data points are taken into account for
computing estimates and standard deviation;
in this case: parameters
searchRadiusRelative,
nneighborMax,
searchNeighborhoodSortMode,
are unused
- False: only data points within a search ellipsoid
are taken into account for computing estimates
and standard deviation (see parameters
searchRadiusRelative, nneighborMax,
searchNeighborhoodSortMode)
:param searchRadiusRelative:
(float) indicating how restricting the search ellipsoid (should be positive):
let r_i be the ranges of the covariance model along its main axes,
if x is a node to be simulated, a node y is taken into account iff it is
within the ellipsoid centered at x of half-axes searchRadiusRelative * r_i
(unused if use_unique_neighborhood is True)
Note:
- if a range is a variable parameter, its maximal value over the simulation grid
is considered
:param nneighborMax:(int) maximum number of nodes retrieved from the search ellipsoid,
set -1 for unlimited
(unused if use_unique_neighborhood is True)
:param searchNeighborhoodSortMode:
(int) indicating how to sort the search neighboorhood nodes
(neighbors), they are sorted in increasing order according to:
- searchNeighborhoodSortMode = 0:
distance in the usual axes system
- searchNeighborhoodSortMode = 1:
distance in the axes sytem supporting the covariance model
and accounting for anisotropy given by the ranges
- searchNeighborhoodSortMode = 2:
minus the evaluation of the covariance model
(unused if use_unique_neighborhood is True)
Note:
- if the covariance model has any variable parameter (non-stationary),
then searchNeighborhoodSortMode = 2 is not allowed
- if the covariance model has any range or angle set as a variable parameter,
then searchNeighborhoodSortMode must be set to 0
- greatest possible value as default
:param outputReportFile:
(string or None) name of the report file, if None: no report file
:param nthreads:
(int) number of thread(s) to use for "GeosClassicSim" program (C),
(nthreads = -n <= 0: for maximal number of threads except n,
but at least 1)
:param verbose:
(int) indicates what is displayed during the GeosClassicSim run:
- 0: mininal display
- 1: version and warning(s) encountered
- 2 (or >1): version, progress, and warning(s) encountered
:return geosclassic_output: (dict)
{'image':image,
'nwarning':nwarning,
'warnings':warnings}
image: (Img (class)) output image, with image.nv=2 variables (estimate and
standard deviation)
(image is None if mpds_geosClassicOutput->outputImage is NULL)
nwarning:
(int) total number of warning(s) encountered
(same warnings can be counted several times)
warnings:
(list of strings) list of distinct warnings encountered
(can be empty)
"""
# --- Set grid geometry and varname
# Set grid geometry
nx, ny, nz = dimension, 1, 1
sx, sy, sz = spacing, 1.0, 1.0
ox, oy, oz = origin, 0.0, 0.0
nxy = nx * ny
nxyz = nxy * nz
# spatial dimension
space_dim = 1
# Set varname
varname = 'V0'
# --- Check and prepare parameters
# cov_model
if not isinstance(cov_model, gcm.CovModel1D):
print("ERROR (ESTIM_1D): 'cov_model' (first argument) is not valid")
return None
for el in cov_model.elem:
# weight
w = el[1]['w']
if np.size(w) != 1 and np.size(w) != nxyz:
print("ERROR (ESTIM_1D): 'cov_model': weight ('w') not compatible with simulation grid")
return None
# ranges
if 'r' in el[1].keys():
r = el[1]['r']
if np.size(r) != 1 and np.size(r) != nxyz:
print("ERROR (ESTIM_1D): 'cov_model': range ('r') not compatible with simulation grid")
return None
# additional parameter (s)
if 's' in el[1].keys():
s = el[1]['s']
if np.size(s) != 1 and np.size(s) != nxyz:
print("ERROR (ESTIM_1D): 'cov_model': parameter ('s') not compatible with simulation grid")
return None
# method
# computationMode=0: GEOS_CLASSIC_OK
# computationMode=1: GEOS_CLASSIC_SK
# computationMode=2: GEOS_CLASSIC_SIM_OK
# computationMode=3: GEOS_CLASSIC_SIM_SK
# if method not in ('simple_kriging', 'ordinary_kriging'):
# print("ERROR (ESTIM_1D): 'method' is not valid")
# return None
if method == 'simple_kriging':
computationMode = 1
elif method == 'ordinary_kriging':
computationMode = 0
else:
print("ERROR (ESTIM_1D): 'method' is not valid")
return None
# data points: x, v
dataPointSet = []
# data point set from x, v
if x is not None:
x = np.asarray(x, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
| |
# Repository: icesat-2UT/PhoREAL
# -*- coding: utf-8 -*-
"""
Script to perform most basic functionalities required to get ATL03 swath
Copyright 2019 Applied Research Laboratories, University of Texas at Austin
This package is free software; the copyright holder gives unlimited
permission to copy and/or distribute, with or without modification, as
long as this notice is preserved.
Authors:
<NAME>
<NAME>
Date: September 20, 2019
"""
# Import modules
import os
import numpy as np
import time as runTime
from icesatIO import (readAtl03H5, readAtl08H5,
readAtl03DataMapping, readAtl08DataMapping,
readTruthRegionsTxtFile,
writeLas, writeKml, writeArrayToCSV, writeLog,
GtToBeamNum, GtToBeamSW,
atlRotationStruct, atl03Struct, atl08Struct)
from icesatUtils import (getNameParts, getAtl08Mapping, getLatLon2UTM,
getCoordRotFwd, getClosest, interp_vals)
# Function to read ICESat-2 data
def getAtlMeasuredSwath(atl03FilePath = False, atl08FilePath = False,
outFilePath = False, gtNum = 'gt1r', trimInfo = 'auto',
createAtl03LasFile = False, createAtl03KmlFile = False,
createAtl08KmlFile = False, createAtl03CsvFile = False,
createAtl08CsvFile = False, logFileID = False):
# pklHeaderFile = None, LAS_DIR = None):
# Initialize outputs
atl03Data = []
headerData = False
rotationData = False
# Initialize ATL08 data
atl08Data = []
atl08_lat = []
atl08_lon = []
atl08_maxCanopy = []
atl08_teBestFit = []
atl08_teMedian = []
atl08_time = []
atl08_easting = []
atl08_northing = []
atl08_crossTrack = []
atl08_alongTrack = []
atl08_classification = []
atl08_intensity = []
# pklHeader = False
# if type(pklHeaderFile) != type(None):
# pklHeader = True
# if type(LAS_DIR) == type(None):
# raise ValueError('LAS_DIR == None, please input LAS_DIR argument')
# Only execute code if input ATL03 .h5 file and output path declared
if(atl03FilePath and outFilePath):
# Start timer
timeStart = runTime.time()
# Get beam # and S/W
try:
beamNum = GtToBeamNum(atl03FilePath, gtNum)
beamStrength = GtToBeamSW(atl03FilePath, gtNum)
except:
beamNum = np.NaN
beamStrength = np.NaN
# endTry
# Print message
writeLog(' Ground Track Number: %s (Beam #%s, Beam Strength: %s)\n' %(gtNum, beamNum, beamStrength), logFileID)
# Get ATL03 file path/name
atl03FilePath = os.path.normpath(os.path.abspath(atl03FilePath))
atl03FileName = os.path.splitext(os.path.basename(atl03FilePath))[0]
# Read ATL03 data from h5 file
writeLog(' Reading ATL03 .h5 file: %s' % atl03FilePath, logFileID)
lat_all = readAtl03H5(atl03FilePath, '/heights/lat_ph', gtNum)
lon_all = readAtl03H5(atl03FilePath, '/heights/lon_ph', gtNum)
z_all = readAtl03H5(atl03FilePath, '/heights/h_ph', gtNum)
deltaTime_all = readAtl03H5(atl03FilePath, '/heights/delta_time', gtNum)
signalConf_all = readAtl03H5(atl03FilePath, '/heights/signal_conf_ph', gtNum)
zGeoidal = readAtl03H5(atl03FilePath, '/geophys_corr/geoid', gtNum)
zGeoidal_deltaTime = readAtl03H5(atl03FilePath, '/geophys_corr/delta_time', gtNum)
solar_elev = readAtl03H5(atl03FilePath, '/geolocation/solar_elevation', gtNum)
solar_time = readAtl03H5(atl03FilePath, '/geolocation/delta_time', gtNum)
atl03_ph_index_beg, atl03_segment_id, atl03_seg_deltaTime = readAtl03DataMapping(atl03FilePath, gtNum, return_delta_time=True)
try:
zGeoidal_all = interp_vals(zGeoidal_deltaTime, zGeoidal, deltaTime_all, removeThresh=True)
zMsl_all = z_all - zGeoidal_all
atl03_segment_id_interp = interp_vals(atl03_seg_deltaTime, atl03_segment_id, deltaTime_all)
atl03_segment_id_interp = np.round(atl03_segment_id_interp)
solar_elev_all = interp_vals(solar_time, solar_elev, deltaTime_all, removeThresh=True)
except:
zGeoidal_all = []
zMsl_all = np.empty(np.shape(z_all))
zMsl_all[:] = np.NaN
atl03_segment_id_interp = []
solar_elev_all = []
# endTry
badVars = []
if(len(lat_all)==0):
badVar = 'Latitude (lat_ph)'
badVars.append(badVar)
# endIf
if(len(lon_all)==0):
badVar = 'Longitude (lon_ph)'
badVars.append(badVar)
# endIf
if(len(z_all)==0):
badVar = 'Height (h_ph)'
badVars.append(badVar)
# endIf
if(len(deltaTime_all)==0):
badVar = 'Delta Time (delta_time)'
badVars.append(badVar)
# endIf
if(len(signalConf_all)==0):
badVar = 'Signal Confidence (signal_conf_ph)'
badVars.append(badVar)
# endIf
if(len(badVars)==0):
# Get time from delta time
min_delta_time = np.min(deltaTime_all)
time_all = deltaTime_all - min_delta_time
# Get track direction
if(np.abs(lat_all[-1]) >= np.abs(lat_all[0])):
trackDirection = 'Ascending'
else:
trackDirection = 'Descending'
# endIf
writeLog(' Track Direction: %s' %trackDirection, logFileID)
# Extract metadata from ATL03 file name
atl03h5Info = getNameParts(atl03FileName)
# Map ATL08 to ATL03 ground photons
if(atl08FilePath):
# Get ATL08 file path/name
atl08FilePath = os.path.normpath(os.path.abspath(atl08FilePath))
atl08FileName = os.path.splitext(os.path.basename(atl08FilePath))[0]
atl08h5Info = getNameParts(atl08FileName)
# Read ATL08 data from .h5 file
writeLog(' Reading ATL08 .h5 file: %s' % atl08FilePath, logFileID)
atl08_lat = readAtl08H5(atl08FilePath, '/land_segments/latitude', gtNum)
atl08_lon = readAtl08H5(atl08FilePath, '/land_segments/longitude', gtNum)
atl08_maxCanopy = readAtl08H5(atl08FilePath, '/land_segments/canopy/h_max_canopy_abs', gtNum)
atl08_teBestFit = readAtl08H5(atl08FilePath, '/land_segments/terrain/h_te_best_fit', gtNum)
atl08_teMedian = readAtl08H5(atl08FilePath, '/land_segments/terrain/h_te_median', gtNum)
atl08_deltaTime = readAtl08H5(atl08FilePath, '/land_segments/delta_time', gtNum)
atl08_zGeoidal = interp_vals(zGeoidal_deltaTime, zGeoidal, atl08_deltaTime, removeThresh=True)
atl08_maxCanopyMsl = atl08_maxCanopy - atl08_zGeoidal
atl08_teBestFitMsl = atl08_teBestFit - atl08_zGeoidal
atl08_teMedianMsl = atl08_teMedian - atl08_zGeoidal
atl08_classed_pc_indx, atl08_classed_pc_flag, atl08_segment_id = readAtl08DataMapping(atl08FilePath, gtNum)
atl08_signalConf = np.zeros(np.size(atl08_lat))
atl08_classification = np.zeros(np.size(atl08_lat))
atl08_intensity = np.zeros(np.size(atl08_lat))
if((len(atl08_lat)>0) and (len(atl08_lon)>0)):
# Get time from delta time
atl08_time = atl08_deltaTime - min_delta_time
# Do ATL08 to ATL03 mapping
writeLog(' Mapping ATL08 to ATL03 Ground Photons...', logFileID)
try:
classification_all = getAtl08Mapping(atl03_ph_index_beg, atl03_segment_id, atl08_classed_pc_indx, atl08_classed_pc_flag, atl08_segment_id)
except:
writeLog(' WARNING: Could not map ATL08 to ATL03 Ground Photons.', logFileID)
classification_all = atl08_classification
# endTry
# Get length to trim data by
class_length = len(classification_all)
lat_length = len(lat_all)
data_length = np.min([class_length, lat_length])
# Trim ATL03 data down to size of classification array
atl03_lat = lat_all[0:data_length]
atl03_lon = lon_all[0:data_length]
atl03_z = z_all[0:data_length]
atl03_zMsl = zMsl_all[0:data_length]
atl03_time = time_all[0:data_length]
atl03_deltaTime = deltaTime_all[0:data_length]
atl03_signalConf = signalConf_all[0:data_length]
atl03_classification = classification_all[0:data_length]
atl03_intensity = np.zeros(np.size(atl03_lat))
atl03_solar_elev = solar_elev_all[0:data_length]
atl03_segment_id_interp = atl03_segment_id_interp[0:data_length]
dataIsMapped = True
else:
writeLog(' WARNING: ATL08 file does not contain usable data.', logFileID)
atl08FilePath = False
# Store data
atl03_lat = lat_all
atl03_lon = lon_all
atl03_z = z_all
atl03_zMsl = zMsl_all
atl03_time = time_all
atl03_deltaTime = deltaTime_all
atl03_signalConf = signalConf_all
atl03_classification = np.zeros(np.size(atl03_lat))
atl03_intensity = np.zeros(np.size(atl03_lat))
atl03_solar_elev = solar_elev_all
atl03_segment_id_interp = atl03_segment_id_interp
dataIsMapped = False
# endIf
else:
# Message to screen
writeLog(' Not Mapping ATL08 to ATL03 Ground Photons', logFileID)
# Store data
atl03_lat = lat_all
atl03_lon = lon_all
atl03_z = z_all
atl03_zMsl = zMsl_all
atl03_time = time_all
atl03_deltaTime = deltaTime_all
atl03_signalConf = signalConf_all
atl03_classification = np.zeros(np.size(atl03_lat))
atl03_intensity = np.zeros(np.size(atl03_lat))
atl03_solar_elev = solar_elev_all
atl03_segment_id_interp = atl03_segment_id_interp
dataIsMapped = False
# endIf
# Get trim options
trimParts = trimInfo.split(',')
trimMode = trimParts[0]
trimType = 'None'
if(('manual' in trimMode.lower()) and (len(trimParts) > 1)):
trimType = trimParts[1]
trimMin = float(trimParts[2])
trimMax = float(trimParts[3])
# endIf
# If selected to manually trim data, do this first
if('manual' in trimMode.lower()):
# Trim by lat or time
if('lonlat' in trimType.lower()):
lonMin, lonMax = float(trimParts[2]), float(trimParts[3])
latMin, latMax = float(trimParts[4]), float(trimParts[5])
writeLog(' Manual Trim Mode (Min Lon: %s, Max Lon: %s)' % (lonMin, lonMax), logFileID)
writeLog(' Manual Trim Mode (Min Lat: %s, Max Lat: %s)' % (latMin, latMax), logFileID)
atl03IndsToKeepLon = (atl03_lon >= lonMin) & (atl03_lon <= lonMax)
atl03IndsToKeepLat = (atl03_lat >= latMin) & (atl03_lat <= latMax)
atl03IndsToKeep = atl03IndsToKeepLon & atl03IndsToKeepLat
if(atl08FilePath):
atl08IndsToKeepLon = (atl08_lon >= lonMin) & (atl08_lon <= lonMax)
atl08IndsToKeepLat = (atl08_lat >= latMin) & (atl08_lat <= latMax)
atl08IndsToKeep = atl08IndsToKeepLon & atl08IndsToKeepLat
elif('lat' in trimType.lower()):
writeLog(' Manual Trim Mode (Min Lat: %s, Max Lat: %s)' % (trimMin, trimMax), logFileID)
atl03IndsToKeep = (atl03_lat >= trimMin) & (atl03_lat <= trimMax)
if(atl08FilePath):
atl08IndsToKeep = (atl08_lat >= trimMin) & (atl08_lat <= trimMax)
# endIf
elif('lon' in trimType.lower()):
writeLog(' Manual Trim Mode (Min Lon: %s, Max Lon: %s)' % (trimMin, trimMax), logFileID)
atl03IndsToKeep = (atl03_lon >= trimMin) & (atl03_lon <= trimMax)
if(atl08FilePath):
atl08IndsToKeep = (atl03_lon >= trimMin) & (atl03_lon <= trimMax)
# endIf
elif('time' in trimType.lower()):
writeLog(' Manual Trim Mode (Min Time: %s, Max Time: %s)' % (trimMin, trimMax), logFileID)
atl03IndsToKeep = (atl03_time >= trimMin) & (atl03_time <= trimMax)
if(atl08FilePath):
atl08IndsToKeep = (atl08_time >= trimMin) & (atl08_time <= trimMax)
# endIf
else:
writeLog(' Manual Trim Mode is Missing Args, Not Trimming Data', logFileID)
atl03IndsToKeep = np.ones(np.shape(atl03_lat), dtype = bool)
if(atl08FilePath):
atl08IndsToKeep = np.ones(np.shape(atl08_lat), dtype = bool)
# endIf
# endif
# Trim ATL03 data
if atl03IndsToKeep.sum() == 0:
# no data left given manual constraints
return atl03Data, atl08Data, headerData, rotationData
# endIf
atl03_lat = atl03_lat[atl03IndsToKeep]
atl03_lon = atl03_lon[atl03IndsToKeep]
atl03_z = atl03_z[atl03IndsToKeep]
atl03_zMsl = atl03_zMsl[atl03IndsToKeep]
atl03_time = atl03_time[atl03IndsToKeep]
atl03_deltaTime = atl03_deltaTime[atl03IndsToKeep]
atl03_signalConf = atl03_signalConf[atl03IndsToKeep]
atl03_classification = atl03_classification[atl03IndsToKeep]
atl03_intensity = atl03_intensity[atl03IndsToKeep]
atl03_solar_elev = atl03_solar_elev[atl03IndsToKeep]
atl03_segment_id_interp = atl03_segment_id_interp[atl03IndsToKeep]
# Trim ATL08 data
if(atl08FilePath):
atl08_lat = atl08_lat[atl08IndsToKeep]
atl08_lon = atl08_lon[atl08IndsToKeep]
atl08_maxCanopy = atl08_maxCanopy[atl08IndsToKeep]
atl08_teBestFit = atl08_teBestFit[atl08IndsToKeep]
atl08_teMedian = atl08_teMedian[atl08IndsToKeep]
atl08_maxCanopyMsl = atl08_maxCanopyMsl[atl08IndsToKeep]
atl08_teBestFitMsl = atl08_teBestFitMsl[atl08IndsToKeep]
atl08_teMedianMsl = atl08_teMedianMsl[atl08IndsToKeep]
atl08_time = atl08_time[atl08IndsToKeep]
atl08_deltaTime = atl08_deltaTime[atl08IndsToKeep]
atl08_signalConf = atl08_signalConf[atl08IndsToKeep]
atl08_classification = atl08_classification[atl08IndsToKeep]
atl08_intensity = atl08_intensity[atl08IndsToKeep]
# endIf
elif('none' in trimMode.lower()):
writeLog(' Trim Mode: None', logFileID)
# endIf
| |
import enum
import os
from typing import Optional, Union, Tuple, List
from PIL import Image, ImageFont
from platypush.plugins import Plugin, action
class DeviceInterface(enum.Enum):
    """Serial bus type used to talk to the display (see ``LumaOledPlugin``)."""
    I2C = 'i2c'
    SPI = 'spi'
class DeviceSlot(enum.IntEnum):
    """Device slot: 0 = back, 1 = front (passed as the SPI ``device`` number)."""
    BACK = 0
    FRONT = 1
class DeviceRotation(enum.IntEnum):
    """Display rotation in 90-degree steps (value N means N * 90 degrees)."""
    ROTATE_0 = 0
    ROTATE_90 = 1
    ROTATE_180 = 2
    ROTATE_270 = 3
class LumaOledPlugin(Plugin):
"""
Plugin to interact with small OLED-based RaspberryPi displays through the luma.oled driver.
Requires:
* **luma.oled** (``pip install git+https://github.com/rm-hull/luma.oled``)
"""
    def __init__(self,
                 interface: str,
                 device: str,
                 port: int = 0,
                 slot: int = DeviceSlot.BACK.value,
                 width: int = 128,
                 height: int = 64,
                 rotate: int = DeviceRotation.ROTATE_0.value,
                 gpio_DC: int = 24,
                 gpio_RST: int = 25,
                 bus_speed_hz: int = 8000000,
                 address: int = 0x3c,
                 cs_high: bool = False,
                 transfer_size: int = 4096,
                 spi_mode: Optional[int] = None,
                 font: Optional[str] = None,
                 font_size: int = 10,
                 **kwargs):
        """
        :param interface: Serial interface the display is connected to (``spi`` or ``i2c``).
        :param device: Display chipset type (supported: ssd1306 ssd1309, ssd1322, ssd1325, ssd1327, ssd1331, ssd1351, ssd1362, sh1106).
        :param port: Device port (usually 0 or 1).
        :param slot: Device slot (0 for back, 1 for front).
        :param width: Display width.
        :param height: Display height.
        :param rotate: Display rotation (0 for no rotation, 1 for 90 degrees, 2 for 180 degrees, 3 for 270 degrees).
        :param gpio_DC: [SPI only] GPIO PIN used for data (default: 24).
        :param gpio_RST: [SPI only] GPIO PIN used for RST (default: 25).
        :param bus_speed_hz: [SPI only] Bus speed in Hz (default: 8 MHz).
        :param address: [I2C only] Device address (default: 0x3c).
        :param cs_high: [SPI only] Set to True if the SPI chip select is high.
        :param transfer_size: [SPI only] Maximum amount of bytes to transfer in one go (default: 4096).
        :param spi_mode: [SPI only] SPI mode as two bit pattern of clock polarity and phase [CPOL|CPHA], 0-3 (default:None).
        :param font: Path to a default TTF font used to display the text.
        :param font_size: Font size - it only applies if ``font`` is set.
        """
        # luma is imported lazily so the module can be loaded even when the
        # optional luma.oled dependency is not installed
        import luma.core.interface.serial
        import luma.oled.device
        from luma.core.render import canvas
        super().__init__(**kwargs)
        iface_name = interface
        # DeviceInterface(interface) also validates the string (raises ValueError
        # on anything but 'spi'/'i2c'); the result names a class in
        # luma.core.interface.serial
        interface = getattr(luma.core.interface.serial, DeviceInterface(interface).value)
        if iface_name == DeviceInterface.SPI.value:
            self.serial = interface(port=port, device=slot, cs_high=cs_high, gpio_DC=gpio_DC,
                                    gpio_RST=gpio_RST, bus_speed_hz=bus_speed_hz,
                                    transfer_size=transfer_size, spi_mode=spi_mode)
        else:
            self.serial = interface(port=port, address=address)
        # resolve the chipset driver class by name, then instantiate it
        device = getattr(luma.oled.device, device)
        self.device = device(self.serial, width=width, height=height, rotate=rotate)
        self.canvas = canvas(self.device)
        # self.font must exist before calling _get_font, which falls back to
        # self.font when no font path is given
        self.font = None
        self.font_size = font_size
        self.font = self._get_font(font, font_size)
def _get_font(self, font: Optional[str] = None, font_size: Optional[int] = None):
if font:
return ImageFont.truetype(os.path.abspath(os.path.expanduser(font)), font_size or self.font_size)
return self.font
    @action
    def clear(self):
        """
        clear the display canvas.
        """
        # lazy import, consistent with __init__
        from luma.core.render import canvas
        self.device.clear()
        # rebuild the canvas after clearing the device; presumably the old
        # canvas holds stale drawing state -- TODO confirm against luma docs
        del self.canvas
        self.canvas = canvas(self.device)
@action
def text(self, text: str, pos: Union[Tuple[int], List[int]] = (0, 0),
fill: str = 'white', font: Optional[str] = None, font_size: Optional[int] = None, clear: bool = False):
"""
Draw text on the canvas.
:param text: Text to be drawn.
:param pos: Position of the text.
:param fill: Text color (default: ``white``).
:param font: ``font`` type override.
:param font_size: ``font_size`` override.
:param clear: Set to True if you want to clear the canvas before writing the text (default: False).
"""
if clear:
self.clear()
font = self._get_font(font, font_size)
with self.canvas as draw:
draw.text(pos, text, fill=fill, font=font)
@action
def rectangle(self, xy: Optional[Union[Tuple[int], List[int]]] = None,
fill: Optional[str] = None, outline: Optional[str] = None,
width: int = 1, clear: bool = False):
"""
Draw a rectangle on the canvas.
:param xy: Two points defining the bounding box, either as [(x0, y0), (x1, y1)] or [x0, y0, x1, y1]. Default: bounding box of the device.
:param fill: Fill color - can be ``black`` or ``white``.
:param outline: Outline color - can be ``black`` or ``white``.
:param width: Figure width in pixels (default: 1).
:param clear: Set to True if you want to clear the canvas before writing the text (default: False).
"""
if clear:
self.clear()
if not xy:
xy = self.device.bounding_box
with self.canvas as draw:
draw.rectangle(xy, outline=outline, fill=fill, width=width)
@action
def arc(self, start: int, end: int, xy: Optional[Union[Tuple[int], List[int]]] = None,
fill: Optional[str] = None, outline: Optional[str] = None,
width: int = 1, clear: bool = False):
"""
Draw an arc on the canvas.
:param start: Starting angle, in degrees (measured from 3 o' clock and increasing clockwise).
:param end: Ending angle, in degrees (measured from 3 o' clock and increasing clockwise).
:param xy: Two points defining the bounding box, either as [(x0, y0), (x1, y1)] or [x0, y0, x1, y1]. Default: bounding box of the device.
:param fill: Fill color - can be ``black`` or ``white``.
:param outline: Outline color - can be ``black`` or ``white``.
:param width: Figure width in pixels (default: 1).
:param clear: Set to True if you want to clear the canvas before writing the text (default: False).
"""
if clear:
self.clear()
if not xy:
xy = self.device.bounding_box
with self.canvas as draw:
draw.arc(xy, start=start, end=end, outline=outline, fill=fill, width=width)
@action
def chord(self, start: int, end: int, xy: Optional[Union[Tuple[int], List[int]]] = None,
fill: Optional[str] = None, outline: Optional[str] = None,
width: int = 1, clear: bool = False):
"""
Same as ``arc``, but it connects the end points with a straight line.
:param start: Starting angle, in degrees (measured from 3 o' clock and increasing clockwise).
:param end: Ending angle, in degrees (measured from 3 o' clock and increasing clockwise).
:param xy: Two points defining the bounding box, either as [(x0, y0), (x1, y1)] or [x0, y0, x1, y1]. Default: bounding box of the device.
:param fill: Fill color - can be ``black`` or ``white``.
:param outline: Outline color - can be ``black`` or ``white``.
:param width: Figure width in pixels (default: 1).
:param clear: Set to True if you want to clear the canvas before writing the text (default: False).
"""
if clear:
self.clear()
if not xy:
xy = self.device.bounding_box
with self.canvas as draw:
draw.chord(xy, start=start, end=end, outline=outline, fill=fill, width=width)
@action
def pieslice(self, start: int, end: int, xy: Optional[Union[Tuple[int], List[int]]] = None,
fill: Optional[str] = None, outline: Optional[str] = None,
width: int = 1, clear: bool = False):
"""
Same as ``arc``, but it also draws straight lines between the end points and the center of the bounding box.
:param start: Starting angle, in degrees (measured from 3 o' clock and increasing clockwise).
:param end: Ending angle, in degrees (measured from 3 o' clock and increasing clockwise).
:param xy: Two points defining the bounding box, either as [(x0, y0), (x1, y1)] or [x0, y0, x1, y1]. Default: bounding box of the device.
:param fill: Fill color - can be ``black`` or ``white``.
:param outline: Outline color - can be ``black`` or ``white``.
:param width: Figure width in pixels (default: 1).
:param clear: Set to True if you want to clear the canvas before writing the text (default: False).
"""
if clear:
self.clear()
if not xy:
xy = self.device.bounding_box
with self.canvas as draw:
draw.pieslice(xy, start=start, end=end, outline=outline, fill=fill, width=width)
@action
def ellipse(self, xy: Optional[Union[Tuple[int], List[int]]] = None,
fill: Optional[str] = None, outline: Optional[str] = None,
width: int = 1, clear: bool = False):
"""
Draw an ellipse on the canvas.
:param xy: Two points defining the bounding box, either as [(x0, y0), (x1, y1)] or [x0, y0, x1, y1]. Default: bounding box of the device.
:param fill: Fill color - can be ``black`` or ``white``.
:param outline: Outline color - can be ``black`` or ``white``.
:param width: Figure width in pixels (default: 1).
:param clear: Set to True if you want to clear the canvas before writing the text (default: False).
"""
if clear:
self.clear()
if not xy:
xy = self.device.bounding_box
with self.canvas as draw:
draw.ellipse(xy, outline=outline, fill=fill, width=width)
@action
def line(self, xy: Optional[Union[Tuple[int], List[int]]] = None,
fill: Optional[str] = None, outline: Optional[str] = None,
width: int = 1, curve: bool = False, clear: bool = False):
"""
Draw a line on the canvas.
:param xy: Sequence of either 2-tuples like [(x, y), (x, y), ...] or numeric values like [x, | |
rusage[6]
self.majflt = rusage[7]
    def record_running(self, cur_time):
        """Mark this run as started at cur_time and open an in-memory buffer
        for every output descriptor we were asked to capture."""
        assert (self.time_start is None), self.time_start
        self.time_start = cur_time
        self.status = run_status.running
        # per-fd record is (inline buffer, spill filename, spill file object);
        # filename/file stay None until the buffer overflows (see add_io)
        for fd in self.job_output:
            self.io[fd] = (cStringIO.StringIO(), None, None)
    def add_io(self, fd, payload, eof):
        """
        Called when we got notification that the process emitted
        something (payload) from its file descriptor (fd).
        eof = 1 iff this is the last data from the fd.

        Small outputs are kept in memory; once a buffer exceeds
        max_inline_io it is spilled into a file under self.io_dir.
        On EOF the record moves from self.io to self.done_io.
        """
        # inline : string IO object
        # filename : filename or None
        # wp : file object for filename or None
        if self.work.server.logfp:
            self.work.server.LOG("add_io : run=%s fd=%d eof=%d payload=[%s]\n"
                                 % (self, fd, eof, payload))
        inline,filename,wp = self.io[fd]
        if payload != "":
            # record whatever is output
            inline.write(payload)
            s = inline.getvalue()
            max_inline_io = 128 * 1024          # 128KB
            if len(s) > max_inline_io:
                # on-memory data too large, flush into file
                if wp is None:
                    # spill file is created lazily, once per fd
                    filename = "run_%d_%d_%d" % (self.work_idx, self.run_idx, fd)
                    filename = os.path.join(self.io_dir, filename)
                    wp = open(filename, "wb")
                wp.write(s)
                wp.flush()
                # and free the memory
                # NOTE(review): truncate() with no argument cuts at the
                # CURRENT position, which after write() is the end of the
                # buffer -- this looks like a no-op, and the later
                # wp.write(s) at EOF would then re-write already-flushed
                # bytes.  Possibly truncate(0) was intended -- verify
                # against cStringIO semantics before changing.
                inline.truncate()
                self.io[fd] = (inline, filename, wp)
        if eof:
            # got EOF, so we indicate it by deleting
            # the entry from io and move the record
            # to done_io
            s = inline.getvalue()
            if wp:
                wp.write(s)
                wp.close()
            inline.truncate()
            # mark io from fd has done
            del self.io[fd]
            s = inline.getvalue()
            self.done_io[fd] = (s, filename)
        if self.work.server.conf.deliver_job_output:
            # also stream the chunk back through the owning work
            self.work.add_io(fd, payload, eof, self.man_name)
def get_io_filenames(self):
fds = {}
for fd in self.io.keys(): fds[fd] = None
for fd in self.done_io.keys(): fds[fd] = None
fds = fds.keys()
fds.sort()
S = {}
for fd in fds:
x = self.io.get(fd)
if x:
_,filename,_ = x
else:
_,filename = self.done_io.get(fd)
S[fd] = filename
return S
def get_io_inline(self):
"""
FIXIT: should merge stdio and stderr
"""
fds = {}
for fd in self.io.keys(): fds[fd] = None
for fd in self.done_io.keys(): fds[fd] = None
fds = fds.keys()
fds.sort()
S = {}
for fd in fds:
x = self.io.get(fd)
if x:
inline,_,_ = x
s = inline.getvalue()
else:
s,_ = self.done_io.get(fd)
S[fd] = s
return S
def is_finished(self):
"""
check if this guy has really finished
('wait'ed by gxpd and their out fds closed)
"""
if len(self.io) > 0: return 0
if self.status == run_status.queued: return 0
if self.status == run_status.running: return 0
return 1
    def record_no_throw(self, cur_time):
        """Record that this run was never dispatched ('no_throw') and
        finalize it as of cur_time."""
        self.status = run_status.no_throw
        self.finish(cur_time)
    def finish(self, cur_time):
        """Stamp end time, derive elapsed time, and hand the outcome to the
        owning work, which decides whether to retry or report completion.
        Returns whatever Work.finish_or_retry returns."""
        self.time_end = cur_time
        if self.time_start is None:
            # none if the job has not started.
            # we consider them just started now
            self.time_start = cur_time
        self.time_since_start = self.time_end - self.time_start
        return self.work.finish_or_retry(self.status, self.exit_status,
                                         self.term_sig, self.man_name)
    def sync(self, cur_time):
        """Refresh this run's elapsed time and push its current state to the
        works database."""
        self.time_since_start = cur_time - self.time_start
        self.work.server.works.update_run(self)
# db-related stuff
# following fields of this object will go to database/csv file/html
# for field x for which get_td_x method exists,
# get_td_worker_time method is called and its return value used .
# so result column will be obtained by self.get_td_result(), etc.
db_fields_1 = [ "run_idx", "result", "time_since_start", "man_name" ]
db_fields_2 = [ "time_start", "time_end",
"worker_time_start", "worker_time_end", "worker_time",
"utime", "stime", "maxrss", "ixrss", "idrss", "isrss",
"minflt", "majflt", "io", "io_filename" ]
def get_td_result(self):
if self.status == run_status.finished:
if self.exit_status is not None:
if self.exit_status == 0:
return ("job_success", "exit 0")
else:
return ("job_failed", ("exit %d" % self.exit_status))
elif self.term_sig is not None:
return ("job_killed", ("killed %d" % self.term_sig))
else:
assert 0, (self.status, self.exit_status, self.term_sig)
else:
return (("job_%s" % self.status), self.status)
def get_td_worker_time(self):
s = self.worker_time_start
e = self.worker_time_end
if s is None or e is None:
assert s is None
assert e is None
return "-"
else:
return e - s
def get_td_io(self):
io = []
for fd,(inline,_,_) in self.io.items():
io.append(inline.getvalue())
for fd,(inline_s,_) in self.done_io.items():
io.append(inline_s)
x = "".join(io)
# if len(x) == 0: return "<br>"
return x
    def get_td_io_filename(self):
        """Return an HTML fragment with one <a> link per spilled output file
        of the in-flight descriptors, or "-" when none exist.

        NOTE(review): unlike get_io_filenames, this only looks at self.io --
        filenames of descriptors already moved to done_io are omitted.
        Possibly intentional (links only for still-open files), possibly an
        oversight; verify before relying on it.
        """
        filenames = []
        for fd,(_,filename,_) in self.io.items():
            if filename is not None:
                filenames.append('<a href="%s">%d</a>'
                                 % (filename, fd))
        x = ",".join(filenames)
        if len(x) == 0: return "-"
        return x
class Work:
    """
    a work or a job sent from clients
    """
    # columns exported to the database/csv/html reports (cf. Run.db_fields_*)
    db_fields_1 = [ "work_idx", "cmd", ]
    db_fields_2 = [ "pid", "dirs", "time_req" ]
    def init(self, cmd, pid, dirs, envs, req, affinity):
        """Populate the client-supplied fields; returns self for chaining."""
        # command line (string)
        self.cmd = cmd
        # pid (or None if not applicable/relevant)
        self.pid = pid
        # directories that should be tried for job's cwd
        self.dirs = dirs
        # environments that must be set for the job
        # (copied so later mutation does not affect the caller's dict)
        self.envs = envs.copy()
        # resource requirement of the work
        self.requirement = req
        self.affinity = affinity
        # index handed to the next Run created for this work (see make_run)
        self.next_run_idx = 0
        return self
    def init2(self, work_idx, cur_time, server):
        """Second-stage, server-side initialization; returns self for chaining."""
        self.envs["GXP_JOBSCHED_WORK_IDX"] = ("%d" % work_idx)
        self.envs["GXP_MAKE_WORK_IDX"] = ("%d" % work_idx)
        self.work_idx = work_idx
        self.time_req = cur_time        # time requested
        # volatile fields
        self.server = server
        return self
    def make_run(self):
        """
        create a new run for this work, queue it, and record it in the DB
        """
        run_idx = self.next_run_idx
        self.next_run_idx = run_idx + 1
        run = Run().init(self, run_idx,
                         self.server.conf.state_dir,
                         self.server.conf.job_output)
        self.server.runs_todo.append(run)
        # add run to DB
        self.server.works.add_run(self.work_idx, run)
    def retry(self):
        """Log and schedule another run of this work (used when a worker
        died or left before the run completed)."""
        msg = ("work '%s' will be retried\n" % self.cmd)
        if self.server.logfp: self.server.LOG(msg)
        self.make_run()
    def add_io(self, fd, payload, eof, man_name):
        """Forward a chunk of job output to the work generator (wkg)."""
        self.server.wkg.add_io(self.work_idx, fd, payload, eof, man_name)
    def finish_or_retry(self, status, exit_status, term_sig, man_name):
        """Retry the work if its worker disappeared; otherwise report it
        finished.  Returns 0 on retry, else finish_work's return value."""
        if status == run_status.worker_died \
           or status == run_status.worker_left:
            self.retry()
            return 0
        else:
            msg = ("work '%s' finished\n" % self.cmd)
            if self.server.logfp: self.server.LOG(msg)
            return self.server.wkg.finish_work(self.work_idx, exit_status, term_sig, man_name)
#
# work generation framework
#
#
# format of stream (example)
#
# echo hello
# echo hoge
# hostname
#
# env: x=y
# cmd: hostname
# end:
#
# stream ::= element*
# element ::= continuation_line* last_line
# continuation_line ::= char* '\' NEWLINE
# last_line ::= NEWLINE | char* any_char_but_backslash NEWLINE
#
# element is either:
# env: ... | cwd: ... | cmd: ... | end: | ...
#
class work_stream_base:
    def __init__(self, server):
        """Bind the stream to the owning server and reset parse state."""
        self.server = server
        self.leftover = ""              # bytes received but not yet split into elements
        self.closed = 0                 # set to 1 when no more data can arrive
        # FIXIT: ugly reuse of tokenizer token_val
        self.tk = jobsched_tokenizer()
def close(self):
should_be_implemented_in_subclass
    def get_pkt(self):
        """
        read some bytes after select says something is readable.
        shall not block (so readpkt is called only once per invocation).
        on return, all data received so far accumulates in self.leftover
        (to be split into elements by read_elements).
        self.closed flag is set iff there is no chance that more data
        will come.
        readpkt itself is provided by the concrete subclass.
        """
        assert self.closed == 0
        pkt = self.readpkt(1024 * 1024)
        if pkt == "":
            # empty read <=> end of stream
            # Es("get_pkt: readpkt returned empty\n")
            # call .close() later after we unregister
            # the descriptor. this is necessary because
            # socket.fileno() raises exception when it is already
            # closed
            # self.close()
            self.closed = 1
        else:
            self.leftover = self.leftover + pkt
    def read_elements(self):
        """Split self.leftover into complete elements and return them.

        An element is one or more backslash-continued lines followed by a
        line that ends in a newline without a trailing backslash (see the
        grammar in the comment block above this class).  An incomplete
        trailing element is kept in self.leftover for the next call --
        unless the stream is closed, in which case it is returned as-is.
        """
        self.server.LOG("read_elements:\n")
        # splitlines(1) keeps the line terminators, which the logic below
        # inspects to detect continuations
        lines = self.leftover.splitlines(1)
        elements = []
        e = []
        for line in lines:
            # Es("line : [%s]\n" % line)
            e.append(line)
            # handle continuation lines: the element ends only on a line
            # that (a) does not end in backslash+newline in any of the
            # \n / \r / \r\n conventions and (b) actually has a terminator
            if line[-2:] != "\\\n" \
               and line[-2:] != "\\\r" \
               and line[-3:] != "\\\r\n" \
               and (line[-1:] == "\n" or
                    line[-1:] == "\r"):
                x = "".join(e)
                # Es("elements.append(%s)\n" % x)
                elements.append(x)
                e = []
        # whatever did not form a complete element is carried over
        self.leftover = "".join(e)
        # this is the last bytes
        if self.closed and self.leftover != "":
            elements.append(self.leftover)
            self.leftover = ""
        self.server.LOG("read_elements: %d elements returned\n"
                        % len(elements))
        return elements
def translate_dir(self, cwd):
trans = self.server.conf.translate_dir
for src,dsts in trans:
# look for src that match dire
# canonicalize src so both end with "/"
if src[-1:] != os.path.sep: src = src + os.path.sep
if cwd[-1:] != os.path.sep: cwd = cwd + os.path.sep
n = len(src)
if cwd[:n] == src:
dirs = []
for dst in dsts:
new_dir = os.path.normpath(os.path.join(dst, cwd[n:]))
# remove trailing "/"
if new_dir != os.path.sep and new_dir[-1:] == os.path.sep:
new_dir = new_dir[:-1]
dirs.append(new_dir)
return dirs
if cwd != os.path.sep and cwd[-1:] == os.path.sep:
cwd = cwd[:-1]
return [ cwd ]
def translate_dirs(self, cwds):
dirs = []
for cwd in cwds:
for d in self.translate_dir(cwd):
dirs.append(d)
return dirs
def read_works(self):
"""
assume data is ready in self.lines + self.partial_line
"""
self.server.LOG("read_works:\n")
self.get_pkt()
elements = self.read_elements()
works = []
cmd = None
pid = None
dirs = []
envs = {}
requirement = { "cpu" : 1 } # bare minimum default
affinity = {}
leftover_elements | |
in indigo.devices.iter()]
# Variables
elif values_dict.get('editSourceFilter', 'A') == "V":
[list_.append(t) for t in [(u"-3", u"%%separator%%"),
(u"-4", u"%%disabled:Variables%%"),
(u"-5", u"%%separator%%")
]
]
[list_.append((var.id, u"{name}".format(name=var.name))) for var in indigo.variables.iter()]
# Devices and variables
else:
[list_.append(t) for t in [(u"-1", u"%%disabled:Devices%%"), (u"-2", u"%%separator%%")]]
[list_.append((dev.id, u"{name}".format(name=dev.name))) for dev in indigo.devices.iter()]
[list_.append(t) for t in [(u"-3", u"%%separator%%"),
(u"-4", u"%%disabled:Variables%%"),
(u"-5", u"%%separator%%")
]
]
[list_.append((var.id, u"{name}".format(name=var.name))) for var in indigo.variables.iter()]
return list_
# =============================================================================
def get_csv_device_list(self, fltr="", values_dict=None, type_id="", target_id=0):
"""
Return a list of CSV Engine devices set to manual refresh
The get_csv_device_list() method returns a list of CSV Engine devices with a
manual refresh interval.
:param unicode fltr:
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param target_id:
:return:
"""
# Return a list of tuples that contains only CSV devices set to manual refresh
# (refreshInterval = 0) for config menu.
return [(dev.id, dev.name) for dev in indigo.devices.iter("self") if
dev.deviceTypeId == "csvEngine" and dev.pluginProps['refreshInterval'] == "0"]
# =============================================================================
def get_csv_source_list(self, fltr="", values_dict=None, type_id="", target_id=0):
"""
Return a list of CSV sources from CSV Engine devices set to manual refresh
The get_csv_source_list() method returns a list of CSV sources for the target
CSV Engine device.
:param unicode fltr:
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param target_id:
:return:
"""
if not values_dict:
return []
# Once user selects a device ( see get_csv_device_list() ), populate the dropdown
# menu.
else:
target_device = int(values_dict['targetDevice'])
dev = indigo.devices[target_device]
dev_dict = ast.literal_eval(dev.pluginProps['columnDict'])
return [(k, dev_dict[k][0]) for k in dev_dict]
# =============================================================================
def deviceStateValueListAdd(self, type_id="", values_dict=None, dev_id=0, target_id=0):
"""
Formulates list of device states for CSV engine
Once a user selects a device or variable within the CSV engine configuration
dialog, we need to obtain the relevant device states to chose from. If the
user selects a variable, we simply return the variable value identifier. The
return is a list of tuples of the form:
-----
:param unicode type_id:
:param class 'indigo.Dict' values_dict:
:param int dev_id:
:param int target_id:
"""
if values_dict['addSource'] != u'':
try:
# User has selected an Indigo device element and then set the filter to Variables only.
if int(values_dict['addSource']) in indigo.devices and values_dict['addSourceFilter'] == "V":
return [('None', u'Please select a data source first')]
# User has selected an Indigo device element and the filter is set to Devices only or Show All.
elif int(values_dict['addSource']) in indigo.devices and values_dict['addSourceFilter'] != "V":
dev = indigo.devices[int(values_dict['addSource'])]
return [x for x in dev.states.keys() if ".ui" not in x]
elif int(values_dict['addSource']) in indigo.variables and values_dict['addSourceFilter'] != "D":
return [('value', 'value')]
elif int(values_dict['addSource']) in indigo.variables and values_dict['addSourceFilter'] == "D":
return [('None', u'Please select a data source first')]
except ValueError:
return [('None', u'Please select a data source first')]
else:
return [('None', u'Please select a data source first')]
# =============================================================================
def deviceStateValueListEdit(self, type_id="", values_dict=None, dev_id=0, target_id=0):
"""
Formulates list of device states for CSV engine
Once a user selects a device or variable within the CSV engine configuration
dialog, we need to obtain the relevant device states to chose from. If the
user selects a variable, we simply return the variable value identifier. The
return is a list of tuples of the form:
-----
:param unicode type_id:
:param class 'indigo.Dict' values_dict:
:param int dev_id:
:param int target_id:
"""
if values_dict['editSource'] != u'':
try:
# User has selected an Indigo device element and then set the filter to Variables only.
if int(values_dict['editSource']) in indigo.devices and values_dict['editSourceFilter'] == "V":
return [('None', u'Please select a data source first')]
# User has selected an Indigo device element and the filter is set to Devices only or Show All.
elif int(values_dict['editSource']) in indigo.devices and values_dict['editSourceFilter'] != "V":
dev = indigo.devices[int(values_dict['editSource'])]
return [x for x in dev.states.keys() if ".ui" not in x]
elif int(values_dict['editSource']) in indigo.variables and values_dict['editSourceFilter'] != "D":
return [('value', 'value')]
elif int(values_dict['editSource']) in indigo.variables and values_dict['editSourceFilter'] == "D":
return [('None', u'Please select a data source first')]
except ValueError:
return [('None', u'Please select a data source first')]
else:
return [('None', u'Please select a data source first')]
# =============================================================================
def fix_rgb(self, color):
return r"#{c}".format(c=color.replace(' ', '').replace('#', ''))
# =============================================================================
def format_markers(self, p_dict):
"""
Format matplotlib markers
The Devices.xml file cannot contain '<' or '>' as a value, as this conflicts
with the construction of the XML code. Matplotlib needs these values for
select built-in marker styles, so we need to change them to what MPL is
expecting.
-----
:param p_dict:
"""
markers = ('area1Marker', 'area2Marker', 'area3Marker', 'area4Marker', 'area5Marker', 'area6Marker',
'area7Marker', 'area8Marker', 'line1Marker', 'line2Marker', 'line3Marker', 'line4Marker',
'line5Marker', 'line6Marker', 'line7Marker', 'line8Marker', 'group1Marker', 'group2Marker',
'group3Marker', 'group4Marker')
marker_dict = {"PIX": ",", "TL": "<", "TR": ">"}
for marker in markers:
try:
if p_dict[marker] in marker_dict.keys():
p_dict[marker] = marker_dict[p_dict[marker]]
except KeyError:
pass
return p_dict
# =============================================================================
def generatorDeviceStates(self, fltr="", values_dict=None, type_id="", target_id=0):
"""
Returns device states list or variable 'value'.
Returns a list of device states or 'value' for a variable, based on ID
transmitted in the filter attribute. The generatorDeviceStates() method
returns a list of device states each list includes only states for the
selected device. If a variable id is provided, the list returns one
element. The lists are generated in the DLFramework module.
Returns:
[('dev state name', 'dev state name'), ('dev state name', 'dev state name')]
or
[('value', 'value')]
-----
:param unicode fltr:
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param int target_id:
"""
return self.Fogbert.generatorStateOrValue(values_dict[fltr])
# =============================================================================
    def generatorDeviceList(self, fltr="", values_dict=None, type_id="", target_id=0):
        """
        Returns a list of Indigo devices.
        Provides a list of Indigo devices for various dropdown menus. The method is
        agnostic as to whether the device is enabled or disabled. The method returns
        a list of tuples in the form::
            [(dev.id, dev.name), (dev.id, dev.name)].
        The list is generated within the DLFramework module.
        -----
        :param unicode fltr:
        :param class 'indigo.Dict' values_dict:
        :param unicode type_id:
        :param int target_id:
        """
        return self.Fogbert.deviceList()
# =============================================================================
    def latestDevVarList(self, fltr="", values_dict=None, type_id="", target_id=0):
        """
        Serve the cached device/variable list for dropdown menus.
        Returns ``self.dev_var_list`` as-is; presumably a list of (id, name)
        tuples refreshed elsewhere in the plugin -- confirm where the cache
        is rebuilt before relying on its freshness.
        -----
        :param unicode fltr:
        :param class 'indigo.Dict' values_dict:
        :param unicode type_id:
        :param int target_id:
        """
        return self.dev_var_list
# =============================================================================
    def generatorDeviceAndVariableList(self, fltr="", values_dict=None, type_id="", target_id=0):
        """
        Create a list of devices and variables for config menu controls
        Provides a list of Indigo devices and variables for various dropdown menus. The
        method is agnostic as to whether the devices and variables are enabled or
        disabled. All devices are listed first and then all variables. The method
        returns a list of tuples in the form::
            [(dev.id, dev.name), (var.id, var.name)].
        It prepends (D) or (V) to make it easier to distinguish between the two.
        The list is generated within the DLFramework module.
        -----
        :param unicode fltr:
        :param class 'indigo.Dict' values_dict:
        :param unicode type_id:
        :param int target_id:
        """
        # Thin delegation; all work happens in DLFramework (self.Fogbert).
        return self.Fogbert.deviceAndVariableList()
# =============================================================================
    def generatorVariableList(self, fltr="", values_dict=None, type_id="", target_id=0):
        """
        Returns a list of Indigo variables.
        Provides a list of Indigo variables for various dropdown menus. The method is
        agnostic as to whether the variable is enabled or disabled. The method returns
        a list of tuples in the form::
            [(var.id, var.name), (var.id, var.name)].
        The list is generated within the DLFramework module.
        -----
        :param unicode fltr:
        :param class 'indigo.Dict' values_dict:
        :param unicode type_id:
        :param int target_id:
        """
        # Thin delegation; all work happens in DLFramework (self.Fogbert).
        return self.Fogbert.variableList()
# =============================================================================
def getAxisList(self, fltr="", values_dict=None, type_id="", target_id=0):
"""
Returns a list of axis formats.
Returns a list of Python date formatting strings for use in plotting date
labels. The list does not include all Python format specifiers.
-----
:param str fltr:
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param int target_id:
"""
now = dt.datetime.now()
axis_list_menu = [("None", "None"),
("-1", "%%separator%%"),
("%I:%M", dt.datetime.strftime(now, "%I:%M") + ' (12 hour clock)'),
("%H:%M", dt.datetime.strftime(now, "%H:%M") + ' (24 hour clock)'),
("%l:%M %p", dt.datetime.strftime(now, "%l:%M %p").strip() + ' (full time)'),
("%a", dt.datetime.strftime(now, "%a") + ' (short day)'),
("%A", dt.datetime.strftime(now, "%A") + ' (long day)*'),
("%b", dt.datetime.strftime(now, "%b") + ' (short month)'),
("%B", dt.datetime.strftime(now, "%B") + ' (long month)'),
("%d", dt.datetime.strftime(now, "%d") + ' (date)'),
("%Y", dt.datetime.strftime(now, "%Y") + ' (year)'),
("%b %d", dt.datetime.strftime(now, "%b %d") + ' (month date)'),
("%d %b", dt.datetime.strftime(now, "%d %b") + | |
<gh_stars>10-100
# Copyright (c) 2019 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
r"""Submodule containing all standard functions."""
import numpy as np
def exp(q):
    r"""Compute the natural exponential function :math:`e^q`.
    For a quaternion :math:`q = a + \boldsymbol{v}` the exponential power
    series collapses to
    :math:`e^q = e^a\left(\cos \lVert \boldsymbol{v} \rVert +
    \frac{\boldsymbol{v}}{\lVert \boldsymbol{v} \rVert}
    \sin \lVert \boldsymbol{v} \rVert\right)`.
    Args:
        q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.
    Returns:
        (..., 4) :class:`numpy.ndarray`: Exponentials of ``q``.
    Example::
        >>> rowan.exp([1, 0, 0, 0])
        array([2.71828183, 0.        , 0.        , 0.        ])
    """
    q = np.asarray(q)
    result = np.empty(q.shape)
    vec_norms = np.linalg.norm(q[..., 1:], axis=-1)
    scale = np.exp(q[..., 0])
    result[..., 0] = scale * np.cos(vec_norms)
    zero_vec = np.isclose(vec_norms, 0)
    finite_vec = np.logical_not(zero_vec)
    if np.any(finite_vec):
        unit_vec = q[finite_vec, 1:] / vec_norms[finite_vec, np.newaxis]
        result[finite_vec, 1:] = (
            scale[finite_vec, np.newaxis]
            * unit_vec
            * np.sin(vec_norms)[finite_vec, np.newaxis]
        )
        if np.any(zero_vec):
            # A vanishing vector part leaves only the real exponential.
            result[zero_vec, 1:] = 0
    else:
        result[..., 1:] = 0
    return result
def expb(q, b):
    r"""Compute the exponential function :math:`b^q`.
    Defined via the change of base relative to :func:`exp`:
    :math:`b^q = e^{q \ln b}`.
    Args:
        q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.
    Returns:
        (..., 4) :class:`numpy.ndarray`: Exponentials of ``q``.
    Example::
        >>> rowan.expb([1, 0, 0, 0], 2)
        array([2., 0., 0., 0.])
    """
    # Scale the quaternion by ln(b) and reuse the natural exponential.
    return exp(np.asarray(q) * np.log(b))
def exp10(q):
    r"""Compute the exponential function :math:`10^q`.
    Wrapper around :func:`expb`.
    Args:
        q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.
    Returns:
        (..., 4) :class:`numpy.ndarray`: Exponentials of ``q``.
    Example::
        >>> rowan.exp10([1, 0, 0, 0])
        array([10., 0., 0., 0.])
    """
    # Change of base: 10**q == exp(q * ln 10), handled inside expb.
    return expb(q, 10)
def log(q):
    r"""Compute the quaternion natural logarithm.
    The logarithm of a quaternion :math:`q = a + \boldsymbol{v}` inverts the
    exponential formula (see :func:`exp`):
    .. math::
        \begin{equation}
            \ln(q) = \ln\lvert\lvert q \rvert\rvert +
                \frac{\boldsymbol{v}}{\lvert\lvert \boldsymbol{v}
                \rvert\rvert} \arccos\left(\frac{a}{q}\right)
        \end{equation}
    Args:
        q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.
    Returns:
        (..., 4) :class:`numpy.ndarray`: Logarithms of ``q``.
    Example::
        >>> rowan.log([1, 0, 0, 0])
        array([0., 0., 0., 0.])
    """
    q = np.asarray(q)
    result = np.empty(q.shape)
    # We need all the norms to avoid divide by zeros later.
    # Can also use these to minimize the amount of work done.
    q_norms = norm(q)
    q_norm_zero = np.isclose(q_norms, 0)
    q_not_zero = np.logical_not(q_norm_zero)
    v_norms = np.linalg.norm(q[..., 1:], axis=-1)
    v_norm_zero = np.isclose(v_norms, 0)
    v_not_zero = np.logical_not(v_norm_zero)
    # Scalar part: ln of the norm; -inf for zero quaternions.
    if np.any(q_not_zero):
        if np.any(q_norm_zero):
            result[q_norm_zero, 0] = -np.inf
        result[q_not_zero, 0] = np.log(q_norms[q_not_zero])
    else:
        result[..., 0] = -np.inf
    # Vector part: unit vector scaled by the arccos term; zero wherever the
    # vector part vanishes. (Two dead np.empty preallocations that were
    # immediately overwritten have been removed here.)
    if np.any(v_not_zero):
        prefactor = q[v_not_zero, 1:] / v_norms[v_not_zero, np.newaxis]
        inv_cos = np.arccos(q[v_not_zero, 0] / q_norms[v_not_zero])
        if np.any(v_norm_zero):
            result[v_norm_zero, 1:] = 0
        result[v_not_zero, 1:] = prefactor * inv_cos[..., np.newaxis]
    else:
        result[..., 1:] = 0
    return result
def logb(q, b):
    r"""Compute the quaternion logarithm to some base b.
    The quaternion logarithm for arbitrary bases is defined using the
    standard change of basis formula relative to the natural logarithm.
    .. math::
        \begin{align}
            \log_b q &= y \\
            q &= b^y \\
            \ln q &= y \ln b \\
            y &= \log_b q = \frac{\ln q}{\ln b}
        \end{align}
    Args:
        q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.
        b ((...) :class:`numpy.ndarray`): Scalars to use as log bases.
    Returns:
        (..., 4) :class:`numpy.ndarray`: Logarithms of ``q``.
    Example::
        >>> rowan.logb([1, 0, 0, 0], 2)
        array([0., 0., 0., 0.])
    """
    q = np.asarray(q)
    return log(q) / np.log(b)
def log10(q):
    r"""Compute the quaternion logarithm base 10.
    Wrapper around :func:`logb`.
    Args:
        q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.
    Returns:
        (..., 4) :class:`numpy.ndarray`: Logarithms of ``q``.
    Example::
        >>> rowan.log10([1, 0, 0, 0])
        array([0., 0., 0., 0.])
    """
    # The np.asarray conversion previously performed here was redundant:
    # logb (via log) already coerces its input to an ndarray.
    return logb(q, 10)
def power(q, n):
    r"""Compute the power of a quaternion :math:`q^n`.
    Defined through the polar decomposition
    :math:`q^n = \lvert\lvert q \rvert\rvert^n \left( \cos(n\theta) +
    \hat{u} \sin(n\theta)\right)`, but computed via the cheaper identity
    :math:`q^n = \exp(n \ln(q))`.
    Args:
        q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.
        n ((...) :class:`numpy.ndarray`): Scalars to exponentiate quaternions with.
    Returns:
        (..., 4) :class:`numpy.ndarray`: Powers of ``q``.
    Example::
        >>> rowan.power([1, 0, 0, 0], 5)
        array([1., 0., 0., 0.])
    """
    # TODO: Write polar decomposition function #noqa
    q = np.asarray(q)
    out_shape = np.broadcast(q[..., 0], n).shape
    q = np.broadcast_to(q, out_shape + (4,))
    n = np.broadcast_to(n, out_shape)
    # Convention used here: 0^0 = 1.
    zero_exponents = n == 0
    if np.any(zero_exponents):
        result = np.empty(out_shape + (4,))
        result[zero_exponents] = np.array([1, 0, 0, 0])
        other_exponents = np.logical_not(zero_exponents)
        if np.any(other_exponents):
            result[other_exponents] = exp(
                n[other_exponents, np.newaxis] * log(q[other_exponents, :])
            )
    else:
        result = exp(n[..., np.newaxis] * log(q))
    return result
def conjugate(q):
    r"""Conjugates an array of quaternions.
    Args:
        q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.
    Returns:
        (..., 4) :class:`numpy.ndarray`: Conjugates of ``q``.
    Example::
        >>> rowan.conjugate([0.5, 0.5, -0.5, 0.5])
        array([ 0.5, -0.5,  0.5, -0.5])
    """
    # np.array (not asarray) guarantees a copy, so the caller's data is
    # never mutated by the sign flip below.
    result = np.array(q)
    result[..., 1:] = -result[..., 1:]
    return result
def inverse(q):
    r"""Compute the inverse of an array of quaternions.
    Args:
        q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.
    Returns:
        (..., 4) :class:`numpy.ndarray`: Inverses of ``q``.
    Example::
        >>> rowan.inverse([1, 0, 0, 0])
        array([ 1., -0., -0., -0.])
    """
    # Work on a float copy so in-place edits are safe and well-typed.
    result = np.array(q, dtype=float)
    normsq = norm(result) ** 2
    if np.any(normsq):
        result[..., 1:] *= -1
        # Divide only where the squared norm is nonzero; zero quaternions
        # are left untouched (type safety prevents doing this fully in place).
        nonzero = normsq > 0
        result[nonzero] = result[nonzero] / normsq[nonzero, np.newaxis]
    return result
def multiply(qi, qj):
    r"""Multiplies two arrays of quaternions.
    Quaternion multiplication is generally non-commutative, so ``qi`` and
    ``qj`` must be supplied in the intended order.
    Args:
        qi ((..., 4) :class:`numpy.ndarray`): Array of left quaternions.
        qj ((..., 4) :class:`numpy.ndarray`): Array of right quaternions.
    Returns:
        (..., 4) :class:`numpy.ndarray`:
            Element-wise products of ``q`` (obeying broadcasting rules up to
            the last dimension of ``qi`` and ``qj``).
    Example::
        >>> rowan.multiply([1, 0, 0, 0], [2, 0, 0, 0])
        array([2., 0., 0., 0.])
    """
    left = np.asarray(qi)
    right = np.asarray(qj)
    product = np.empty(np.broadcast(left, right).shape)
    # Scalar part: a1*a2 - v1.v2
    product[..., 0] = left[..., 0] * right[..., 0] - np.sum(
        left[..., 1:] * right[..., 1:], axis=-1
    )
    # Vector part: a1*v2 + a2*v1 + v1 x v2
    product[..., 1:] = (
        left[..., 0, np.newaxis] * right[..., 1:]
        + right[..., 0, np.newaxis] * left[..., 1:]
        + np.cross(left[..., 1:], right[..., 1:])
    )
    return product
def divide(qi, qj):
    r"""Divides two arrays of quaternions.
    Division is non-commutative; this function returns
    :math:`q_i q_j^{-1}`.
    Args:
        qi ((..., 4) :class:`numpy.ndarray`): Dividend quaternions.
        qj ((..., 4) :class:`numpy.ndarray`): Divisor quaternions.
    Returns:
        (..., 4) :class:`numpy.ndarray`:
            Element-wise quotients of ``q`` (obeying broadcasting rules up to the last
            dimension of ``qi`` and ``qj``).
    Example::
        >>> rowan.divide([1, 0, 0, 0], [2, 0, 0, 0])
        array([0.5, 0. , 0. , 0. ])
    """
    # Right-division: multiply by the inverse of the divisor.
    return multiply(qi, inverse(qj))
def norm(q):
    r"""Compute the quaternion norm.
    Args:
        q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.
    Returns:
        (...) :class:`numpy.ndarray`: Norms of ``q``.
    Example::
        >>> rowan.norm([10, 0, 0, 0])
        10.0
    """
    # Euclidean norm over the last (component) axis.
    return np.linalg.norm(np.asarray(q), axis=-1)
def normalize(q):
    r"""Normalize quaternions.
    Args:
        q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.
    Returns:
        (..., 4) :class:`numpy.ndarray`: Normalized versions of ``q``.
    Example::
        >>> rowan.normalize([10, 0, 0, 0])
        array([1., 0., 0., 0.])
    """
    q = np.asarray(q)
    # Broadcast each norm across the four quaternion components.
    return q / norm(q)[..., np.newaxis]
def is_unit(q):
    """Check if all input quaternions have unit norm.
    Args:
        q ((..., 4) :class:`numpy.ndarray`): Array of quaternions.
    Returns:
        bool: Whether or not all inputs are unit quaternions. Note that
        ``np.allclose`` collapses the whole array into a single Python
        bool, not an elementwise array.
    Example::
        >>> rowan.is_unit([10, 0, 0, 0])
        False
    """
    return np.allclose(norm(q), 1)
def _validate_unit(q, msg="Arguments must be unit quaternions"):
"""Ensure that all | |
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
print 'Case #%s: %.16f' % (test + 1, ans)
return a
def func_0b1c94ef5ed74644860d862b57fafbea():
    """Code Jam Y14R5P1-A solver; opens the test file itself (never closed).
    Each case builds a pseudo-random sequence D from (N, p, q, r, s), then a
    two-pointer sweep over A = D[:a], B = D[a:b+1], C = D[b+1:] minimises the
    largest part-sum; the printed answer is (S - best) / S for total S.
    Auto-generated variant; the ``return s`` is an artefact of generation.
    """
    infile = open('codejam/test_files/Y14R5P1/A.in')
    for test in range(int(infile.readline())):
        N, p, q, r, s = map(int, infile.readline().split())
        D = [((i * p + q) % r + s) for i in range(N)]
        S = sum(D)
        ans = S
        # A, B, C are the running sums of the three contiguous parts.
        A, B, C = 0, 0, S
        a = 0
        b = -1
        while b < N - 1:
            b += 1
            C -= D[b]
            B += D[b]
            # p is reused here as the current maximum part-sum.
            p = max(A, B, C)
            while a < b:
                B -= D[a]
                A += D[a]
                a += 1
                t = max(A, B, C)
                if t >= p:
                    # Moving a stopped helping: undo the last step.
                    a -= 1
                    B += D[a]
                    A -= D[a]
                    break
                p = t
            ans = min(ans, p)
        ans = float(S - ans) / S
        print 'Case #%s: %.16f' % (test + 1, ans)
    return s
def func_410b049f503f4e38881fa2141c176947(infile):
    """Code Jam Y14R5P1-A solver reading all cases from *infile* (then closed).
    Each case builds a pseudo-random sequence D from (N, p, q, r, s), then a
    two-pointer sweep over A = D[:a], B = D[a:b+1], C = D[b+1:] minimises the
    largest part-sum; the printed answer is (S - best) / S for total S.
    Auto-generated variant; the ``return p`` is an artefact of generation.
    """
    for test in range(int(infile.readline())):
        N, p, q, r, s = map(int, infile.readline().split())
        D = [((i * p + q) % r + s) for i in range(N)]
        S = sum(D)
        ans = S
        # A, B, C are the running sums of the three contiguous parts.
        A, B, C = 0, 0, S
        a = 0
        b = -1
        while b < N - 1:
            b += 1
            C -= D[b]
            B += D[b]
            # p is reused here as the current maximum part-sum.
            p = max(A, B, C)
            while a < b:
                B -= D[a]
                A += D[a]
                a += 1
                t = max(A, B, C)
                if t >= p:
                    # Moving a stopped helping: undo the last step.
                    a -= 1
                    B += D[a]
                    A -= D[a]
                    break
                p = t
            ans = min(ans, p)
        ans = float(S - ans) / S
        print 'Case #%s: %.16f' % (test + 1, ans)
    infile.close()
    return p
def func_85943584a33d4b959eb4c249a46b8c11(infile):
    """Code Jam Y14R5P1-A solver reading all cases from *infile* (then closed).
    Each case builds a pseudo-random sequence D from (N, p, q, r, s), then a
    two-pointer sweep over A = D[:a], B = D[a:b+1], C = D[b+1:] minimises the
    largest part-sum; the printed answer is (S - best) / S for total S.
    Auto-generated variant; the ``return ans`` is an artefact of generation.
    """
    for test in range(int(infile.readline())):
        N, p, q, r, s = map(int, infile.readline().split())
        D = [((i * p + q) % r + s) for i in range(N)]
        S = sum(D)
        ans = S
        # A, B, C are the running sums of the three contiguous parts.
        A, B, C = 0, 0, S
        a = 0
        b = -1
        while b < N - 1:
            b += 1
            C -= D[b]
            B += D[b]
            # p is reused here as the current maximum part-sum.
            p = max(A, B, C)
            while a < b:
                B -= D[a]
                A += D[a]
                a += 1
                t = max(A, B, C)
                if t >= p:
                    # Moving a stopped helping: undo the last step.
                    a -= 1
                    B += D[a]
                    A -= D[a]
                    break
                p = t
            ans = min(ans, p)
        ans = float(S - ans) / S
        print 'Case #%s: %.16f' % (test + 1, ans)
    infile.close()
    return ans
def func_6369321d3ac44dfd936ff4703d801909(infile):
    """Code Jam Y14R5P1-A solver reading all cases from *infile* (then closed).
    Each case builds a pseudo-random sequence D from (N, p, q, r, s), then a
    two-pointer sweep over A = D[:a], B = D[a:b+1], C = D[b+1:] minimises the
    largest part-sum; the printed answer is (S - best) / S for total S.
    Auto-generated variant; the ``return C`` is an artefact of generation.
    """
    for test in range(int(infile.readline())):
        N, p, q, r, s = map(int, infile.readline().split())
        D = [((i * p + q) % r + s) for i in range(N)]
        S = sum(D)
        ans = S
        # A, B, C are the running sums of the three contiguous parts.
        A, B, C = 0, 0, S
        a = 0
        b = -1
        while b < N - 1:
            b += 1
            C -= D[b]
            B += D[b]
            # p is reused here as the current maximum part-sum.
            p = max(A, B, C)
            while a < b:
                B -= D[a]
                A += D[a]
                a += 1
                t = max(A, B, C)
                if t >= p:
                    # Moving a stopped helping: undo the last step.
                    a -= 1
                    B += D[a]
                    A -= D[a]
                    break
                p = t
            ans = min(ans, p)
        ans = float(S - ans) / S
        print 'Case #%s: %.16f' % (test + 1, ans)
    infile.close()
    return C
def func_d141750250824621a2c648877e90c9c0(infile):
    """Code Jam Y14R5P1-A solver reading all cases from *infile* (then closed).
    Each case builds a pseudo-random sequence D from (N, p, q, r, s), then a
    two-pointer sweep over A = D[:a], B = D[a:b+1], C = D[b+1:] minimises the
    largest part-sum; the printed answer is (S - best) / S for total S.
    Auto-generated variant; the ``return test`` is an artefact of generation.
    """
    for test in range(int(infile.readline())):
        N, p, q, r, s = map(int, infile.readline().split())
        D = [((i * p + q) % r + s) for i in range(N)]
        S = sum(D)
        ans = S
        # A, B, C are the running sums of the three contiguous parts.
        A, B, C = 0, 0, S
        a = 0
        b = -1
        while b < N - 1:
            b += 1
            C -= D[b]
            B += D[b]
            # p is reused here as the current maximum part-sum.
            p = max(A, B, C)
            while a < b:
                B -= D[a]
                A += D[a]
                a += 1
                t = max(A, B, C)
                if t >= p:
                    # Moving a stopped helping: undo the last step.
                    a -= 1
                    B += D[a]
                    A -= D[a]
                    break
                p = t
            ans = min(ans, p)
        ans = float(S - ans) / S
        print 'Case #%s: %.16f' % (test + 1, ans)
    infile.close()
    return test
def func_e929bd088b6740cbb9bcf0d7f6217198(infile):
    """Code Jam Y14R5P1-A solver reading all cases from *infile* (then closed).
    Each case builds a pseudo-random sequence D from (N, p, q, r, s), then a
    two-pointer sweep over A = D[:a], B = D[a:b+1], C = D[b+1:] minimises the
    largest part-sum; the printed answer is (S - best) / S for total S.
    Auto-generated variant; the ``return S`` is an artefact of generation.
    """
    for test in range(int(infile.readline())):
        N, p, q, r, s = map(int, infile.readline().split())
        D = [((i * p + q) % r + s) for i in range(N)]
        S = sum(D)
        ans = S
        # A, B, C are the running sums of the three contiguous parts.
        A, B, C = 0, 0, S
        a = 0
        b = -1
        while b < N - 1:
            b += 1
            C -= D[b]
            B += D[b]
            # p is reused here as the current maximum part-sum.
            p = max(A, B, C)
            while a < b:
                B -= D[a]
                A += D[a]
                a += 1
                t = max(A, B, C)
                if t >= p:
                    # Moving a stopped helping: undo the last step.
                    a -= 1
                    B += D[a]
                    A -= D[a]
                    break
                p = t
            ans = min(ans, p)
        ans = float(S - ans) / S
        print 'Case #%s: %.16f' % (test + 1, ans)
    infile.close()
    return S
def func_5e35218e133e4519be4da9cc82f8a487(infile):
    """Code Jam Y14R5P1-A solver reading all cases from *infile* (then closed).
    Each case builds a pseudo-random sequence D from (N, p, q, r, s), then a
    two-pointer sweep over A = D[:a], B = D[a:b+1], C = D[b+1:] minimises the
    largest part-sum; the printed answer is (S - best) / S for total S.
    Auto-generated variant; the ``return B`` is an artefact of generation.
    """
    for test in range(int(infile.readline())):
        N, p, q, r, s = map(int, infile.readline().split())
        D = [((i * p + q) % r + s) for i in range(N)]
        S = sum(D)
        ans = S
        # A, B, C are the running sums of the three contiguous parts.
        A, B, C = 0, 0, S
        a = 0
        b = -1
        while b < N - 1:
            b += 1
            C -= D[b]
            B += D[b]
            # p is reused here as the current maximum part-sum.
            p = max(A, B, C)
            while a < b:
                B -= D[a]
                A += D[a]
                a += 1
                t = max(A, B, C)
                if t >= p:
                    # Moving a stopped helping: undo the last step.
                    a -= 1
                    B += D[a]
                    A -= D[a]
                    break
                p = t
            ans = min(ans, p)
        ans = float(S - ans) / S
        print 'Case #%s: %.16f' % (test + 1, ans)
    infile.close()
    return B
def func_507aba8b9ce74a31aa78ab302635fc5c(infile):
    """Code Jam Y14R5P1-A solver reading all cases from *infile* (then closed).
    Each case builds a pseudo-random sequence D from (N, p, q, r, s), then a
    two-pointer sweep over A = D[:a], B = D[a:b+1], C = D[b+1:] minimises the
    largest part-sum; the printed answer is (S - best) / S for total S.
    Auto-generated variant; the ``return A`` is an artefact of generation.
    """
    for test in range(int(infile.readline())):
        N, p, q, r, s = map(int, infile.readline().split())
        D = [((i * p + q) % r + s) for i in range(N)]
        S = sum(D)
        ans = S
        # A, B, C are the running sums of the three contiguous parts.
        A, B, C = 0, 0, S
        a = 0
        b = -1
        while b < N - 1:
            b += 1
            C -= D[b]
            B += D[b]
            # p is reused here as the current maximum part-sum.
            p = max(A, B, C)
            while a < b:
                B -= D[a]
                A += D[a]
                a += 1
                t = max(A, B, C)
                if t >= p:
                    # Moving a stopped helping: undo the last step.
                    a -= 1
                    B += D[a]
                    A -= D[a]
                    break
                p = t
            ans = min(ans, p)
        ans = float(S - ans) / S
        print 'Case #%s: %.16f' % (test + 1, ans)
    infile.close()
    return A
def func_300b76e068084dca928c421ffb9cd1a9(infile):
    """Code Jam Y14R5P1-A solver reading all cases from *infile* (then closed).
    Each case builds a pseudo-random sequence D from (N, p, q, r, s), then a
    two-pointer sweep over A = D[:a], B = D[a:b+1], C = D[b+1:] minimises the
    largest part-sum; the printed answer is (S - best) / S for total S.
    Auto-generated variant; the ``return a`` is an artefact of generation.
    """
    for test in range(int(infile.readline())):
        N, p, q, r, s = map(int, infile.readline().split())
        D = [((i * p + q) % r + s) for i in range(N)]
        S = sum(D)
        ans = S
        # A, B, C are the running sums of the three contiguous parts.
        A, B, C = 0, 0, S
        a = 0
        b = -1
        while b < N - 1:
            b += 1
            C -= D[b]
            B += D[b]
            # p is reused here as the current maximum part-sum.
            p = max(A, B, C)
            while a < b:
                B -= D[a]
                A += D[a]
                a += 1
                t = max(A, B, C)
                if t >= p:
                    # Moving a stopped helping: undo the last step.
                    a -= 1
                    B += D[a]
                    A -= D[a]
                    break
                p = t
            ans = min(ans, p)
        ans = float(S - ans) / S
        print 'Case #%s: %.16f' % (test + 1, ans)
    infile.close()
    return a
def func_244efe9e21774b0db7255ed3b9bed349(infile):
for test in range(int(infile.readline())):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S | |
"""
filebrowser.views
"""
import logging
import re
import json
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import get_template
from django.conf import settings
from .utils import get_rucio_file, get_rucio_pfns_from_guids, fetch_file, get_filebrowser_vo, \
remove_folder, get_fullpath_filebrowser_directory, list_file_directory
from core.oauth.utils import login_customrequired
from core.common.models import Filestable4, FilestableArch
from core.views import DateTimeEncoder, initSelfMonitor
from datetime import datetime
_logger = logging.getLogger('bigpandamon-filebrowser')
filebrowserDateTimeFormat = "%Y %b %d %H:%M:%S"
hostname = "bigpanda.cern.ch"
@login_customrequired
def index(request):
    """
    index -- filebrowser's default page
    Validates the GET parameters ('guid', 'scope' and 'lfn' are required;
    'site' and 'fileid' are optional), refuses log tarballs larger than
    ``max_sizemb``, downloads the log via Rucio and renders the file listing
    as HTML or, when 'json' is in the GET parameters, as JSON.
    :param request: Django's HTTP request
    :type request: django.http.HttpRequest
    """
    try:
        initSelfMonitor(request)
    except:
        _logger.exception('Failed to init self monitor')
    errors = {}
    _logger.debug("index started - " + datetime.now().strftime("%H:%M:%S") + " ")
    ### check that all expected parameters are in URL
    # 'site' is not mandatory anymore, so removing it from the list
    expectedFields = ['guid', 'scope', 'lfn']
    for expectedField in expectedFields:
        try:
            request.GET[expectedField]
        except:
            msg = 'Missing expected GET parameter %s. ' % expectedField
            _logger.error(msg)
            if 'missingparameter' not in errors.keys():
                errors['missingparameter'] = ''
            errors['missingparameter'] += msg
    ### if all expected GET parameters are present, execute file lookup
    pfns = []
    scope = ''
    lfn = ''
    guid = ''
    site = ''
    # Whitelist patterns: any parameter with unexpected characters is
    # rejected (reset to None) and recorded under 'improperformat'.
    pattern_string='^[a-zA-Z0-9.\-_]+$'
    pattern_site = '^[a-zA-Z0-9.,\-_\/]+$'
    pattern_guid='^(\{){0,1}[0-9a-zA-Z]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}(\}){0,1}$'
    try:
        guid = request.GET['guid']
        if re.match(pattern_guid, guid) is None:
            guid = None
            if 'improperformat' not in errors.keys():
                errors['improperformat'] = ''
            errors['improperformat'] += 'guid: %s ' % (request.GET['guid'])
    except:
        pass
    try:
        site = request.GET['site']
        if re.match(pattern_site, site) is None:
            site = None
            if 'improperformat' not in errors.keys():
                errors['improperformat'] = ''
            errors['improperformat'] += 'site: %s ' % (request.GET['site'])
    except:
        pass
    try:
        lfn = request.GET['lfn']
        if re.match(pattern_string, lfn) is None:
            lfn = None
            if 'improperformat' not in errors.keys():
                errors['improperformat'] = ''
            errors['improperformat'] += 'lfn: %s ' % (request.GET['lfn'])
    except:
        pass
    try:
        scope = request.GET['scope']
        if re.match(pattern_string, scope) is None:
            scope = None
            if 'improperformat' not in errors.keys():
                errors['improperformat'] = ''
            errors['improperformat'] += 'scope: %s ' % (request.GET['scope'])
    except:
        pass
    # check if size of logfile is too big return to user error message with rucio cli command to download it locally
    max_sizemb = 1000
    sizemb = -1
    fsize = []
    try:
        fileid = int(request.GET['fileid'])
    except:
        fileid = -1
    lquery = {'type': 'log'}
    if lfn and len(lfn) > 0:
        lquery['lfn'] = lfn
        fsize.extend(Filestable4.objects.filter(**lquery).values('fsize', 'fileid', 'status'))
        if len(fsize) == 0:
            # Fall back to the archive table for older jobs.
            fsize.extend(FilestableArch.objects.filter(**lquery).values('fsize', 'fileid', 'status'))
        if len(fsize) > 0:
            try:
                if fileid > 0:
                    sizemb = round(int([f['fsize'] for f in fsize if f['fileid'] == fileid][0])/1000/1000)
                else:
                    sizemb = round(int([f['fsize'] for f in fsize][0])/1000/1000)
            except:
                _logger.warning("ERROR!!! Failed to calculate log tarball size in MB")
    _logger.debug("index step1 - " + datetime.now().strftime("%H:%M:%S") + " ")
    ### download the file
    files = []
    dirprefix = ''
    tardir = ''
    if sizemb > max_sizemb:
        _logger.warning('Size of the requested log is {} MB which is more than limit {} MB'.format(sizemb, max_sizemb))
        errormessage = """The size of requested log is too big ({}MB). 
        Please try to download it locally using Rucio CLI by the next command: 
        rucio download {}:{}""".format(sizemb, scope, lfn)
        data = {
            'errormessage': errormessage
        }
        return render_to_response('errorPage.html', data, content_type='text/html')
    if not (guid is None or lfn is None or scope is None):
        # NOTE(review): the literal 100 looks like a limit passed to
        # get_rucio_file (file count or size?) -- confirm against .utils.
        files, errtxt, dirprefix, tardir = get_rucio_file(scope,lfn, guid, 100)
    else:
        errormessage = ''
        if guid is None:
            errormessage = 'No guid provided.'
        elif lfn is None:
            errormessage = 'No lfn provided.'
        elif scope is None:
            errormessage = 'No scope provided.'
        _logger.warning(errormessage)
        data = {
            'errormessage': errormessage
        }
        return render_to_response('errorPage.html', data, content_type='text/html')
    if not len(files):
        msg = 'Something went wrong while the log file downloading. [guid=%s, site=%s, scope=%s, lfn=%s] \n' % \
              (guid, site, scope, lfn)
        _logger.warning(msg)
        errors['download'] = msg
    if len(errtxt):
        if 'download' not in errors:
            errors['download'] = ''
        errors['download'] += errtxt
    _logger.debug("index step2 - " + datetime.now().strftime("%H:%M:%S") + " ")
    totalLogSize = 0
    if type(files) is list and len(files) > 0:
        for file in files:
            totalLogSize += file['size'] if 'size' in file and file['size'] > 0 else 0
        # from B to MB
        if totalLogSize > 0:
            totalLogSize = round(totalLogSize*1.0/1024/1024, 2)
    ### return the file page
    ### set request response data
    data = {
        'request': request,
        'errors': errors,
        'pfns': pfns,
        'files': files,
        'dirprefix': dirprefix,
        'tardir': tardir,
        'scope': scope,
        'lfn': lfn,
        'site': site,
        'guid': guid,
        'MEDIA_URL': settings.MEDIA_URL,
        'viewParams' : {'MON_VO': str(get_filebrowser_vo()).upper()},
        'HOSTNAME': hostname,
        'totalLogSize': totalLogSize,
        'nfiles': len(files),
    }
    _logger.debug("index step3 - " + datetime.now().strftime("%H:%M:%S") + " ")
    if 'json' not in request.GET:
        status = 200
        # return 500 if most probably there were issue
        if 'download' in errors and errors['download'] and len(errors['download']) > 0:
            if len(fsize) > 0 and 'status' in fsize[0] and fsize[0]['status'] != 'failed' and sizemb <= 0:
                status = 500
        return render_to_response('filebrowser/filebrowser_index.html', data, RequestContext(request), status=status)
    else:
        resp = HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
        _logger.debug("index step4 - " + datetime.now().strftime("%H:%M:%S") + " ")
        return resp
def api_single_pandaid(request):
    """
    api_single_pandaid -- return log file URL for a single PanDA job
    Looks up the 'log'-type file row for the requested job in Filestable4,
    resolves its PFNs via Rucio, fetches the file and, when everything
    succeeds, renders a page carrying the direct download URL.
    :param request: Django's HTTP request; GET must carry 'pandaid'
    :type request: django.http.HttpRequest
    :return: rendered filebrowser_api_single_pandaid.html (HTTP 200 on
        success, 400 for a bad or missing pandaid, 404 when no log file
        row is found)
    """
    errors = {}
    ### check that all expected parameters are in URL
    expectedFields = ['pandaid']
    for expectedField in expectedFields:
        try:
            if len(request.GET[expectedField]) < 1:
                msg = 'Missing expected GET parameter %s. ' % expectedField
                if 'missingparameter' not in errors.keys():
                    errors['missingparameter'] = ''
                errors['missingparameter'] += msg
        except:
            msg = 'Missing expected GET parameter %s. ' % expectedField
            _logger.error(msg)
            if 'missingparameter' not in errors.keys():
                errors['missingparameter'] = ''
            errors['missingparameter'] += msg
    ### if all expected GET parameters are present, execute file lookup
    pfns = []
    scope = ''
    lfn = ''
    guid = ''
    site = ''
    pandaid = None
    status = ''
    query = {'type': 'log'}
    try:
        pandaid = int(request.GET['pandaid'])
    except:
        pass
    query['pandaid'] = pandaid
    file_properties = []
    try:
        file_properties = Filestable4.objects.filter(**query).values(
            'pandaid', 'guid', 'scope', 'lfn', 'destinationse', 'status')
    except:
        pass
    if len(file_properties):
        file_properties = file_properties[0]
        # Each column is optional in the row; missing ones keep the empty
        # defaults initialized above.
        guid = file_properties.get('guid', '')
        site = file_properties.get('destinationse', '')
        lfn = file_properties.get('lfn', '')
        scope = file_properties.get('scope', '')
        status = file_properties.get('status', '')
        # Bug fix: errtxt was previously unbound when the PFN lookup was
        # skipped because of a missing parameter.
        errtxt = ''
        if 'missingparameter' not in errors.keys():
            pfns, errtxt = get_rucio_pfns_from_guids(guids=[guid], site=[site],
                                                     lfns=[lfn], scopes=[scope])
            if len(errtxt):
                if 'lookup' not in errors:
                    errors['lookup'] = ''
                errors['lookup'] += errtxt
        ### download the file
        files = []
        dirprefix = ''
        tardir = ''
        # Bug fix: pfn was previously referenced in the failure message
        # below without ever being assigned when pfns came back empty
        # (NameError); give it a safe default.
        pfn = ''
        if len(pfns):
            pfn = pfns[0]
            files, errtxt, dirprefix, tardir = fetch_file(pfn, guid, unpack=False, listfiles=False)
        if not len(pfns):
            msg = 'File download failed. [pfn=%s guid=%s, site=%s, scope=%s, lfn=%s]' % \
                (pfn, guid, site, scope, lfn)
            _logger.warning(msg)
            errors['download'] = msg
        if len(errtxt):
            if 'download' in errors:
                errors['download'] += errtxt
    else:  # file not found in DB
        if 'lookup' not in errors:
            errors['lookup'] = ''
        errors['lookup'] += 'Log file for this job has not been found. '
    ### return the file page
    url = None
    data = {
        'pandaid': pandaid,
        'url': url,
        'errors': errors,
        'pfns': pfns,
        'scope': scope,
        'lfn': lfn,
        'site': site,
        'guid': guid,
        'status': status,
        'timestamp': datetime.utcnow().isoformat()
    }
    if not len(errors):
        url = 'http://' + hostname + \
            settings.MEDIA_URL + dirprefix + '/' + lfn
        data['url'] = url
        ### set request response data
        return render_to_response('filebrowser/filebrowser_api_single_pandaid.html', {'data': data}, RequestContext(request))
    elif 'pandaid' not in request.GET.keys() or pandaid is None:
        t = get_template('filebrowser/filebrowser_api_single_pandaid.html')
        context = RequestContext(request, {'data': data})
        return HttpResponse(t.render(context), status=400)
    elif not len(file_properties):
        t = get_template('filebrowser/filebrowser_api_single_pandaid.html')
        context = RequestContext(request, {'data': data})
        return HttpResponse(t.render(context), status=404)
    else:
        t = get_template('filebrowser/filebrowser_api_single_pandaid.html')
        context = RequestContext(request, {'data': data})
        return HttpResponse(t.render(context), status=400)
def get_job_log_file_path(pandaid, filename=''):
"""
Download log tarball of a job and return path to a local copy of memory_monitor_output.txt file
:param pandaid:
:param filename: str, if empty the function returm path to tarball folder
:return: file_path: str
"""
file_path = None
files = []
scope = ''
lfn = ''
guid = ''
dirprefix = ''
tardir = ''
query = {}
query['type'] = 'log'
query['pandaid'] = int(pandaid)
values = ['pandaid', 'guid', 'scope', 'lfn']
file_properties = []
file_properties.extend(Filestable4.objects.filter(**query).values(*values))
if len(file_properties) == 0:
file_properties.extend(FilestableArch.objects.filter(**query).values(*values))
if len(file_properties):
file_properties = file_properties[0]
try:
guid = file_properties['guid']
except:
pass
try:
lfn = file_properties['lfn']
except:
| |
anyway be forced later if he attempts virt= or all=.
self.validate_model(loop_type='real_init', stop=False)
# Set where to look for CutTools installation.
# In further versions, it will be set in the same manner as _mgme_dir so that
# the user can chose its own CutTools distribution.
self._cuttools_dir=str(pjoin(self._mgme_dir,'vendor','CutTools'))
if not os.path.isdir(pjoin(self._cuttools_dir, 'src','cts')):
logger.warning(('Warning: Directory %s is not a valid CutTools directory.'+\
'Using default CutTools instead.') % \
self._cuttools_dir)
self._cuttools_dir=str(pjoin(self._mgme_dir,'vendor','CutTools'))
# Set where to look for IREGI installation
self._iregi_dir=str(os.path.join(self._mgme_dir,'vendor','IREGI','src'))
if not os.path.isdir(self._iregi_dir):
logger.warning(('Warning: Directory %s is not a valid IREGI directory.'+\
'Using default IREGI instead.')%\
self._iregi_dir)
self._iregi_dir=str(os.path.join(self._mgme_dir,'vendor','IREGI','src'))
    def do_display(self, line, output=sys.stdout):
        """Display diagrams or processes of the generated FKS amplitudes.

        Supports the display modes 'diagrams', 'diagrams_text' and
        'processes', each optionally restricted to one amplitude class
        ('born', 'real' or 'loop'). Any other display request is delegated
        to the parent MadGraphCmd implementation.
        """
        # if we arrive here it means that a _fks_display_opts has been chosen
        args = self.split_arg(line)
        # check the validity of the arguments
        self.check_display(args)
        if args[0] in ['diagrams', 'processes', 'diagrams_text']:
            # Accessors for each amplitude class of the FKS multi-process.
            get_amps_dict = {'real': self._fks_multi_proc.get_real_amplitudes,
                             'born': self._fks_multi_proc.get_born_amplitudes,
                             'loop': self._fks_multi_proc.get_virt_amplitudes}
            if args[0] == 'diagrams':
                if len(args)>=2 and args[1] in list(get_amps_dict.keys()):
                    get_amps = get_amps_dict[args[1]]
                    self._curr_amps = get_amps()
                    # check that if one requests the virt diagrams, there are virt_amplitudes
                    if args[1] == 'loop' and len(self._curr_amps) == 0:
                        raise self.InvalidCmd('No virtuals have been generated')
                    self.draw(' '.join(args[2:]),type = args[1])
                else:
                    # No class given: draw born, real and loop diagrams in turn.
                    # NOTE(review): this call passes the keyword 'Dtype' while the
                    # branch above passes 'type' -- confirm self.draw accepts both.
                    for diag_type, get_amps in get_amps_dict.items():
                        self._curr_amps = get_amps()
                        self.draw(' '.join(args[1:]), Dtype=diag_type)
                # set _curr_amps back to empty
                self._curr_amps = diagram_generation.AmplitudeList()
            if args[0] == 'diagrams_text':
                if len(args)>=2 and args[1] in list(get_amps_dict.keys()):
                    get_amps = get_amps_dict[args[1]]
                    self._curr_amps = get_amps()
                    # check that if one requests the virt diagrams, there are virt_amplitudes
                    if args[1] in ['virt', 'loop'] and len(self._curr_amps) == 0:
                        raise self.InvalidCmd('No virtuals have been generated')
                    text = "\n".join([amp.nice_string() for amp in self._curr_amps])
                else:
                    # No class given: dump the textual form of all three classes.
                    text = 'Born diagrams:\n'
                    text += '\n'.join(amp.nice_string() for amp in get_amps_dict['born']())
                    text += '\n\nReal diagrams:'
                    text += '\n'.join(amp.nice_string() for amp in get_amps_dict['real']())
                    text += '\n\nLoop diagrams:\n'
                    text += '\n'.join(amp.nice_string() for amp in get_amps_dict['loop']())
                # Page the potentially long listing instead of printing it raw.
                pydoc.pager(text)
                # set _curr_amps back to empty
                self._curr_amps = diagram_generation.AmplitudeList()
            elif args[0] == 'processes':
                if len(args)>=2 and args[1] in list(get_amps_dict.keys()):
                    get_amps = get_amps_dict[args[1]]
                    self._curr_amps = get_amps()
                    # check that if one requests the virt diagrams, there are virt_amplitudes
                    if args[1] in ['virt', 'loop'] and len(self._curr_amps) == 0:
                        raise self.InvalidCmd('No virtuals have been generated')
                    print('\n'.join(amp.nice_string_processes() for amp in self._curr_amps))
                else:
                    # No class given: list processes of all three classes.
                    print('Born processes:')
                    print('\n'.join(amp.nice_string_processes() for amp in get_amps_dict['born']()))
                    print('Real processes:')
                    print('\n'.join(amp.nice_string_processes() for amp in get_amps_dict['real']()))
                    print('Loop processes:')
                    print('\n'.join(amp.nice_string_processes() for amp in get_amps_dict['loop']()))
                # set _curr_amps back to empty
                self._curr_amps = diagram_generation.AmplitudeList()
        else:
            # Anything else (particles, couplings, options, ...) is handled by
            # the standard MadGraph display command.
            mg_interface.MadGraphCmd.do_display(self,line,output)
    def do_add(self, line, *args,**opt):
        """Add a model or an NLO process to the current session.

        'add model' is delegated to add_model; 'add process' parses the
        process line, validates the model for the requested perturbation
        type, builds a FKSMultiProcess and accumulates it into
        self._fks_multi_proc.
        """
        args = self.split_arg(line)
        # Check the validity of the arguments
        self.check_add(args)
        if args[0] == 'model':
            return self.add_model(args[1:])
        elif args[0] != 'process':
            raise self.InvalidCmd("The add command can only be used with process or model")
        else:
            line = ' '.join(args[1:])
        # (mode, perturbation type, coupling type) of the requested process.
        proc_type=self.extract_process_type(line)
        if proc_type[1] not in ['real', 'LOonly']:
            # Loop computations require a working compiler; check early (non-blocking).
            run_interface.check_compiler(self.options, block=False)
        #validate_model will reset self._generate_info; to avoid
        #this store it
        geninfo = self._generate_info
        self.validate_model(proc_type[1], coupling_type=proc_type[2])
        self._generate_info = geninfo
        #now generate the amplitudes as usual
        #self.options['group_subprocesses'] = 'False'
        # NOTE(review): collect_mirror_procs and ignore_six_quark_processes are
        # assigned here but not used further down in this method.
        collect_mirror_procs = False
        ignore_six_quark_processes = self.options['ignore_six_quark_processes']
        if ',' in line:
            # Decay-chain syntax: split off and validate the decay part first.
            myprocdef, line = mg_interface.MadGraphCmd.extract_decay_chain_process(self,line)
            if myprocdef.are_decays_perturbed():
                raise MadGraph5Error("Decay processes cannot be perturbed")
        else:
            myprocdef = mg_interface.MadGraphCmd.extract_process(self,line)
        self.proc_validity(myprocdef,'aMCatNLO_%s'%proc_type[1])
        self._curr_proc_defs.append(myprocdef)
#        if myprocdef['perturbation_couplings']!=['QCD']:
#            message = ""FKS for reals only available in QCD for now, you asked %s" \
#                        % ', '.join(myprocdef['perturbation_couplings'])"
#            logger.info("%s. Checking for loop induced")
#            new_line = ln
#
#
#            raise self.InvalidCmd("FKS for reals only available in QCD for now, you asked %s" \
#                % ', '.join(myprocdef['perturbation_couplings']))
##
        # if the new nlo process generation mode is enabled, the number of cores to be
        # used has to be passed
        # ncores_for_proc_gen has the following meaning
        #   0 : do things the old way
        #   > 0 use ncores_for_proc_gen
        #   -1 : use all cores
        if self.options['low_mem_multicore_nlo_generation']:
            if self.options['nb_core']:
                self.ncores_for_proc_gen = int(self.options['nb_core'])
            else:
                self.ncores_for_proc_gen = -1
        else:
            self.ncores_for_proc_gen = 0
        # this is the options dictionary to pass to the FKSMultiProcess
        fks_options = {'OLP': self.options['OLP'],
                       'ignore_six_quark_processes': self.options['ignore_six_quark_processes'],
                       'ncores_for_proc_gen': self.ncores_for_proc_gen}
        try:
            # Extend the existing multi-process container if one was built before ...
            self._fks_multi_proc.add(fks_base.FKSMultiProcess(myprocdef,fks_options))
        except AttributeError:
            # ... otherwise this is the first process: create the container.
            self._fks_multi_proc = fks_base.FKSMultiProcess(myprocdef,fks_options)
    def do_output(self, line):
        """Main commands: Initialize a new Template or reinitialize one.

        Parses -f/-noclean/-nojpeg/-name flags, (re)creates the NLO export
        directory (asking for confirmation before deleting an existing one),
        copies the FKS template, runs export + finalize, and optionally
        generates virtuals through an external OLP.
        """
        args = self.split_arg(line)
        # Check Argument validity
        self.check_output(args)
        noclean = '-noclean' in args
        force = '-f' in args
        nojpeg = '-nojpeg' in args
        main_file_name = ""
        try:
            # '-name <file>' is optional; absence simply leaves the default.
            main_file_name = args[args.index('-name') + 1]
        except Exception:
            pass
        # For NLO, the group_subprocesses is automatically set to false
        group_processes = False
        # initialize the writer
        if self._export_format in ['NLO']:
            self._curr_exporter = export_v4.ExportV4Factory(self, noclean,
                    output_type='amcatnlo',group_subprocesses=group_processes)
            self._curr_exporter.pass_information_from_cmd(self)
        # check if a dir with the same name already exists
        if not force and not noclean and os.path.isdir(self._export_dir)\
               and self._export_format in ['NLO']:
            # Don't ask if user already specified force or noclean
            logger.info('INFO: directory %s already exists.' % self._export_dir)
            logger.info('If you continue this directory will be deleted and replaced.')
            answer = self.ask('Do you want to continue?', 'y', ['y','n'],
                                                timeout=self.options['timeout'])
            if answer != 'y':
                raise self.InvalidCmd('Stopped by user request')
        # if one gets here either used -f or answered yes to the question about
        # removing the dir
        if os.path.exists(self._export_dir):
            shutil.rmtree(self._export_dir)
        # Make a Template Copy
        if self._export_format in ['NLO']:
            self._curr_exporter.copy_fkstemplate()
        # Reset _done_export, since we have new directory
        self._done_export = False
        # Perform export and finalize right away
        self.export(nojpeg, main_file_name, group_processes=group_processes)
        # Pass potential new information generated during the export.
        self._curr_exporter.pass_information_from_cmd(self)
        # Automatically run finalize
        self.finalize(nojpeg)
        # Generate the virtuals if from OLP
        if self.options['OLP']!='MadLoop':
            self._curr_exporter.generate_virtuals_from_OLP(
              self.born_processes_for_olp,self._export_dir,self.options['OLP'])
        # Remember that we have done export
        self._done_export = (self._export_dir, self._export_format)
        # Reset _export_dir, so we don't overwrite by mistake later
        self._export_dir = None
# Export a matrix element
def export(self, nojpeg = False, main_file_name = "", group_processes=False):
"""Export a generated amplitude to file"""
self._curr_helas_model = helas_call_writers.FortranUFOHelasCallWriter(self._curr_model)
def generate_matrix_elements(self, group=False):
"""Helper function to generate the matrix elements before
exporting"""
# Sort amplitudes according to number of diagrams,
# to get most efficient multichannel output
self._curr_amps.sort(key = lambda a: a.get_number_of_diagrams(), reverse=True)
cpu_time1 = time.time()
ndiags = 0
if not self._curr_matrix_elements.get_matrix_elements():
if group:
raise MadGraph5Error("Cannot group subprocesses when "+\
"exporting to NLO")
else:
self._curr_matrix_elements = \
fks_helas.FKSHelasMultiProcess(\
self._fks_multi_proc,
loop_optimized= self.options['loop_optimized_output'])
if not self.options['low_mem_multicore_nlo_generation']:
# generate the code the old way
ndiags = sum([len(me.get('diagrams')) for \
me in self._curr_matrix_elements.\
get_matrix_elements()])
# assign a unique id number to all process and
# generate a list of possible PDF combinations
uid = 0
initial_states=[]
for me in self._curr_matrix_elements.get_matrix_elements():
uid += 1 # update the identification number
me.get('processes')[0].set('uid', uid)
try:
initial_states.append(sorted(list(set((p.get_initial_pdg(1),p.get_initial_pdg(2)) for \
p in me.born_matrix_element.get('processes')))))
except IndexError:
initial_states.append(sorted(list(set((p.get_initial_pdg(1)) for \
p in me.born_matrix_element.get('processes')))))
for fksreal in me.real_processes:
# Pick out all initial state particles for the two beams
try:
initial_states.append(sorted(list(set((p.get_initial_pdg(1),p.get_initial_pdg(2)) for \
p in fksreal.matrix_element.get('processes')))))
except IndexError:
initial_states.append(sorted(list(set((p.get_initial_pdg(1)) for \
p in fksreal.matrix_element.get('processes')))))
# remove doubles from the list
checked = []
for e in initial_states:
if e not in checked:
checked.append(e)
initial_states=checked
self._curr_matrix_elements.set('initial_states',initial_states)
else:
#new NLO generation
if self._curr_matrix_elements['has_loops']:
self._curr_exporter.opt['mp'] = True
self._curr_exporter.model = self._curr_model
ndiags = 0
cpu_time2 = time.time()
return ndiags, cpu_time2 - cpu_time1
# Start of the actual routine
ndiags, cpu_time = generate_matrix_elements(self, group=group_processes)
calls = 0
path = self._export_dir
if self._export_format in ['NLO']:
path = os.path.join(path, 'SubProcesses')
#_curr_matrix_element is a FKSHelasMultiProcess Object
self._fks_directories = []
proc_charac = self._curr_exporter.proc_characteristic
for charac in ['has_isr', 'has_fsr', 'has_loops']:
proc_charac[charac] = self._curr_matrix_elements[charac]
# prepare for the generation
# glob_directories_map is for the new NLO generation
global glob_directories_map
glob_directories_map = []
# Save processes instances generated
self.born_processes_for_olp = []
self.born_processes = []
for ime, me in \
enumerate(self._curr_matrix_elements.get('matrix_elements')):
if not self.options['low_mem_multicore_nlo_generation']:
#me is a FKSHelasProcessFromReals
calls = calls + \
self._curr_exporter.generate_directories_fks(me,
self._curr_helas_model,
ime, len(self._curr_matrix_elements.get('matrix_elements')),
path,self.options['OLP'])
self._fks_directories.extend(self._curr_exporter.fksdirs)
self.born_processes_for_olp.append(me.born_matrix_element.get('processes')[0])
self.born_processes.append(me.born_matrix_element.get('processes'))
else:
glob_directories_map.append(\
[self._curr_exporter, me, self._curr_helas_model,
ime, len(self._curr_matrix_elements.get('matrix_elements')),
path, self.options['OLP']])
if self.options['low_mem_multicore_nlo_generation']:
# start the pool instance with a signal instance to catch ctr+c
logger.info('Writing directories...')
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
if self.ncores_for_proc_gen < 0: # use all cores
pool = multiprocessing.Pool(maxtasksperchild=1)
else:
pool = multiprocessing.Pool(processes=self.ncores_for_proc_gen,maxtasksperchild=1)
signal.signal(signal.SIGINT, original_sigint_handler)
try:
# the very large timeout passed to get is to be able to catch
| |
# Copyright (c) 2021 Institute for Quantum Computing, Baidu Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
此模块包含处理 MBQC 测量模式的相关操作。
"""
from numpy import pi
from paddle import to_tensor, multiply
from paddle_quantum.mbqc.qobject import Pattern, Circuit
from paddle_quantum.mbqc.utils import div_str_to_float, int_to_div_str, print_progress
__all__ = [
"MCalculus"
]
class MCalculus:
r"""定义测量模式类。
跟据文献 [The measurement calculus, arXiv: 0704.1263] 的测量语言,该类提供处理测量模式的各种基本操作。
"""
def __init__(self):
r"""``MCalculus`` 的构造函数,用于实例化一个 ``MCalculus`` 对象。
"""
self.__circuit_slice = [] # Restore the information of sliced circuit
self.__wild_pattern = [] # Record the background pattern
self.__wild_commands = [] # Record wild commands information
self.__pattern = None # Record standard pattern
self.__measured_qubits = [] # Record the measured qubits in the circuit model
self.__circuit_width = None # Record the circuit width
self.__track = False # Switch of progress bar
def track_progress(self, track=True):
r"""显示测量模式处理过程的进度条开关。
Args:
track (bool, optional): ``True`` 为打开进度条显示,``False`` 为关闭进度条显示,默认为 ``True``
"""
assert isinstance(track, bool), "parameter 'track' must be a bool."
self.__track = track
    def set_circuit(self, circuit):
        r"""Set the quantum circuit this ``MCalculus`` instance will translate.

        Args:
            circuit (Circuit): quantum circuit to translate into a measurement pattern
        """
        assert isinstance(circuit, Circuit), "please input a parameter of type 'Circuit'."
        assert circuit.is_valid(), "the circuit is not valid as at least one qubit is not performed any gate yet."
        self.__circuit_width = circuit.get_width()
        # Slice the circuit into per-gate pieces, translate each piece into a
        # measurement (sub-)pattern, then join the pieces into one pattern.
        self.__slice_circuit(circuit.get_circuit())
        for gate in self.__circuit_slice:
            self.__to_pattern(gate)
        self.__join_patterns()
    def __slice_circuit(self, circuit):
        r"""Slice the circuit, marking input and output nodes of every gate and measurement.

        Note:
            This is an internal method; users do not need to call it directly.
            Node labels are ``str``-typed fractions, e.g. ``("1/1", "2/1")``, so
            that the original grid coordinates are preserved for later plotting.

        Args:
            circuit (list): circuit as a list whose elements each describe one
                quantum gate or measurement
        """
        # Slice the circuit to mark the input_/output_ labels for measurement pattern.
        # 'counter' gets one entry per consumed gate slot of each qubit, so
        # counter.count(q) is the current column index of qubit q.
        counter = []
        for gate in circuit:
            name = gate[0]
            which_qubit = gate[1]
            assert len(which_qubit) in [1, 2], str(len(which_qubit)) + "-qubit gate is not supported in this version."
            if len(which_qubit) == 1:  # Single-qubit gates
                which_qubit = which_qubit[0]  # Take the item only
                assert which_qubit not in self.__measured_qubits, \
                    "please check your qubit index as this qubit has already been measured."
                input_ = [(int_to_div_str(which_qubit), int_to_div_str(int(counter.count(which_qubit))))]
                if name == 'm':
                    output_ = []  # No output_ node for measurement
                else:
                    output_ = [(int_to_div_str(which_qubit), int_to_div_str(int(counter.count(which_qubit) + 1)))]
                counter += [which_qubit]  # Record the index
                # The gate after slicing has a form of:
                # [original_gate, input_, output_], e.g. = [[h, [0], None], input_, output_]
                self.__circuit_slice.append([gate, input_, output_])
            else:  # Two-qubit gates
                control = which_qubit[0]
                target = which_qubit[1]
                assert control not in self.__measured_qubits and target not in self.__measured_qubits, \
                    "please check your qubit indices as these qubits have already been measured."
                if name == 'cz':  # Input and output nodes coincide for CZ gate
                    input_output = [(int_to_div_str(control), int_to_div_str(int(counter.count(control)))),
                                    (int_to_div_str(target), int_to_div_str(int(counter.count(target))))]
                    # The gate after slicing has a form of:
                    # [original_gate, input_, output_], e.g. = [[cz, [0, 1], None], input_, output_]
                    self.__circuit_slice.append([gate, input_output, input_output])
                elif name == 'cnot':
                    # CNOT advances the column only on the target wire.
                    input_ = [(int_to_div_str(control), int_to_div_str(int(counter.count(control)))),
                              (int_to_div_str(target), int_to_div_str(int(counter.count(target))))]
                    output_ = [(int_to_div_str(control), int_to_div_str(int(counter.count(control)))),
                               (int_to_div_str(target), int_to_div_str(int(counter.count(target) + 1)))]
                    counter += [target]  # Record the index
                    # The gate after slicing has a form of:
                    # [original_gate, input_, output_], e.g. = [[cnot, [0, 1], None], input_, output_]
                    self.__circuit_slice.append([gate, input_, output_])
                else:
                    # Generic two-qubit gate: both wires advance one column.
                    input_ = [(int_to_div_str(control), int_to_div_str(int(counter.count(control)))),
                              (int_to_div_str(target), int_to_div_str(int(counter.count(target))))]
                    output_ = [(int_to_div_str(control), int_to_div_str(int(counter.count(control) + 1))),
                               (int_to_div_str(target), int_to_div_str(int(counter.count(target) + 1)))]
                    counter += which_qubit  # Record the index
                    # The gate after slicing has a form of:
                    # [original_gate, input_, output_], e.g. = [[h, [0], None], input_, output_]
                    self.__circuit_slice.append([gate, input_, output_])
@staticmethod
def __set_ancilla_label(input_, output_, ancilla_num_list=None):
r"""插入辅助比特。
在输入比特和输出比特中间插入辅助比特,辅助比特节点的坐标根据数目而均分,其标签类型与输入和输出比特的标签类型相同。
Note:
这是内部方法,用户不需要直接调用到该方法。
Args:
input_ (list): 测量模式的输入节点
output_ (list): 测量模式的输出节点
ancilla_num_list (list): 需要插入的辅助节点个数列表
Returns:
list: 辅助节点标签列表
"""
assert len(input_) == len(output_), "input and output must have same length."
assert len(input_) in [1, 2], str(len(input_)) + "-qubit gate is not supported in this version."
ancilla_num = [] if ancilla_num_list is None else ancilla_num_list
ancilla_labels = []
for i in range(len(ancilla_num)):
input_qubit = input_[i] # Obtain input qubit
row_in = div_str_to_float(input_qubit[0]) # Row of input qubit
col_in = div_str_to_float(input_qubit[1]) # Column of input qubit
output_qubit = output_[i] # Obtain output qubit
row_out = div_str_to_float(output_qubit[0]) # Row of output qubit
col_out = div_str_to_float(output_qubit[1]) # Column of output qubit
assert row_in == row_out, "please check the qubit labels of your input."
# Calculate Auxiliary qubits' positions
col = col_out - col_in
pos = [int_to_div_str(int(col_in * (ancilla_num[i] + 1) + j * col), ancilla_num[i] + 1)
for j in range(1, ancilla_num[i] + 1)]
# Get the ancilla_labels
for k in range(ancilla_num[i]):
ancilla_labels.append((input_qubit[0], pos[k]))
return ancilla_labels
def __to_pattern(self, gate):
r"""将量子电路中的门和测量翻译为等价的测量模式。
Note:
这是内部方法,用户不需要直接调用到该方法。
Warning:
当前版本支持的量子门为 ``[H, X, Y, Z, S, T, Rx, Ry, Rz, Rz_5, U, CNOT, CNOT_15, CZ]`` 和单比特测量。
注意量子门和测量对应的测量模式不唯一,本方法目前仅选取常用的一种或者两种测量模式进行翻译。
Args:
gate (list): 待翻译的量子门或量子测量,列表中存储的是原始量子门(其中包含量子门名称、作用比特、参数)、输入比特、输出比特
"""
original_gate, input_, output_ = gate
name, which_qubit, param = original_gate
ancilla = []
zero = to_tensor([0], dtype="float64")
minus_one = to_tensor([-1], dtype="float64")
half_pi = to_tensor([pi / 2], dtype="float64")
minus_half_pi = to_tensor([-pi / 2], dtype="float64")
minus_pi = to_tensor([-pi], dtype="float64")
if name == 'h': # Hadamard gate
E = Pattern.CommandE([input_[0], output_[0]])
M = Pattern.CommandM(input_[0], zero, "XY", [], [])
X = Pattern.CommandX(output_[0], [input_[0]])
commands = [E, M, X]
elif name == 'x': # Pauli X gate
ancilla = self.__set_ancilla_label(input_, output_, [1])
E12 = Pattern.CommandE([input_[0], ancilla[0]])
E23 = Pattern.CommandE([ancilla[0], output_[0]])
M1 = Pattern.CommandM(input_[0], zero, "XY", [], [])
M2 = Pattern.CommandM(ancilla[0], minus_pi, "XY", [], [])
X3 = Pattern.CommandX(output_[0], [ancilla[0]])
Z3 = Pattern.CommandZ(output_[0], [input_[0]])
commands = [E12, E23, M1, M2, X3, Z3]
elif name == 'y': # Pauli Y gate
ancilla = self.__set_ancilla_label(input_, output_, [3])
E12 = Pattern.CommandE([input_[0], ancilla[0]])
E23 = Pattern.CommandE([ancilla[0], ancilla[1]])
E34 = Pattern.CommandE([ancilla[1], ancilla[2]])
E45 = Pattern.CommandE([ancilla[2], output_[0]])
M1 = Pattern.CommandM(input_[0], half_pi, "XY", [], [])
M2 = Pattern.CommandM(ancilla[0], half_pi, "XY", [], [])
M3 = Pattern.CommandM(ancilla[1], minus_half_pi, "XY", [], [input_[0], ancilla[0]])
M4 = Pattern.CommandM(ancilla[2], zero, "XY", [], [ancilla[0]])
X5 = Pattern.CommandX(output_[0], [ancilla[2]])
Z5 = Pattern.CommandZ(output_[0], [ancilla[1]])
commands = [E12, E23, E34, E45, M1, M2, M3, M4, X5, Z5]
elif name == 'z': # Pauli Z gate
ancilla = self.__set_ancilla_label(input_, output_, [1])
E12 = Pattern.CommandE([input_[0], ancilla[0]])
E23 = Pattern.CommandE([ancilla[0], output_[0]])
M1 = Pattern.CommandM(input_[0], minus_pi, "XY", [], [])
M2 = Pattern.CommandM(ancilla[0], zero, "XY", [], [])
X3 = Pattern.CommandX(output_[0], [ancilla[0]])
Z3 = Pattern.CommandZ(output_[0], [input_[0]])
commands = [E12, E23, M1, M2, X3, Z3]
elif name == 's': # Phase gate
ancilla = self.__set_ancilla_label(input_, output_, [1])
E12 = Pattern.CommandE([input_[0], ancilla[0]])
E23 = Pattern.CommandE([ancilla[0], output_[0]])
M1 = Pattern.CommandM(input_[0], minus_half_pi, "XY", [], [])
M2 = Pattern.CommandM(ancilla[0], zero, "XY", [], [])
X3 = Pattern.CommandX(output_[0], [ancilla[0]])
Z3 = Pattern.CommandZ(output_[0], [input_[0]])
commands = [E12, E23, M1, M2, X3, Z3]
elif name == 't': # T gate
ancilla = self.__set_ancilla_label(input_, output_, [1])
E12 = Pattern.CommandE([input_[0], ancilla[0]])
E23 = Pattern.CommandE([ancilla[0], output_[0]])
M1 = Pattern.CommandM(input_[0], to_tensor([-pi / 4], dtype="float64"), "XY", [], [])
M2 = Pattern.CommandM(ancilla[0], zero, "XY", [], [])
X3 = Pattern.CommandX(output_[0], [ancilla[0]])
Z3 = Pattern.CommandZ(output_[0], [input_[0]])
commands = [E12, E23, M1, M2, X3, Z3]
elif name == 'rx': # Rotation gate around x axis
ancilla = self.__set_ancilla_label(input_, output_, [1])
E12 = Pattern.CommandE([input_[0], ancilla[0]])
E23 = Pattern.CommandE([ancilla[0], output_[0]])
M1 = Pattern.CommandM(input_[0], zero, "XY", [], [])
M2 = Pattern.CommandM(ancilla[0], multiply(param, minus_one), "XY", [input_[0]], [])
X3 = Pattern.CommandX(output_[0], [ancilla[0]])
Z3 = Pattern.CommandZ(output_[0], [input_[0]])
commands = [E12, E23, M1, M2, X3, Z3]
elif name == 'ry': # Rotation gate around y axis
ancilla = self.__set_ancilla_label(input_, output_, [3])
E12 = Pattern.CommandE([input_[0], ancilla[0]])
E23 = Pattern.CommandE([ancilla[0], ancilla[1]])
E34 = Pattern.CommandE([ancilla[1], ancilla[2]])
E45 = Pattern.CommandE([ancilla[2], output_[0]])
M1 = Pattern.CommandM(input_[0], half_pi, "XY", [], [])
M2 = Pattern.CommandM(ancilla[0], multiply(param, minus_one), "XY", [input_[0]], [])
M3 = Pattern.CommandM(ancilla[1], minus_half_pi, "XY", [], [input_[0], ancilla[0]])
M4 = Pattern.CommandM(ancilla[2], zero, "XY", [], [ancilla[0]])
X5 = Pattern.CommandX(output_[0], [ancilla[2]])
Z5 = Pattern.CommandZ(output_[0], [ancilla[1]])
commands = [E12, E23, E34, E45, M1, M2, M3, M4, X5, Z5]
elif name == 'rz': # Rotation gate around z axis
ancilla = self.__set_ancilla_label(input_, output_, [1])
E12 = Pattern.CommandE([input_[0], ancilla[0]])
E23 = Pattern.CommandE([ancilla[0], | |
= new_masks_softmax.max(dim=1)[0].view(-1).topk(8, largest=False)[0][-1].item()
pending_thresh = max(0.02, max_topk)
new_pos = torch.nonzero(new_masks_softmax[0].max(dim=0)[0] < pending_thresh)
if len(new_pos) > new_pos_limit_2:
# import pdb; pdb.set_trace()
raw_pos = new_masks_softmax.max(dim=1)[0].view(-1).topk(new_pos_limit_2, largest=False)[1]
new_pos_0 = raw_pos // x_curr.shape[-1]
new_pos_1 = raw_pos % x_curr.shape[-1]
new_pos = torch.cat((new_pos_0.view(-1,1), new_pos_1.view(-1,1)), dim=1)
# TODO: 限制新增pos数量,gpu要爆, 依据new_masks_softmax保最小的
new_occupy = 1.0*len(new_pos) / x_curr.shape[-2] / x_curr.shape[-1]
if new_occupy > 0.5 or len(new_pos) <8-1:
print('new_occupy:{}| len(new_pos):{}'.format(new_occupy, len(new_pos)))
new_pyramids = [InstancePyramid(pos, curr_level, level_sizes) for pos in new_pos]
# import pdb; pdb.set_trace()
self.compute_mask(curr_level, x_curr[[i]], new_pyramids, True)
merit_pyramids_idx = new_masks_softmax.topk(2, dim=1)[1].unique()
merit_pyramids = [inst_pyramids[i] for i in range(len(inst_pyramids)) if i in merit_pyramids_idx]
target_len_2_before = sum([len(l) for l in target_support_pyramids_0])
for i2 in range(len(inst_pyramids)):
if i2 not in merit_pyramids_idx:
die_id = inst_pyramids[i2].idx
die_target_idx = inst_pyramids[i2].target_idx
if die_target_idx:
target_support_pyramids_0[die_target_idx].remove(die_id)
target_len_2_after = sum([len(l) for l in target_support_pyramids_0])
# if target_len_2_before != target_len_2_after:
# import pdb; pdb.set_trace()
inst_pyramids = merit_pyramids + new_pyramids
self.log_dict.update({'pyr_num_l2': len(inst_pyramids)})
if self.training:
# self.match_target(curr_level, new_pyramids, target_levels, target_support_pyramids_0)
loss_2 = self.compute_loss(curr_level, inst_pyramids, target_levels, target_support_pyramids_0)
losses_2.append(loss_2)
if not self.training:
test_masks.append(inst_pyramids)
self.log_dict.update({'InstPyr_inst_count': InstancePyramid.inst_count})
# import pdb; pdb.set_trace()
losses['level_0']= sum(loss for loss in losses_0)
losses['level_1']= sum(loss for loss in losses_1)
losses['level_2']= sum(loss for loss in losses_2)
losses['level_3']= sum(loss for loss in losses_3)
losses['level_4']= sum(loss for loss in losses_4)
return losses if self.training else test_masks
    def forward_singel_level(self, curr_level, inst_pyramids, x_curr, i, level_sizes,
                    target_support_pyramids, target_levels, losses_i):
        """Process one pyramid level for sample ``i``.

        Upsamples the masks of existing instance pyramids at this level, spawns
        new pyramid proposals at pixels where no existing proposal is confident,
        prunes proposals that never reach the per-pixel top-2, and (in training)
        matches new proposals to targets and appends this level's loss to
        ``losses_i``. Returns the surviving plus newly spawned pyramids.
        """
        new_pos_limit = [100, 50, 50, 50, 50, 50, 50]
        # Hard cap on the total number of proposals kept at this level.
        new_pos_quota = 80
        # Guard against numerically exploded or NaN features before proceeding.
        if x_curr[[i]].abs().max() > 1e20 or torch.isnan(x_curr[[i]].max()):
            # if torch.isnan(x_curr[[i]].max()):
            print(curr_level, '\n', x_curr[[i]])
            import pdb; pdb.set_trace()
        # Generate upsampled masks at this level for the existing mask pyramids.
        self.compute_mask(curr_level, x_curr[[i]], inst_pyramids)
        # TODO: consider other ways to compute new_masks, e.g. the single channel
        # of a multi-target cross-entropy loss.
        new_masks_minus = torch.cat([i_p.get_mask(curr_level)[:,[1]] - i_p.get_mask(curr_level)[:,[0]] for i_p in inst_pyramids], dim=1)
        new_masks_softmax = F.softmax(new_masks_minus,dim=1)
        # avg_sharing = 1.0 / len(inst_pyramids)
        # num_pixels = int(new_masks_softmax.shape[-1]*new_masks_softmax.shape[-2])
        # top_percent = new_masks_softmax.view(-1).topk(int(num_pixels*(1-0.3)))[0][-1].item()
        # max_topk = new_masks_softmax.max(dim=1)[0].view(-1).topk(num_pixels-3)[0][-1].item()
        # 8th-smallest per-pixel max confidence over the image.
        max_topk = new_masks_softmax.max(dim=1)[0].view(-1).topk(8, largest=False)[0][-1].item()
        # Guarantee at least ~8 candidate pixels: where no existing proposal
        # claims a pixel above pending_thresh, a new proposal is spawned.
        # A higher pending_thresh spawns more new positions, so max_topk acts
        # as a floor combined with the fixed 0.02 ratio.
        pending_thresh = max(0.02, max_topk)
        new_pos = torch.nonzero(new_masks_softmax[0].max(dim=0)[0] < pending_thresh)
        # if len(new_pos) > new_pos_limit[curr_level]:
        #     # import pdb; pdb.set_trace()
        #     raw_pos = new_masks_softmax.max(dim=1)[0].view(-1).topk(new_pos_limit[curr_level], largest=False)[1]
        #     new_pos_0 = raw_pos // x_curr.shape[-1]
        #     new_pos_1 = raw_pos % x_curr.shape[-1]
        #     new_pos = torch.cat((new_pos_0.view(-1,1), new_pos_1.view(-1,1)), dim=1)
        # import pdb; pdb.set_trace()
        # Enforce the overall proposal quota: keep only the least-claimed pixels.
        if len(inst_pyramids) + len(new_pos) > new_pos_quota:
            available_number = max(0, new_pos_quota - len(inst_pyramids))
            if available_number:
                raw_pos = new_masks_softmax.max(dim=1)[0].view(-1).topk(available_number, largest=False)[1]
                # Convert flat indices back to (row, col) coordinates.
                new_pos_0 = raw_pos // x_curr.shape[-1]
                new_pos_1 = raw_pos % x_curr.shape[-1]
                new_pos = torch.cat((new_pos_0.view(-1,1), new_pos_1.view(-1,1)), dim=1)
            else:
                new_pos = []
        new_occupy = 1.0*len(new_pos) / x_curr.shape[-2] / x_curr.shape[-1]
        # if new_occupy > 0.5 or len(new_pos) <8:
        # if new_occupy > 0.5 or len(new_pos) <8-1:
        #     print('new_occupy:{}| len(new_pos):{}'.format(new_occupy, len(new_pos)))
        #     import pdb; pdb.set_trace()
        new_pyramids = [InstancePyramid(pos, curr_level, level_sizes) for pos in new_pos]
        self.compute_mask(curr_level, x_curr[[i]], new_pyramids, True)
        # Evict pyramids that hold no territory, i.e. never reach the per-pixel
        # top-2 anywhere; keep the target-support bookkeeping consistent, and
        # track the targets left without any supporting pyramid.
        merit_pyramids_idx = new_masks_softmax.topk(2, dim=1)[1].unique()
        # merit_pyramids_idx = new_masks_softmax.topk(3, dim=1)[1].unique()
        # (the comprehension variable 'i' shadows the sample-index parameter
        # only inside the comprehension scope)
        merit_pyramids = [inst_pyramids[i] for i in range(len(inst_pyramids)) if i in merit_pyramids_idx]
        target_len_before = sum([len(l) for l in target_support_pyramids])
        # target_len_1_before = sum([len(l) for l in target_support_pyramids_0])
        # import pdb; pdb.set_trace()
        for reduce_i in range(len(inst_pyramids)):
            if reduce_i not in merit_pyramids_idx:
                die_id = inst_pyramids[reduce_i].idx
                die_target_idx = inst_pyramids[reduce_i].target_idx
                # NOTE(review): this condition is falsy for target index 0 as
                # well as for None -- confirm target_idx can never legitimately
                # be 0, otherwise this should be 'if die_target_idx is not None:'.
                if die_target_idx:
                    target_support_pyramids[die_target_idx].remove(die_id)
                    # target_support_pyramids_0[die_target_idx].remove(die_id)
        # NOTE(review): target_len_before/target_len_after are debug leftovers;
        # the comparison that used them is commented out below.
        target_len_after = sum([len(l) for l in target_support_pyramids])
        # target_len_1_after = sum([len(l) for l in target_support_pyramids_0])
        # if target_len_1_before != target_len_1_after:
        #     import pdb; pdb.set_trace()
        # import pdb; pdb.set_trace()
        inst_pyramids = merit_pyramids + new_pyramids
        # self.log_dict.update({'pyr_num_l1': len(inst_pyramids)})
        self.log_dict.update({'pyr_num_l'+str(curr_level): len(inst_pyramids)})
        if self.training:
            # Match newly spawned proposals to targets, then accumulate this level's loss.
            self.match_target(curr_level, new_pyramids, target_levels, target_support_pyramids)
            loss = self.compute_loss(curr_level, inst_pyramids, target_levels, target_support_pyramids)
            losses_i.append(loss)
        # import pdb; pdb.set_trace()
        return inst_pyramids
    def forward(self, image, targets=None):
        """Run the mask-pyramid model over a batch of images.

        Args:
            image: image-list style object; only ``image.tensors`` (the
                batched pixel tensor, shape (N, C, H, W)) is read here.
            targets: per-image ground truth; required when ``self.training``,
                ignored at inference time.

        Returns:
            dict of per-level summed losses when training, otherwise a list
            with one entry per image holding the surviving instance pyramids.
        """
        x_img = image.tensors
        # xs_r50 = self.r50(x_img)
        # import pdb; pdb.set_trace()
        # backbone feature maps, then two extra downsampling layers on top
        xs_r50 = self.resnet50(x_img)
        xs_r50.append(self.res_layer_5(xs_r50[-1]))
        xs_r50.append(self.res_layer_6(xs_r50[-1]))
        # print('r50 max values:', [f.max().item() for f in xs_r50])
        N, _, img_size_h, img_size_w = x_img.shape
        device = x_img.device
        # spatial (h, w) of each level, coarsest first (xs_r50 reversed)
        level_sizes = [tuple(f.shape[-2:]) for f in xs_r50[::-1]]
        losses = {}
        # per-level loss accumulators; levels 4/5 are disabled further down,
        # so losses_4 stays empty (sums to 0) and losses_5 is never reported
        losses_0 = []
        losses_1 = []
        losses_2 = []
        losses_3 = []
        losses_4 = []
        losses_5 = []
        test_masks = []
        target_support_pyramids = None
        # images are processed one at a time
        for i in range(N):
            InstancePyramid.inst_count = 0
            curr_level = 0
            x_curr = xs_r50[-1]
            # seed one pyramid at every cell of the coarsest feature map
            init_pos = torch.nonzero(torch.ones_like(x_curr[0][0]))
            inst_pyramids = [InstancePyramid(pos, curr_level, level_sizes) for pos in init_pos]
            # debug trap: catch numerical blow-ups / NaNs in the features
            if x_curr[[i]].abs().max() > 1e19 or torch.isnan(x_curr[[i]].max()):
                import pdb; pdb.set_trace()
            self.compute_mask(curr_level, x_curr[[i]], inst_pyramids, True)
            self.log_dict.update({'pyr_num_l0': len(inst_pyramids)})
            target_levels = None
            if self.training:
                target_levels = self._init_target((img_size_h, img_size_w ), device, targets[i])
                # one support list per ground-truth instance
                target_support_pyramids = [[] for k in range(target_levels[7].shape[1])]
                # tally which pyramids match each target
                self.match_target(0, inst_pyramids, target_levels, target_support_pyramids)
                loss_0 = self.compute_loss(0, inst_pyramids, target_levels, target_support_pyramids)
                losses_0.append(loss_0)
            # print(0, 'len(inst_pyramids)', len(inst_pyramids), target_support_pyramids)
            # refine through levels 1..3, with a debug trap before each level
            if xs_r50[-2][[i]].abs().max() > 1e20 or torch.isnan(xs_r50[-2][[i]].max()):
                print(1, '\n', xs_r50[-2][[i]])
                import pdb; pdb.set_trace()
            inst_pyramids = self.forward_singel_level(1, inst_pyramids, xs_r50[-2], i, level_sizes,
                                target_support_pyramids, target_levels, losses_1)
            # print(1, 'len(inst_pyramids)', len(inst_pyramids), target_support_pyramids)
            if xs_r50[-3][[i]].abs().max() > 1e20 or torch.isnan(xs_r50[-3][[i]].max()):
                print(2, '\n', xs_r50[-3][[i]])
                import pdb; pdb.set_trace()
            inst_pyramids = self.forward_singel_level(2, inst_pyramids, xs_r50[-3], i, level_sizes,
                                target_support_pyramids, target_levels, losses_2)
            # print(2, 'len(inst_pyramids)', len(inst_pyramids), target_support_pyramids)
            if xs_r50[-4][[i]].abs().max() > 1e20 or torch.isnan(xs_r50[-4][[i]].max()):
                print(3, '\n', xs_r50[-4][[i]])
                import pdb; pdb.set_trace()
            inst_pyramids = self.forward_singel_level(3, inst_pyramids, xs_r50[-4], i, level_sizes,
                                target_support_pyramids, target_levels, losses_3)
            # print(3, 'len(inst_pyramids)', len(inst_pyramids), target_support_pyramids)
            # levels 4 and 5 are currently disabled:
            # inst_pyramids = self.forward_singel_level(4, inst_pyramids, xs_r50[-5], i, level_sizes,
            #                     target_support_pyramids, target_levels, losses_4)
            # print(4, 'len(inst_pyramids)', len(inst_pyramids), target_support_pyramids)
            # inst_pyramids = self.forward_singel_level(5, inst_pyramids, xs_r50[-6], i, level_sizes,
            #                     target_support_pyramids, target_levels, losses_5)
            # print(5, 'len(inst_pyramids)', len(inst_pyramids), target_support_pyramids)
            # import pdb; pdb.set_trace()
            if not self.training:
                test_masks.append(inst_pyramids)
            self.log_dict.update({'InstPyr_inst_count': InstancePyramid.inst_count})
        # import pdb; pdb.set_trace()
        # sum losses across the batch for each level
        losses['level_0']= sum(loss for loss in losses_0)
        losses['level_1']= sum(loss for loss in losses_1)
        losses['level_2']= sum(loss for loss in losses_2)
        losses['level_3']= sum(loss for loss in losses_3)
        losses['level_4']= sum(loss for loss in losses_4)
        return losses if self.training else test_masks
class InstancePyramid():
    """One instance hypothesis tracked across the feature-pyramid levels.

    A pyramid is seeded at a position on some level and accumulates a mask
    (and mask logits) per level as it is refined.  ``target_idx`` links it to
    a ground-truth instance during training.
    """
    # class-wide counter used to hand out unique ids (reset per image by the caller)
    inst_count = 0
    def __init__(self, pos, init_pub_level, level_sizes):
        # unique id within the current image
        self.idx = InstancePyramid.inst_count
        InstancePyramid.inst_count += 1
        # level the pyramid was created on; per-level dicts below are keyed
        # RELATIVE to this level (see set_mask/get_mask)
        self.init_level = init_pub_level
        self.level_sizes = level_sizes
        # (row, col) position on the init level's feature map
        self.pos = pos
        self.masks = {}
        self.mask_logits = {}
        self.class_logits = None
        # index of the matched ground-truth instance, or None if unmatched
        self.target_idx = None
        self.is_alive = True
        # torch.tensor(800.0/2**(7-self.init_level)).ceil().long().item()
        # nominal feature-map sizes per level -- TODO confirm against backbone strides
        self.feature_scales = [7, 13, 25, 50, 100, 200]
        # self.gaussian_masks = self.generate_gaussian_masks()
        # import pdb; pdb.set_trace()
        self.shared_gaussian_mask = self.shared_gaussian_masks()
    def set_mask(self, pub_level, mask):
        # store keyed by offset from the init level
        self.masks[pub_level - self.init_level] = mask
    def get_mask(self, pub_level):
        return self.masks[pub_level - self.init_level]
    def set_mask_logits(self, pub_level, mask_logits):
        self.mask_logits[pub_level - self.init_level] = mask_logits
    def get_mask_logits(self, pub_level):
        return self.mask_logits[pub_level - self.init_level]
    def bind_target(self, idx):
        # associate this pyramid with ground-truth instance `idx`
        self.target_idx = idx
    def compute_loss(self, target, pub_level):
        # NOTE(review): unimplemented stub -- drops straight into the debugger
        import pdb; pdb.set_trace()
    def get_root_level_pos(self, pub_level):
        """Rescale this pyramid's seed position onto level `pub_level`'s grid."""
        init_size = self.level_sizes[self.init_level]
        req_size = self.level_sizes[pub_level]
        h = (self.pos[0].float() / init_size[0] * req_size[0]).round().long()
        w = (self.pos[1].float() / init_size[1] * req_size[1]).round().long()
        return (h.item(), w.item())
    def get_root_response(self, pub_level):
        """Mask value at the (rescaled) seed position on level `pub_level`."""
        init_size = self.level_sizes[self.init_level]
        req_size = self.level_sizes[pub_level]
        # h1 = (self.pos[0].float() / init_size[0] * req_size[0]).floor()
        # h2 = ((self.pos[0].float()+1) / init_size[0] * req_size[0]).ceil()
        # w1 = (self.pos[1].float() / init_size[1] * req_size[1]).floor()
        # w2 = ((self.pos[1].float()+1) / init_size[1] * req_size[1]).ceil()
        h = (self.pos[0].float() / init_size[0] * req_size[0]).round().long()
        w = (self.pos[1].float() / init_size[1] * req_size[1]).round().long()
        # assumes the stored mask is (1, 1, H, W) -- TODO confirm
        points = self.masks[pub_level - self.init_level][0,0,h, w]
        return points
    def generate_gaussian_masks_old(self):
        # legacy per-level variant, superseded by shared_gaussian_masks()
        # torch.tensor(800.0/2**(7-self.init_level)).ceil().long().item()
        # feature_scales = [7, 13, 25, 50, 100, 200]
        gaussian_masks = []
        for i in range(len(self.feature_scales)):
            f_scale = self.feature_scales[i]
            xs = torch.arange(f_scale*4)
            ys = torch.arange(f_scale*4).view(-1,1)
            gaussian_masks.append((-4*(torch.tensor(2.0, requires_grad=False)).log()*((xs.float()-f_scale*\
                2+1)**2+(ys.float()-f_scale*2+1)**2)/f_scale**2).exp())
        return gaussian_masks
    def get_feature_gaussian_mask_old(self, pub_level, feature_size):
        # legacy: crops the per-level mask; NOTE(review): reads
        # self.gaussian_masks which is commented out in __init__, so this
        # method would raise AttributeError if ever called
        # gaussian_mask = self.gaussian_masks[pub_level - self.init_level]
        feature_size = tuple(feature_size)
        gaussian_mask = self.gaussian_masks[pub_level]
        level_pos = self.get_root_level_pos(pub_level)
        ctr = (self.feature_scales[pub_level]*2-1,)*2
        feature_g_mask = gaussian_mask[ctr[0]-level_pos[0]:ctr[0]-level_pos[0]+feature_size[0], \
            ctr[1]-level_pos[1]:ctr[1]-level_pos[1]+feature_size[1]]
        return feature_g_mask
    def shared_gaussian_masks(self):
        """Build the single 28x28 Gaussian bump shared by all pyramids."""
        # feature_scales = [7, 13, 25, 50, 100, 200]
        xs = torch.arange(7*4)
        ys = torch.arange(7*4).view(-1,1)
        ln2 = torch.tensor(2.0, requires_grad=False).log()
        # shared_gaussian_mask = (-4*ln2*((xs.float()-7*2+1)**2+(ys.float()-7*2+1)**2)/7**2).exp()
        # peak at (13, 13); width parameter fixed at 7
        shared_gaussian_mask = (-ln2*((xs.float()-7*2+1)**2+(ys.float()-7*2+1)**2)/7**2).exp()
        return shared_gaussian_mask
    def get_feature_gaussian_mask(self, pub_level, feature_c0):
        """Paste the shared 28x28 Gaussian, centred on this pyramid's seed
        position, into a zero tensor shaped like `feature_c0` (clipped at the
        feature-map borders).  13 is the bump's centre, 28 its full size."""
        level_pos = self.get_root_level_pos(pub_level)
        feature_g_mask = torch.zeros_like(feature_c0)
        # source window inside the 28x28 shared mask
        src_x0 = max(13-level_pos[0], 0)
        src_y0 = max(13-level_pos[1], 0)
        # src_x1 = min(src_x0+feature_c0.shape[0], 28)
        # src_y1 = min(src_y0+feature_c0.shape[1], 28)
        src_x1 = min(13+feature_c0.shape[0]-level_pos[0], 28)
        src_y1 = min(13+feature_c0.shape[1]-level_pos[1], 28)
        # destination window inside the feature-sized tensor
        res_x0 = max(0, level_pos[0]-13) #+1?
        res_y0 = max(0, level_pos[1]-13)
        # res_x1 = min(feature_c0.shape[0], level_pos[0]+14+1)
        # res_y1 = min(feature_c0.shape[1], level_pos[1]+14+1)
        res_x1 = res_x0+src_x1-src_x0
        res_y1 = res_y0+src_y1-src_y0
        # debug trap: windows must agree or the copy below would raise
        if feature_g_mask[res_x0:res_x1, res_y0:res_y1].shape != \
            self.shared_gaussian_mask[src_x0:src_x1, src_y0:src_y1].shape:
            print(feature_g_mask[res_x0:res_x1, res_y0:res_y1].shape)
            print(self.shared_gaussian_mask[src_x0:src_x1, src_y0:src_y1].shape)
            import pdb; pdb.set_trace()
        feature_g_mask[res_x0:res_x1, res_y0:res_y1] = self.shared_gaussian_mask[src_x0:src_x1, src_y0:src_y1]
        return feature_g_mask
def train(cfg, local_rank, distributed):
# model = build_detection_model(cfg)
model = MaskPyramids(cfg)
device = torch.device(cfg.MODEL.DEVICE)
model.to(device)
if cfg.MODEL.USE_SYNCBN:
assert is_pytorch_1_1_0_or_later(), \
"SyncBatchNorm is only available in pytorch >= 1.1.0"
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
optimizer = make_optimizer(cfg, model)
| |
#master: https://github.com/yagamiraku/tarkov_flea_bot_toTherapis
# └──fork: https://github.com/astron4ik/tarkov_flea_bot_toTherapis
# └──this fork: https://github.com/Avnsx/EFT_Flea_Market_Bot
#Master: yagamiraku | fork: astron4ik | this fork: Avn
import requests,zlib,hashlib,json,configparser,random,os,time,threading,UnityPy,subprocess
from multiprocessing.pool import ThreadPool
import getToken as token
from win32api import GetFileVersionInfo, LOWORD, HIWORD
config = configparser.ConfigParser()
# load config.ini from the installation folder; if it is missing, run
# getPath.py, which generates one, then continue
if os.path.exists('config.ini'):
    config.read_file(open('config.ini'))
else:
    print('[ERROR:] Did not find config.ini inside installation folder! Running getPath.py now, to create a config.ini file.')
    time.sleep(3)
    # NOTE(review): exec() of a script file is fragile and unsafe if
    # getPath.py can be tampered with -- consider subprocess or import instead
    exec(open('./getPath.py').read())
def get_version_number(filename):
    """Return the 4-part file version of *filename* as a tuple of ints.

    Reads the Win32 VERSIONINFO resource via pywin32 (GetFileVersionInfo).
    On any failure (no version resource, bad path, missing pywin2 bindings)
    it prints a hint, waits 10 seconds and terminates the process, matching
    the original behaviour.
    """
    try:
        info = GetFileVersionInfo (filename, "\\")
        ms = info['FileVersionMS']
        ls = info['FileVersionLS']
        # each WORD of the two version DWORDs is one component:
        # major.minor.build.revision
        return HIWORD (ms), LOWORD (ms), HIWORD (ls), LOWORD (ls)
    # was a bare `except:` -- narrowed so KeyboardInterrupt/SystemExit
    # are no longer swallowed
    except Exception:
        print('[ERROR:] Failed to fetch EFT Client version! Does getPath.py & config.ini exist in your installation folder?')
        time.sleep(10)
        exit()
if __name__ == "__main__":
ClientVersion = ".".join([str (i) for i in get_version_number (config['DEFAULT']['clientpath'])])
UnityVersion = UnityPy.load(config['DEFAULT']['unitypath']).objects[0].assets_file.unity_version
cookies = {}
headers = {'User-Agent': 'UnityPlayer/'+str(UnityVersion)+' (UnityWebRequest/1.0, libcurl/7.52.0-DEV)',
'Content-Type': 'application/json',
'App-Version': 'EFT Client '+str(ClientVersion),
'X-Unity-Version': str(UnityVersion)
}
print(' ▄')
print(' ╓▀▀▄')
print(' ╓▐▌▒█▒▌▀▀▄╖ ╓▀▀▀▀╢')
print(' ┌▀▀▀▀▒█▌▄▄▐▄▀╠ ╓╢▀▀▀▀▀▀▄')
print(' ╒╓▄▄▄▌██▌▐▒█▌▄▄▄ ╓╢▀▀▀▀▀▀▀▀▀')
print(' ▄▌▌▌▒███▌▄░▄╢▌░╠ ╙╨╝╢╢▀▀▀▀▀▀▀▄')
print(' ▒████████▌▌▌▄▌▐▀ ╒▀▀▀▀▀╠╨╝╝')
print(' ▐███▓▓▓▓█▒▌▒█▒█ READY TO ╢▀▀▀▀▀')
print(' ██▓▓▓▓▓▓█▒▌▒░ DO SOME ╒▀▀▀▀▀╠')
print(' ▐███▓▓▓▓▓▓▀▀ STONKS? ▀▀▀▀▀▀')
print(' ▄▓▓█████▓▓ B) ▐▀▀▀▀▀╛')
print(' ▄▄▓▓▓▓▓▓▓▓▀▒█▒▀╙█▄▄ ▀▀▀▀╢▀')
print(' ▄█▓▓▓▓▓▓▓▓▓▓▓▓▓▓▄▄▄▄▐▓▓▓▓▓▓█▄▄ ▐▀▀▀▀╢╛')
print(' ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▌▀▒▓▓▓▓▓▓▓▓▓▓▓ ▀▀▀▀▀▀')
print(' ▐▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▄▓▓▓▓▓▓▓▓▓▓▓▄ ╠▀▀▀▀▀╛')
print(' ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ ╒▀▀▀▀▀▀')
print(' ▒▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ ▐▀▀▀▀╢')
print(' ▐▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▄ ╨╝╝▀▀▀')
print(' ')
print('███████╗██╗ ███████╗ █████╗ ███╗ ███╗ █████╗ ██████╗ ██╗ ██╗███████╗████████╗ ██████╗ ██████╗ ████████╗')
print('██╔════╝██║ ██╔════╝██╔══██╗ ████╗ ████║██╔══██╗██╔══██╗██║ ██╔╝██╔════╝╚══██╔══╝ ██╔══██╗██╔═══██╗╚══██╔══╝')
print('█████╗ ██║ █████╗ ███████║ ██╔████╔██║███████║██████╔╝█████╔╝ █████╗ ██║ ██████╔╝██║ ██║ ██║ ')
print('██╔══╝ ██║ ██╔══╝ ██╔══██║ ██║╚██╔╝██║██╔══██║██╔══██╗██╔═██╗ ██╔══╝ ██║ ██╔══██╗██║ ██║ ██║ ')
print('██║ ███████╗███████╗██║ ██║ ██║ ╚═╝ ██║██║ ██║██║ ██║██║ ██╗███████╗ ██║ ██████╔╝╚██████╔╝ ██║ ')
print('╚═╝ ╚══════╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚══════╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ')
print('[INFO:] Welcome to Developer Version 0.32 for Game Client Version ' + str(ClientVersion)+ ' on Unity Engine Version ' + str(UnityVersion))
print('[INFO:] This github fork was developed by Avn @ unknowncheats. Additionaly huge thanks to yagamiraku & astron4ik!')
print('[INFO:] The last manual update to this code was on 07/06/2020 by Avn unknowncheats.me/forum/members/2564688.html')
RUBLES = '5449016a4bdc2d6f028b456f'
PMC = {}
Balance = 0
oldBalance = 0
moneyStacks = {}
all_item_list = {}
NPC = ""
use_full_list = True
min_price = 1287 #Minimum Price to profit in Rubles, price also changes speed of income and spent value on therapist
NPC_name = 'Терапевт' #Therapist in russian
WishList = ['5ad7247386f7747487619dc3', '5a0ee34586f774023b6ee092', '5c127c4486f7745625356c13',
'5c1e2a1e86f77431ea0ea84c', '5a13ef0686f7746e5a411744', '5a0f08bc86f77478f33b84c2',
'5a1452ee86f7746f33111763', '5a0eecf686f7740350630097', '5a0ee76686f7743698200d5c',
'5913915886f774123603c392', '5a0ee4b586f7743698200d22', '5a144bdb86f7741d374bbde0',
'5a0ee37f86f774023657a86f', '5a0eeb1a86f774688b70aa5c', '5c052f6886f7746b1e3db148',
'5a0eedb386f77403506300be', '5addaffe86f77470b455f900', '5c052e6986f7746b207bc3c9',
'5da5cdcd86f774529238fb9b', '5b43575a86f77424f443fe62', '5ad7217186f7746744498875',
'5ad7242b86f7740a6a3abd43', '5a13ee1986f774794d4c14cd', '5a0ee72c86f77436955d3435',
'5d1b327086f7742525194449', '5c1e2d1f86f77431e9280bee', '5c1f79a086f7746ed066fb8f',
'5ad5d64486f774079b080af8', '5d1b2f3f86f774252167a52c', '5a0eebed86f77461230ddb3d',
'5d0377ce86f774186372f689', '5780cf942459777df90dcb72', '5bc9bc53d4351e00367fbcee',
'5d80c93086f7744036212b41', '5a0ee62286f774369454a7ac', '5a0f006986f7741ffd2fe484',
'5d1b32c186f774252167a530', '5bc9b9ecd4351e3bac122519', '5a0ec70e86f7742c0b518fba',
'5bc9c049d4351e44f824d360', '5a0ea69f86f7741cd5406619', '5a0eeb8e86f77461257ed71a',
'590de7e986f7741b096e5f32', '5d0378d486f77420421a5ff4', '59e3647686f774176a362507',
'5d03784a86f774203e7e0c4d', '5bc9bdb8d4351e003562b8a1', '5bc9b720d4351e450201234b',
'5bc9c377d4351e3bac12251b', '5a0f045e86f7745b0f0d0e42', '59e3658a86f7741776641ac4',
'5d403f9186f7743cac3f229b', '590de71386f774347051a052', '5ad5d20586f77449be26d877',
'5d0375ff86f774186372f685', '5d1b304286f774253763a528', '5734758f24597738025ee253',
'5d235b4d86f7742e017bc88a', '59136e1e86f774432f15d133', '5d80cb8786f774405611c7d9',
'5c0e534186f7747fa1419867', '5a0f075686f7745bcc42ee12', '5c1265fc86f7743f896a21c2',
'5ad5ccd186f774446d5706e9', '5c0e531286f7747fa54205c2', '5bc9be8fd4351e00334cae6e',
'5d1b385e86f774252167b98a', '5af0561e86f7745f5f3ad6ac', '5c0e533786f7747fa23f4d47',
'5d40407c86f774318526545a', '5734773724597737fd047c14', '5c10c8fd86f7743d7d706df3',
'590a3b0486f7743954552bdb', '5d1b39a386f774252339976f', '5c0e530286f7747fa1419862',
'5af04b6486f774195a3ebb49', '5a8036fb86f77407252ddc02', '573477e124597737dd42e191',
'590a3efd86f77437d351a25b', '590c5c9f86f77477c91c36e7', '590a391c86f774385a33c404',
'59faf98186f774067b6be103', '5780d0532459777a5108b9a2', '590a3c0a86f774385a33c450',
'590a358486f77429692b2790', '56742c324bdc2d150f8b456d', '591383f186f7744a4c5edcf3',
'590c35a486f774273531c822', '5913877a86f774432f15d444', '593aa4be86f77457f56379f8',
'5a0eb38b86f774153b320eb0', '5751496424597720a27126da', '544fb3f34bdc2d03748b456a',
'574eb85c245977648157eec3', '57347baf24597738002c6178', '59e3556c86f7741776641ac2',
'5d1b3f2d86f774253763b735', '<KEY>', '59136a4486f774447a1ed172',
'590c2d8786f774245b1f03f3', '5939a00786f7742fe8132936', '57347c77245977448d35f6e2',
'5798a2832459774b53341029', '5751435d24597720a27126d1', '5d40412b86f7743cb332ac3a',
'5c13cef886f774072e618e82', '590c5a7286f7747884343aea', '59e361e886f774176c10a2a5',
'5900b89686f7744e704a8747', '5c13cd2486f774072c757944', '5bc9c29cd4351e003562b8a3',
'5d40425986f7743185265461', '57505f6224597709a92585a9', '5d4042a986f7743185265463',
'5937ee6486f77408994ba448', '57347d7224597744596b4e72', '5d1b3a5d86f774252167ba22',
'593962ca86f774068014d9af', '<KEY>', '5938504186f7740991483f30',
'5672cb304bdc2dc2088b456a', '5d1c819a86f774771b0acd6c', '59e358a886f7741776641ac3',
'59e366c186f7741778269d85', '5780cf692459777de4559321', '59148f8286f7741b951ea113',
'57347cd0245977445a2d6ff1', '57347d692459774491567cf1', '57347d3d245977448f7b7f61',
'5d4041f086f7743cac3f22a7', '<KEY>', '<KEY>',
'<KEY>', '<KEY>', '<KEY>2',
'57347d692459774491567cf1', '5d1b309586f77425227d1676', '590a3d9c86f774385926e510',
'5734781f24597737e04bf32a', '590c595c86f7747884343ad7', '59148c8a86f774197930e983',
'5673de654bdc2d180f8b456d', '5be4038986f774527d3fae60', '590c2b4386f77425357b6123',
'5913611c86f77479e0084092', '5938603e86f77435642354f4', '<KEY>',
'5a80a29286f7742b25692012', '575062b524597720a31c09a1', '57347d5f245977448b40fa81',
'5913651986f774432f15d132', '<KEY>', '<KEY>',
'5780cf9e2459777df90dcb73', '5780d07a2459777de4559324', '<KEY>',
'573476f124597737e04bf328', '5c06782b86f77426df5407d2', '<KEY>',
'5780cda02459777b272ede61', '573474f924597738002c6174', '5b7c710788a4506dec015957',
'5734779624597737e04bf329', '5914578086f774123569ffa4', '57a349b2245977762b199ec7',
'573476d324597737da2adc13', '591382d986f774465a6413a7', '<KEY>',
'57347c1124597737fb1379e3', '5783c43d2459774bbe137486', '57a349b2245977762b199ec7',
'5d1b31ce86f7742523398394', '<KEY>', '5938994586f774523a425196',
'5780cf722459777a5108b9a1', '<KEY>', '<KEY>',
'573475fb24597737fb1379e1', '<KEY>', '5734770f24597738025ee254',
'<KEY>', '59136f6f86f774447a1ed173', '<KEY>',
'57347b8b24597737dd42e192', '591ae8f986f77406f854be45']
print('[LOG:] Loaded List: Wishlist')
# Full List
if use_full_list:
print('[LOG:] Loaded List: Complete Entry')
WishList = ['5c1d0efb86f7744baf2e7b7b', '5c0a840b86f7742ffa4f2482', '59fb042886f7746c5005a7b2',
'5b6d9ce188a4501afc1b2b25', '5c1d0c5f86f7744bb2683cf0', '5c1d0dc586f7744baf2e7b79',
'5d235bb686f77443f4331278', '5c1e495a86f7743109743dfb', '59fb023c86f7746d0d4b423c',
'5c0530ee86f774697952d952', '59fafd4b86f7745ca07e1232', '5d03794386f77420415576f5',
'5ad5d7d286f77450166e0a89', '5d95d6fa86f77424484aa5e9', '5d80cb5686f77440545d1286',
'5c093db286f7740a1b2617e3', '5c1d0f4986f7744bb01837fa', '5a0ee30786f774023b6ee08f',
'5d8e3ecc86f774414c78d05e', '5d80cab086f77440535be201', '590c60fc86f77412b13fddcf',
'5a0dc95c86f77452440fc675', '5aafbcd986f7745e590fff23', '5a13ef7e86f7741290491063',
'5c093e3486f77430cb02e593', '5ad7247386f7747487619dc3', '5da743f586f7744014504f72',
'5a13f46386f7741dd7384b04', '5d80cbd886f77470855c26c2', '5d1b376e86f774252519444e',
'5a0ee34586f774023b6ee092', '59fb016586f7746d0d4b423a', '5d947d4e86f774447b415895',
'5d80c8f586f77440373c4ed0', '5ad5cfbd86f7742c825d6104', '5d03775b86f774203e7e0c4b',
'5c127c4486f7745625356c13', '59e3639286f7741777737013', '5c1e2a1e86f77431ea0ea84c',
'5a13ef0686f7746e5a411744', '5d8e0db586f7744450412a42', '57347ca924597744596b4e71',
'5aafbde786f774389d0cbc0f', '5a1452ee86f7746f33111763', '5a0f08bc86f77478f33b84c2',
'5ad5db3786f7743568421cce', '5733279d245977289b77ec24', '5a0eecf686f7740350630097',
'5a0ee76686f7743698200d5c', '5a13f35286f77413ef1436b0', '5d9f1fa686f774726974a992',
'5d80ccdd86f77474f7575e02', '5a13f24186f77410e57c5626', '5913915886f774123603c392',
'<KEY>2', '5d80c78786f774403a401e3e', '5d8e0e0e86f774321140eb56',
'5da46e3886f774653b7a83fe', '5d1b33a686f7742523398398', '59e3606886f77417674759a5',
'5d80c6c586f77440351beef1', '<KEY>', '<KEY>',
'<KEY>', '5c1267ee86f77416ec610f72', '<KEY>',
'5a0ee37f86f774023657a86f', '<KEY>', '5a0f0f5886f7741c4e32a472',
'5c052f6886f7746b1e3db148', '5addaffe86f77470b455f900', '5a0dc45586f7742f6b0b73e3',
'59faf7ca86f7740dbe19f6c2', '<KEY>', '5a0eedb386f77403506300be',
'<KEY>', '<KEY>', '<KEY>',
'<KEY>', '5d80c66d86f774405611c7d6', '590c2e1186f77425357b6124',
'5c05308086f7746b2101e90b', '5a0ea64786f7741707720468', '5d80cb3886f77440556dbf09',
'5d8e15b686f774445103b190', '5af0534a86f7743b6f354284', '5da5cdcd86f774529238fb9b',
'5b43575a86f77424f443fe62', '5ad7242b86f7740a6a3abd43', '5ad7217186f7746744498875',
'5a0ee72c86f77436955d3435', '5d80ca9086f774403a401e40', '5a13ee1986f774794d4c14cd',
'5d95d6be86f77424444eb3a7', '5a0eb6ac86f7743124037a28', '5d80c95986f77440351beef3',
'5c1f79a086f7746ed066fb8f', '567143bf4bdc2d1a0f8b4567', '5d0377ce86f774186372f689',
'5c1e2d1f86f77431e9280bee', '5d1b327086f7742525194449', '5d0376a486f7747d8050965c',
'<KEY>', '5780cfa52459777dfb276eb1', '<KEY>',
'5d80cd1a86f77402aa362f42', '5d6fc78386f77449d825f9dc', '5751a89d24597722aa0e8db0',
'5a0eebed86f77461230ddb3d', '5d1b2f3f86f774252167a52c', '5ad5d64486f774079b080af8',
'5c05300686f7746dce784e5d', '<KEY>', '<KEY>1',
'<KEY>0', '5bc9b9ecd4351e3bac122519', '5a0f006986f7741ffd2fe484',
'5ad5d20586f77449be26d877', '<KEY>', '<KEY>',
'5d235a5986f77443f6329bc6', '59fafb5d86f774067a6f2084', '5d80c6fc86f774403a401e3c',
'5a0ea69f86f7741cd5406619', '5a0eeb8e86f77461257ed71a', '5c12688486f77426843c7d32',
'590de7e986f7741b096e5f32', '5780cf942459777df90dcb72', '5d1b313086f77425227d1678',
'5a145d7b86f7744cbb6f4a13', '5a0ea79b86f7741d4a35298e',
'5d1b2fa286f77425227d1674', '59e3647686f774176a362507', '5a0eed4386f77405112912aa',
'5d0378d486f77420421a5ff4', '5d03784a86f774203e7e0c4d', '5c0e531d86f7747fa23f4d42',
'5a0ec70e86f7742c0b518fba', '<KEY>', '5d80ccac86f77470841ff452',
'5d0379a886f77420407aa271', '5bc9b720d4351e450201234b', '5bc9c377d4351e3bac12251b',
'59e3658a86f7741776641ac4', '5a0f068686f7745b0d4ea242', '5b4335ba86f7744d2837a264',
'57347c2e24597744902c94a1', '5d1b2ffd86f77425243e8d17', '5d40419286f774318526545f',
'59e35de086f7741778269d84', '5d403f9186f7743cac3f229b', '5c052fb986f7746b2101e909',
'590de71386f774347051a052', '590a373286f774287540368b', '5a0ec6d286f7742c0b518fb5',
'5d235b4d86f7742e017bc88a', '5a0f045e86f7745b0f0d0e42', '590c346786f77423e50ed342',
'5734758f24597738025ee253', '59e36c6f86f774176c10a2a7', '5d0375ff86f774186372f685',
'5938144586f77473c2087145', '5d1b304286f774253763a528', '5734795124597738002c6176',
'<KEY>', '<KEY>', '<KEY>',
'591afe0186f77431bd616a11', '5bc9be8fd4351e00334cae6e', '59136e1e86f774432f15d133',
'59faf98186f774067b6be103', '<KEY>', '<KEY>',
'<KEY>', '5c0e534186f7747fa1419867',
'<KEY>', '59e35ef086f7741777737012', '<KEY>',
'<KEY>', '<KEY>', '5c0e531286f7747fa54205c2',
'5a0f075686f7745bcc42ee12', '<KEY>', '57347c5b245977448d35f6e1',
'57347d8724597744596b4e76', '5af0561e86f7745f5f3ad6ac', '5c0e533786f7747fa23f4d47',
'5be4038986f774527d3fae60', '5d1b39a386f774252339976f', '57347d9c245977448b40fa85',
'59387a4986f77401cc236e62', '5734773724597737fd047c14',
'<KEY>', '590c5d4b86f774784e1b9c45', '593aa4be86f77457f56379f8',
'5751487e245977207e26a315', '5c10c8fd86f7743d7d706df3', '5d1b385e86f774252167b98a',
'575146b724597720a27126d5', '5d6fc87386f77449db3db94e', '5d63d33b86f7746ea9275524',
'5c0e530286f7747fa1419862', '57347da92459774491567cf5', '5a0eff2986f7741fd654e684',
'590c35a486f774273531c822', '590c5c9f86f77477c91c36e7', '57514643245977207f2c2d09',
'59e3556c86f7741776641ac2', '5c06779c86f77426e00dd782', '59e358a886f7741776641ac3',
'5939a00786f7742fe8132936', '5af0484c86f7740f02001f7f', '5751435d24597720a27126d1',
'<KEY>', '590a391c86f774385a33c404', '5780d0532459777a5108b9a2',
'<KEY>', '<KEY>', '<KEY>',
'57347d3d245977448f7b7f61', '<KEY>', '573477e124597737dd42e191',
'5913877a86f774432f15d444', '<KEY>', '544fb3f34bdc2d03748b456a',
'591383f186f7744a4c5edcf3', '574eb85c245977648157eec3', '57347baf24597738002c6178',
'57347d692459774491567cf1', '59e3556c86f7741776641ac2', '5d1c819a86f774771b0acd6c',
'5937ee6486f77408994ba448', '59136a4486f774447a1ed172', '<KEY>',
'5d1b3f2d86f774253763b735', '5ad5d49886f77455f9731921', '590c5bbd86f774785762df04',
'5d1b309586f77425227d1676', '5bc9c29cd4351e003562b8a3', '57347c77245977448d35f6e2',
'<KEY>', '<KEY>', '5d1b3a5d86f774252167ba22',
'59148c8a86f774197930e983', '5d4042a986f7743185265463', '5c06782b86f77426df5407d2',
'<KEY>', '5a0eb38b86f774153b320eb0', '5d40412b86f7743cb332ac3a',
'590a3d9c86f774385926e510', '<KEY>', '5900b89686f7744e704a8747',
'57347d7224597744596b4e72', '5d40425986f7743185265461', '<KEY>',
'5751496424597720a27126da', '590c5a7286f7747884343aea', '5780cf692459777de4559321',
'59148f8286f7741b951ea113', '<KEY>', '5938504186f7740991483f30',
'<KEY>', '<KEY>', '<KEY>',
'5780d0652459777df90dcb74', '<KEY>', '590a3c0a86f774385a33c450',
'59e361e886f774176c10a2a5', '5798a2832459774b53341029', '<KEY>',
'590c595c86f7747884343ad7', '<KEY>', '<KEY>',
'<KEY>', '5c13cef886f774072e618e82', '5938603e86f77435642354f4',
'<KEY>', '5913651986f774432f15d132', '5783c43d2459774bbe137486',
'593962ca86f774068014d9af', '<KEY>', '<KEY>',
'<KEY>', '<KEY>', '575062b524597720a31c09a1',
'<KEY>', '<KEY>', '5b7c710788a4506dec015957',
'<KEY>', '<KEY>', '<KEY>',
'5780d07a2459777de4559324', '5b7c710788a4506dec015957', '57a349b2245977762b199ec7',
'5d4041f086f7743cac3f22a7', '<KEY>', '<KEY>',
'5914578086f774123569ffa4', '<KEY>', '5780cda02459777b272ede61',
'<KEY>', '591382d986f774465a6413a7', '57a349b2245977762b199ec7',
'<KEY>', '57347cd0245977445a2d6ff1', '<KEY>',
'544fb3364bdc2d34748b456a', '5d1b31ce86f7742523398394', '5938994586f774523a425196',
'5734770f24597738025ee254', '544fb25a4bdc2dfb738b4567', '5780cf722459777a5108b9a1',
'57347b8b24597737dd42e192', '573476d324597737da2adc13', '59136f6f86f774447a1ed173',
'591ae8f986f77406f854be45', '56742c284bdc2d98058b456d']
print("[LOG:] Launching and waiting for the Escape from Tarkov Client...")
startlauncher = config['DEFAULT']['launcherpath']
subprocess.Popen(startlauncher)
def _keep_alive(): #keep session token alive
    # ping the keepalive endpoint forever, once every 5 minutes, so the
    # session token stays valid; intended to run on a background thread
    while True:
        get_api_result('https://prod.escapefromtarkov.com/client/game/keepalive', '')
        time.sleep(5 * 60)
def goto(linenum):
    """Record *linenum* in the module-level ``line`` marker.

    Python has no real goto; this only stores a line number in the global
    ``line`` for the caller's own loop logic to act on.  The original body
    ended with a dead no-op comparison (``line == 1``) whose result was
    discarded -- it has been removed.
    """
    global line
    line = linenum
def remove_white_space(s):
    """Strip literal space and newline characters from *s* (filter to raw input only)."""
    return s.translate(str.maketrans('', '', ' \n'))
def get_api_result(url, data): #read api request
    """POST *data* (whitespace-stripped, zlib-compressed) to the game API.

    On HTTP 200 the zlib-compressed JSON body is decoded and returned as a
    Python object; a fresh PHPSESSID cookie, if sent, is stored in the
    module-level ``cookies`` dict.  On any failure (non-200 status, timeout,
    bad payload) a warning is printed and None is returned implicitly --
    callers must handle a None result.
    """
    global cookies
    try:
        data = zlib.compress(remove_white_space(data).encode())
        res = requests.post(url,
                            data=data,
                            cookies=cookies,
                            headers=headers,
                            timeout=5
                            )
        if res.status_code == 200:
            if 'PHPSESSID' in res.cookies:
                cookies = res.cookies.get_dict()
            content = zlib.decompress(res.content).decode()
            return json.loads(content)
        else:
            print("%d" % res.status_code)
            raise Exception(res.status_code)
    # was a bare `except:` -- narrowed so Ctrl-C / SystemExit still propagate
    except Exception:
        print('[ERROR:] ... Token Requests Fail')
def buy(offer): #purchase item function
    # Buy a flea-market offer with ruble stacks from the inventory, then
    # immediately resell every received item to the configured trader NPC.
    # `offer` is a flea-market offer dict from the API; relies on the
    # module-level globals moneyStacks, all_item_list, NPC.
    print('\n[LOG:] Bought ' + offer['items'][0]['_tpl'] + ' [ ' + all_item_list[offer['items'][0]['_tpl']]['_props'][
        'Name'] + ' ]' + ' at ' + str(offer['requirementsCost']) + ' / ' + str(int(offer['itemsCost'] * 0.75)))
    offer_id = offer['_id']
    offer_count = offer['items'][0]['upd']['StackObjectsCount']
    offer_price = offer['items'][0]['upd']['StackObjectsCount'] * offer['summaryCost']
    start_time = offer['startTime']
    start_from = 56 #wait alteast for 56
    # if the offer went live less than `start_from` seconds ago, wait a
    # fraction of the remaining time before buying (the /100 shortens the
    # wait to a pseudo-random-looking delay)
    spent_time = time.time() - start_time
    if spent_time < start_from:
        to_wait = start_from - spent_time
        time.sleep(to_wait / 100) #division equals random wait timer
        print(' Waited ' + str(to_wait / 100) +' Seconds before purchase')
    # request skeleton for the purchase; the money stacks to pay with are
    # appended below
    data = {
        "data": [
            {
                "Action": "RagFairBuyOffer",
                "offers":
                    [
                        {
                            "id": offer_id,
                            "count": offer_count,
                            "items": []
                        }
                    ]
            }
        ]
    }
    # pick ruble stacks until the price is covered (greedy, in dict order)
    # NOTE(review): `id` shadows the builtin of the same name
    items = []
    for (id, value) in moneyStacks.items():
        if value >= offer_price:
            items.append((id, offer_price))
            break
        else:
            offer_price -= value
            items.append((id, value))
    for item in items:
        stack_info = dict()
        stack_info['id'] = item[0]
        stack_info['count'] = item[1]
        data['data'][0]['offers'][0]['items'].append(stack_info)
    result_data = get_api_result(
        'https://prod.escapefromtarkov.com/client/game/profile/items/moving',
        json.dumps(data))
    # transient error codes: silently give up on this offer
    if result_data['err'] in (228, 1512, 1514, 1500):
        return
    if result_data['err'] == 1505:
        print("[ERROR:] Purchase Failed ... OUT OF SPACE")
        exit()
    if result_data['err'] == 0:
        # offer was sold out
        if len(result_data['data']['badRequest']) > 0:
            print('[ERROR:] Purchase Failed ... ITEM WAS SOLD')
            todo = {}
        # added new item to inventory
        elif len(result_data['data']['items'].keys()) > 0:
            print(' Purchase Success ... ADDED ITEM')
            print(' | Stonks: + ' + format(int(offer['itemsCost'] * 0.75) - offer['summaryCost'],
                ',') + ' ₽ |')
            # sell each newly received item straight to the trader
            for item in result_data['data']['items']['new']:
                data = {
                    "data": [
                        {
                            "Action": "TradingConfirm",
                            "type": "sell_to_trader",
                            "tid": NPC['_id'],
                            "items":
                                [
                                    {"id": item['_id'], "count": item['upd']['StackObjectsCount'],
                                     "scheme_id": 0}
                                ]
                        }
                    ],
                    "tm": 0,
                }
                result_data = get_api_result(
                    'https://prod.escapefromtarkov.com/client/game/profile/items/moving',
                    json.dumps(data))
                # print(result_data)
        # refresh balance / money stacks after the purchase attempt
        update_profile()
    else:
        print('[ERROR:] ' + str(result_data['err']) + '')
def update_profile():
global PMC
global Balance
global oldBalance
global all_item_list
# get_profiles
profile_list = get_api_result('https://prod.escapefromtarkov.com/client/game/profile/list', '{}')
if profile_list is None:
print('profile_list is None.')
if profile_list['err'] != 0:
print(profile_list['errmsg'])
exit()
# print(profile_list)
# get PMC ID
for item in profile_list['data']:
if item['Info']['LastTimePlayedAsSavage'] == 0:
PMC = item
# get items
_inventory = dict()
for item in PMC['Inventory']['items']:
_inventory[item['_id']] = item
oldBalance = Balance
Balance = 0
for item_id, item in _inventory.items():
if item['_tpl'] == RUBLES:
count = item['upd']['StackObjectsCount']
Balance += count
moneyStacks[item_id] = count
timeStr = str(time.strftime("%H:%M:%S", time.localtime(time.time())))
if Balance - oldBalance > | |
import os
from enum import IntEnum
from random import uniform
from math import pi
import json
from ipycanvas import MultiCanvas, Canvas, hold_canvas
from ipywidgets import Image
from babyrobot.envs.lib import GridBase
from babyrobot.envs.lib import Arrows
from babyrobot.envs.lib import Direction
class Level(IntEnum):
    """Canvas layer indices, ordered bottom (Base) to top (Overlay)."""
    Base = 0      # bottom-most layer
    Grid = 1
    Underlay = 2
    Robot = 3
    Overlay = 4   # top-most layer
class DrawGrid():
    '''
    Renders a GridBase environment onto a stack of ipycanvas layers
    (ordered by the Level enum). Class attributes below are defaults that
    instances may override via constructor kwargs / theme files.
    '''
    num_canvases = 5        # number of canvases/layers
    cell_pixels = 64        # pixel dimensions of a grid square
    padding = 2             # padding around the cells
    wall_width = 4          # the width of maze walls
    side_panel = None       # by default there's no side info panel
    bottom_panel = None     # by default there's no bottom info panel
    base_color = 'orange'   # color of the grid base layer
    grid_color = '#777'     # grid line color
    start_color = '#ed1818' # color of start square
    start_text_color = '#fff'  # color of the 'START' label
    exit_color = 'green'    # color of the exit square
    exit_text_color = '#fff'   # color of the 'EXIT' label
    border_color = 'black'  # color of the outer border
    wall_color = 'black'    # color of the walls
    def __init__(self, gridbase: GridBase, **kwargs: dict):
        '''
        gridbase : the grid/maze model to render
        kwargs   : 'show_start_text', 'show_end_text', 'grid' (draw
                   properties / theme, see set_properties), 'add_compass',
                   'side_panel', 'bottom_panel'
        '''
        self.grid = gridbase
        self.show_start_text = kwargs.get('show_start_text',True)
        self.show_end_text = kwargs.get('show_end_text',True)
        # setup the grid properties
        self.set_properties(kwargs.get('grid',None))
        # setup any information items
        self.add_compass = kwargs.get('add_compass',False)
        self.side_panel = kwargs.get('side_panel',None)
        self.bottom_panel = kwargs.get('bottom_panel',None)
        # load the image used to draw puddles
        # (load_puddle_sprite / draw_level are defined later in the class)
        self.load_puddle_sprite()
        # create the set of canvases for drawing
        self.create_canvases()
        self.draw_level()
'''
Setup Functions
'''
    def set_properties( self, grid_props: dict ):
        ''' setup the grid draw properties

        grid_props may contain:
          - 'theme'  : name of a JSON file under <working_dir>/themes/;
                       defaults to 'black_orange', so unless {'theme': None}
                       is passed explicitly, the theme file's content
                       REPLACES the caller's grid_props
          - 'colors' : per-element color overrides
          - 'widths' : 'padding' and 'walls' pixel widths
        '''
        if grid_props is not None:
            # first test if a theme is specified
            theme = grid_props.get('theme','black_orange')
            if theme is not None:
                theme_path = os.path.join(self.grid.working_directory,f'themes/{theme}.json')
                with open(theme_path) as json_file:
                    # NOTE(review): this discards the caller's grid_props in
                    # favour of the theme file -- confirm override is intended
                    grid_props = json.load(json_file)
            colors = grid_props.get('colors',None)
            if colors is not None:
                self.base_color = colors.get('base', self.base_color)
                self.grid_color = colors.get('lines', self.grid_color)
                self.start_color = colors.get('start', self.start_color)
                self.start_text_color = colors.get('start_text', self.start_text_color)
                self.exit_color = colors.get('exit', self.exit_color)
                self.exit_text_color = colors.get('exit_text', self.exit_text_color)
                self.border_color = colors.get('border', self.border_color)
                self.wall_color = colors.get('walls', self.wall_color)
            widths = grid_props.get('widths',None)
            if widths is not None:
                self.padding = widths.get('padding', self.padding)
                self.wall_width = widths.get('walls', self.wall_width)
    def calculate_dimensions(self):
        ' calculate dimensions of the canvases in pixels '
        # grid area plus padding on every side
        self.width_pixels = self.grid.width * self.cell_pixels + (self.padding*2)
        self.height_pixels = self.grid.height * self.cell_pixels + (self.padding*2)
        self.total_width = self.width_pixels
        self.total_height = self.height_pixels
        # if a compass or info side panel are being added expand the width
        if self.add_compass or (self.side_panel is not None):
            # test if a width has been specified for the panel
            # NOTE: type()== (not isinstance) means a bool side_panel (True)
            # deliberately falls through to the default width of 100
            if type(self.side_panel) == int:
                self.total_width += self.side_panel
            elif type(self.side_panel) == dict:
                # side panel has been specified as a dictionary
                self.total_width += self.side_panel.get('width',100)
                # self.total_height += self.side_panel.get('height',0)
            else:
                # create the side panel with the default width
                self.total_width += 100
        # if a bottom panel is specified increase the height
        if self.bottom_panel is not None:
            # test if a height has been specified for the panel
            if type(self.bottom_panel) == int:
                self.total_height += self.bottom_panel
            elif type(self.bottom_panel) == dict:
                # bottom panel has been specified as a dictionary
                # e.g. bottom_panel':{'width':200,'height':50,'color':'#644242'}
                # self.total_width += self.bottom_panel.get('width',0)
                self.total_height += self.bottom_panel.get('height',50)
            else:
                # create the side panel with the default height
                self.total_height += 50
        # calculate the number of pixels to center of a square
        self.center = self.cell_pixels//2 - self.padding
def create_canvases(self):
    """Allocate the layered MultiCanvas sized to the computed dimensions."""
    # refresh pixel dimensions before allocating the canvas layers
    self.calculate_dimensions()
    self.canvases = MultiCanvas(
        n_canvases=self.num_canvases,
        width=self.total_width,
        height=self.total_height,
        sync_image_data=True,
    )
'''
Helper Functions
'''
def grid_to_pixels(self, grid_pos, xoff=0, yoff=0):
    """Convert a (col, row) grid position to canvas pixel coordinates.

    Optional xoff/yoff are added to the resulting pixel position.
    """
    px = grid_pos[0] * self.cell_pixels + self.padding + xoff
    py = grid_pos[1] * self.cell_pixels + self.padding + yoff
    return px, py
def get_center(self, x, y):
    """Return the pixel coordinates of the tile center for tile origin (x, y)."""
    return x + self.center, y + self.center
'''
Draw Functions
'''
def draw_rect(self, canvas_index, width, height, color, x=0, y=0):
    """Fill a width x height rectangle of `color` at (x, y) on the given layer."""
    layer = self.canvases[canvas_index]
    layer.fill_style = color
    layer.fill_rect(x, y, width, height)
def draw_grid(self, canvas):
    """Stroke a dashed outline around every cell of the grid."""
    canvas.stroke_style = self.grid_color
    canvas.line_width = 1
    canvas.set_line_dash([4, 8])
    # hoist invariants out of the double loop
    cell = self.cell_pixels
    pad = self.padding
    for row in range(self.grid.height):
        for col in range(self.grid.width):
            canvas.stroke_rect(col * cell + pad, row * cell + pad, cell, cell)
def draw_start(self, canvas):
    """Fill the start tile and optionally label it with "START"."""
    sx, sy = self.grid_to_pixels(self.grid.start)
    canvas.fill_style = self.start_color
    canvas.fill_rect(sx, sy, self.cell_pixels, self.cell_pixels)
    # set the text color even when the label is suppressed (keeps the
    # canvas state identical either way)
    canvas.fill_style = self.start_text_color
    if self.show_start_text:
        canvas.text_align = 'left'
        canvas.font = 'bold 17px sans-serif'
        canvas.fill_text("START", sx + 5, sy + 38)
def draw_exit(self, canvas):
    """Fill the exit tile and optionally label it with "EXIT"."""
    ex, ey = self.grid_to_pixels(self.grid.end)
    canvas.fill_style = self.exit_color
    canvas.fill_rect(ex, ey, self.cell_pixels, self.cell_pixels)
    # set the text color even when the label is suppressed (keeps the
    # canvas state identical either way)
    canvas.fill_style = self.exit_text_color
    if self.show_end_text:
        canvas.text_align = 'left'
        canvas.font = 'bold 20px sans-serif'
        canvas.fill_text("EXIT", ex + 10, ey + 40)
def draw_border(self, canvas):
    """Stroke a solid border around the playable (grid) area."""
    canvas.stroke_style = self.border_color
    canvas.line_width = 5
    canvas.set_line_dash([0, 0])
    pad = self.padding
    canvas.stroke_rect(pad,
                       pad,
                       self.width_pixels - 2 * pad,
                       self.height_pixels - 2 * pad)
def draw_maze(self, canvas):
    """Render the maze walls, if the grid has a maze attached."""
    if not self.grid.add_maze:
        return
    self.grid.maze.write_to_canvas(
        canvas,
        self.grid.height * self.cell_pixels,
        self.padding,
        color=self.wall_color,
        wall_width=self.wall_width,
    )
'''
Puddles
'''
def load_puddle_sprite(self):
    """Load the puddle sprite image(s).

    On colab a small sprite is also loaded; otherwise the big sprite is
    pre-rendered onto a scratch canvas for later scaling/rotation.
    """
    base = self.grid.working_directory
    self.big_puddle = Image.from_file(os.path.join(base, 'images/big_puddle.png'))
    if self.grid.drawmode == 'colab':
        # load a small puddle sprite as well
        self.small_puddle = Image.from_file(os.path.join(base, 'images/small_puddle.png'))
    else:
        # pre-render the big puddle sprite onto a cell-sized canvas
        self.puddle_canvas = Canvas(width=self.cell_pixels, height=self.cell_pixels,
                                    sync_image_data=True)
        self.puddle_canvas.draw_image(self.big_puddle, 0, 0)
def draw_puddles(self):
    """Draw every puddle recorded on the grid onto the grid layer."""
    if not self.grid.puddles:
        return
    canvas = self.canvases[Level.Grid]
    with hold_canvas(canvas):
        for (x, y), size in self.grid.puddles:
            self.draw_splash(canvas, x, y, size)
def draw_splash(self,canvas,x,y,puddle_type):
    """Draw one puddle of the given size at grid cell (x, y).

    puddle_type: 0 = none, 1 = small, 2 = big. Outside colab the big
    sprite is scaled (puddle_type/2) and randomly rotated on a scratch
    canvas before being blitted; colab draws the pre-loaded sprites
    directly (scratch-canvas rotation is not supported there).
    """
    if puddle_type > 0:
        # create a puddle canvas, containing the scaled and randomly rotated
        # puddle - not currently supported on colab
        if self.grid.drawmode != 'colab':
            # scale the puddle image according to its type (big or small)
            scale = puddle_type / 2
            # create a new canvas for each splash
            splash_canvas = Canvas(width=self.cell_pixels, height=self.cell_pixels)
            with hold_canvas(splash_canvas):
                pos_x = self.cell_pixels//2
                pos_y = self.cell_pixels//2
                # Choose a random rotation angle
                # (but first set the rotation center with `translate`)
                splash_canvas.translate(pos_x, pos_y)
                splash_canvas.rotate(uniform(0., pi))
                # scale the image
                splash_canvas.scale(scale)
                # Restore the canvas center
                splash_canvas.translate( -pos_x, -pos_y )
                # Draw the sprite
                splash_canvas.draw_image(self.puddle_canvas, 0, 0)
        # convert grid coordinates to pixel coordinates
        x_px = x * self.cell_pixels + self.padding
        y_px = y * self.cell_pixels + self.padding
        if self.grid.drawmode == 'colab':
            if puddle_type==1:
                canvas.draw_image(self.small_puddle,x_px,y_px,width=self.cell_pixels,height=self.cell_pixels)
            else:
                canvas.draw_image(self.big_puddle,x_px,y_px,width=self.cell_pixels,height=self.cell_pixels)
        else:
            canvas.draw_image(splash_canvas,x_px,y_px,width=self.cell_pixels,height=self.cell_pixels)
def draw_compass(self, canvas):
    """Draw the direction arrows and N/E/S/W labels in the side panel."""
    if not self.add_compass:
        return
    arrows = Arrows(64, 2, length=15, width=5, height=11)
    arrows.draw(canvas,
                self.width_pixels + 27, 14,
                [Direction.North, Direction.West, Direction.South, Direction.East],
                center_width=28)
    canvas.font = 'bold 20px sans-serif'
    # hand-tuned pixel offsets for each compass label
    for label, dx, dy in (("W", 13, 52), ("N", 49, 18), ("E", 82, 52), ("S", 51, 85)):
        canvas.fill_text(label, self.width_pixels + dx, dy)
def draw_info_panel(self):
    """Paint the background rectangles for the side and bottom panels.

    Only dict-specified panels get a background, e.g.
    ``{'width':200,'height':50,'color':'#644242'}``.
    """
    if type(self.side_panel) == dict:
        panel = self.side_panel
        self.draw_rect(Level.Base,
                       panel.get('width', 200),
                       panel.get('height', self.height_pixels),
                       panel.get('color', 'white'),
                       x=self.width_pixels, y=0)
    if type(self.bottom_panel) == dict:
        panel = self.bottom_panel
        self.draw_rect(Level.Base,
                       panel.get('width', self.total_width),
                       panel.get('height', 100),
                       panel.get('color', 'white'),
                       x=0, y=self.height_pixels)
def clear(self, all_info=False):
    """Clear the info-panel regions of the overlay layer.

    Args:
        all_info: when true, wipe the entire overlay layer instead of
            only the side/bottom panel strips.
    """
    canvas = self.canvases[Level.Overlay]
    if self.side_panel is not None:
        canvas.clear_rect(self.width_pixels, 0,
                          self.total_width - self.width_pixels, self.total_height)
    if self.bottom_panel is not None:
        canvas.clear_rect(0, self.height_pixels,
                          self.total_width, self.total_height - self.height_pixels)
    # idiom fix: truth-test the flag instead of comparing `== True`
    if all_info:
        canvas.clear()
| |
The metric dimension name.
:vartype name: str
:ivar display_name: The display name for the dimension.
:vartype display_name: str
:ivar to_be_exported_for_shoebox: Whether to export metric to shoebox.
:vartype to_be_exported_for_shoebox: bool
"""
# Fields are server-populated: msrest rejects client-supplied values for them.
_validation = {
    'name': {'readonly': True},
    'display_name': {'readonly': True},
    'to_be_exported_for_shoebox': {'readonly': True},
}

# Maps python attribute names to JSON wire keys and msrest types.
_attribute_map = {
    'name': {'key': 'name', 'type': 'str'},
    'display_name': {'key': 'displayName', 'type': 'str'},
    'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}

def __init__(
    self,
    **kwargs
):
    """Initialize with all server-populated fields left as None."""
    super(MetricDimension, self).__init__(**kwargs)
    self.name = None
    self.display_name = None
    self.to_be_exported_for_shoebox = None
class MetricSpecification(msrest.serialization.Model):
    """A metric emitted by the service.

    All variables except ``supported_aggregation_types`` are populated by
    the server and ignored when sending a request.

    :ivar name: The metric name.
    :vartype name: str
    :ivar display_name: The metric display name.
    :vartype display_name: str
    :ivar display_description: The metric display description.
    :vartype display_description: str
    :ivar unit: The metric unit ("Bytes", "Count", "Milliseconds").
    :vartype unit: str or ~video_analyzer.models.MetricUnit
    :ivar aggregation_type: The metric aggregation type ("Average", "Count",
     "Total").
    :vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType
    :ivar lock_aggregation_type: The metric lock aggregation type ("Average",
     "Count", "Total").
    :vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType
    :param supported_aggregation_types: Supported aggregation types.
    :type supported_aggregation_types: list[str]
    :ivar dimensions: The metric dimensions.
    :vartype dimensions: list[~video_analyzer.models.MetricDimension]
    :ivar enable_regional_mdm_account: Whether a regional MDM account is enabled.
    :vartype enable_regional_mdm_account: bool
    :ivar source_mdm_account: The source MDM account.
    :vartype source_mdm_account: str
    :ivar source_mdm_namespace: The source MDM namespace.
    :vartype source_mdm_namespace: str
    :ivar supported_time_grain_types: The supported time grain types.
    :vartype supported_time_grain_types: list[str]
    """

    _validation = {
        'name': {'readonly': True},
        'display_name': {'readonly': True},
        'display_description': {'readonly': True},
        'unit': {'readonly': True},
        'aggregation_type': {'readonly': True},
        'lock_aggregation_type': {'readonly': True},
        'dimensions': {'readonly': True},
        'enable_regional_mdm_account': {'readonly': True},
        'source_mdm_account': {'readonly': True},
        'source_mdm_namespace': {'readonly': True},
        'supported_time_grain_types': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'display_description': {'key': 'displayDescription', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
        'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'},
        'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
        'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},
        'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
        'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
        'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
        'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # server-populated fields start as None
        self.name = None
        self.display_name = None
        self.display_description = None
        self.unit = None
        self.aggregation_type = None
        self.lock_aggregation_type = None
        self.supported_aggregation_types = kwargs.get('supported_aggregation_types')
        self.dimensions = None
        self.enable_regional_mdm_account = None
        self.source_mdm_account = None
        self.source_mdm_namespace = None
        self.supported_time_grain_types = None
class NetworkAccessControl(msrest.serialization.Model):
    """Network access control for a video analyzer account.

    :param integration: Public network access for the integration group.
    :type integration: ~video_analyzer.models.GroupLevelAccessControl
    :param ingestion: Public network access for the ingestion group.
    :type ingestion: ~video_analyzer.models.GroupLevelAccessControl
    :param consumption: Public network access for the consumption group.
    :type consumption: ~video_analyzer.models.GroupLevelAccessControl
    """

    _attribute_map = {
        'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'},
        'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'},
        'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.integration = kwargs.get('integration')
        self.ingestion = kwargs.get('ingestion')
        self.consumption = kwargs.get('consumption')
class NodeInput(msrest.serialization.Model):
    """An input signal to be used on a pipeline node.

    All required parameters must be populated in order to send to Azure.

    :param node_name: Required. Name of the upstream node in the pipeline
     whose output is used as input of the current node.
    :type node_name: str
    """

    _validation = {
        'node_name': {'required': True},
    }

    _attribute_map = {
        'node_name': {'key': 'nodeName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # required: raises KeyError when absent, matching msrest convention
        self.node_name = kwargs['node_name']
class Operation(msrest.serialization.Model):
    """A resource-provider operation.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The operation name.
    :type name: str
    :param display: The operation display name.
    :type display: ~video_analyzer.models.OperationDisplay
    :param origin: Origin of the operation.
    :type origin: str
    :param properties: Operation properties format.
    :type properties: ~video_analyzer.models.Properties
    :param is_data_action: Whether the operation applies to data-plane.
    :type is_data_action: bool
    :param action_type: Indicates the action type ("Internal").
    :type action_type: str or ~video_analyzer.models.ActionType
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'origin': {'key': 'origin', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'Properties'},
        'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
        'action_type': {'key': 'actionType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # 'name' is required: raises KeyError when absent
        self.name = kwargs['name']
        self.display = kwargs.get('display')
        self.origin = kwargs.get('origin')
        self.properties = kwargs.get('properties')
        self.is_data_action = kwargs.get('is_data_action')
        self.action_type = kwargs.get('action_type')
class OperationCollection(msrest.serialization.Model):
    """A collection of Operation items.

    :param value: A collection of Operation items.
    :type value: list[~video_analyzer.models.Operation]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = kwargs.get('value')
class OperationDisplay(msrest.serialization.Model):
    """Human-readable details of an operation.

    :param provider: The service provider.
    :type provider: str
    :param resource: Resource on which the operation is performed.
    :type resource: str
    :param operation: The operation type.
    :type operation: str
    :param description: The operation description.
    :type description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.provider = kwargs.get('provider')
        self.resource = kwargs.get('resource')
        self.operation = kwargs.get('operation')
        self.description = kwargs.get('description')
class ParameterDeclaration(msrest.serialization.Model):
    """Single topology parameter declaration.

    Declared parameters can and must be referenced throughout the topology
    and can optionally have default values to be used when they are not
    defined in the pipelines.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Name of the parameter.
    :type name: str
    :param type: Required. Type of the parameter ("String", "SecretString",
     "Int", "Double", "Bool").
    :type type: str or ~video_analyzer.models.ParameterType
    :param description: Description of the parameter.
    :type description: str
    :param default: The default value for the parameter to be used if the
     pipeline does not specify a value.
    :type default: str
    """

    _validation = {
        'name': {'required': True},
        'type': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'default': {'key': 'default', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # required fields: raise KeyError when absent
        self.name = kwargs['name']
        self.type = kwargs['type']
        self.description = kwargs.get('description')
        self.default = kwargs.get('default')
class ParameterDefinition(msrest.serialization.Model):
    """Value of a specific pipeline-topology parameter.

    See pipeline topology parameters for more information.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Name of the parameter declared in the pipeline
     topology.
    :type name: str
    :param value: Parameter value to be applied on this specific pipeline.
    :type value: str
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # 'name' is required: raises KeyError when absent
        self.name = kwargs['name']
        self.value = kwargs.get('value')
class PemCertificateList(CertificateSource):
    """A list of PEM formatted certificates.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant
     filled by server.
    :type type: str
    :param certificates: Required. PEM formatted public certificates, one
     certificate per entry.
    :type certificates: list[str]
    """

    _validation = {
        'type': {'required': True},
        'certificates': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'certificates': {'key': 'certificates', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # polymorphic discriminator is fixed client-side
        self.type = '#Microsoft.VideoAnalyzer.PemCertificateList'  # type: str
        self.certificates = kwargs['certificates']
class PipelineJob(ProxyResource):
"""Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: Reference to an existing pipeline topology. When activated, this pipeline
job will process content according to the pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:ivar state: Current state of | |
import os
import sys
import sqlite3
import pandas as pd
from matplotlib import pyplot as plt
from multiprocessing import Process, Queue
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import db_tick, db_backtest
from utility.static import now, strf_time, strp_time, timedelta_sec, timedelta_day
BATTING = 5000000  # bet amount per symbol (KRW)
TESTPERIOD = 14  # backtest window in days (14 = past two weeks of data)
TOTALTIME = 198000  # total seconds from 10:00 to market close over the window
class BackTester1m:
    """Tick-level backtester worker for a 1-minute intraday strategy.

    Runs in its own process: reads per-symbol tick tables from the tick
    database, derives rolling features, simulates order-book buys/sells,
    and pushes per-trade and per-symbol summaries through the queue.
    """

    def __init__(self, q_, code_list_, num_, df_mt_):
        # q_        : result queue shared with the Total collector process
        # code_list_: symbols assigned to this worker
        # num_      : 8-element strategy parameter vector
        # df_mt_    : per-timestamp table of top-100 symbols by traded value
        self.q = q_
        self.code_list = code_list_
        self.df_mt = df_mt_
        # strategy tunables (order fixed by the optimizer)
        self.gap_ch = num_[0]
        self.avg_time = num_[1]
        self.gap_sm = num_[2]
        self.ch_low = num_[3]
        self.dm_low = num_[4]
        self.per_low = num_[5]
        self.per_high = num_[6]
        self.cs_per = num_[7]
        # per-symbol running state
        self.code = None
        self.df = None
        self.totalcount = 0
        self.totalcount_p = 0
        self.totalcount_m = 0
        self.totalholdday = 0
        self.totaleyun = 0
        self.totalper = 0.
        self.hold = False
        self.buycount = 0
        self.buyprice = 0
        self.sellprice = 0
        self.index = 0
        self.indexb = 0
        self.indexn = 0
        self.ccond = 0
        self.csell = 0
        self.Start()

    def Start(self):
        """Replay the tick history of every assigned symbol."""
        conn = sqlite3.connect(db_tick)
        tcount = len(self.code_list)
        # oldest trading day (YYYYMMDD as int) included in the test window
        int_daylimit = int(strf_time('%Y%m%d', timedelta_day(-TESTPERIOD)))
        for k, code in enumerate(self.code_list):
            self.code = code
            self.df = pd.read_sql(f"SELECT * FROM '{code}'", conn)
            self.df = self.df.set_index('index')
            # rolling features are computed on the previous tick's values
            # (shift(1)) so the current tick never leaks into its own signal
            self.df['직전거래대금'] = self.df['거래대금'].shift(1)
            self.df['직전체결강도'] = self.df['체결강도'].shift(1)
            self.df['거래대금평균'] = self.df['직전거래대금'].rolling(window=self.avg_time).mean()
            self.df['체결강도평균'] = self.df['직전체결강도'].rolling(window=self.avg_time).mean()
            self.df['최고체결강도'] = self.df['직전체결강도'].rolling(window=self.avg_time).max()
            self.df = self.df.fillna(0)
            # reset the per-symbol totals
            self.totalcount = 0
            self.totalcount_p = 0
            self.totalcount_m = 0
            self.totalholdday = 0
            self.totaleyun = 0
            self.totalper = 0.
            self.ccond = 0
            lasth = len(self.df) - 1
            for h, index in enumerate(self.df.index):
                # index looks like 'YYYYMMDDHHMMSS'; reset streak on a new day
                if h != 0 and index[:8] != self.df.index[h - 1][:8]:
                    self.ccond = 0
                # skip days outside the window and ticks at or before 10:00:00
                if int(index[:8]) < int_daylimit or int(index[8:]) <= 100000:
                    continue
                self.index = index
                self.indexn = h
                if not self.hold and self.BuyTerm():
                    self.Buy()
                elif self.hold and self.SellTerm():
                    self.Sell()
                elif self.hold and (h == lasth or index[:8] != self.df.index[h + 1][:8]):
                    # force-close at the end of the data or of the trading day
                    self.Sell()
            self.Report(k + 1, tcount)
        conn.close()

    def BuyTerm(self):
        """Return True when entry conditions are met for the current tick."""
        try:
            # symbol must stay in the traded-value top-100 to keep its streak
            if self.code not in self.df_mt['거래대금상위100'][self.index]:
                self.ccond = 0
            else:
                self.ccond += 1
        except KeyError:
            return False
        if self.ccond < self.avg_time:
            return False
        # strategy details withheld (not public)
        return True

    def Buy(self):
        """Simulate a market buy of ~BATTING won against ask levels 1-2."""
        if self.df['매도호가1'][self.index] * self.df['매도잔량1'][self.index] >= BATTING:
            # level-1 ask depth covers the whole order
            s1hg = self.df['매도호가1'][self.index]
            self.buycount = int(BATTING / s1hg)
            self.buyprice = s1hg
        else:
            # take all of ask level 1, fill the remainder from ask level 2
            s1hg = self.df['매도호가1'][self.index]
            s1jr = self.df['매도잔량1'][self.index]
            s2hg = self.df['매도호가2'][self.index]
            ng = BATTING - s1hg * s1jr
            s2jc = int(ng / s2hg)
            self.buycount = s1jr + s2jc
            self.buyprice = round((s1hg * s1jr + s2hg * s2jc) / self.buycount, 2)
        if self.buycount == 0:
            return
        self.hold = True
        self.indexb = self.indexn
        self.csell = 0

    def SellTerm(self):
        """Return True when exit conditions are met while holding."""
        # hard exit near the daily +30% price limit
        if self.df['등락율'][self.index] > 29:
            return True
        bg = self.buycount * self.buyprice
        cg = self.buycount * self.df['현재가'][self.index]
        eyun, per = self.GetEyunPer(bg, cg)
        # strategy details withheld (not public)
        return False

    def Sell(self):
        """Simulate a market sell against bid levels 1-2 and book the trade."""
        if self.df['매수잔량1'][self.index] >= self.buycount:
            self.sellprice = self.df['매수호가1'][self.index]
        else:
            # level-1 bid depth is short: fill the rest at bid level 2
            b1hg = self.df['매수호가1'][self.index]
            b1jr = self.df['매수잔량1'][self.index]
            b2hg = self.df['매수호가2'][self.index]
            nc = self.buycount - b1jr
            self.sellprice = round((b1hg * b1jr + b2hg * nc) / self.buycount, 2)
        self.hold = False
        self.CalculationEyun()
        self.indexb = 0

    def CalculationEyun(self):
        """Accumulate totals for the closed trade and emit a per-trade record."""
        self.totalcount += 1
        bg = self.buycount * self.buyprice
        cg = self.buycount * self.sellprice
        eyun, per = self.GetEyunPer(bg, cg)
        self.totalper = round(self.totalper + per, 2)
        self.totaleyun = int(self.totaleyun + eyun)
        self.totalholdday += self.indexn - self.indexb
        if per > 0:
            self.totalcount_p += 1
        else:
            self.totalcount_m += 1
        # per-trade record: [timestamp, symbol, profit %, profit won]
        self.q.put([self.index, self.code, per, eyun])

    # noinspection PyMethodMayBeStatic
    def GetEyunPer(self, bg, cg):
        """Return (profit in won, profit %) for buy amount bg / sell amount cg,
        net of transaction tax (0.23%) and brokerage fees (0.015% each way)."""
        gtexs = cg * 0.0023
        gsfee = cg * 0.00015
        gbfee = bg * 0.00015
        # tax truncates to whole won, fees truncate to 10-won units
        texs = gtexs - (gtexs % 1)
        sfee = gsfee - (gsfee % 10)
        bfee = gbfee - (gbfee % 10)
        pg = int(cg - texs - sfee - bfee)
        eyun = pg - bg
        per = round(eyun / bg * 100, 2)
        return eyun, per

    def Report(self, count, tcount):
        """Emit the per-symbol summary to the queue and print a progress line."""
        if self.totalcount > 0:
            plus_per = round((self.totalcount_p / self.totalcount) * 100, 2)
            avgholdday = round(self.totalholdday / self.totalcount, 2)
            self.q.put([self.code, self.totalcount, avgholdday, self.totalcount_p, self.totalcount_m,
                        plus_per, self.totalper, self.totaleyun])
            totalcount, avgholdday, totalcount_p, totalcount_m, plus_per, totalper, totaleyun = \
                self.GetTotal(plus_per, avgholdday)
            print(f" 종목코드 {self.code} | 평균보유기간 {avgholdday}초 | 거래횟수 {totalcount}회 | "
                  f" 익절 {totalcount_p}회 | 손절 {totalcount_m}회 | 승률 {plus_per}% |"
                  f" 수익률 {totalper}% | 수익금 {totaleyun}원 [{count}/{tcount}]")
        else:
            # no trades for this symbol: emit an all-zero summary row
            self.q.put([self.code, 0, 0, 0, 0, 0., 0., 0])

    def GetTotal(self, plus_per, avgholdday):
        """Left-pad the summary numbers as strings so console columns align."""
        totalcount = str(self.totalcount)
        totalcount = ' ' + totalcount if len(totalcount) == 1 else totalcount
        totalcount = ' ' + totalcount if len(totalcount) == 2 else totalcount
        avgholdday = str(avgholdday)
        avgholdday = ' ' + avgholdday if len(avgholdday.split('.')[0]) == 1 else avgholdday
        avgholdday = ' ' + avgholdday if len(avgholdday.split('.')[0]) == 2 else avgholdday
        avgholdday = ' ' + avgholdday if len(avgholdday.split('.')[0]) == 3 else avgholdday
        avgholdday = avgholdday + '0' if len(avgholdday.split('.')[1]) == 1 else avgholdday
        totalcount_p = str(self.totalcount_p)
        totalcount_p = ' ' + totalcount_p if len(totalcount_p) == 1 else totalcount_p
        totalcount_p = ' ' + totalcount_p if len(totalcount_p) == 2 else totalcount_p
        totalcount_m = str(self.totalcount_m)
        totalcount_m = ' ' + totalcount_m if len(totalcount_m) == 1 else totalcount_m
        totalcount_m = ' ' + totalcount_m if len(totalcount_m) == 2 else totalcount_m
        plus_per = str(plus_per)
        plus_per = ' ' + plus_per if len(plus_per.split('.')[0]) == 1 else plus_per
        plus_per = ' ' + plus_per if len(plus_per.split('.')[0]) == 2 else plus_per
        plus_per = plus_per + '0' if len(plus_per.split('.')[1]) == 1 else plus_per
        totalper = str(self.totalper)
        totalper = ' ' + totalper if len(totalper.split('.')[0]) == 1 else totalper
        totalper = ' ' + totalper if len(totalper.split('.')[0]) == 2 else totalper
        totalper = ' ' + totalper if len(totalper.split('.')[0]) == 3 else totalper
        totalper = totalper + '0' if len(totalper.split('.')[1]) == 1 else totalper
        # comma-grouped profit needs padding per number of thousand groups
        totaleyun = format(self.totaleyun, ',')
        if len(totaleyun.split(',')) == 1:
            totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 1 else totaleyun
            totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 2 else totaleyun
            totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 3 else totaleyun
            totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 4 else totaleyun
        elif len(totaleyun.split(',')) == 2:
            totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 1 else totaleyun
            totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 2 else totaleyun
            totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 3 else totaleyun
            totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 4 else totaleyun
        elif len(totaleyun.split(',')) == 3:
            totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 1 else totaleyun
        return totalcount, avgholdday, totalcount_p, totalcount_m, plus_per, totalper, totaleyun
class Total:
def __init__(self, q_, last_, num_, df1_):
    """Collect per-trade/per-symbol results from the queue and report totals.

    q_    : result queue fed by the backtester workers
    last_ : number of messages to expect before finishing
    num_  : 8-element strategy parameter vector (echoed in the report)
    df1_  : symbol-code -> name lookup table
    """
    super().__init__()
    self.q = q_
    self.last = last_
    self.name = df1_
    # unpack the strategy tunables in their fixed optimizer order
    (self.gap_ch, self.avg_time, self.gap_sm, self.ch_low,
     self.dm_low, self.per_low, self.per_high, self.cs_per) = num_[:8]
    self.Start()
def Start(self):
columns = ['거래횟수', '평균보유기간', '익절', '손절', '승률', '수익률', '수익금']
df_back = pd.DataFrame(columns=columns)
df_tsg = pd.DataFrame(columns=['종목명', 'per', 'ttsg'])
k = 0
while True:
data = self.q.get()
if len(data) == 4:
name = self.name['종목명'][data[1]]
if data[0] in df_tsg.index:
df_tsg.at[data[0]] = df_tsg['종목명'][data[0]] + ';' + name, \
df_tsg['per'][data[0]] + data[2], \
df_tsg['ttsg'][data[0]] + data[3]
else:
df_tsg.at[data[0]] = name, data[2], data[3]
else:
df_back.at[data[0]] = data[1], data[2], data[3], data[4], data[5], data[6], data[7]
k += 1
if k == self.last:
break
if len(df_back) > 0:
tc = df_back['거래횟수'].sum()
if tc != 0:
pc = df_back['익절'].sum()
mc = df_back['손절'].sum()
pper = round(pc / tc * 100, 2)
df_back_ = df_back[df_back['평균보유기간'] != 0]
avghold = round(df_back_['평균보유기간'].sum() / len(df_back_), 2)
avgsp = round(df_back['수익률'].sum() / tc, 2)
tsg = int(df_back['수익금'].sum())
onedaycount = round(tc / TOTALTIME, 4)
onegm = int(BATTING * onedaycount * avghold)
if onegm < BATTING:
onegm = BATTING
tsp = round(tsg / onegm * 100, 4)
text = [self.gap_ch, self.avg_time, self.gap_sm, self.ch_low, self.dm_low,
self.per_low, self.per_high, self.cs_per]
print(f' {text}')
text = f" 종목당 배팅금액 {format(BATTING, ',')}원, 필요자금 {format(onegm, ',')}원, "\
f" 종목출현빈도수 {onedaycount}개/초, 거래횟수 {tc}회, 평균보유기간 {avghold}초,\n 익절 {pc}회, "\
f" 손절 {mc}회, 승률 {pper}%, 평균수익률 {avgsp}%, 수익률합계 {tsp}%, 수익금합계 {format(tsg, ',')}원"
print(text)
conn = sqlite3.connect(db_backtest)
df_back.to_sql(f"{strf_time('%Y%m%d')}_2cm", conn, if_exists='replace', chunksize=1000)
conn.close()
if len(df_tsg) > 0:
df_tsg['체결시간'] = df_tsg.index
df_tsg.sort_values(by=['체결시간'], inplace=True)
df_tsg['ttsg_cumsum'] = df_tsg['ttsg'].cumsum()
df_tsg[['ttsg', 'ttsg_cumsum']] = df_tsg[['ttsg', 'ttsg_cumsum']].astype(int)
conn = sqlite3.connect(db_backtest)
df_tsg.to_sql(f"{strf_time('%Y%m%d')}_2tm", conn, if_exists='replace', chunksize=1000)
conn.close()
df_tsg.plot(figsize=(12, 9), | |
range(18):
# angle = i * 20
# _def_lm.append('r{}_90'.format(angle))
def mw_lutmap_is_valid(lutmap: dict) -> bool:
    """Validate that `lutmap` obeys the LutMap schema.

    Keys must be integers (codeword indices) and every entry must declare
    a 'type' contained in the module-level `valid_types`.

    Args:
        lutmap: mapping of codeword index -> waveform specification dict.

    Returns:
        True when the lutmap is valid.

    Raises:
        TypeError: a key is not an int (now with a descriptive message —
            the original raised a bare TypeError).
        ValueError: an entry's 'type' is not in `valid_types`.
    """
    # FIXME: make this part of the validator for the LutMap parameter.
    for key, value in lutmap.items():
        if not isinstance(key, int):
            raise TypeError("lutmap key {!r} must be an int, got {}".format(
                key, type(key).__name__))
        if value['type'] not in valid_types:
            raise ValueError("{} not in {}".format(value['type'],
                                                   valid_types))
    return True
def theta_to_amp(theta: float, amp180: float):
    """Convert a rotation angle θ (degrees) to a pulse amplitude.

    The angle is first wrapped so the smallest-magnitude equivalent
    rotation is used, then mapped linearly with ±180° -> ±amp180.
    """
    # wrap the angle: algebraically identical to ((-theta+180) % 360 - 180) * -1
    theta_wrap = 180 - ((180 - theta) % 360)
    return theta_wrap / 180 * amp180
class Base_MW_LutMan(Base_LutMan):
"""
The base class for the microwave lutman.
Standard microwave pulses are generated based on a lutmap.
- Schema of lutmap.
- important attributes
self._wave_dict
Typical usage flow of the mw-lutmans
1. specify a lutmap that determines what waveforms are used.
2. set some parameters such as mw_amp180
3. generate waveforms -> stored in self._wave_dict
4. upload waveforms
"""
def set_default_lutmap(self):
    """Install the standard microwave-drive lutmap."""
    lutmap = default_mw_lutmap.copy()
    self.LutMap(lutmap)
def set_inspire_lutmap(self):
    """Install the expanded (inspire) microwave-drive lutmap."""
    lutmap = inspire_mw_lutmap.copy()
    self.LutMap(lutmap)
def codeword_idx_to_parnames(self, cw_idx: int):
    """Return the waveform parameter names for a codeword index.

    Probes every channel attribute this lutman may define — covering both
    VSM-style (GI/GQ/DI/DQ) and plain I/Q lutmans — and builds one
    'wave_ch<ch>_cw<idx>' name per channel that is present.
    """
    possible_channels = ('channel_GI', 'channel_GQ',
                         'channel_DI', 'channel_DQ',
                         'channel_I', 'channel_Q')
    names = []
    for ch in possible_channels:
        if hasattr(self, ch):
            names.append('wave_ch{}_cw{:03}'.format(self[ch](), cw_idx))
    return names
def _add_waveform_parameters(self):
    """Register the pulse-shape parameters for microwave waveforms.

    The waveform/spec functions are assigned here (rather than hard-coded)
    so that the VSM-based LutMan subclass can overwrite them.
    """
    # defined here so that the VSM based LutMan can overwrite this
    self.wf_func = wf.mod_gauss
    self.spec_func = wf.block_pulse

    self._add_channel_params()
    self.add_parameter('cfg_sideband_mode',
                       vals=vals.Enum('real-time', 'static'),
                       initial_value='static',
                       parameter_class=ManualParameter)
    self.add_parameter('mw_amp180', unit='frac', vals=vals.Numbers(-1, 1),
                       parameter_class=ManualParameter,
                       initial_value=1.0)
    self.add_parameter('mw_amp90_scale',
                       vals=vals.Numbers(-1, 1),
                       parameter_class=ManualParameter,
                       initial_value=0.5)
    self.add_parameter('mw_motzoi', vals=vals.Numbers(-2, 2),
                       parameter_class=ManualParameter,
                       initial_value=0.0)
    self.add_parameter('mw_gauss_width',
                       vals=vals.Numbers(min_value=1e-9), unit='s',
                       parameter_class=ManualParameter,
                       initial_value=4e-9)
    self.add_parameter('mw_phi', label='Phase of Rphi pulse',
                       vals=vals.Numbers(), unit='deg',
                       parameter_class=ManualParameter,
                       initial_value=0)

    self.add_parameter('spec_length',
                       vals=vals.Numbers(), unit='s',
                       parameter_class=ManualParameter,
                       initial_value=20e-9)
    self.add_parameter('spec_amp',
                       vals=vals.Numbers(), unit='frac',
                       parameter_class=ManualParameter,
                       initial_value=1)

    # parameters related to timings
    self.add_parameter('pulse_delay', unit='s', vals=vals.Numbers(0, 1e-6),
                       parameter_class=ManualParameter,
                       initial_value=0)
    # square pulse duration for larger pulses
    self.add_parameter('sq_pulse_duration', unit='s', vals=vals.Numbers(0, 1e-6),
                       parameter_class=ManualParameter,
                       initial_value=40e-9)

    self.add_parameter(
        'mw_modulation', vals=vals.Numbers(), unit='Hz',
        docstring=('Modulation frequency for qubit driving pulses. Note'
                   ' that when using an AWG with build in modulation this'
                   ' should be set to 0.'),
        parameter_class=ManualParameter, initial_value=50.0e6)
    self._add_mixer_corr_pars()

    self.add_parameter('mw_ef_modulation', vals=vals.Numbers(), unit='Hz',
                       docstring=('Modulation frequency for driving pulses to the '
                                  'second excited-state.'),
                       parameter_class=ManualParameter, initial_value=50.0e6)
    self.add_parameter('mw_ef_amp180', unit='frac',
                       docstring=(
                           'Pulse amplitude for pulsing the ef/12 transition'),
                       vals=vals.Numbers(-1, 1),
                       parameter_class=ManualParameter, initial_value=.2)
def _add_mixer_corr_pars(self):
self.add_parameter('mixer_alpha', vals=vals.Numbers(),
parameter_class=ManualParameter,
initial_value=1.0)
self.add_parameter('mixer_phi', vals=vals.Numbers(), unit='deg',
parameter_class=ManualParameter,
initial_value=0.0)
self.add_parameter(
'mixer_apply_predistortion_matrix', vals=vals.Bool(), docstring=(
'If True applies a mixer correction using mixer_phi and '
'mixer_alpha to all microwave pulses using.'),
parameter_class=ManualParameter, initial_value=True)
def _add_channel_params(self):
self.add_parameter('channel_I',
parameter_class=ManualParameter,
vals=vals.Numbers(1, self._num_channels))
self.add_parameter('channel_Q',
parameter_class=ManualParameter,
vals=vals.Numbers(1, self._num_channels))
def generate_standard_waveforms(
self, apply_predistortion_matrix: bool=True):
self._wave_dict = OrderedDict()
if self.cfg_sideband_mode() == 'static':
f_modulation = self.mw_modulation()
else:
f_modulation = 0
# lutmap is expected to obey lutmap mw schema
for idx, waveform in self.LutMap().items():
if waveform['type'] == 'ge':
if waveform['theta'] == 90:
amp = self.mw_amp180()*self.mw_amp90_scale()
elif waveform['theta'] == -90:
amp = - self.mw_amp180() * self.mw_amp90_scale()
else:
amp = theta_to_amp(theta=waveform['theta'],
amp180=self.mw_amp180())
self._wave_dict[idx] = self.wf_func(
amp=amp,
phase=waveform['phi'],
sigma_length=self.mw_gauss_width(),
f_modulation=f_modulation,
sampling_rate=self.sampling_rate(),
motzoi=self.mw_motzoi(),
delay=self.pulse_delay())
elif waveform['type'] == 'ef':
amp = theta_to_amp(theta=waveform['theta'],
amp180=self.mw_ef_amp180())
self._wave_dict[idx] = self.wf_func(
amp=amp,
phase=waveform['phi'],
sigma_length=self.mw_gauss_width(),
f_modulation=self.mw_ef_modulation(),
sampling_rate=self.sampling_rate(),
motzoi=0,
delay=self.pulse_delay())
elif waveform['type'] == 'raw-drag':
self._wave_dict[idx] = self.wf_func(
**waveform["drag_pars"])
elif waveform['type'] == 'spec':
self._wave_dict[idx] = self.spec_func(
amp=self.spec_amp(),
length=self.spec_length(),
sampling_rate=self.sampling_rate(),
delay=0,
phase=0)
elif waveform['type'] == 'square':
# Using a slightly different construction as above
# as the call signatures of these functions is different.
# Apperently the VSM LutMan has both parameters, so make sure
# we detect on the one only available in the VSM. Otherwise, we
# won't get the needed four waveforms.
if 'duration' in waveform.keys():
sq_pulse_duration = waveform['duration']
else:
sq_pulse_duration = self.sq_pulse_duration()
if 'sq_G_amp' in self.parameters:
self._wave_dict[idx] = wf.mod_square_VSM(
amp_G=self.sq_G_amp(), amp_D=self.sq_D_amp(),
length=sq_pulse_duration,#self.mw_gauss_width()*4,
f_modulation=self.mw_modulation() if self.cfg_sideband_mode()!='real-time' else 0,
sampling_rate=self.sampling_rate())
elif 'sq_amp' in self.parameters:
self._wave_dict[idx] = wf.mod_square(
amp=self.sq_amp(), length=sq_pulse_duration,
f_modulation=self.mw_modulation() if self.cfg_sideband_mode()!='real-time' else 0,
phase=0, motzoi=0, sampling_rate=self.sampling_rate())
else:
raise KeyError('Expected parameter "sq_amp" to exist')
else:
raise ValueError
# Add predistortions + test
if (self.mixer_apply_predistortion_matrix()
and apply_predistortion_matrix and self.cfg_sideband_mode != 'real-time'):
self._wave_dict = self.apply_mixer_predistortion_corrections(
self._wave_dict)
return self._wave_dict
def apply_mixer_predistortion_corrections(self, wave_dict):
M = wf.mixer_predistortion_matrix(self.mixer_alpha(),
self.mixer_phi())
for key, val in wave_dict.items():
wave_dict[key] = np.dot(M, val)
return wave_dict
def load_waveform_onto_AWG_lookuptable(self, waveform_name: str,
regenerate_waveforms: bool=False):
if regenerate_waveforms:
self.generate_standard_waveforms()
# FIXME: type mismatch with function parameter, misleading name
if isinstance(waveform_name, int):
cw_idx = waveform_name
else:
raise DeprecationWarning
waveforms = self._wave_dict[cw_idx]
codewords = self.codeword_idx_to_parnames(cw_idx)
for waveform, cw in zip(waveforms, codewords):
self.AWG.get_instr().set(cw, waveform)
def load_phase_pulses_to_AWG_lookuptable(self,
phases=np.arange(0, 360, 20)):
"""
Loads rPhi90 pulses onto the AWG lookuptable.
"""
if (len(phases) > 18):
raise ValueError('max 18 amplitude values can be provided')
lm = self.LutMap()
for i, (phase) in enumerate(phases):
lm[i+9] = {"name": "rPhi90", "theta": 90,
"phi": phase, "type": "ge"}
self.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True)
def load_x_pulses_to_AWG_lookuptable(self,
phases=np.arange(0, 360, 20)):
"""
Loads rPhi90 pulses onto the AWG lookuptable.
"""
if (len(phases) > 18):
raise ValueError('max 18 amplitude values can be provided')
lm = self.LutMap()
for i, (phase) in enumerate(phases):
lm[i+9] = {"name": "rPhi90", "theta": phase,
"phi": 0, "type": "ge"}
self.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True)
def load_square_waves_to_AWG_lookuptable(self):
"""
Loads square pulses onto the AWG lookuptable.
"""
self.set_default_lutmap()
lm = self.LutMap()
lm[10] = {"name": "square",
"type": "square",
"duration": 1e-6}
lm[11] = {"name": "cw_11",
"type": "square"}
for i in range(12,21):
div = i-12
lm[i] = {"name": "cw_{}".format(i),
"type": "square",
"duration": 40e-9*(i-11)/10}
self.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True)
def load_ef_rabi_pulses_to_AWG_lookuptable(self, amps: list=None,
mod_freqs: list=None):
"""
Special loading method that loads (up to) 18 pulses in
order to do a rabi on the ef (1-2) transition.
This method also generates the waveforms.
This method contains several steps
1. determine what ef-pulses to generate
2. generate a LutMap to use and upload the waveforms
3. generate and upload waveforms.
"""
# 1. Determine what ef-pulses to generate
if not isinstance(amps, Iterable) and (mod_freqs is None):
amps = [self.mw_ef_amp180()]
elif len(amps) == 1:
amps = [amps]*len(mod_freqs)
if (len(amps) > 18):
raise ValueError('max 18 amplitude values can be provided')
if mod_freqs is None:
mod_freqs = [self.mw_ef_modulation()]*len(amps)
elif len(mod_freqs) == 1:
mod_freqs = [mod_freqs]*len(amps)
# 2. Generate a LutMap for the ef-pulses
lm = self.LutMap()
for i, (amp, mod_freq) in enumerate(zip(amps, mod_freqs)):
lm[i+9] = {"name": "", "type": "raw-drag",
"drag_pars": {
"amp": amp, "f_modulation": mod_freq,
"sigma_length": self.mw_gauss_width(),
"sampling_rate": self.sampling_rate(),
"motzoi": 0}
}
# 3. generate and upload waveforms
self.load_waveforms_onto_AWG_lookuptable(regenerate_waveforms=True)
class CBox_MW_LutMan(Base_MW_LutMan):
    """Microwave LutMan for the CBox.

    The CBox only supports 8 codewords per AWG pair, so only the first 8
    entries of the default lutmap are actually mapped.
    """
    # Default lutmap; the remaining codewords are pi/2 rotations at
    # 20-degree angle increments.
    # FIX: built with a comprehension instead of a class-body for-loop,
    # which leaked the loop variables ('i', 'angle') as class attributes.
    _def_lm = (['I', 'rX180', 'rY180', 'rX90', 'rY90',
                'rXm90', 'rYm90', 'rPhi90', 'spec']
               + ['r{}_90'.format(i * 20) for i in range(18)])

    def __init__(self, name, **kw):
        super().__init__(name, **kw)

    def _add_channel_params(self):
        # CBox channels come in pairs defined in the AWG nr
        self.add_parameter('awg_nr', parameter_class=ManualParameter,
                           initial_value=0, vals=vals.Numbers(0, 2))

    def load_waveform_onto_AWG_lookuptable(self, waveform_name: str,
                                           regenerate_waveforms: bool=False):
        """Upload the I and Q waveforms of one codeword to the CBox."""
        if regenerate_waveforms:
            self.generate_standard_waveforms()
        I_wave, Q_wave = self._wave_dict[waveform_name]
        codeword = self.LutMap()[waveform_name]

        self.AWG.get_instr().set_awg_lookuptable(self.awg_nr(),
                                                 codeword, 0, I_wave)
        self.AWG.get_instr().set_awg_lookuptable(self.awg_nr(),
                                                 codeword, 1, Q_wave)

    def set_default_lutmap(self):
        """Set's the default lutmap for standard microwave drive pulses."""
        LutMap = OrderedDict()
        # The CBox supports at most 8 codewords; hoisted out of the loop.
        max_cw_cbox = 8
        for cw_idx, cw_key in enumerate(self._def_lm):
            if cw_idx < max_cw_cbox:
                LutMap[cw_key] = cw_idx
        self.LutMap(LutMap)
class QWG_MW_LutMan(Base_MW_LutMan):
    def __init__(self, name, **kw):
        # The QWG has 4 output channels; this must be set *before* calling
        # the base-class constructor, which adds the channel parameters
        # whose validators read self._num_channels.
        self._num_channels = 4
        super().__init__(name, **kw)
def _add_channel_params(self):
super()._add_channel_params()
self.add_parameter('channel_amp',
unit='a.u.',
vals=vals.Numbers(-1.8, 1.8),
set_cmd=self._set_channel_amp,
get_cmd=self._get_channel_amp,
docstring=('using the channel amp as additional'
'parameter to allow rabi-type experiments without'
'wave reloading. Should not be using VSM'))
# parameters related to codeword bits
self.add_parameter('bit_shift', unit='', vals=vals.Ints(0, 8),
parameter_class=ManualParameter,
initial_value=0)
self.add_parameter('bit_width', unit='', vals=vals.Ints(0, 8),
parameter_class=ManualParameter,
initial_value=0)
def _add_waveform_parameters(self):
super()._add_waveform_parameters()
# Parameters for a square pulse
self.add_parameter('sq_amp',
unit='frac', vals=vals.Numbers(-1, 1),
parameter_class=ManualParameter,
initial_value=0.5)
def _set_channel_amp(self, val):
AWG = self.AWG.get_instr()
AWG.set('ch{}_amp'.format(self.channel_I()), val)
AWG.set('ch{}_amp'.format(self.channel_Q()), val)
def _get_channel_amp(self):
AWG = self.AWG.get_instr()
val_I = AWG.get('ch{}_amp'.format(self.channel_I()))
val_Q = AWG.get('ch{}_amp'.format(self.channel_Q()))
assert val_Q == val_I
return val_I
def load_waveform_onto_AWG_lookuptable(
self, wave_id: str, regenerate_waveforms: bool=False):
"""
Load a waveform into the AWG.
Args:
wave_id: can be either the "name" of a waveform or
the integer key in self._wave_dict.
regenerate_waveforms | |
j.exceptions.RuntimeError("Not supported on this platform!")
def getVlanTag(self, interface, nicType=None):
"""Get VLan tag on the specified interface and vlan type"""
if nicType is None:
nicType = j.sal.nettools.getNicType(interface)
if nicType == "INFINIBAND" or nicType == "ETHERNET_GB" or nicType == "VIRTUAL":
return "0"
if j.core.platformtype.myplatform.platform_is_linux:
# check if its a vlan
vlanfile = "/proc/net/vlan/%s" % (interface)
if j.sal.fs.exists(vlanfile):
return j.sal.nettools.getVlanTagFromInterface(interface)
bridgefile = "/sys/class/net/%s/brif/" % (interface)
for brif in j.sal.fs.listDirsInDir(bridgefile):
brif = j.sal.fs.getBaseName(brif)
vlanfile = "/proc/net/vlan/%s" % (brif)
if j.sal.fs.exists(vlanfile):
return j.sal.nettools.getVlanTagFromInterface(brif)
return "0"
elif j.core.platformtype.myplatform.platform_is_osx or j.core.platformtype.myplatform.isWindows:
return j.sal.nettools.getVlanTagFromInterface(interface)
else:
raise j.exceptions.RuntimeError("Not supported on this platform!")
def getVlanTagFromInterface(self, interface):
"""Get vlan tag from interface
@param interface: string interface to get vlan tag on
@rtype: integer representing the vlan tag
"""
if j.core.platformtype.myplatform.platform_is_linux:
vlanfile = "/proc/net/vlan/%s" % (interface)
if j.sal.fs.exists(vlanfile):
content = j.sal.fs.readFile(vlanfile)
match = re.search("^%s\s+VID:\s+(?P<vlantag>\d+)\s+.*$" % (interface), content, re.MULTILINE)
if match:
return match.group("vlantag")
else:
raise ValueError("Could not find vlantag for interface %s" % (interface))
else:
raise ValueError("This is not a vlaninterface %s" % (interface))
elif j.core.platformtype.myplatform.platform_is_osx:
# work with interfaces which are subnetted on vlans eq e1000g5000:1
interface = interface.split(":")[0]
match = re.search("^\w+?(?P<interfaceid>\d+)$", interface, re.MULTILINE)
if not match:
raise ValueError("This is not a vlaninterface %s" % (interface))
return int(match.group("interfaceid")) / 1000
elif j.core.platformtype.myplatform.isWindows:
import wmi
vir = wmi.WMI(namespace="virtualization")
mac = j.sal.nettools.getMacAddress(interface)
mac = mac.replace(":", "")
dynFor = vir.Msvm_DynamicForwardingEntry(elementname=mac)
return dynFor[0].VlanId if dynFor else 0
def getReachableIpAddress(self, ip, port):
"""Returns the first local ip address that can connect to the specified ip on the specified port"""
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect((ip, port))
except BaseException:
raise j.exceptions.RuntimeError("Cannot connect to %s:%s, check network configuration" % (ip, port))
return s.getsockname()[0]
def getDefaultIPConfig(self):
ipaddr = self.getReachableIpAddress("8.8.8.8", 22)
for item in j.sal.nettools.getNetworkInfo():
for ipaddr2 in item["ip"]:
# print "%s %s"%(ipaddr2,ipaddr)
if str(ipaddr) == str(ipaddr2):
return item["name"], ipaddr
def bridgeExists(self, bridgename):
if j.core.platformtype.myplatform.platform_is_osx:
cmd = "ifconfig bridge0"
rc, out, err = j.sal.process.execute(cmd, showout=False)
if bridgename in out:
return True
else:
cmd = "brctl show"
rc, out, err = j.sal.process.execute(cmd, showout=False)
for line in out.split("\n"):
if line.lower().startswith(bridgename):
return True
return False
def resetDefaultGateway(self, gw):
def gwexists():
if j.core.platformtype.myplatform.platform_is_osx:
cmd = "netstat -r"
else:
cmd = "ip r"
rc, out, err = j.sal.process.execute(cmd, showout=False)
for line in out.split("\n"):
if line.lower().startswith("default"):
return True
return False
def removegw():
if j.core.platformtype.myplatform.platform_is_osx:
cmd = "route -n delete default"
else:
cmd = "ip route del 0/0"
rc, out, err = j.sal.process.execute(cmd, showout=False, die=False)
removegw()
couter = 0
while gwexists():
removegw()
time.sleep(1)
self._log_debug("try to delete def gw")
counter += 1
if counter > 10:
raise j.exceptions.RuntimeError("cannot delete def gw")
if j.core.platformtype.myplatform.platform_is_osx:
cmd = "route add default %s" % gw
else:
cmd = "route add default gw %s" % gw
j.sal.process.execute(cmd)
return "added %s default gw" % gw
def _linux_networkinfo_get(self, device=None):
"""
returns network info like
[{'cidr': 8, 'ip': ['127.0.0.1'], 'mac': '00:00:00:00:00:00', 'name': 'lo'},
{'cidr': 24,
'ip': ['192.168.0.105'],
'mac': '80:ee:73:a9:19:05',
'name': 'enp2s0'},
{'cidr': 0, 'ip': [], 'mac': '80:ee:73:a9:19:06', 'name': 'enp3s0'},
{'cidr': 16,
'ip': ['172.17.0.1'],
'mac': '02:42:97:63:e6:ba',
'name': 'docker0'}]
:param device: device name, defaults to None
:type device: str, optional
:raises RuntimeError: if the platform isn't implemented
:return: network info
:rtype: list or dict if device is specified
"""
IPBLOCKS = re.compile("(^|\n)(?P<block>\d+:.*?)(?=(\n\d+)|$)", re.S)
IPMAC = re.compile("^\s+link/\w+\s+(?P<mac>(\w+:){5}\w{2})", re.M)
IPIP = re.compile(r"\s+?inet\s(?P<ip>(\d+\.){3}\d+)/(?P<cidr>\d+)", re.M)
IPNAME = re.compile("^\d+: (?P<name>.*?)(?=:)", re.M)
def block_parse(block):
result = {"ip": [], "ip6": [], "cidr": [], "mac": "", "name": ""}
for rec in (IPMAC, IPNAME):
match = rec.search(block)
if match:
result.update(match.groupdict())
for mrec in (IPIP,):
for m in mrec.finditer(block):
for key, value in list(m.groupdict().items()):
result[key].append(value)
_, IPV6, _ = j.sal.process.execute("ifconfig %s | awk '/inet6/{print $2}'" % result["name"], showout=False)
for ipv6 in IPV6.split("\n"):
result["ip6"].append(ipv6)
if j.data.types.list.check(result["cidr"]):
if len(result["cidr"]) == 0:
result["cidr"] = 0
else:
result["cidr"] = int(result["cidr"][0])
return result
def networkinfo_get():
_, output, _ = j.sal.process.execute("ip a", showout=False)
for m in IPBLOCKS.finditer(output):
block = m.group("block")
yield block_parse(block)
res = []
for nic in networkinfo_get():
if nic["name"] == device:
return nic
res.append(nic)
if device is not None:
raise j.exceptions.RuntimeError("could not find device")
return res
def networkinfo_get(self, device=None):
"""
Get network info
[{'cidr': 8, 'ip': ['127.0.0.1'], 'mac': '00:00:00:00:00:00', 'name': 'lo'},
{'cidr': 24,
'ip': ['192.168.0.105'],
'ip6': ['...','...],
'mac': '80:ee:73:a9:19:05',
'name': 'enp2s0'},
{'cidr': 0, 'ip': [], 'mac': '80:ee:73:a9:19:06', 'name': 'enp3s0'},
{'cidr': 16,
'ip': ['172.17.0.1'],
'mac': '02:42:97:63:e6:ba',
'name': 'docker0'}]
:param device: device name, defaults to None
:type device: str, optional
:raises RuntimeError: if the platform isn't implemented
:return: network info
:rtype: list or dict if device is specified
"""
# @TODO: change for windows
# @TODO: use caching feature from jumpscale to keep for e.g. 1 min,
# if not usecache needs to reset cache to make sure we load again
if j.core.platformtype.myplatform.platform_is_linux:
return self._linux_networkinfo_get(device)
else:
raise RuntimeError("not implemented")
def getIpAddress(self, interface):
"""Return a list of ip addresses and netmasks assigned to this interface"""
# TODO: use getNetworkInfo to return info
if j.core.platformtype.myplatform.platform_is_linux or j.core.platformtype.myplatform.platform_is_osx:
output = list()
output = j.builders.system.net.getInfo()
result = {"ip": [], "ip6": []}
for nic in output:
if nic["name"] == interface:
result["ip"].append(nic["ip"])
result["ip6"].append(nic["ip6"])
return result
elif j.core.platformtype.myplatform.isWindows:
import wmi
ipv4Pattern = "^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$"
w = wmi.WMI()
NICIndex = interface.split(":")[0]
nic = w.Win32_NetworkAdapterConfiguration(index=NICIndex)[0]
result = []
if nic.IPAddress:
for x in range(0, len(nic.IPAddress)):
# skip IPv6 addresses for now
if re.match(ipv4Pattern, str(nic.IPAddress[x])) is not None:
result.append([str(nic.IPAddress[x]), str(nic.IPSubnet[x]), ""])
return result
else:
raise j.exceptions.RuntimeError("j.sal.nettools.getIpAddress not supported on this platform")
    def getMacAddress(self, interface):
        """Return the MAC address of this interface

        NOTE(review): the return type differs per platform -- a *list*
        with at most one mac on linux/osx, a plain string on windows and
        a formatted string or None on solaris. Confirm callers handle all
        of these.
        """
        if interface not in self.getNics():
            raise LookupError("Interface %s not found on the system" % interface)
        if j.core.platformtype.myplatform.platform_is_linux or j.core.platformtype.myplatform.platform_is_osx:
            output = list()
            output = j.builders.system.net.getInfo()
            result = list()
            # pick the mac of the first (and only) nic with a matching name
            for nic in output:
                if nic["name"] == interface:
                    result.append(nic["mac"])
                    break
            return result
        elif j.core.platformtype.myplatform.isSolaris():
            # check if interface is a logical inteface ex: bge0:1
            tokens = interface.split(":")
            if len(tokens) > 1:
                interface = tokens[0]
            command = "ifconfig %s" % interface
            exitcode, output, err = j.sal.process.execute(command, showout=False, die=False)
            if exitcode != 0:
                # temporary plumb the interface to lookup its mac
                self._log_warning(
                    "Interface %s is down. Temporarily plumbing it to be able to lookup its MAC address" % interface
                )
                j.sal.process.execute("%s plumb" % command, showout=False)
                exitcode, output, err = j.sal.process.execute(command, showout=False, die=False)
                j.sal.process.execute("%s unplumb" % command, showout=False)
            if exitcode == 0:
                match = re.search(r"^\s*(ipib|ether)\s*(?P<mac>\S*)", output, re.MULTILINE)
                if match:
                    return self.pm_formatMacAddress(match.group("mac"))
            # mac could not be determined
            return None
        elif j.core.platformtype.myplatform.isWindows:
            import wmi

            w = wmi.WMI()
            NICIndex = interface.split(":")[0]
            return str(w.Win32_NetworkAdapterConfiguration(index=NICIndex)[0].MACAddress)
        else:
            raise j.exceptions.RuntimeError("j.sal.nettools.getMacAddress not supported on this platform")
def pm_formatMacAddress(self, macaddress):
macpieces = macaddress.strip().split(":")
mac = ""
for piece in macpieces:
if len(piece) == 1:
mac += "0"
mac += piece + ":"
mac = mac[:-1]
return mac
def isIpLocal(self, ipaddress):
if ipaddress == "127.0.0.1" or ipaddress == "localhost":
return True
return ipaddress in self.getIpAddresses()["ip"]
def isIpInDifferentNetwork(self, ipaddress):
for netinfo in self.getNetworkInfo():
if netinfo["ip"]:
if j.core.platformtype.myplatform.platform_is_linux:
if ipaddress in netaddr.IPNetwork("{}/{}".format(netinfo["ip"][0], netinfo["cidr"])):
return False
elif j.core.platformtype.myplatform.platform_is_osx:
if ipaddress in netaddr.IPNetwork("{}/{}".format(netinfo["ip"][0], netinfo["cidr"][0])):
return False
return True
def getMacAddressForIp(self, ipaddress):
"""Search the MAC address of the given IP address in the ARP table
@param ipaddress: IP address of the machine
@rtype: string
@return: The MAC address corresponding with the given IP
@raise: RuntimeError if no MAC found for IP or if platform is not suppported
"""
if j.core.platformtype.myplatform.platform_is_linux or j.core.platformtype.myplatform.platform_is_osx:
IpAdress = list()
IpAdress.append(ipaddress)
output = list()
output = j.builders.system.net.getInfo()
result = list()
for nic in output:
if nic["ip"] == IpAdress:
result.append(nic["mac"])
return result
elif nic["ip6"] == IpAdress:
result.append(nic["mac"])
return result
return "no MAC found for %s" % ipaddress
else:
raise j.exceptions.RuntimeError("j.sal.nettools.getMacAddressForIp not supported on this platform")
def getHostname(self):
"""Get hostname of the machine
"""
return socket.gethostname()
def isNicConnected(self, interface):
if j.core.platformtype.myplatform.platform_is_linux:
carrierfile = "/sys/class/net/%s/carrier" % (interface)
if not j.sal.fs.exists(carrierfile):
return False
try:
return int(j.sal.fs.readFile(carrierfile)) != 0
except IOError:
return False
elif j.core.platformtype.myplatform.platform_is_osx:
command = "dladm show-dev -p -o STATE %s" % interface
expectResults = ["up", "unknown"]
exitcode, output, err = j.sal.process.execute(command, die=False, showout=False)
if exitcode != 0:
return False
output = output.strip()
if output in expectResults:
return True
else:
return False
def getHostByName(self, dnsHostname):
| |
<gh_stars>1-10
#
# Bindings.py -- Bindings classes for Ginga FITS viewer.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import math
import os.path
import itertools
import numpy as np
from ginga.misc import Bunch, Settings, Callback
from ginga import trcalc
from ginga import cmap, imap
from ginga.util.paths import icondir
class ImageViewBindings(object):
"""
Mouse Operation and Bindings
"""
    def __init__(self, logger, settings=None):
        """Set up default state for the bindings object.

        Args:
            logger: logger used for warnings/diagnostics.
            settings: optional SettingGroup; when omitted a default group
                is created and populated via initialize_settings().
        """
        super(ImageViewBindings, self).__init__()
        self.logger = logger
        # feature enable flags; all off until the enable_*() methods are called
        self.canpan = False
        self.canzoom = False
        self.cancut = False
        self.cancmap = False
        self.canflip = False
        self.canrotate = False

        # For panning
        # transient state used while a pan/zoom/rotate drag is in progress
        self._pantype = 1
        self._start_x = None
        self._start_y = None
        self._start_panx = 0
        self._start_pany = 0
        self._start_scale_x = 0
        self._start_scale_y = 0
        self._start_rot = 0
        self._save = {}

        if settings is None:
            # No settings passed.  Set up defaults.
            settings = Settings.SettingGroup(name='bindings',
                                             logger=self.logger)
            self.initialize_settings(settings)
        self.settings = settings
        # maps feature name -> name of the corresponding enable attribute
        self.features = dict(
            # name, attr pairs
            pan='canpan', zoom='canzoom', cuts='cancut', cmap='cancmap',
            flip='canflip', rotate='canrotate')
        # maps mode name -> cursor name (filled by setup_settings_events)
        self.cursor_map = {}
    def initialize_settings(self, settings):
        """Populate *settings* with the default button/key/gesture bindings.

        Every value here can be overridden by the user's bindings.cfg.
        Prefix conventions: mod_=modifier keys, dmod_=modes, kp_=keyboard,
        ms_=mouse buttons, sc_=scroll wheel, pi_=pinch, pa_=pan gestures.
        """
        settings.add_settings(
            # You should rarely have to change these.
            btn_nobtn=0x0,
            btn_left=0x1,
            btn_middle=0x2,
            btn_right=0x4,
            btn_back=0x8,
            btn_forward=0x10,

            # define our cursors
            ## cur_pick = 'thinCrossCursor',
            ## cur_pan = 'openHandCursor',

            # Set up our standard modifiers
            mod_shift=['shift_l', 'shift_r'],
            mod_ctrl=['control_l', 'control_r'],
            mod_win=['meta_right'],

            # Define our modes
            # Mode 'meta' is special: it is an intermediate mode that
            # is used primarily to launch other modes
            # If the mode initiation character is preceeded by a double
            # underscore, then the mode must be initiated from the "meta"
            # mode.
            dmod_meta=['space', None, None],
            dmod_draw=['__b', None, None],
            dmod_cmap=['__y', None, None],
            dmod_cuts=['__s', None, None],
            dmod_dist=['__d', None, None],
            dmod_contrast=['__t', None, None],
            dmod_rotate=['__r', None, None],
            dmod_pan=['__q', None, 'pan'],
            dmod_freepan=['__w', None, 'pan'],
            dmod_camera=['__c', None, 'pan'],
            dmod_naxis=['__n', None, None],

            default_mode_type='locked',
            default_lock_mode_type='softlock',

            # KEYBOARD
            kp_zoom_in=['+', '='],
            kp_zoom_out=['-', '_'],
            kp_zoom=['1', '2', '3', '4', '5', '6', '7', '8', '9', '0'],
            kp_zoom_inv=['!', '@', '#', '$', '%', '^', '&', '*', '(', ')'],
            kp_zoom_fit=['backquote', 'pan+backquote', 'freepan+backquote'],
            kp_autozoom_toggle=['doublequote', 'pan+doublequote'],
            kp_autozoom_override=['singlequote', 'pan+singlequote'],
            kp_dist_reset=['D', 'dist+D'],
            kp_dist_prev=['dist+up', 'dist+b'],
            kp_dist_next=['dist+down', 'dist+n'],
            kp_pan_set=['p', 'pan+p', 'freepan+p'],
            kp_pan_zoom_set=['pan+1', 'freepan+1'],
            kp_pan_zoom_save=['pan+z', 'freepan+z'],
            kp_pan_left=['pan+*+left', 'freepan+*+left'],
            kp_pan_right=['pan+*+right', 'freepan+*+right'],
            kp_pan_up=['pan+*+up', 'freepan+*+up'],
            kp_pan_down=['pan+*+down', 'freepan+*+down'],
            kp_pan_home=['pan+*+home', 'freepan+*+home'],
            kp_pan_end=['pan+*+end', 'freepan+*+end'],
            kp_pan_page_up=['pan+*+page_up', 'freepan+*+page_up'],
            kp_pan_page_down=['pan+*+page_down', 'freepan+*+page_down'],
            kp_pan_px_xminus=['shift+left'],
            kp_pan_px_xplus=['shift+right'],
            kp_pan_px_yminus=['shift+down'],
            kp_pan_px_yplus=['shift+up'],
            kp_pan_px_center=['shift+home'],
            kp_center=['c', 'pan+c', 'freepan+c'],
            kp_cut_255=['cuts+A'],
            kp_cut_minmax=['cuts+S'],
            kp_cut_auto=['a', 'cuts+a'],
            kp_autocuts_alg_prev=['cuts+up', 'cuts+b'],
            kp_autocuts_alg_next=['cuts+down', 'cuts+n'],
            kp_autocuts_toggle=[':', 'cuts+:'],
            kp_autocuts_override=[';', 'cuts+;'],
            kp_autocenter_toggle=['?', 'pan+?'],
            kp_autocenter_override=['/', 'pan+/'],
            kp_contrast_restore=['T', 'contrast+T'],
            kp_cmap_reset=['Y', 'cmap+Y'],
            kp_cmap_restore=['cmap+r'],
            kp_cmap_invert=['I', 'cmap+I'],
            kp_cmap_prev=['cmap+up', 'cmap+b'],
            kp_cmap_next=['cmap+down', 'cmap+n'],
            kp_toggle_cbar=['cmap+c'],
            kp_imap_reset=['cmap+i'],
            kp_imap_prev=['cmap+left', 'cmap+j'],
            kp_imap_next=['cmap+right', 'cmap+k'],
            kp_flip_x=['[', '{', 'rotate+[', 'rotate+{'],
            kp_flip_y=[']', '}', 'rotate+]', 'rotate+}'],
            kp_swap_xy=['backslash', '|', 'rotate+backslash', 'rotate+|'],
            kp_rotate_reset=['R', 'rotate+R'],
            kp_save_profile=['S'],
            kp_rotate_inc90=['.', 'rotate+.'],
            kp_rotate_dec90=[',', 'rotate+,'],
            kp_orient_lh=['o', 'rotate+o'],
            kp_orient_rh=['O', 'rotate+O'],
            kp_poly_add=['v', 'draw+v'],
            kp_poly_del=['z', 'draw+z'],
            kp_edit_del=['draw+x'],
            kp_reset=['escape'],
            kp_lock=['L', 'meta+L'],
            kp_softlock=['l', 'meta+l'],
            kp_camera_save=['camera+s'],
            kp_camera_reset=['camera+r'],
            kp_camera_toggle3d=['camera+3'],

            # pct of a window of data to move with pan key commands
            key_pan_pct=0.666667,
            # amount to move (in pixels) when using key pan arrow
            key_pan_px_delta=1.0,

            # SCROLLING/WHEEL
            sc_pan=['ctrl+scroll'],
            sc_pan_fine=['pan+shift+scroll'],
            sc_pan_coarse=['pan+ctrl+scroll'],
            sc_zoom=['scroll', 'freepan+scroll'],
            sc_zoom_fine=[],
            sc_zoom_coarse=[],
            sc_zoom_origin=['shift+scroll', 'freepan+shift+scroll'],
            sc_cuts_fine=['cuts+ctrl+scroll'],
            sc_cuts_coarse=['cuts+scroll'],
            sc_cuts_alg=[],
            sc_dist=['dist+scroll'],
            sc_cmap=['cmap+scroll'],
            sc_imap=['cmap+ctrl+scroll'],
            sc_camera_track=['camera+scroll'],
            sc_naxis=['naxis+scroll'],

            scroll_pan_acceleration=1.0,
            # 1.0 is appropriate for a mouse, 0.1 for most trackpads
            scroll_zoom_acceleration=1.0,
            #scroll_zoom_acceleration=0.1,
            scroll_zoom_direct_scale=False,

            mouse_zoom_acceleration=1.085,
            mouse_rotate_acceleration=0.75,
            pan_reverse=False,
            pan_multiplier=1.0,
            pan_min_scroll_thumb_pct=0.0,
            pan_max_scroll_thumb_pct=0.9,
            zoom_scroll_reverse=False,

            # MOUSE/BUTTON
            ms_none=['nobtn'],
            ms_cursor=['left'],
            ms_wheel=[],
            ms_draw=['draw+left', 'win+left', 'right'],

            ms_rotate=['rotate+left'],
            ms_rotate_reset=['rotate+right'],
            ms_contrast=['contrast+left', 'ctrl+right'],
            ms_contrast_restore=['contrast+right', 'ctrl+middle'],
            ms_pan=['pan+left', 'ctrl+left'],
            ms_zoom=['pan+right'],
            ms_freepan=['freepan+middle'],
            ms_zoom_in=['freepan+left'],
            ms_zoom_out=['freepan+right', 'freepan+ctrl+left'],
            ms_cutlo=['cuts+shift+left'],
            ms_cuthi=['cuts+ctrl+left'],
            ms_cutall=['cuts+left'],
            ms_cut_auto=['cuts+right'],
            ms_panset=['pan+middle', 'shift+left', 'middle'],
            ms_cmap_rotate=['cmap+left'],
            ms_cmap_restore=['cmap+right'],
            ms_camera_orbit=['camera+left'],
            ms_camera_pan_delta=['camera+right'],
            ms_naxis=['naxis+left'],

            # GESTURES (some backends only)
            pi_zoom=['pinch'],
            pi_zoom_origin=['shift+pinch'],
            pa_pan=['pan'],
            pa_zoom=['freepan+pan'],
            pa_zoom_origin=['freepan+shift+pan'],
            pa_naxis=['naxis+pan'],

            pinch_actions=['zoom'],
            pinch_zoom_acceleration=1.0,
            pinch_rotate_acceleration=1.0,
            pan_pan_acceleration=1.0,

            # No messages for following operations:
            msg_panset=False,
        )
def get_settings(self):
return self.settings
def window_map(self, viewer):
self.to_default_mode(viewer)
def set_bindings(self, viewer):
viewer.add_callback('map', self.window_map)
bindmap = viewer.get_bindmap()
bindmap.clear_button_map()
bindmap.clear_event_map()
bindmap.add_callback('mode-set', self.mode_set_cb, viewer)
# Set up bindings
self.setup_settings_events(viewer, bindmap)
def set_mode(self, viewer, name, mode_type='oneshot'):
bindmap = viewer.get_bindmap()
bindmap.set_mode(name, mode_type=mode_type)
def mode_set_cb(self, bm, mode, mode_type, viewer):
cursor_name = self.cursor_map.get(mode, 'pick')
viewer.switch_cursor(cursor_name)
def parse_combo(self, combo, modes_set, modifiers_set, pfx):
"""
Parse a string into a mode, a set of modifiers and a trigger.
"""
mode, mods, trigger = None, set([]), combo
if '+' in combo:
if combo.endswith('+'):
# special case: probably contains the keystroke '+'
trigger, combo = '+', combo[:-1]
if '+' in combo:
items = set(combo.split('+'))
else:
items = set(combo)
else:
# trigger is always specified last
items = combo.split('+')
trigger, items = items[-1], set(items[:-1])
if '*' in items:
items.remove('*')
# modifier wildcard
mods = '*'
else:
mods = items.intersection(modifiers_set)
mode = items.intersection(modes_set)
if len(mode) == 0:
mode = None
else:
mode = mode.pop()
if pfx is not None:
trigger = pfx + trigger
return (mode, mods, trigger)
    def setup_settings_events(self, viewer, bindmap):
        """Rebuild *bindmap* from the current settings dictionary.

        Scans settings entries by prefix: mod_ (modifier keys), cur_
        (cursors), btn_ (buttons) and dmod_ (modes), then delegates the
        kp_/ms_/sc_/pi_/pa_ event entries to merge_actions().
        """
        d = self.settings.get_dict()
        if len(d) == 0:
            # empty settings group -- fill it with the defaults first
            self.initialize_settings(self.settings)
            d = self.settings.get_dict()

        # First scan settings for buttons and modes
        bindmap.clear_button_map()
        bindmap.clear_modifier_map()
        bindmap.clear_mode_map()
        bindmap.clear_event_map()

        mode_type = self.settings.get('default_mode_type', 'oneshot')
        bindmap.set_default_mode_type(mode_type)

        for name, value in d.items():
            if name.startswith('mod_'):
                modname = name[4:]
                for combo in value:
                    # NOTE: for now no chorded combinations to make modifiers
                    keyname = combo
                    bindmap.add_modifier(keyname, modname)

            elif name.startswith('cur_'):
                curname = name[4:]
                self.add_cursor(viewer, curname, value)

            elif name.startswith('btn_'):
                btnname = name[4:]
                bindmap.map_button(value, btnname)

            elif name.startswith('dmod_'):
                mode_name = name[5:]
                # value is a (key, mode_type, cursor name) triple
                keyname, mode_type, curname = value
                bindmap.add_mode(keyname, mode_name, mode_type=mode_type,
                                 msg=None)
                if curname is not None:
                    self.cursor_map[mode_name] = curname

        self.merge_actions(viewer, bindmap, self, d.items())
    def merge_actions(self, viewer, bindmap, obj, tups):
        """Register event bindings from the (name, value) pairs in *tups*.

        Recognised name prefixes: kp_ (keyboard), ms_ (mouse), sc_
        (scroll), pi_ (pinch), pa_ (pan gesture), gs_ (deprecated).  Each
        combo string is parsed and mapped in *bindmap*; if *obj* has a
        method of the same name it is attached as the event callback on
        *viewer*.
        """
        modes_set = bindmap.get_modes()
        modifiers_set = bindmap.get_modifiers()

        # Add events
        for name, value in tups:
            if len(name) <= 3:
                continue
            pfx = name[:3]
            if pfx not in ('kp_', 'ms_', 'sc_', 'gs_', 'pi_', 'pa_'):
                continue

            evname = name[3:]
            for combo in value:
                mode, modifiers, trigger = self.parse_combo(combo, modes_set,
                                                            modifiers_set, pfx)
                if modifiers == '*':
                    # wildcard; register for all modifier combinations
                    modifiers_poss = set([])
                    for i in range(len(modifiers_set) + 1):
                        modifiers_poss = modifiers_poss.union(
                            itertools.combinations(modifiers_set, i))
                    for modifiers in modifiers_poss:
                        bindmap.map_event(mode, modifiers, trigger, evname)
                else:
                    bindmap.map_event(mode, modifiers, trigger, evname)

            # Register for this symbolic event if we have a handler for it
            try:
                cb_method = getattr(obj, name)
            except AttributeError:
                # Do we need a warning here?
                #self.logger.warning("No method found matching '%s'" % (name))
                cb_method = None

            if pfx == 'kp_':
                # keyboard event
                event = 'keydown-%s' % (evname)
                viewer.enable_callback(event)
                if cb_method:
                    viewer.add_callback(event, cb_method)

            elif pfx == 'ms_':
                # mouse/button event; register press, drag and release
                for action in ('down', 'move', 'up'):
                    event = '%s-%s' % (evname, action)
                    viewer.enable_callback(event)
                    if cb_method:
                        viewer.add_callback(event, cb_method)

            elif pfx == 'sc_':
                # scrolling event
                event = '%s-scroll' % evname
                viewer.enable_callback(event)
                if cb_method:
                    viewer.add_callback(event, cb_method)

            elif pfx == 'pi_':
                # pinch event
                event = '%s-pinch' % evname
                viewer.enable_callback(event)
                if cb_method:
                    viewer.add_callback(event, cb_method)

            elif pfx == 'pa_':
                # pan event
                event = '%s-pan' % evname
                viewer.enable_callback(event)
                if cb_method:
                    viewer.add_callback(event, cb_method)

            elif pfx == 'gs_':
                # for backward compatibility
                self.logger.warning("'gs_' bindings will be deprecated in a future "
                                    "version--please update your bindings.cfg")
                viewer.set_callback(evname, cb_method)
def reset(self, viewer):
    """Reset the viewer's bindmap mode and clear any onscreen message."""
    viewer.get_bindmap().reset_mode(viewer)
    viewer.onscreen_message(None)
def add_cursor(self, viewer, curname, curpath):
    """Register a named cursor with *viewer*.

    Relative paths are resolved against the package icon directory.
    """
    if not curpath.startswith('/'):
        curpath = os.path.join(icondir, curpath)
    viewer.define_cursor(curname, viewer.make_cursor(curpath, 8, 8))
##### ENABLERS #####
# These methods are a quick way to enable or disable certain user
# interface features in a ImageView window
def enable_pan(self, tf):
    """Enable the image to be panned interactively (True/False)."""
    # The can* flags are plain attributes; tf is stored as given (not coerced).
    self.canpan = tf

def enable_zoom(self, tf):
    """Enable the image to be zoomed interactively (True/False)."""
    self.canzoom = tf

def enable_cuts(self, tf):
    """Enable the cuts levels to be set interactively (True/False)."""
    self.cancut = tf

def enable_cmap(self, tf):
    """Enable the color map to be warped interactively (True/False)."""
    self.cancmap = tf

def enable_flip(self, tf):
    """Enable the image to be flipped interactively (True/False)."""
    self.canflip = tf

def enable_rotate(self, tf):
    """Enable the image to be rotated interactively (True/False)."""
    self.canrotate = tf
def enable(self, **kwdargs):
    """
    General enable function encompassing all user interface features.
    Usage (e.g.):
        viewer.enable(rotate=False, flip=True)

    Raises:
        ValueError: if a keyword is not a recognized feature name.
    """
    # BUG FIX: iterate items(), not the dict itself -- iterating a dict
    # yields only the keys, so `feat, value` unpacked each key *string*
    # (crashing, or silently splitting two-character keys).
    for feat, value in kwdargs.items():
        feat = feat.lower()
        if feat not in self.features:
            raise ValueError("'%s' is not a feature. Must be one of %s" % (
                feat, str(self.features)))
        attr = self.features[feat]
        setattr(self, attr, bool(value))
def enable_all(self, tf):
    """Enable or disable every registered user-interface feature at once."""
    flag = bool(tf)
    for attr_name in self.features.values():
        setattr(self, attr_name, flag)
##### Help methods #####
# Methods used by the callbacks to do actions.
def get_new_pan(self, viewer, win_x, win_y, ptype=1):
if ptype == 1:
# This is a "free pan", similar to dragging the "lens"
# over the canvas.
xy_mn, xy_mx = viewer.get_limits()
| |
import networkx as nx
from math import inf as INFINITY
test_input1 = """#######
#E..G.#
#...#.#
#.G.#G#
#######"""
test_input2 = """#######
#.E...#
#.....#
#...G.#
#######"""
test_move1 = """#########
#G..G..G#
#.......#
#.......#
#G..E..G#
#.......#
#.......#
#G..G..G#
#########"""
test_move2 = """#########
#.G...G.#
#...G...#
#...E..G#
#.G.....#
#.......#
#G..G..G#
#.......#
#########"""
test_move3 = """#########
#..G.G..#
#...G...#
#.G.E.G.#
#.......#
#G..G..G#
#.......#
#.......#
#########"""
test_move4 = """#########
#.......#
#..GGG..#
#..GEG..#
#G..G...#
#......G#
#.......#
#.......#
#########"""
test_attack1 = """G....
..G..
..EG.
..G..
...G."""
test_attack_move1 = """#######
#.G...#
#...EG#
#.#.#G#
#..G#E#
#.....#
#######"""
test_attack_move2_hps = (200, 197, 197, 200, 197, 197)
test_attack_move2 = """#######
#..G..#
#...EG#
#.#G#G#
#...#E#
#.....#
#######"""
test_attack_move3_hps = (200, 200, 188, 194, 194, 194)
test_attack_move3 = """#######
#...G.#
#..GEG#
#.#.#G#
#...#E#
#.....#
#######"""
test_attack_move23_hps = (200, 200, 131, 131, 131)
test_attack_move23 = """#######
#...G.#
#..G.G#
#.#.#G#
#...#E#
#.....#
#######"""
test_attack_move47_hps = (200, 131, 59, 200)
test_attack_move47 = """#######
#G....#
#.G...#
#.#.#G#
#...#.#
#....G#
#######"""
# The four orthogonal grid moves: down, up, right, left.
FOUR_DIRS = [[1, 0], [-1, 0], [0, 1], [0, -1]]

# PEP 8 (E731): a def instead of an assigned lambda, so tracebacks name it.
def SORT_BY_TRC(t):
    """Sort key: the (type, row, col) prefix of a creature tuple like ('G', 1, 3, 200)."""
    return t[0:3]
def parse_input(lines):
    """Parse puzzle lines into (creatures, game_map).

    creatures: list of (type, row, col, 200) tuples, one per 'G'/'E' tile.
    game_map:  set of walkable (row, col) cells ('.', 'G' and 'E' tiles).
    """
    walkable = set()
    units = []
    for row_idx, raw in enumerate(lines):
        for col_idx, ch in enumerate(raw.strip()):
            if ch not in 'GE.':
                continue
            walkable.add((row_idx, col_idx))
            if ch != '.':
                units.append((ch, row_idx, col_idx, 200))
    return units, walkable
# unit tests parse_input: all three units present at full (200) hp
creatures, game_map = parse_input(test_input1.split())
assert all([('E', 1, 1, 200) in creatures, ('G', 1, 4, 200) in creatures, ('G', 3, 2, 200) in creatures])
def in_range(creature, creatures, game_map):
    """Return the open map cells orthogonally adjacent to any enemy of *creature*.

    Entries of creatures are (type, row, col, hp) tuples; game_map is the set
    of walkable (row, col) cells.  Cells occupied by any creature are excluded.
    The result may contain duplicates when a cell neighbours several enemies
    (harmless to callers, which deduplicate via dict keys or len()>0 checks).
    """
    creature_type = creature[0]
    # set instead of list: O(1) occupancy test instead of O(n) per neighbour
    occupied = {(r, c) for (_t, r, c, _hp) in creatures}
    cells = []
    for enemy_type, r, c, _hp in creatures:
        if enemy_type == creature_type:
            continue
        # same probe order as FOUR_DIRS: down, up, right, left
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            cell = (r + dr, c + dc)
            if cell not in occupied and cell in game_map:
                cells.append(cell)
    return cells
# unit tests in_range: elf at (1,1) and goblin at (1,4) see the expected open cells
creatures, game_map = parse_input(test_input1.split())
cells_in_range = in_range(('E', 1, 1, 200), creatures, game_map)
assert all([rc in cells_in_range for rc in [(1, 3), (1, 5), (2, 2), (3, 1), (3, 3), (2, 5)]])
cells_in_range = in_range(('G', 1, 4, 200), creatures, game_map)
assert all([rc in cells_in_range for rc in [(1, 2), (2, 1)]])
def choose_nearest_reachable(creature, cells_in_range, creatures, game_map):
    """Pick the movement target cell for *creature* per the combat rules.

    Builds a graph of the currently empty map cells (plus the creature's own
    position), computes which in-range cells are reachable, and among the
    nearest of them chooses the first in reading order (row, then column).

    Returns (reachable_cells, nearest_reachable_cells, chosen).
    Raises ValueError when none of the in-range cells is reachable.
    """
    # build a graph that includes only empty cells
    G = nx.Graph()
    occupied_cells = {(r, c) for (t, r, c, hp) in creatures}
    G.add_node(creature[1:3])
    G.add_nodes_from(cell for cell in game_map if cell not in occupied_cells)
    for (r, c) in list(G):
        for dr, dc in FOUR_DIRS:
            other = (r + dr, c + dc)
            if other in G:
                G.add_edge((r, c), other)
    # try to find a path to each candidate cell
    reachable_cells = {}
    for cell in cells_in_range:
        try:
            path = nx.shortest_path(G, creature[1:3], cell)
            reachable_cells[cell] = (len(path) - 1, path)
        except nx.NetworkXNoPath:
            pass
    if len(reachable_cells) == 0:
        raise ValueError('There is no path even though there is a free cell in range!')
    min_length = min(length for length, _path in reachable_cells.values())
    nearest_reachable_cells = [rc for rc, (length, _path) in reachable_cells.items()
                               if length == min_length]
    assert len(nearest_reachable_cells) > 0
    # BUG FIX: break ties in full reading order (row, col).  The old
    # `key=lambda item: item[0]` sorted on row only, leaving same-row ties
    # in dict-insertion order rather than column order.
    chosen = min(nearest_reachable_cells)
    assert min_length > 0
    return list(reachable_cells.keys()), nearest_reachable_cells, chosen
# unit tests choose_nearest_reachable: elf targets (1, 3), the first of the
# three equally-near reachable cells in reading order
creatures, game_map = parse_input(test_input1.split())
creature = ('E', 1, 1, 200)
cells_in_range = in_range(creature, creatures, game_map)
reachable_cells, nearest_reachable_cells, chosen = choose_nearest_reachable(creature, cells_in_range,
                                                                            creatures,
                                                                            game_map)
assert chosen == (1, 3)
assert all([rc in reachable_cells for rc in [(1, 3), (2, 2), (3, 1), (3, 3)]])
assert all([rc in nearest_reachable_cells for rc in [(1, 3), (2, 2), (3, 1)]])
def choose_next_pos(chosen, creature, creatures, game_map):
    """Flood-fill distances from *chosen*, then step to the creature's
    adjacent cell with the smallest distance (reading order breaks ties).

    Returns (dists, closest_cell) where dists maps map cells to their
    distance from *chosen*; occupied cells stay at INFINITY so they block
    the expansion.
    """
    occupied_cells = [(r, c) for (t, r, c, hp) in creatures]
    # seed: every occupied cell blocks (INFINITY); the target costs 0
    dists = {cell: INFINITY for cell in occupied_cells}
    dists[chosen] = 0
    while len(dists) < len(game_map):
        new_dists = {}
        for cell, val in dists.items():
            if val < INFINITY:
                (r, c) = cell
                for dr, dc in FOUR_DIRS:
                    other = (r + dr, c + dc)
                    if other not in dists and other in game_map:
                        new_dists[other] = val + 1
        if len(new_dists) == 0:
            # nothing gained a distance this pass: remaining cells unreachable
            break
        else:
            dists.update(new_dists)
    t, r, c, hp = creature
    # candidate steps: the creature's four neighbours that got a distance
    # NOTE(review): this can include INFINITY-seeded occupied cells; callers
    # only reach here when a finite-distance step exists, so min() avoids them
    adjacent = {(r + dr, c + dc): dists[(r + dr, c + dc)] for dr, dc in FOUR_DIRS
                if (r + dr, c + dc) in dists}
    assert len(adjacent) > 0
    closest_dist = min(adjacent.values())
    # sorted() over the tie dict yields keys in (row, col) reading order
    closest_cell = sorted({k: v for k, v in adjacent.items() if v == closest_dist})[0]
    return dists, closest_cell
# unit tests choose_next_pos: elf steps right, toward the chosen cell (2, 4)
creatures, game_map = parse_input(test_input2.split())
creature = ('E', 1, 2, 200)
cells_in_range = in_range(creature, creatures, game_map)
reachable_cells, nearest_reachable_cells, chosen = choose_nearest_reachable(creature, cells_in_range,
                                                                            creatures,
                                                                            game_map)
assert chosen == (2, 4)
dists, next_pos = choose_next_pos(chosen, creature, creatures, game_map)
assert next_pos == (1, 3)
assert dists[(1, 1)] == 4
def render(creatures, game_map):
    """Print the battle grid: '#' off-map, '.' empty, 'G'/'E' for creatures,
    one row per line plus a trailing blank line."""
    unit_at = {(r, c): kind for (kind, r, c, _hp) in creatures}
    max_r = max(r for r, _c in game_map)
    max_c = max(c for _r, c in game_map)
    for r in range(max_r + 1):
        row_chars = []
        for c in range(max_c + 1):
            if (r, c) not in game_map:
                row_chars.append('#')
            else:
                row_chars.append(unit_at.get((r, c), '.'))
        print(''.join(row_chars))
    print()
def in_contact_with_enemy(creature, creatures):
    """Return (any_adjacent, adjacent_enemies) for *creature*.

    An enemy is adjacent when it occupies one of the four orthogonal
    neighbour cells; the list preserves the FOUR_DIRS probe order.
    """
    kind, row, col, _hp = creature
    enemy_at = {(cr[1], cr[2]): cr for cr in creatures if cr[0] != kind}
    adjacent = []
    for dr, dc in FOUR_DIRS:
        hit = enemy_at.get((row + dr, col + dc))
        if hit is not None:
            adjacent.append(hit)
    return len(adjacent) > 0, adjacent
def move_only(creatures, game_map):
    """One movement-only combat round (no attacks); used by the movement tests.

    Creatures act in reading order of their pre-round positions; a creature
    already adjacent to an enemy stays put.  Returns the updated list.
    NOTE(review): unlike move_and_attack, a blocked path lets
    choose_nearest_reachable's ValueError propagate.
    """
    new_creatures = creatures[:]
    # act in reading order (row, then column) of the starting positions
    move_order = sorted(creatures, key=lambda items: (items[1], items[2]))
    for creature in move_order:
        t, r, c, hp = creature
        is_in_contact, enemies_in_contact = in_contact_with_enemy(creature, new_creatures)
        if not is_in_contact:
            cells_in_range = in_range(creature, new_creatures, game_map)
            if len(cells_in_range) > 0:
                reachable_cells, nearest_reachable_cells, chosen = choose_nearest_reachable(creature, cells_in_range,
                                                                                            new_creatures,
                                                                                            game_map)
                dists, next_pos = choose_next_pos(chosen, creature, new_creatures, game_map)
                new_creature = (t, *next_pos, hp)
                new_creatures.remove(creature)
                new_creatures.append(new_creature)
            else:
                # print(f"creature {creature} cannot move since no more cells in range")
                pass
        else:
            # already at contact, no move to do
            # print(f"creature {creature} is in contact")
            pass
    return new_creatures
# unit tests move_only against the three sample movement rounds
creatures_step1, game_map = parse_input(test_move1.split())
creatures_step2, game_map = parse_input(test_move2.split())
creatures_step2_pred = move_only(creatures_step1, game_map)
assert sorted(creatures_step2_pred) == sorted(creatures_step2)
creatures_step3, game_map = parse_input(test_move3.split())
creatures_step3_pred = move_only(creatures_step2, game_map)
assert sorted(creatures_step3_pred, key=SORT_BY_TRC) == sorted(creatures_step3, key=SORT_BY_TRC)
creatures_step4, game_map = parse_input(test_move4.split())
creatures_step4_pred = move_only(creatures_step3, game_map)
assert sorted(creatures_step4_pred) == sorted(creatures_step4)
def battle_finished(creatures):
    """True when only a single creature type ('G' or 'E') remains."""
    survivor_types = {kind for kind, *_rest in creatures}
    return len(survivor_types) == 1
def move_and_attack(creatures, game_map, elve_attack_power=3):
    """Play one full combat round.

    Each creature, in reading order of its pre-round position, moves one
    step toward the nearest enemy-adjacent cell if not already in contact,
    then attacks the weakest adjacent enemy (reading order breaks hp ties).
    Goblins always hit for 3; elves hit for *elve_attack_power*.

    Returns (new_creatures, finished_during_turn) where the flag records
    that one side was already wiped out before some creature's attack phase
    (i.e. the round did not complete).
    """
    new_creatures = creatures[:]  # let's work on a copy of creatures
    move_order = sorted(creatures, key=lambda items: (items[1], items[2]))
    dead_creatures = []
    finished_during_turn = False
    for creature in move_order:
        # move phase
        # ==========
        # skip creatures killed earlier this round; they have not moved yet,
        # so their (type, row, col) prefix still matches the pre-round tuple
        if creature[:3] in dead_creatures:
            continue
        t, r, c, hp = creature
        is_in_contact, enemies_in_contact = in_contact_with_enemy(creature, new_creatures)
        if not is_in_contact:
            cells_in_range = in_range(creature, new_creatures, game_map)
            if len(cells_in_range) > 0:
                try:
                    reachable_cells, nearest_reachable_cells, chosen = choose_nearest_reachable(creature,
                                                                                                cells_in_range,
                                                                                                new_creatures,
                                                                                                game_map)
                    dists, next_pos = choose_next_pos(chosen, creature, new_creatures, game_map)
                    new_creature = (t, *next_pos, hp)
                    new_creatures.remove(creature)
                    new_creatures.append(new_creature)
                    # we overwrite creature for the attack move
                    creature = new_creature
                except ValueError:
                    # no reachable in-range cell: stay put this round
                    pass
            else:
                #print(f"[MOVE] creature {creature} cannot move since no more cells in range")
                pass
        else:
            # already at contact, no move to do
            pass
            #print(f"[MOVE] creature {creature} is already in contact with enemy")
        # attack phase
        # ============
        # flag a wipe-out that happened before this creature could attack
        if battle_finished(new_creatures):
            finished_during_turn = True
        is_in_contact, enemies_in_contact = in_contact_with_enemy(creature, new_creatures)
        if is_in_contact:
            #print(f"[ATTACK] creature {creature} can attack {len(enemies_in_contact)} enemies")
            # target selection: lowest hp first, then reading order
            selected_target = min(enemies_in_contact, key=lambda item: (item[3], item[1], item[2]))
            if creature[0] == 'E':
                new_hp = selected_target[3] - elve_attack_power
            else:
                new_hp = selected_target[3] - 3
            new_creatures.remove(selected_target)
            if new_hp > 0:
                new_creatures.append(selected_target[:3] + (new_hp,))
            else:
                dead_creatures.append(selected_target[:3])
    return new_creatures, finished_during_turn
# unit test simple combat: the weakest adjacent goblin (hp 2 at (2, 3)) dies
creatures, game_map = parse_input(test_attack1.split())
creatures = [('G', 0, 0, 9), ('G', 1, 2, 4), ('E', 2, 2, 200), ('G', 2, 3, 2),
             ('G', 3, 2, 2), ('G', 4, 3, 1)]
creatures_after_attack, __ = move_and_attack(creatures, game_map)
assert len(creatures_after_attack) == len(creatures) - 1
# unit test complex combat
def parse_input_and_hp(lines, hps):
    """Parse *lines*, then overwrite creature hit points from *hps* in order."""
    parsed, walkable = parse_input(lines)
    patched = [(kind, row, col, hp) for (kind, row, col, _old), hp in zip(parsed, hps)]
    return patched, walkable
# unit tests move_and_attack against sample rounds 1-3, then 23 and 47
creatures, game_map = parse_input(test_attack_move1.split())
creatures_attack2, game_map = parse_input_and_hp(test_attack_move2.split(), test_attack_move2_hps)
creatures_attack2_pred, __ = move_and_attack(creatures, game_map)
assert sorted(creatures_attack2, key=SORT_BY_TRC) == sorted(creatures_attack2_pred, key=SORT_BY_TRC)
creatures_attack3, game_map = parse_input_and_hp(test_attack_move3.split(), test_attack_move3_hps)
creatures_attack3_pred, __ = move_and_attack(creatures_attack2_pred, game_map)
assert sorted(creatures_attack3, key=SORT_BY_TRC) == sorted(creatures_attack3_pred, key=SORT_BY_TRC)
creatures = creatures_attack3_pred[:]
# advance 21 more rounds: round 2 -> round 23
for _ in range(21):
    creatures, __ = move_and_attack(creatures, game_map)
creatures_attack23, game_map = parse_input_and_hp(test_attack_move23.split(), test_attack_move23_hps)
assert sorted(creatures_attack23, key=SORT_BY_TRC) == sorted(creatures, key=SORT_BY_TRC)
# advance 24 more rounds: round 23 -> round 47
for _ in range(24):
    creatures, finished_during_turn = move_and_attack(creatures, game_map)
creatures_attack47, game_map = parse_input_and_hp(test_attack_move47.split(), test_attack_move47_hps)
assert sorted(creatures_attack47, key=SORT_BY_TRC) == sorted(creatures, key=SORT_BY_TRC)
# global tests: full battles checked against known outcomes
test_battle1 = """#######
#G..#E#
#E#E.E#
#G.##.#
#...#E#
#...E.#
#######"""
def sum_hit_points(creatures):
    """Total remaining hit points across all creatures."""
    total = 0
    for *_front, hp in creatures:
        total += hp
    return total
# full battle on sample 1: 47 complete rounds, outcome 47 * 590 = 27730
creatures, game_map = parse_input(test_attack_move1.split())
n_turns = 0
while not battle_finished(creatures):
    creatures, finished_during_turn = move_and_attack(creatures, game_map)
    n_turns += 1
outcome = n_turns * sum_hit_points(creatures)
assert n_turns == 47 and outcome == 27730
# full battle on sample 2: the final, partial round is not counted
creatures, game_map = parse_input(test_battle1.split())
n_turns = 0
while not battle_finished(creatures):
    creatures, finished_during_turn = move_and_attack(creatures, game_map)
    n_turns += 1
if finished_during_turn:
    n_turns -= 1
outcome = n_turns * sum_hit_points(creatures)
assert n_turns == 37 and outcome == 36334
# second full-battle sample; its evaluation loop continues past this excerpt
test_battle2 = """#######
#E..EG#
#.#G.E#
#E.##E#
#G..#.#
#..E#.#
#######"""
creatures, game_map = parse_input(test_battle2.split())
n_turns | |
# <gh_stars>0  (scraper artifact, not valid Python -- commented out)
import sys
import math
from abc import ABC
from functools import lru_cache
import numpy as np
import quaternion # adds to numpy
from astropy.time import Time
from astropy import constants as const
from astropy import units
from astropy.coordinates import SkyCoord
import configparser
from iotools import objloader
from settings import *
from algo import tools
class Parameter():
    """A bounded scalar parameter with optional ground truth and change events.

    Holds a value constrained to [min_val, max_val), an optional real
    (ground-truth) value, and an optional change_callback fired when the
    value or the range changes.  is_gl_z switches the normalized
    representation to an inverse-depth parametrization suitable for an
    OpenGL z coordinate.
    """
    def __init__(self, min_val, max_val, def_val=None, estimate=True, is_gl_z=False):
        self._min_val = min_val
        self._max_val = max_val
        self.estimate = estimate          # participates in estimation?
        self._def_val = def_val           # None => midpoint of the range
        self._value = self.def_val
        self.is_gl_z = is_gl_z
        self.real_value = None            # ground truth, if known
        self.change_callback = None       # fn(value) / fn(value, min, max)
        self.fire_change_events = True
        self.debug = False

    @property
    def range(self):
        """(min_val, max_val) bounds."""
        return (self._min_val, self._max_val)

    @range.setter
    def range(self, range):
        min_val, max_val = range
        # BUG FIX: compare against the OLD bounds *before* overwriting them;
        # previously the new values were compared with themselves, so the
        # change callback could never fire.
        # NOTE: need fine rtol as time is in seconds (e.g. 1407258438)
        changed = not np.isclose(self._min_val, min_val, rtol=1e-9) \
                  or not np.isclose(self._max_val, max_val, rtol=1e-9)
        self._min_val = min_val
        self._max_val = max_val
        if changed and self.fire_change_events:
            try:
                self.change_callback(self._value, self._min_val, self._max_val)
            except TypeError:
                # change_callback is None
                pass

    @property
    def scale(self):
        """Width of the allowed range."""
        return abs(self._max_val - self._min_val)

    @property
    def def_val(self):
        """Default value: the explicit one, or the range midpoint."""
        return (self._min_val + self._max_val)/2 \
            if self._def_val is None \
            else self._def_val

    @def_val.setter
    def def_val(self, def_val):
        self._def_val = def_val

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        # BUG FIX: keep the old value for the change test; previously the
        # comparison ran after the assignment (new vs. new), so it always
        # succeeded and the change callback could never fire.
        old_value = self._value
        self._value = value
        if self.debug:
            print('o: %s, n: %s' % (old_value, value), flush=True)
        # NOTE: need fine rtol as time is in seconds (e.g. 1407258438)
        if not np.isclose(old_value, value, rtol=1e-9):
            if self.debug:
                print('value set: %s' % self._value, flush=True)
            if self.fire_change_events:
                try:
                    self.change_callback(value)
                except TypeError:
                    # change_callback is None
                    pass

    @property
    def nvalue(self):
        """Range-normalized value (inverse-depth form when is_gl_z)."""
        if self.is_gl_z:
            scale = abs(1/self._min_val - 1/self._max_val)
            offset = (1/self._min_val + 1/self._max_val)/2
            return (-1/(self._value or 1e-6) + offset)/scale
        return (self._value - self.def_val)/self.scale

    @nvalue.setter
    def nvalue(self, nvalue):
        if self.is_gl_z:
            scale = abs(1/self._min_val - 1/self._max_val)
            offset = (1/self._min_val + 1/self._max_val)/2
            self.value = -1/((nvalue or 1e-6)*scale - offset)
        else:
            self.value = nvalue*self.scale + self.def_val

    def valid(self):
        """True when the value lies in [min_val, max_val)."""
        return self._value >= self._min_val and self._value < self._max_val

    def __str__(self):
        return '%.2f (%.2f) in [%.2f, %.2f]' % (
            self._value,
            self.real_value if self.real_value is not None else float('nan'),
            self._min_val,
            self._max_val,
        )
class SystemModel(ABC):
    """Abstract model of the spacecraft / asteroid / camera system state."""
    # Reference-frame identifiers used when converting quaternions between frames.
    (
        OPENGL_FRAME,
        SPACECRAFT_FRAME,
        ASTEROID_FRAME,
        OPENCV_FRAME,
    ) = range(4)
    # from sc cam frame (axis: +x, up: +z) to opengl (axis -z, up: +y)
    sc2gl_q = np.quaternion(0.5, 0.5, -0.5, -0.5)
    # from opencv cam frame (axis: +z, up: -y) to opengl (axis -z, up: +y)
    cv2gl_q = np.quaternion(0, 1, 0, 0)
def __init__(self, asteroid, camera, limits, *args, **kwargs):
    """Set up mission limits and the estimated pose/orientation parameters.

    limits: (min_distance, min_med_distance, max_med_distance, max_distance,
    min_elong [deg], min_time [astropy.time.Time]).  Extra kwargs override
    attributes before the parameter defaults are (re)applied.
    """
    super(SystemModel, self).__init__()
    self.asteroid = asteroid
    self.cam = camera
    # mission limits
    (
        self.min_distance,      # min_distance in km
        self.min_med_distance,  # min_med_distance in km
        self.max_med_distance,  # max_med_distance in km
        self.max_distance,      # max_distance in km
        self.min_elong,         # min_elong in deg
        self.min_time           # min time instant as astropy.time.Time
    ) = limits
    # sanity check: closest approach must stay above the asteroid surface
    assert self.min_altitude > 0, \
        'min distance %.2fkm too small, possible collision as asteroid max_radius=%.0fm'%(self.min_distance, self.asteroid.max_radius)
    self.mission_id = None  # overridden by particular missions
    self.view_width = VIEW_WIDTH
    # spacecraft position relative to asteroid, z towards spacecraft,
    # x towards right when looking out from s/c camera, y up
    self.x_off = Parameter(-4, 4, estimate=False)
    self.y_off = Parameter(-4, 4, estimate=False)
    # whole view: 1.65km/tan(2.5deg) = 38km
    # can span ~30px: 1.65km/tan(2.5deg * 30/1024) = 1290km
    self.z_off = Parameter(-self.max_distance, -self.min_distance, def_val=-self.min_med_distance, is_gl_z=True)  # was 120, 220
    # spacecraft orientation relative to stars
    self.x_rot = Parameter(-90, 90, estimate=False)  # axis latitude
    self.y_rot = Parameter(-180, 180, estimate=False)  # axis longitude
    self.z_rot = Parameter(-180, 180, estimate=False)  # rotation
    # asteroid zero orientation relative to stars
    self.ast_x_rot = Parameter(-90, 90, estimate=False)  # axis latitude
    self.ast_y_rot = Parameter(-180, 180, estimate=False)  # axis longitude
    self.ast_z_rot = Parameter(-180, 180, estimate=False)  # rotation
    self.asteroid_rotation_from_model()
    # time in seconds since 1970-01-01 00:00:00, spanning one rotation period
    self.time = Parameter(
        self.min_time.unix,
        self.min_time.unix + self.asteroid.rotation_period,
        estimate=False
    )
    # override any default params
    for n, v in kwargs.items():
        setattr(self, n, v)
    # set default values to params
    for n, p in self.get_params():
        p.value = p.def_val
@property
def min_altitude(self):
    """Clearance above the asteroid surface at min_distance, in km."""
    radius_km = self.asteroid.max_radius / 1000
    return self.min_distance - radius_km

@property
def view_height(self):
    """View height in pixels, preserving the camera's aspect ratio."""
    return int(self.cam.height * self.view_width / self.cam.width)

def get_params(self, all=False):
    """Yield (name, Parameter) pairs in name order.

    Only parameters flagged for estimation are yielded unless all=True.
    """
    for name in sorted(self.__dict__):
        param = getattr(self, name)
        if isinstance(param, Parameter) and (all or param.estimate):
            yield name, param

def param_change_events(self, enabled):
    """Globally enable/disable change-event firing on every parameter."""
    for _name, param in self.get_params(all=True):
        param.fire_change_events = enabled
@property
def spacecraft_pos(self):
    """Spacecraft position (x, y, z) relative to the asteroid, in km."""
    return self.x_off.value, self.y_off.value, self.z_off.value

@spacecraft_pos.setter
def spacecraft_pos(self, pos):
    x, y, z = pos
    self.z_off.value = z
    # x/y search ranges scale with distance: +-4 units at 170 km
    half_range = abs(z / 170 * 4)
    self.x_off.range = (x - half_range, x + half_range)
    self.x_off.value = x
    self.y_off.range = (y - half_range, y + half_range)
    self.y_off.value = y

@property
def spacecraft_rot(self):
    """Spacecraft orientation angles (x, y, z) in degrees."""
    return self.x_rot.value, self.y_rot.value, self.z_rot.value

@spacecraft_rot.setter
def spacecraft_rot(self, r):
    x, y, z = r
    self.x_rot.value = x
    self.y_rot.value = y
    self.z_rot.value = z

@property
def asteroid_axis(self):
    """Asteroid axis/rotation parameter values (degrees)."""
    return self.ast_x_rot.value, self.ast_y_rot.value, self.ast_z_rot.value

@asteroid_axis.setter
def asteroid_axis(self, r):
    x, y, z = r
    self.ast_x_rot.value = x
    self.ast_y_rot.value = y
    self.ast_z_rot.value = z
    self.update_asteroid_model()

@property
def spacecraft_dist(self):
    """Euclidean distance from the asteroid to the spacecraft, in km."""
    x, y, z = self.spacecraft_pos
    return math.sqrt(x**2 + y**2 + z**2)
@property
def spacecraft_altitude(self):
    """Minimum projection of the shape-model vertices onto the s/c direction."""
    sc_dir = tools.normalize_v(np.array(self.spacecraft_pos))
    vertices = self.sc_asteroid_vertices()
    return np.min(sc_dir.dot(vertices.T))

@property
def real_spacecraft_altitude(self):
    """Like spacecraft_altitude, but computed from the ground-truth state."""
    sc_dir = tools.normalize_v(np.array(self.real_spacecraft_pos))
    vertices = self.sc_asteroid_vertices(real=True)
    return np.min(sc_dir.dot(vertices.T))

def asteroid_rotation_from_model(self):
    """Copy the asteroid's axis/prime-meridian state into the rotation params."""
    ast = self.asteroid
    self.ast_x_rot.value = math.degrees(ast.axis_latitude)
    self.ast_y_rot.value = math.degrees(ast.axis_longitude)
    # wrap the prime-meridian angle into [-180, 180)
    self.ast_z_rot.value = (math.degrees(ast.rotation_pm) + 180) % 360 - 180

def update_asteroid_model(self):
    """Push the rotation parameters back into the asteroid model (radians)."""
    ast = self.asteroid
    ast.axis_latitude = math.radians(self.ast_x_rot.value)
    ast.axis_longitude = math.radians(self.ast_y_rot.value)
    ast.rotation_pm = math.radians(self.ast_z_rot.value)
def pixel_extent(self, distance=None):
    """Apparent asteroid diameter in pixels at *distance* (km).

    Defaults to the current z-offset distance.
    """
    if distance is None:
        # BUG FIX: z_off is a Parameter object; abs() must be applied to its
        # numeric .value (abs(self.z_off) raised TypeError).
        distance = abs(self.z_off.value)
    return self.cam.width * math.atan(self.asteroid.mean_radius/1000/distance)*2 / math.radians(self.cam.x_fov)
@property
def real_spacecraft_pos(self):
    """Ground-truth spacecraft position (x, y, z), in km."""
    return self.x_off.real_value, self.y_off.real_value, self.z_off.real_value

@real_spacecraft_pos.setter
def real_spacecraft_pos(self, rv):
    x, y, z = rv
    self.x_off.real_value = x
    self.y_off.real_value = y
    self.z_off.real_value = z

@property
def real_spacecraft_rot(self):
    """Ground-truth spacecraft orientation angles (degrees)."""
    return self.x_rot.real_value, self.y_rot.real_value, self.z_rot.real_value

@real_spacecraft_rot.setter
def real_spacecraft_rot(self, rv):
    x, y, z = rv
    self.x_rot.real_value = x
    self.y_rot.real_value = y
    self.z_rot.real_value = z

@property
def real_asteroid_axis(self):
    """Ground-truth asteroid axis/rotation angles (degrees)."""
    return self.ast_x_rot.real_value, self.ast_y_rot.real_value, self.ast_z_rot.real_value

@real_asteroid_axis.setter
def real_asteroid_axis(self, rv):
    # NOTE: intentionally does not sync the asteroid model (unlike asteroid_axis)
    x, y, z = rv
    self.ast_x_rot.real_value = x
    self.ast_y_rot.real_value = y
    self.ast_z_rot.real_value = z

@property
def spacecraft_q(self):
    """Spacecraft orientation quaternion from the rotation parameters."""
    angles = (self.x_rot.value, self.y_rot.value, self.z_rot.value)
    return tools.ypr_to_q(*[math.radians(a) for a in angles])

@spacecraft_q.setter
def spacecraft_q(self, new_q):
    degs = [math.degrees(a) for a in tools.q_to_ypr(new_q)]
    self.x_rot.value, self.y_rot.value, self.z_rot.value = degs

@property
def real_spacecraft_q(self):
    """Ground-truth spacecraft orientation quaternion."""
    angles = (self.x_rot.real_value, self.y_rot.real_value, self.z_rot.real_value)
    return tools.ypr_to_q(*[math.radians(a) for a in angles])

@real_spacecraft_q.setter
def real_spacecraft_q(self, new_q):
    degs = [math.degrees(a) for a in tools.q_to_ypr(new_q)]
    self.x_rot.real_value, self.y_rot.real_value, self.z_rot.real_value = degs
@property
def asteroid_q(self):
    """Asteroid orientation quaternion at the current (estimated) time."""
    return self.asteroid.rotation_q(self.time.value)

@asteroid_q.setter
def asteroid_q(self, new_q):
    # Recover axis latitude/longitude and rotation angle so that the
    # asteroid model reproduces new_q at the current time.
    ast = self.asteroid
    sc2ast_q = SystemModel.frm_conv_q(SystemModel.SPACECRAFT_FRAME, SystemModel.ASTEROID_FRAME, ast=ast)
    ast.axis_latitude, ast.axis_longitude, new_theta = tools.q_to_ypr(new_q * sc2ast_q)
    old_theta = ast.rotation_theta(self.time.value)
    # shift the prime meridian by the rotation-angle difference
    ast.rotation_pm = tools.wrap_rads(ast.rotation_pm + new_theta - old_theta)
    self.asteroid_rotation_from_model()
@property
def real_asteroid_q(self):
    """Ground-truth asteroid quaternion (real axis values, real time)."""
    saved_axis = self.asteroid_axis
    self.asteroid_axis = self.real_asteroid_axis
    q = self.asteroid.rotation_q(self.time.real_value)
    self.asteroid_axis = saved_axis
    return q

@real_asteroid_q.setter
def real_asteroid_q(self, new_q):
    saved_axis = self.asteroid_axis
    self.asteroid_axis = self.real_asteroid_axis
    self.asteroid_q = new_q
    self.asteroid_axis = saved_axis
def gl_sc_asteroid_rel_q(self, discretize_tol=False):
    """ rotation of asteroid relative to spacecraft in opengl coords """
    assert not discretize_tol, 'discretize_tol deprecated at gl_sc_asteroid_rel_q function'
    self.update_asteroid_model()
    sc_ast_rel_q = SystemModel.sc2gl_q.conj() * self.sc_asteroid_rel_q()
    # NOTE(review): dead branch -- the assert above guarantees discretize_tol
    # is falsy, so err_q is never assigned (and never evaluated in the return)
    if discretize_tol:
        qq, _ = tools.discretize_q(sc_ast_rel_q, discretize_tol)
        err_q = sc_ast_rel_q * qq.conj()
        sc_ast_rel_q = qq
    if not BATCH_MODE and DEBUG:
        print('asteroid x-axis: %s'%tools.q_times_v(sc_ast_rel_q, np.array([1, 0, 0])))
    # effectively returns (sc_ast_rel_q, False) given the assert above
    return sc_ast_rel_q, err_q if discretize_tol else False
def sc_asteroid_rel_q(self, time=None):
    """ rotation of asteroid relative to spacecraft in spacecraft coords """
    # BUG FIX: test for None explicitly; `time or self.time.value` silently
    # discarded a valid time of exactly 0 (the unix epoch).
    if time is None:
        time = self.time.value
    ast_q = self.asteroid.rotation_q(time)
    sc_q = self.spacecraft_q
    return sc_q.conj() * ast_q
def real_sc_asteroid_rel_q(self):
    """Ground-truth asteroid-relative-to-spacecraft rotation (s/c coords).

    Temporarily swaps in the real orientation values, then restores them.
    """
    saved_sc_rot = self.spacecraft_rot
    saved_ast_axis = self.asteroid_axis
    self.spacecraft_rot = self.real_spacecraft_rot
    self.asteroid_axis = self.real_asteroid_axis
    q_tot = self.sc_asteroid_rel_q(time=self.time.real_value)
    self.spacecraft_rot = saved_sc_rot
    self.asteroid_axis = saved_ast_axis
    return q_tot

def rotate_spacecraft(self, q):
    """Apply rotation q to the current spacecraft attitude."""
    ypr = tools.q_to_ypr(self.spacecraft_q * q)
    self.x_rot.value, self.y_rot.value, self.z_rot.value = \
        [math.degrees(a) for a in ypr]

def rotate_asteroid(self, q):
    """ rotate asteroid in spacecraft frame """
    # global rotation q on asteroid in sc frame, followed by local rotation to asteroid frame
    self.asteroid_q = q * self.asteroid.rotation_q(self.time.value)

def reset_to_real_vals(self):
    """Copy every parameter's ground-truth value over its estimated value."""
    for name, param in self.get_params(True):
        assert param.real_value is not None, 'real value missing for %s' % name
        param.value = param.real_value

def swap_values_with_real_vals(self):
    """Exchange estimated and ground-truth values for every parameter."""
    for name, param in self.get_params(True):
        assert param.real_value is not None, 'real value missing for %s' % name
        assert param.value is not None, 'current value missing %s' % name
        param.value, param.real_value = param.real_value, param.value

def calc_shift_err(self):
    """Maximum shape-model vertex shift between estimated and real poses."""
    estimated = self.sc_asteroid_vertices()
    self.swap_values_with_real_vals()
    target = self.sc_asteroid_vertices()
    self.swap_values_with_real_vals()
    return tools.sc_asteroid_max_shift_error(estimated, target)

def sc_asteroid_vertices(self, real=False):
    """ asteroid vertices rotated and translated to spacecraft frame """
    shape = self.asteroid.real_shape_model
    if shape is None:
        return None
    if real:
        rel_q, sc_pos = self.real_sc_asteroid_rel_q(), self.real_spacecraft_pos
    else:
        rel_q, sc_pos = self.sc_asteroid_rel_q(), self.spacecraft_pos
    return tools.q_times_mx(rel_q, np.array(shape.vertices)) + sc_pos
def gl_light_rel_dir(self, err_q=False, discretize_tol=False):
""" direction of light relative to spacecraft in opengl coords """
assert not discretize_tol, 'discretize_tol deprecated at gl_light_rel_dir function'
light_v, err_angle = self.light_rel_dir(err_q=False, discretize_tol=False)
err_q = | |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0982332,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.74446,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0235332,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.221173,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.131048,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0544994,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.0879055,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0443717,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.186777,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.042239,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.13398,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0247578,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00228595,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0251794,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.016906,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0499372,
'Execution Unit/Register Files/Runtime Dynamic': 0.0191919,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0589305,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.160702,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.99322,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 4.28697e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 4.28697e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 3.74735e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 1.45799e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000242856,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000366069,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000406243,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0162522,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.03378,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0389039,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0551997,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.30246,
'Instruction Fetch Unit/Runtime Dynamic': 0.111128,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0449029,
'L2/Runtime Dynamic': 0.0125577,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.92245,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.345803,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0221721,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0221722,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.02715,
'Load Store Unit/Runtime Dynamic': 0.477321,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0546727,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.109346,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0194035,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0200754,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0642763,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00638556,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.253717,
'Memory Management Unit/Runtime Dynamic': 0.0264609,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.3517,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0651267,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00325144,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0268901,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
as a 'short text file')
@param fileName
@param obj the file type. Currently we support these file types (as defined
internally by Praat):
- Harmonicity 2
- PitchTier
- Intensity
- SpectrumTier
- Spectrum 2
- Cepstrum 1
@return a two-dimensional array of floats, the first row
(index = 0) representing the time offsets of data values, and the
second row representing the detected fundamental frequency values
"""
file = open(fileName, "r")
cnt = 0
numDataPoints = 0
offset = 0
dataX = []
dataY = []
dataIdx = 0
timeStep = 0
timeOffset = 0
arrFileTypes = [
'Harmonicity 2', 'PitchTier', 'Intensity', 'SpectrumTier', \
'Spectrum 2', 'Cepstrum 1'
]
if not obj in arrFileTypes:
raise Exception('readPraatShortTextFile - file type must be: '
+ ', '.join(arrFileTypes))
metaData = []
for line in file:
line = line.strip()
cnt += 1
#print cnt, line # debug information
if cnt > 6:
if obj == 'Harmonicity 2' or obj == 'Intensity 2':
if cnt > 13:
val = float(line)
if val > -100:
dataY.append(val)
else:
dataY.append(None)
dataX.append(timeOffset + float(dataIdx) * timeStep)
dataIdx += 1
else:
if cnt == 7:
timeStep = float(line)
if cnt == 8:
timeOffset = float(line)
else:
# read data here
if cnt % 2 == 0:
dataY.append(float(line))
dataIdx += 1
else:
dataX.append(float(line))
else:
if cnt > 3:
metaData.append(line)
# error checking and loop initialization
if cnt == 1:
if line != "File type = \"ooTextFile\"":
raise Exception ("file " + fileName \
+ " is not a Praat pitch" + " tier file")
if cnt == 2:
err = False
#print line
if obj == 'Harmonicity':
if line != "Object class = \"Harmonicity\"" \
and line != "Object class = \"Harmonicity 2\"":
err = True
elif obj == 'Intensity':
if line != "Object class = \"IntensityTier\"" \
and line != "Object class = \"Intensity 2\"":
err = True
else:
if line != "Object class = \"" + obj + "\"":
err = True
if err == True:
raise Exception ("file " + fileName + " is not a Praat "
+ obj + " file")
if cnt == 6:
if line[0:15] == 'points: size = ':
numDataPoints = int(line.split('=')[1].strip())
raise Exception (\
"only the 'short text file' type is supported. " \
+ " Save your Praat " + obj \
+ " with 'Write to short text file.")
else:
numDataPoints = int(line)
return (numpy.array(dataX), numpy.array(dataY), metaData)
######################################################################
class PraatFormants:
    """A class to store/process Praat formant ("Formant 2") data.

    After readFile() the object holds, per analysis frame, a time offset
    (arrX) and a list of {'frequency', 'bandwidth'} dictionaries, one per
    detected formant (arrData).
    """

    DO_DEBUG = False

    # ---------------------------------------------------------------------- #
    def __init__(self):
        self.clear()

    # ---------------------------------------------------------------------- #
    def clear(self):
        """
        resets the object's state
        """
        self.xmin = None   # start time of the analysed signal [s]
        self.xmax = None   # end time [s]
        self.nx = None     # number of frames
        self.dx = None     # frame step [s]
        self.x1 = None     # time offset of the first frame [s]
        self.arrX = []     # per-frame time offsets
        self.arrData = []  # per-frame lists of formant dictionaries

    # ---------------------------------------------------------------------- #
    def getNumFrames(self):
        """
        @return the number of frames
        """
        return self.nx

    # ---------------------------------------------------------------------- #
    def get(self, idx):
        """
        @param idx frame index, 0 <= idx < nx
        @return a tuple containing the time offset and the formant data at
            that particular temporal offset. the formant data is a list of
            dictionaries (one for each formant), the latter containing a
            'bandwidth' and a 'frequency' parameter (both indicated in Hz)
        @raise Exception if idx is out of range
        """
        if idx < 0 or idx >= self.nx:
            raise Exception("index out of range")
        return self.arrX[idx], self.arrData[idx]

    # ---------------------------------------------------------------------- #
    def decodeParam(self, txt, param, line=-1, fileName=''):
        """
        internally used ("pseudo-private") function used for reading Praat
        Formant files. Parses a 'paramName = paramValue' line.
        @param txt the text (i.e., line from a file) that is being parsed.
            must have the structure 'paramName = paramValue'
        @param param the expected parameter name
        @param line only used for reporting errors (in case an error
            actually arises during parsing txt)
        @param fileName only used for reporting errors (in case an error
            actually arises during parsing txt)
        @return a floating point value
        """
        data = txt.split('=')
        errMsg = ''
        if fileName != '':
            errMsg = ' of file "' + fileName + '"'
        if line > 0:
            errMsg = ' in line ' + str(line) + errMsg
        if len(data) != 2:
            raise Exception('cannot decode text "' + txt
                            + '" - invalid structure' + errMsg)
        if data[0].strip() != param:
            raise Exception('expected parameter "' + param
                            + '" but found "' + data[0].strip() + '"' + errMsg)
        return float(data[1])

    # ---------------------------------------------------------------------- #
    def readFile(self, fileName):
        """
        Reads a Praat "Formant 2" file, auto-detecting the short vs. long
        text format from line 4 (short files contain bare values, long files
        contain 'name = value' pairs).
        @todo bug when opening a "long text file"
        @todo refactor this code, it's ugly to look at (too many if
            statements and indentations)
        """
        cnt = 0
        isShortTextFile = False
        insideDataStructure = False
        dataCnt = 0
        intensity = None
        nFormants = None
        frequency = None
        bandwidth = None
        frameIdx = 0
        arrFormants = []
        self.clear()
        # context manager guarantees the file is closed even on parse errors
        with open(fileName) as f:
            for line in f:
                cnt += 1
                errMsg = ' in line ' + str(cnt) + ' of file "' + fileName + '"'
                txt = line.strip()
                if cnt == 1:
                    if txt != 'File type = "ooTextFile"':
                        raise Exception('expected \'File type = "ooTextFile"\''
                                        + errMsg)
                elif cnt == 2:
                    if txt != 'Object class = "Formant 2"':
                        raise Exception('expected \'Object class = "Formant 2"\''
                                        + errMsg)
                elif cnt == 4:  # xmin
                    if len(txt.split('=')) > 1:
                        # long text file: 'name = value' lines
                        isShortTextFile = False
                        if txt.split('=')[0].strip() != 'xmin':
                            raise Exception('invalid file structure.' + errMsg)
                        self.xmin = self.decodeParam(txt, 'xmin', cnt, fileName)
                    else:
                        # short text file: bare values in a fixed line order
                        isShortTextFile = True
                        self.xmin = float(txt)
                elif cnt == 5:  # xmax
                    if isShortTextFile:
                        self.xmax = float(txt)
                    else:
                        self.xmax = self.decodeParam(txt, 'xmax', cnt, fileName)
                elif cnt == 6:  # nx
                    if isShortTextFile:
                        self.nx = int(txt)
                    else:
                        self.nx = int(self.decodeParam(txt, 'nx', cnt, fileName))
                elif cnt == 7:  # dx
                    if isShortTextFile:
                        self.dx = float(txt)
                    else:
                        self.dx = self.decodeParam(txt, 'dx', cnt, fileName)
                elif cnt == 8:  # x1
                    if isShortTextFile:
                        self.x1 = float(txt)
                    else:
                        self.x1 = self.decodeParam(txt, 'x1', cnt, fileName)
                elif cnt == 9:  # maxnFormants
                    if isShortTextFile:
                        self.maxnFormants = float(txt)
                    else:
                        self.maxnFormants = self.decodeParam(
                            txt, 'maxnFormants', cnt, fileName)
                elif cnt > 9:
                    if isShortTextFile:
                        # -------------------------------------------------- #
                        # short text file
                        # -------------------------------------------------- #
                        if insideDataStructure:
                            dataCnt += 1
                            if dataCnt == 1:
                                nFormants = int(txt)
                                if self.DO_DEBUG:
                                    print("\t\tnFormants:", nFormants)
                            else:
                                tmpCnt = dataCnt - 2
                                # NOTE: floor division is required. The former
                                # `tmpCnt / 2 + 1` is a Python 2 leftover that
                                # yields a float (e.g. 1.5) in Python 3, so the
                                # `formantCount == nFormants` check below never
                                # fired and no frame was ever stored.
                                formantCount = tmpCnt // 2 + 1
                                if tmpCnt % 2 == 0:
                                    frequency = float(txt)
                                    if self.DO_DEBUG:
                                        print("\t\tformant:", formantCount)
                                        print("\t\t\tfrequency:", frequency)
                                else:
                                    bandwidth = float(txt)
                                    if self.DO_DEBUG:
                                        print("\t\t\tbandwidth:", bandwidth)
                                    arrFormants.append({
                                        'frequency': frequency,
                                        'bandwidth': bandwidth,
                                    })
                                    if formantCount == nFormants:
                                        # frame complete: store time + formants
                                        x = self.x1 + self.dx * (frameIdx - 1)
                                        self.arrX.append(x)
                                        self.arrData.append(arrFormants)
                                        insideDataStructure = False
                        else:
                            # first line of a new frame: the intensity value
                            dataCnt = 0
                            insideDataStructure = True
                            arrFormants = []
                            intensity = float(txt)
                            frameIdx += 1
                            if self.DO_DEBUG:
                                print("\tframeIdx:", frameIdx)
                                print("\t\tintensity:", intensity)
                    else:
                        # -------------------------------------------------- #
                        # long text file
                        # -------------------------------------------------- #
                        if cnt == 10:
                            if txt != 'frame []:':
                                raise Exception('invalid file structure' + errMsg)
                        else:
                            if insideDataStructure:
                                dataCnt += 1
                                if dataCnt == 1:
                                    intensity = self.decodeParam(
                                        txt, 'intensity', cnt, fileName)
                                elif dataCnt == 2:
                                    nFormants = int(self.decodeParam(
                                        txt, 'nFormants', cnt, fileName))
                                elif dataCnt == 3:
                                    if txt != 'formant []:':
                                        raise Exception('invalid file structure'
                                                        + errMsg)
                                else:
                                    tmpCnt = dataCnt - 4
                                    # floor division; see note in the short
                                    # text file branch above
                                    formantCount = tmpCnt // 3 + 1
                                    if tmpCnt % 3 == 0:
                                        if txt[0:9] != 'formant [':
                                            raise Exception(
                                                'invalid file structure' + errMsg)
                                        formantIdx = int(
                                            txt.split('[')[1].split(']')[0])
                                        if self.DO_DEBUG:
                                            print("\t\tformant:", formantIdx)
                                        if formantIdx != formantCount:
                                            raise Exception(
                                                'invalid file structure' + errMsg)
                                    elif tmpCnt % 3 == 1:
                                        frequency = self.decodeParam(
                                            txt, 'frequency', cnt, fileName)
                                        if self.DO_DEBUG:
                                            print("\t\t\tfrequency:", frequency)
                                    elif tmpCnt % 3 == 2:
                                        bandwidth = self.decodeParam(
                                            txt, 'bandwidth', cnt, fileName)
                                        if self.DO_DEBUG:
                                            print("\t\t\tbandwidth:", bandwidth)
                                        arrFormants.append({
                                            'frequency': frequency,
                                            'bandwidth': bandwidth,
                                        })
                                        if formantCount == nFormants:
                                            # frame complete: store the data
                                            x = self.x1 + self.dx * (frameIdx - 1)
                                            self.arrX.append(x)
                                            self.arrData.append(arrFormants)
                                            insideDataStructure = False
                            else:
                                # start of a new 'frame [i]:' block
                                dataCnt = 0
                                insideDataStructure = True
                                arrFormants = []
                                if txt[0:7] != 'frame [':
                                    raise Exception('invalid file structure'
                                                    + errMsg)
                                frameIdx = int(txt.split('[')[1].split(']')[0])
                                if self.DO_DEBUG:
                                    print("\tframeIdx:", frameIdx)
        # check the data
        if len(self.arrX) != len(self.arrData):
            raise Exception("data array sizes don't match!")
        if self.nx != len(self.arrX):
            raise Exception('file "' + fileName + '" promised to contain ' +
                            str(self.nx) + ' frames, but only ' +
                            str(len(self.arrX)) + ' were found')
######################################################################
def readPraatFormantData(fileName):
    """
    @deprecated This function is obsolete. Keep it for now for backwards
    compatibility, but remove it soon.

    Reads tab-separated formant data (header line, then one line per frame:
    time, formant count, then one frequency per formant; bandwidths are not
    available and are set to 1) into a PraatFormants object.
    @param fileName path of the tab-separated data file
    @return a PraatFormants instance (nx set to the number of parsed frames)
    """
    formants = PraatFormants()
    formants.clear()
    cnt = 0
    # context manager: the original leaked the file handle (never closed)
    with open(fileName, "r") as f:
        for line in f:
            line = line.strip()
            if line == '':
                continue  # blank lines don't advance the line counter
            cnt += 1
            if cnt <= 1:
                continue  # skip the header line
            data = line.split('\t')
            if len(data) <= 2:
                cnt -= 1  # too few columns: don't count this line
                continue
            t = data[0]
            arrTmp = []
            try:
                if cnt == 2:
                    # first data row defines the time of the first frame
                    formants.x1 = float(t)
                for i in range(2, len(data)):
                    arrTmp.append({
                        'frequency': float(data[i]),
                        'bandwidth': 1,
                    })
                formants.arrX.append(float(t))
                formants.arrData.append(arrTmp)
            except Exception as e:
                # best-effort parsing: report and skip the malformed row
                print(e)
                print(t, data[i])
                cnt -= 1
    formants.nx = len(formants.arrX)
    return formants
######################################################################
def changeSamplingRate(
waveFileName,
newSamplingRate,
outputFileName = '',
sincWidth = 200,
normalize = False,
verbose = False
):
"""
utility function to change a wave file's sampling frequency
@param waveFileName name of the input file
@param newSamplingRate Hz
@param outputFileName (include a path!) if empty string, the existing
file is overwritten
@param sincWidth increase for better accuracy and less speed (Praat's default is 50)
@param normalize In some cases (when the input file is already
normalized to +-1) resampling can result in a clipped output file.
In these cases it is useful to normalize the generated file.
@todo refactor so that this function uses the new @ref runPraatScript(...)
function
@todo add tmpDataPath parameter
"""
fileNameOnly = '.'.join(waveFileName.split('/')[-1].split('.')[:-1])
inputPath = '/'.join(waveFileName.split('/')[:-1]) + '/'
praatControlFileName = inputPath + 'tmp.praat'
f = open(praatControlFileName, 'w')
f.write('Read from file... ' + waveFileName + '\n')
f.write('Resample... ' + str(newSamplingRate) + ' ' + str(sincWidth) + '\n')
if outputFileName == '': outputFileName = waveFileName
if normalize: f.write('Scale peak... 0.99\n')
f.write('Write to WAV file... ' + outputFileName + '\n')
f.close()
args = ['Praat', praatControlFileName]
if verbose: print ("\tcalling Praat to resample data")
msg | |
self.shared_token_embedder}
else:
self.token_embedder_factory: Callable[[], embedding.Embed]
self.token_embedder = self.token_embedder_factory()
embedders = {'token_ids': self.token_embedder}
if self.position_embedder_factory is not None:
self.position_embedder_factory: Callable[[], embedding.Embed]
self.position_embedder = self.position_embedder_factory()
embedders['position_ids'] = self.position_embedder
self.embedder = embedding.MultiEmbed(
embedders,
sow_intermediates=self.sow_intermediates,
capture_gradients=self.capture_gradients)
self.input_dropout = self.input_dropout_factory()
if self.scan_layers and self.shared_relative_position_bias_factory:
raise ValueError("Scanned layer mode doesn't support shared relative"
'position biases.')
self.relpos_bias = (
self.shared_relative_position_bias_factory()
if self.shared_relative_position_bias_factory is not None else None)
lyrf = lambda: self.layer_factory( # pylint: disable=g-long-lambda
shared_relative_position_bias=self.relpos_bias)
lyrf = maybe_remat(
lyrf, self.layer_remat, self.scan_layers, static_argnums=(3,))
if not self.scan_layers:
self.layers = [lyrf() for _ in range(self.num_layers)]
self.encoder = common.TransparentLayerSequence(self.layers)
else:
initializing = self.is_mutable_collection('params')
# We scan the parameters along axis scan_axis (default=1)
# as an XLA layout optimization.
params_spec = self.scan_axis if initializing else transforms.ScanIn(
self.scan_axis)
cache_spec = 0
scan_annotation = (
self.spmd_annotations['encoder']
if self.spmd_annotations is not None else None)
lyrf = transforms.factory_scan(
lyrf,
in_axes=(nn.broadcast, nn.broadcast, nn.broadcast),
variable_axes={
'params': params_spec,
'cache': cache_spec
},
split_rngs={
'params': True,
'dropout': True
},
length=self.num_layers,
data_transform=transforms.inner_scan_spmd(scan_annotation,
self.scan_axis),
)
self.encoder = lyrf()
self.encoder_norm = self.layer_norm_factory()
self.output_dropout = self.output_dropout_factory()
def embed_and_combine_inputs(self,
                             inputs,
                             inputs_positions=None,
                             *,
                             segment_ids: Optional[Array] = None,
                             enable_dropout: bool = True):
    """Returns the combined embedded inputs for further encoding.

    Args:
        inputs: [batch, length] array of token ids.
        inputs_positions: optional explicit position ids (for packed examples).
        segment_ids: Input segmentation info for packed examples.
        enable_dropout: whether input dropout is active.

    Returns:
        The embedded inputs after input dropout, cast to self.dtype.
    """
    assert inputs.ndim == 2  # (batch, len)
    features = {'token_ids': inputs}
    if 'position_ids' in self.embedder.embedders:
        if inputs_positions is None:
            # default positions 0..len-1, broadcast over the batch
            features['position_ids'] = jnp.arange(inputs.shape[-1])[None, :]
        else:
            features['position_ids'] = inputs_positions
    # TODO: Pass `deterministic=not enable_dropout`?
    embedded = self.embedder(segment_ids=segment_ids, **features)
    embedded = self.input_dropout(embedded, deterministic=not enable_dropout)
    # TODO: Revert this cast or move to embedder.
    return embedded.astype(self.dtype)
def encode_from_continuous_inputs(self,
                                  inputs,
                                  encoder_mask=None,
                                  logit_mask=None,
                                  *,
                                  enable_dropout: bool = True):
    """Applies all the layers starting from the continuous (embedded) inputs.

    Args:
        inputs: embedded inputs to the encoder stack.
        encoder_mask: self-attention mask for the encoder layers.
        logit_mask: optional multiplicative mask applied to the outputs.
        enable_dropout: whether output dropout is active.

    Returns:
        The (optionally masked) output of the final encoder layer after
        layer norm and output dropout.
    """
    # Residual connections keep the width at `cfg.emb_dim` throughout.
    outputs = self.encoder(
        inputs,
        encoder_mask,
        logit_mask=logit_mask,
        enable_dropout=enable_dropout)
    if self.scan_layers:
        # scanned-layer mode: keep only the first element of the result
        outputs = outputs[0]
    # Post-process the outputs of the final encoder layer.
    # TODO: We could do this in the common encoder.
    outputs = self.encoder_norm(outputs)
    outputs = self.output_dropout(outputs, deterministic=not enable_dropout)
    return outputs if logit_mask is None else logit_mask * outputs
def __call__(self,
             inputs,
             inputs_positions=None,
             encoder_mask=None,
             *,
             segment_ids: Optional[Array] = None,
             enable_dropout: bool = True):
    """Applies Transformer model on the inputs.
    Args:
      inputs: input data
      inputs_positions: input subsequence positions for packed examples.
      encoder_mask: decoder self-attention mask.
      segment_ids: Input segmentation info for packed examples.
      enable_dropout: Enables dropout if set to True.
    Returns:
      output of a transformer encoder.
    """
    embedded = self.embed_and_combine_inputs(
        inputs,
        inputs_positions=inputs_positions,
        segment_ids=segment_ids,
        enable_dropout=enable_dropout)
    # Mask positions with token id <= 0 (presumably padding — TODO confirm
    # against the tokenizer's conventions).
    logit_mask = jnp.expand_dims(
        jnp.array((inputs > 0), dtype=embedded.dtype), axis=-1)
    return self.encode_from_continuous_inputs(
        embedded,
        encoder_mask=encoder_mask,
        logit_mask=logit_mask,
        enable_dropout=enable_dropout)
class Decoder(nn.Module, param_remapping.ParameterRemappable):
"""A stack of decoder layers.
This module can be used with or without the encoder stack. To use without an
encoder, pass in encoded=None. This will bypass the encoder-decoder attention.
Attributes:
layer_factory: A callable that returns a DecoderLayer.
dropout_factory: A callable that returns the dropout to apply to the input
and before the final logits.
layer_norm_factory: A callable that returns a layer norm.
output_logits_factory: A callable that returns the output logits. If not
provided, then the token embedders are used.
num_layers: Number of layers to generate.
dtype: DType to cast the embedded inputs.
layer_remat: whether and how to apply jax.remat to each layer to perform
recomputation in the backward pass. Supported values are 'none', for no
use of jax.remat; 'minimal', for a policy that recomputes only non-matmul
operations (typically optimal); and 'full', for full recomputation of each
layer. The (legacy) default is to use 'none' when `scan_layers=False` and
and 'full' when `scan_layers=True`.
scan_layers: whether to scan over layers.
spmd_annotations: spmd annotations needed for scanned layers.
shared_relative_position_bias_factory: A callable that returns a relative
position bias instance which will be shared for all encoder layers. Only
set this if using shared relative position biases.
token_embedder_factory: A callable that returns a token embedder. Please
provide either this or `shared_token_embedder`.
shared_token_embedder: A callable that returns a token embedder shared
between both encoder and decoder.
position_embedder_factory: A callable that returns an absolute position
embedder. Only provide this if you want absolute position embeddings.
sow_intermediates: whether to track intermediates using Module.sow.
scan_axis: axis over which to do scan over layers.
capture_gradients: whether to track input gradients using a variable in the
`grads` collection. This captures the gradient of the (combined) embedded
inputs, i.e. the input to the first encoder layer.
"""
# Required factories for the decoder stack.
layer_factory: MakeDecoderLayerFn
dropout_factory: Callable[[], nn.Module]
layer_norm_factory: Callable[[], nn.Module]
num_layers: int
# Computation dtype the embedded inputs are cast to.
dtype: DType = jnp.float32
# One of 'none' | 'minimal' | 'full' | 'legacy' ('legacy' derives the policy
# from scan_layers; see the class docstring).
layer_remat: str = 'legacy'
scan_layers: bool = False
spmd_annotations: Any = None
# Incompatible with scan_layers (checked in setup()).
shared_relative_position_bias_factory: Optional[Callable[[],
                                                         nn.Module]] = None
# Optional output head; when None the token embedder is used for logits.
output_logits_factory: Optional[Callable[[], nn.Module]] = None
# Embedders: Either a token_embedder_factory factory or shared token embedder
# must be provided. The position embedder is optional and provided when
# absolute position embeddings are desired.
token_embedder_factory: Optional[Callable[[],
                                          embedding.Embedder[Array]]] = None
shared_token_embedder: Optional[embedding.Embed] = None
position_embedder_factory: Optional[Callable[
    [], embedding.Embedder[Array]]] = None
sow_intermediates: bool = False
# Axis along which scanned-layer parameters are stacked (XLA layout choice).
scan_axis: int = 1
capture_gradients: bool = False
def setup(self):
  """Builds embedders, decoder layers, final norm/dropout and output logits.

  Raises:
    ValueError: if not exactly one of `token_embedder_factory` /
      `shared_token_embedder` is set, or if shared relative position biases
      are requested together with scanned layers.
  """
  # Set up the embedders: exactly one token-embedder source must be given.
  if (self.token_embedder_factory,
      self.shared_token_embedder).count(None) != 1:
    raise ValueError(
        'Please set exactly one of token_embedder_factory or '
        'shared_token_embedder. token_embedder_factory was %s, and '
        'shared_token_embedder was %s.' %
        (self.token_embedder_factory, self.shared_token_embedder))
  if self.shared_token_embedder is not None:
    embedders = {'token_ids': self.shared_token_embedder}
  else:
    self.token_embedder_factory: Callable[[], embedding.Embed]
    self.token_embedder = self.token_embedder_factory()
    embedders = {'token_ids': self.token_embedder}
  if self.position_embedder_factory is not None:
    self.position_embedder_factory: Callable[[], embedding.Embed]
    self.position_embedder = self.position_embedder_factory()
    embedders['position_ids'] = self.position_embedder
  self.embedder = embedding.MultiEmbed(
      embedders,
      sow_intermediates=self.sow_intermediates,
      capture_gradients=self.capture_gradients)
  self.input_dropout = self.dropout_factory()

  if self.scan_layers and self.shared_relative_position_bias_factory:
    # Fixed message: the adjacent string fragments previously concatenated
    # without a space ("...shared relativeposition biases.").
    raise ValueError("Scanned layer mode doesn't support shared relative "
                     'position biases.')
  self.relpos_bias = (
      self.shared_relative_position_bias_factory()
      if self.shared_relative_position_bias_factory is not None else None)

  lyrf = lambda: self.layer_factory(  # pylint: disable=g-long-lambda
      shared_relative_position_bias=self.relpos_bias)
  # Optionally wrap each layer in jax.remat per `layer_remat` policy.
  lyrf = maybe_remat(
      lyrf,
      self.layer_remat,
      self.scan_layers,
      static_argnums=(5, 6, 7, 8, 9))
  if not self.scan_layers:
    self.layers = [lyrf() for _ in range(self.num_layers)]
    self.decoder = common.TransparentLayerSequence(self.layers)
  else:
    initializing = self.is_mutable_collection('params')
    # We scan the parameters along scan_axis (default =1) as
    # an XLA layout optimization.
    params_spec = self.scan_axis if initializing else transforms.ScanIn(
        self.scan_axis)
    cache_spec = 0
    scan_annotation = (
        self.spmd_annotations['decoder']
        if self.spmd_annotations is not None else None)
    lyrf = transforms.factory_scan(
        lyrf,
        in_axes=(nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast,
                 nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast,
                 nn.broadcast),
        variable_axes={
            'params': params_spec,
            'cache': cache_spec
        },
        split_rngs={
            'params': True,
            'dropout': True
        },
        length=self.num_layers,
        data_transform=transforms.inner_scan_spmd(scan_annotation,
                                                  self.scan_axis),
        axis_name='layers',
    )
    self.decoder = lyrf()
  self.decoder_norm = self.layer_norm_factory()
  self.output_dropout = self.dropout_factory()
  self.setup_output_logits()
@nn.nowrap
def setup_output_logits(self):
  """Sets up output logits; this method provides flexiblity for subclasses."""
  # TODO: Re-merge with setup() once it's easier to Gin-configure
  # shared modules, and directly pass submodules (instead of using factories).
  if not self.output_logits_factory:
    self.logits_dense = None
    return
  # TODO: Consider renaming to "output_logits".
  self.output_logits_factory: Callable[[], nn.Module]
  self.logits_dense = self.output_logits_factory()
def embed_and_combine_inputs(
    self,
    decoder_input_tokens,
    decoder_positions=None,
    *,
    segment_ids: Optional[Array] = None,
    enable_dropout: bool = True,
    decode: bool = False,
):
  """Embeds decoder tokens (plus positions, if configured) and applies
  input dropout, returning inputs ready for the decoder layers."""
  assert decoder_input_tokens.ndim == 2  # (batch, len)
  inputs_to_embed = {'token_ids': decoder_input_tokens}
  if 'position_ids' in self.embedder.embedders:
    positions = decoder_positions
    if positions is None:
      # Default to 0..len-1 positions, broadcast over the batch.
      length = decoder_input_tokens.shape[-1]
      positions = jnp.arange(length)[None, :]
    inputs_to_embed['position_ids'] = positions
  # TODO: Pass `deterministic=not enable_dropout`?
  combined = self.embedder(
      segment_ids=segment_ids, decode=decode, **inputs_to_embed)
  combined = self.input_dropout(combined, deterministic=not enable_dropout)
  # TODO: Revert this cast or move to embedder.
  return combined.astype(self.dtype)
def decode_from_continuous_inputs(
self,
embedded_inputs,
encoder_outputs,
decoder_positions=None,
decoder_mask=None,
encoder_decoder_mask=None,
logit_mask=None,
*,
enable_dropout: bool = True,
decode: bool = False,
max_decode_length: Optional[int] = None,
prefill: bool = False,
prefill_lengths: Optional[Array] = None,
):
"""Applies the decoder on the continuous (embedded) inputs."""
# If encoded is not given, this block is decoder only and does not contain
# attention from decoder to encoder.
if encoder_outputs is not None:
assert encoder_outputs.ndim == 3 # (batch, len, depth)
# Apply the decoder layers, attending to the encoder outputs (if provided),
# and attending to previous decoder inputs (by masking future inputs).
decoder_outputs = self.decoder(
embedded_inputs,
encoder_outputs,
decoder_mask=decoder_mask,
encoder_decoder_mask=encoder_decoder_mask,
logit_mask=logit_mask,
enable_dropout=enable_dropout,
decode=decode,
max_decode_length=max_decode_length,
prefill=prefill,
prefill_lengths=prefill_lengths)
if self.scan_layers:
decoder_outputs = decoder_outputs[0]
# Post-process final decoder layer outputs.
# TODO: We could do this in the common decoder.
decoder_outputs = self.decoder_norm(decoder_outputs)
decoder_outputs = | |
label_create_button = T("Add New Summary Request Option"),
label_delete_button = T("Delete Summary Request Option"),
msg_record_created = T("Summary Request Option added"),
msg_record_modified = T("Summary Request Option updated"),
msg_record_deleted = T("Summary Request Option deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Summary Request Options defined"))
self.configure(tablename,
deduplicate = self.req_summary_option_duplicate)
# ---------------------------------------------------------------------
# Pass variables back to global scope (s3.*)
#
return Storage(
)
# -------------------------------------------------------------------------
@staticmethod
def req_summary_option_duplicate(job):
    """
        De-duplicate Request Summary Options by name
    """

    if job.tablename != "req_summary_option":
        return
    table = job.table
    option_name = job.data.get("name", None)
    existing = current.db(table.name == option_name).select(table.id,
                                                            limitby=(0, 1)
                                                            ).first()
    if existing:
        # Convert the import into an update of the existing record
        job.id = existing.id
        job.data.id = existing.id
        job.method = job.METHOD.UPDATE
# =============================================================================
class S3RequestRecurringModel(S3Model):
    """
        Model for Jobs which schedule Recurring Requests (req_job resource)
    """

    names = ["req_job",
             ]

    def model(self):
        """
            Defines the req_job table, its CRUD strings and the custom
            reset/run methods, then returns names for the global scope.
        """

        T = current.T
        s3 = current.response.s3

        # -----------------------------------------------------------------
        # Request Job
        #
        # Jobs for Scheduling Recurring Requests
        #
        tablename = "req_job"
        table = self.define_table(tablename,
                                  self.req_req_id(),
                                  s3.scheduler_task_id(),
                                  *s3_meta_fields())

        # CRUD Strings
        ADD_JOB = T("Add Job")
        s3.crud_strings[tablename] = Storage(
            title_create = ADD_JOB,
            title_display = T("Request Job"),
            title_list = T("Request Schedule"),
            title_update = T("Edit Job"),
            title_search = T("Search for Job"),
            subtitle_create = ADD_JOB,
            label_list_button = T("List Jobs"),
            label_create_button = ADD_JOB,
            msg_record_created = T("Job added"),
            # Fixed: was the duplicated "Job updated updated"
            msg_record_modified = T("Job updated"),
            msg_record_deleted = T("Job deleted"),
            msg_list_empty = T("No jobs configured yet"),
            msg_no_match = T("No jobs configured"))

        # Resource Configuration: custom method to reset a job's schedule
        self.set_method("req", "req",
                        component_name="job",
                        method="reset",
                        action=req_job_reset)

        # Resource Configuration: custom method to run a job immediately
        self.set_method("req", "req",
                        component_name="job",
                        method="run",
                        action=req_job_run)

        # ---------------------------------------------------------------------
        # Pass variables back to global scope (s3.*)
        #
        return Storage(
            )

    # -------------------------------------------------------------------------
    @staticmethod
    def req_recurring_duplicate(job):
        """
            De-duplicate Recurring Requests by name
            (docstring fixed: previously mislabelled "Request Summary Options")
        """

        if job.tablename == "req_recurring":
            table = job.table
            name = job.data.get("name", None)
            query = (table.name == name)
            _duplicate = current.db(query).select(table.id,
                                                  limitby=(0, 1)).first()
            if _duplicate:
                # Convert the import into an update of the existing record
                job.id = _duplicate.id
                job.data.id = _duplicate.id
                job.method = job.METHOD.UPDATE
# =============================================================================
class S3CommitModel(S3Model):
    """
        Model for Commitments (Pledges) made against Requests
        (req_commit resource)
    """

    names = ["req_commit",
             "req_commit_id",
             ]

    def model(self):
        """
            Defines the req_commit table, its CRUD strings, the reusable
            commit_id foreign key and the commitment components.
        """

        T = current.T
        db = current.db
        auth = current.auth

        add_component = self.add_component

        # ---------------------------------------------------------------------
        # Commitments (Pledges)
        tablename = "req_commit"
        table = self.define_table(tablename,
                                  self.super_link("site_id", "org_site",
                                                  label = T("From Facility"),
                                                  default = auth.user.site_id if auth.is_logged_in() else None,
                                                  # Non-Item Requests make False in the prep
                                                  writable = True,
                                                  readable = True,
                                                  # Comment these to use a Dropdown & not an Autocomplete
                                                  #widget = S3SiteAutocompleteWidget(),
                                                  #comment = DIV(_class="tooltip",
                                                  #              _title="%s|%s" % (T("From Facility"),
                                                  #                                T("Enter some characters to bring up a list of possible matches"))),
                                                  represent = self.org_site_represent),
                                  # Non-Item Requests make True in the prep
                                  self.org_organisation_id(
                                      readable = False,
                                      writable = False
                                  ),
                                  self.req_req_id(),
                                  Field("type", "integer",
                                        # These are copied automatically from the Req
                                        readable=False,
                                        writable=False),
                                  s3_date(default = "now"),
                                  s3_date("date_available",
                                          label = T("Date Available")),
                                  self.pr_person_id("committer_id",
                                                    default = auth.s3_logged_in_person(),
                                                    label = T("Committed By"),
                                                    comment = self.pr_person_comment(child="committer_id")),
                                  s3_comments(),
                                  *s3_meta_fields())

        # CRUD strings
        ADD_COMMIT = T("Make Commitment")
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = ADD_COMMIT,
            title_display = T("Commitment Details"),
            title_list = T("Commitments"),
            title_update = T("Edit Commitment"),
            title_search = T("Search Commitments"),
            subtitle_create = ADD_COMMIT,
            label_list_button = T("List Commitments"),
            label_create_button = ADD_COMMIT,
            label_delete_button = T("Delete Commitment"),
            msg_record_created = T("Commitment Added"),
            msg_record_modified = T("Commitment Updated"),
            msg_record_deleted = T("Commitment Canceled"),
            msg_list_empty = T("No Commitments"))

        # Reusable Field
        commit_id = S3ReusableField("commit_id", table, sortby="date",
                                    requires = IS_NULL_OR(
                                                    IS_ONE_OF(db,
                                                              "req_commit.id",
                                                              self.commit_represent,
                                                              orderby="req_commit.date",
                                                              sort=True)),
                                    represent = self.commit_represent,
                                    label = T("Commitment"),
                                    ondelete = "CASCADE")

        self.configure(tablename,
                       # Commitments should only be made to a specific request
                       listadd = False,
                       onvalidation = self.commit_onvalidation,
                       onaccept = self.commit_onaccept)

        # Components
        # Commitment Items as component of Commitment
        add_component("req_commit_item",
                      req_commit="commit_id")

        # Commitment Persons as component of Commitment
        add_component("req_commit_person",
                      req_commit="commit_id")

        # ---------------------------------------------------------------------
        # Pass variables back to global scope (s3.*)
        #
        return Storage(
                req_commit_id = commit_id,
            )

    # -------------------------------------------------------------------------
    @staticmethod
    def commit_represent(id, row=None):
        """
            Represent a Commit as "<site or organisation> - <date>"
        """

        if row:
            table = current.db.req_commit
        elif not id:
            return current.messages.NONE
        else:
            db = current.db
            table = db.req_commit
            row = db(table.id == id).select(table.type,
                                            table.date,
                                            table.organisation_id,
                                            table.site_id,
                                            limitby=(0, 1)).first()
        if row.type == 1:
            # Items
            return "%s - %s" % (table.site_id.represent(row.site_id),
                                table.date.represent(row.date))
        else:
            return "%s - %s" % (table.organisation_id.represent(row.organisation_id),
                                table.date.represent(row.date))

    # -------------------------------------------------------------------------
    @staticmethod
    def commit_onvalidation(form):
        """
            Copy the request_type to the commitment
        """

        req_id = current.manager.get_session("req", "req")
        if req_id:
            rtable = current.s3db.req_req
            query = (rtable.id == req_id)
            req_record = current.db(query).select(rtable.type,
                                                  limitby=(0, 1)).first()
            if req_record:
                form.vars.type = req_record.type

    # -------------------------------------------------------------------------
    @staticmethod
    def commit_onaccept(form):
        """
            For People commitments, auto-create the commit_person record;
            for Non-Item requests, update the request's commit status.
        """

        vars = form.vars
        s3db = current.s3db

        if vars.type == 3: # People
            # If no organisation_id, then this is a single person commitment, so create the commit_person record automatically
            table = s3db.req_commit_person
            # Fixed: `auth` was previously an undefined name here (NameError);
            # resolve it from `current` like the other accessors.
            auth = current.auth
            table.insert(commit_id = vars.id,
                         #skill_id = ???,
                         person_id = auth.s3_logged_in_person())
            # @ToDo: Mark Person's allocation status as 'Committed'
        elif vars.type == 9:
            # Non-Item requests should have commitment status updated if a commitment is made
            db = current.db
            table = s3db.req_commit
            rtable = s3db.req_req
            query = (table.id == vars.id) & \
                    (rtable.id == table.req_id)
            req_record = db(query).select(rtable.id,
                                          rtable.commit_status,
                                          limitby=(0, 1)).first()
            if req_record and req_record.commit_status == REQ_STATUS_NONE:
                # Assume Partial not Complete
                # @ToDo: Provide a way for the committer to specify this
                query = (rtable.id == req_record.id)
                db(query).update(commit_status=REQ_STATUS_PARTIAL)
# =============================================================================
class S3CommitItemModel(S3Model):
    """
        Model for Items pledged against a Request as part of a Commitment
        (req_commit_item resource)
    """

    names = ["req_commit_item"]

    def model(self):
        """
            Defines the req_commit_item table, its CRUD strings and
            onaccept hook, then returns names for the global scope.
        """

        T = current.T

        # -----------------------------------------------------------------
        # Commitment Items
        # @ToDo: Update the req_item_id in the commit_item if the req_id of the commit is changed
        tablename = "req_commit_item"
        table = self.define_table(tablename,
                                  self.req_commit_id(),
                                  #item_id,
                                  #supply_item_id(),
                                  self.req_item_id(),
                                  self.supply_item_pack_id(),
                                  Field("quantity",
                                        "double",
                                        label = T("Quantity"),
                                        notnull = True),
                                  s3_comments(),
                                  *s3_meta_fields())

        # pack_quantity virtual field
        table.virtualfields.append(self.supply_item_pack_virtualfields(tablename=tablename))

        # CRUD strings
        ADD_COMMIT_ITEM = T("Add Item to Commitment")
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = ADD_COMMIT_ITEM,
            title_display = T("Commitment Item Details"),
            title_list = T("Commitment Items"),
            title_update = T("Edit Commitment Item"),
            title_search = T("Search Commitment Items"),
            subtitle_create = T("Add New Commitment Item"),
            label_list_button = T("List Commitment Items"),
            label_create_button = ADD_COMMIT_ITEM,
            label_delete_button = T("Delete Commitment Item"),
            msg_record_created = T("Commitment Item added"),
            msg_record_modified = T("Commitment Item updated"),
            msg_record_deleted = T("Commitment Item deleted"),
            msg_list_empty = T("No Commitment Items currently registered"))

        self.configure(tablename,
                       onaccept = self.commit_item_onaccept )

        # ---------------------------------------------------------------------
        # Pass variables back to global scope (s3.*)
        #
        return Storage(
                # Used by commit_req() controller
                req_commit_item_onaccept = self.commit_item_onaccept
            )

    # -------------------------------------------------------------------------
    @staticmethod
    def commit_item_onaccept(form):
        """
            Recompute the committed quantity on the linked request item
            whenever a commitment item is created or updated, then trigger
            req_item_onaccept to refresh the request's commit status.

            NOTE(review): assumes `pack_quantity` virtual fields convert all
            quantities into common base units — confirm against supply model.
        """

        db = current.db
        s3mgr = current.manager
        table = db.req_commit_item

        # Try to get req_item_id from the form
        req_item_id = 0
        if form:
            req_item_id = form.vars.get("req_item_id")
        if not req_item_id:
            # Fall back to the commit_item id stored in the session
            commit_item_id = s3mgr.get_session("req", "commit_item")
            r_commit_item = table[commit_item_id]
            req_item_id = r_commit_item.req_item_id

        # Total all undeleted commitments against this request item
        query = (table.req_item_id == req_item_id) & \
                (table.deleted == False)
        commit_items = db(query).select(table.quantity ,
                                        table.item_pack_id)
        quantity_commit = 0
        for commit_item in commit_items:
            quantity_commit += commit_item.quantity * commit_item.pack_quantity

        # Convert into the request item's own pack units before storing
        ritable = db.req_req_item
        r_req_item = ritable[req_item_id]
        quantity_commit = quantity_commit / r_req_item.pack_quantity
        ritable[req_item_id] = dict(quantity_commit = quantity_commit)

        # Update status_commit of the req record
        s3mgr.store_session("req", "req_item", r_req_item.id)
        dummy_form = Storage(vars = Storage(req_id = r_req_item.req_id))
        req_item_onaccept(dummy_form)
# =============================================================================
class S3CommitPersonModel(S3Model):
"""
"""
names = ["req_commit_person"]
def model(self):
    """
        Defines the req_commit_person table (people pledged against a
        Request as part of a Commitment) and its CRUD strings.
    """

    T = current.T

    # -----------------------------------------------------------------
    # Committed Persons
    #
    tablename = "req_commit_person"
    table = self.define_table(tablename,
                              self.req_commit_id(),
                              # For reference
                              self.hrm_multi_skill_id(writable=False, comment=None),
                              # This should be person not hrm as we want to mark them as allocated
                              self.pr_person_id(),
                              s3_comments(),
                              *s3_meta_fields())

    # CRUD strings
    ADD_COMMIT_PERSON = T("Add Person to Commitment")
    current.response.s3.crud_strings[tablename] = Storage(
        title_create = ADD_COMMIT_PERSON,
        title_display = T("Committed Person Details"),
        title_list = T("Committed People"),
        title_update = T("Edit Committed Person"),
        title_search = T("Search Committed People"),
        subtitle_create = T("Add New Person to Commitment"),
        label_list_button = T("List Committed People"),
        label_create_button = ADD_COMMIT_PERSON,
        label_delete_button = T("Remove Person from Commitment"),
        msg_record_created = T("Person added to Commitment"),
        msg_record_modified = T("Committed Person updated"),
        msg_record_deleted = T("Person removed from Commitment"),
        msg_list_empty = T("No People currently committed"))

    # @ToDo: Fix this before enabling
    #self.configure(tablename,
    #               onaccept = self.commit_person_onaccept)

    # ---------------------------------------------------------------------
    # Pass variables back to global scope (s3.*)
    #
    return Storage()
# -------------------------------------------------------------------------
@staticmethod
def commit_person_onaccept(form):
"""
Not working
"""
db = current.db
s3db = current.s3db
s3mgr = current.manager
table = db.req_commit_person
rstable = s3db.req_req_skill
# Try to get req_skill_id from the form
req_skill_id = 0
if form:
req_skill_id = form.vars.get("req_skill_id", None)
if not req_skill_id:
commit_skill_id = s3mgr.get_session("req", "commit_skill")
r_commit_skill = table[commit_skill_id]
req_skill_id = r_commit_skill.req_skill_id
query = (table.req_skill_id == req_skill_id) & \
(table.deleted == False)
commit_skills = db(query).select(table.quantity)
quantity_commit = 0
for commit_skill in | |
def write_to_out(out, incoming_data, map_out_combined, region):
bires = map_out_combined[0] + region
global_slice = tuple(
[slice(bires[0][x], bires[1][x] + 1) for x in range(bires.shape[1])]
)
local_slice = shardview.slice_to_local(out.subspace, global_slice)
dprint(
2,
"Receiving Data:",
self.worker_num,
map_out_combined,
out.bcontainer.shape,
incoming_data.shape,
bires,
global_slice,
local_slice,
region,
)
out.bcontainer[local_slice] = incoming_data
for gid, remote_id, region, map_out_combined in to_ranges:
other_local = self.numpy_map[gid]
the_slice = tuple(
[slice(region[0][x], region[1][x] + 1) for x in range(region.shape[1])]
)
local_slice = shardview.slice_to_local(other_local.subspace, the_slice)
dprint(
2,
"Sending Data:",
other_local.remote.worker_num,
remote_id,
the_slice,
local_slice,
map_out_combined,
)
if other_local.remote.worker_num == remote_id:
local_send += 1
write_to_out(
out, other_local.bcontainer[local_slice], map_out_combined, region
)
else:
self.comm_queues[remote_id].put(
(
send_recv_uuid,
other_local.bcontainer[local_slice],
map_out_combined,
region,
)
)
for _ in range(len(from_ranges) - local_send):
try:
get_result = self.comm_queues[self.worker_num].get(
gfilter=lambda x: x[0] == send_recv_uuid, timeout=5
)
in_send_recv_uuid, incoming_data, map_out_combined, region = get_result
# incoming_data, map_out_combined, region = self.comm_queues[self.worker_num].get(timeout = 5)
except:
print("some exception!", sys.exc_info()[0])
print("get_result:", get_result)
assert 0
write_to_out(out, incoming_data, map_out_combined, region)
def sstencil(
    self, stencil_op_uuid, out_gid, neighborhood, first_gid, args, func, create_flag
):
    """Executes one stencil operation on this worker's local shard.

    Args:
        stencil_op_uuid: unique id of this stencil op; tags the border
            exchange so messages from different ops don't mix.
        out_gid: global id of the output array.
        neighborhood: per-dimension (lo, hi) stencil extents.
        first_gid: global id of the first array argument; its shard is used
            as the template for output allocation.
        args: stencil arguments; UUIDs are resolved to local shards, other
            values are passed through unchanged.
        func: serialized StencilMetadata describing the kernel.
        create_flag: if True, allocate the output shard; otherwise reuse the
            shard already registered under out_gid.
    """
    func = func_loads(func)
    first = self.numpy_map[first_gid]
    if create_flag:
        lnd = first.init_like(out_gid)
        self.numpy_map[out_gid] = lnd
    else:
        lnd = self.numpy_map[out_gid]
    new_bcontainer = lnd.bcontainer
    # Refresh the ghost/border regions of every distributed argument before
    # running the kernel.
    for i, x in enumerate(args):
        if isinstance(x, uuid.UUID):
            lnd_arg = self.numpy_map[x]
            lnd_arg.getborder(str(stencil_op_uuid) + str(i))
    # print("sstencil:", first.remote.worker_num, func, type(func), first.subspace, first.bcontainer.shape, neighborhood)
    assert isinstance(func, StencilMetadata)
    # Convert from StencilMetadata to Numba StencilFunc
    fargs = [
        self.numpy_map[x].bcontainer if isinstance(x, uuid.UUID) else x
        for x in args
    ]
    # Per-worker neighborhood bounds derived from this shard's position in
    # the global array; presumably this widens/clamps the stencil extents at
    # the global edges so border cells are computed correctly — TODO confirm.
    worker_neighborhood = [
        (
            min(0, int(shardview.get_start(lnd.subspace)[x] + neighborhood[x][0])),
            max(
                2 * lnd.border,
                int(
                    shardview.get_start(lnd.subspace)[x]
                    + shardview.get_size(lnd.subspace)[x]
                    - lnd.whole[x]
                    + 2 * lnd.border
                    + neighborhood[x][1]
                ),
            ),
        )
        for x in range(len(lnd.dim_lens))
    ]
    sfunc = func.compile({"neighborhood": tuple(worker_neighborhood)})
    # print("sstencil fargs:", first.remote.worker_num, sfunc, type(sfunc), worker_neighborhood, "\n", fargs)
    t0 = timer()
    if create_flag:
        sout = sfunc(*fargs)
        # Copy only the core (non-border) region into the fresh output.
        new_bcontainer[first.core_slice] = sout[first.core_slice]
    else:
        sfunc(*fargs, out=new_bcontainer)
    t1 = timer()
    dprint(1, "SStencil item", t1 - t0)
    # print("sstencil sout:", first.remote.worker_num, sout)
    # new_bcontainer[first.core_slice] = sout[first.core_slice]
    # new_bcontainer[:] = sfunc(*fargs, out=new_bcontainer)
    # print("sstencil done", first.remote.worker_num, "\n", new_bcontainer)
# TODO: should use get_view
def scumulative_local(self, out_gid, in_gid, func):
    """Runs the local pass of a cumulative reduction along axis 0,
    writing the running result into a freshly-allocated output shard."""
    op = func_loads(func)
    source = self.numpy_map[in_gid]
    dest = source.init_like(out_gid)
    self.numpy_map[out_gid] = dest
    src_arr = source.bcontainer
    dst_arr = dest.bcontainer
    dst_arr[0] = src_arr[0]
    for i in range(1, source.dim_lens[0]):
        dst_arr[i] = op(dst_arr[i - 1], src_arr[i])
# TODO: should use get_view
def scumulative_final(self, array_gid, boundary_value, func):
    """Folds the incoming boundary value into every local element along
    axis 0 (final pass of a distributed cumulative reduction)."""
    op = func_loads(func)
    shard = self.numpy_map[array_gid]
    data = shard.bcontainer
    offset = boundary_value[0]
    for i in range(shard.dim_lens[0]):
        data[i] = op(data[i], offset)
## still needed?
def array_binop(self, lhs_gid, rhs_gid, out_gid, op):
    """Applies the named binary operator `op` (e.g. "__add__") between the
    core region of the lhs shard and rhs (shard id or scalar). Writes into a
    new shard registered under out_gid, or in place when out_gid is None."""
    lhs = self.numpy_map[lhs_gid]
    if isinstance(rhs_gid, uuid.UUID):
        rhs_operand = self.numpy_map[rhs_gid].bcontainer[lhs.core_slice]
    else:
        rhs_operand = rhs_gid
    apply_op = getattr(lhs.bcontainer[lhs.core_slice], op)
    if out_gid is None:
        # In-place variant (e.g. "__iadd__"): result intentionally discarded.
        apply_op(rhs_operand)
    else:
        result_shard = lhs.init_like(out_gid)
        self.numpy_map[out_gid] = result_shard
        result_shard.bcontainer[result_shard.core_slice] = apply_op(rhs_operand)
def spmd(self, func, args):
    """Runs a deserialized SPMD function on this worker, resolving UUID
    arguments to local shards. Failures are logged (best-effort), never
    propagated to the caller."""
    dprint(2, "Starting remote spmd", self.worker_num)
    try:
        func = func_loads(func)
        fargs = [self.numpy_map[x] if isinstance(x, uuid.UUID) else x for x in args]
        fmfunc = FunctionMetadata(func, [], {})
        dprint(3, "Before local spmd fmfunc call")
        fmfunc(*fargs)
        dprint(3, "After local spmd fmfunc call")
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; keep the best-effort logging.
        print("some exception in remote spmd")
        traceback.print_exc()
    sys.stdout.flush()
def run_deferred_ops(
self, uuid, arrays, delete_gids, pickledvars, exec_dist, fname, code, imports
):
times = [timer()]
dprint(4, "HERE - deferredops; arrays:", arrays.keys())
subspace = shardview.clean_range(
exec_dist[self.worker_num]
) # our slice of work range
# create array shards if needed
# TODO: This has become very complicated and ugly. Consider restructuring, use class instead of tuple.
# info tuple is (size, distribution, local_border, from_border, to_border, dtype)
# TODO: Need to check array construction -- this code may break for views/slices; assumes first view of gid is the canonical, full array
# [ self.create_array(g, subspace, info[0], None, info[2], info[1], None if info[3] is None else info[3][self.worker_num], None if info[4] is None else info[4][self.worker_num]) for (g,(v,info,bdist,pad)) in arrays.items() if g not in self.numpy_map ]
for (g, (l, bdist, pad, _)) in arrays.items():
if g in self.numpy_map:
continue
v = l[0][0]
info = l[0][1]
self.create_array(
g,
subspace,
info[0],
None,
info[2],
info[5],
info[1],
None if info[3] is None else info[3][self.worker_num],
None if info[4] is None else info[4][self.worker_num],
)
times.append(timer())
# check if function exists, else exec to create it
ldict = {}
gdict = globals()
for imp in imports:
the_module = __import__(imp)
gdict[imp] = the_module
# gdict=sys.modules['__main__'].__dict__
if fname not in gdict:
dprint(2, "function does not exist, creating it")
exec(code, gdict, ldict)
gdict[fname] = FunctionMetadata(ldict[fname], [], {})
# print (gdict.keys())
func = gdict[fname]
times.append(timer())
# Send data to other workers as needed, count how many items to recieve later
# TODO: optimize by combining messages of overlapping array parts
arr_parts = (
{}
) # place to keep array parts received and local parts, indexed by variable name
expected_parts = (
0 # count of parts that will be sent to this worker from others
)
to_send = {} # save up list of messages to node i, send all as single message
from_set = {} # nodes to recieve from
overlap_time = 0.0
getview_time = 0.0
arrview_time = 0.0
copy_time = 0.0
for (g, (l, bdist, pad, _)) in arrays.items():
for (v, info) in l:
arr_parts[v] = []
if shardview.is_compat(
subspace, info[1][self.worker_num]
): # whole compute range is local
sl = self.get_view(g, info[1][self.worker_num])
arr_parts[v].append((subspace, sl))
continue
overlap_time -= timer()
overlap_workers = shardview.get_overlaps(
self.worker_num, info[1], exec_dist
)
overlap_time += timer()
# for i in range(num_workers):
for i in overlap_workers:
if i != self.worker_num:
# part = shardview.intersect(exec_dist[self.worker_num],info[1][i]) # part of what we need but located at worker i
part = shardview.intersect(
info[1][i], exec_dist[self.worker_num]
) # part of what we need but located at worker i
if not shardview.is_empty(part):
expected_parts += 1
from_set[i] = 1
# part = shardview.intersect(exec_dist[i],info[1][self.worker_num]) # part of what we have needed at worker i
part = shardview.intersect(
info[1][self.worker_num], exec_dist[i]
) # part of what we have needed at worker i
if shardview.is_empty(part):
continue
getview_time -= timer()
sl = self.get_partial_view(
g,
shardview.to_slice(part),
info[1][self.worker_num],
remap_view=False,
)
getview_time += timer()
if i == self.worker_num:
# arr_parts[v].append( (part, sl) )
# arr_parts[v].append( (part, shardview.array_to_view(part, sl)) )
arrview_time -= timer()
arr_parts[v].append(
(
shardview.clean_range(part),
shardview.array_to_view(part, sl),
)
)
arrview_time += timer()
dprint(
2,
"Worker",
self.worker_num,
"Keeping local part",
part,
sl,
info[1],
)
else:
# deferred send data to worker i
# self.comm_queues[i].put( (uuid, v, part, sl) )
if i not in to_send:
to_send[i] = []
copy_time -= timer()
# to_send[i].append( (v, part, sl) )
to_send[i].append(
(v, part, sl.copy())
) # pickling a slice is slower than copy+pickle !!
copy_time += timer()
dprint(
2,
"Worker",
self.worker_num,
"Sending to worker",
i,
part,
sl,
info[1],
)
times.append(timer())
# actual sends
for (i, v) in to_send.items():
self.comm_queues[i].put((uuid, v, self.worker_num))
expected_parts = len(
from_set
) # since messages are coalesced, expect 1 from each node sending to us
times.append(timer())
othervars = {v: pickle.loads(val) for (v, val) in pickledvars}
# Convert 0d arrays to scalars since 0d support may otherwise be spotty in Numba.
for k, v in othervars.items():
if isinstance(v, np.ndarray) and v.shape == (): # 0d array
# convert to scalar
othervars[k] = v.item()
times.append(timer())
# Receive data from other workers
# for i in range(expected_parts):
# try:
# _, v, part, sl = self.comm_queues[self.worker_num].get(gfilter=lambda x: x[0]==uuid, timeout=5)
# arr_parts[v].append( (part, sl) )
# except:
# print("some exception", sys.exc_info()[0])
# assert(0)
msgs = []
while expected_parts > 0:
m = self.comm_queues[self.worker_num].multi_get(
expected_parts,
gfilter=lambda x: x[0] == uuid,
timeout=5,
print_times=(ntiming >= 1 and self.worker_num == timing_debug_worker),
msginfo=lambda x: "[from " + str(x[2]) + "]",
)
msgs += m
expected_parts -= len(m)
if expected_parts > 0:
print("Still waiting for", expected_parts, "items")
# for _,v,part,sl in msgs:
# arr_parts[v].append( (part, sl) )
for _, l, _ in msgs:
for v, part, sl in l:
# arr_parts[v].append( (part, sl) )
# arr_parts[v].append( (part, shardview.array_to_view(part, sl)) )
arrview_time -= timer()
arr_parts[v].append(
(shardview.clean_range(part), shardview.array_to_view(part, sl))
)
arrview_time += timer()
times.append(timer())
# Construct set of ranges and array parts to use in each
vparts = numba.typed.List()
for _, pl in arr_parts.items():
for vpart, _ in pl:
vparts.append(vpart)
ranges = shardview.get_range_splits_list(vparts)
times.append(timer())
rangedvars = [{} | |
"""Plots predictors on full NARR grid."""
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
import matplotlib.colors
import matplotlib.pyplot as pyplot
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import time_periods
from gewittergefahr.gg_utils import nwp_model_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.plotting import plotting_utils
from gewittergefahr.plotting import nwp_plotting
from gewittergefahr.plotting import imagemagick_utils
from generalexam.ge_io import processed_narr_io
from generalexam.ge_io import fronts_io
from generalexam.ge_io import wpc_bulletin_io
from generalexam.ge_utils import front_utils
from generalexam.ge_utils import utils
from generalexam.plotting import front_plotting
# Time formats: DEFAULT_TIME_FORMAT is used for input args and file names;
# NICE_TIME_FORMAT is used for figure titles.
DEFAULT_TIME_FORMAT = '%Y%m%d%H'
NICE_TIME_FORMAT = '%H00 UTC %-d %b %Y'
# NARR data are available every 3 hours (10800 seconds).
NARR_TIME_INTERVAL_SEC = 10800
# Unit conversions.
KG_TO_GRAMS = 1000.
ZERO_CELSIUS_IN_KELVINS = 273.15
# Thermal fields that may be plotted as the background colour field.
VALID_THERMAL_FIELD_NAMES = [
    processed_narr_io.TEMPERATURE_NAME,
    processed_narr_io.SPECIFIC_HUMIDITY_NAME,
    processed_narr_io.WET_BULB_THETA_NAME
]
# Wind components are plotted as barbs, not colours.
WIND_FIELD_NAMES = [
    processed_narr_io.U_WIND_GRID_RELATIVE_NAME,
    processed_narr_io.V_WIND_GRID_RELATIVE_NAME
]
# Lat/long limits of the plotting domain (degrees; longitudes are 0-360).
MIN_LATITUDE_DEG = 20.
MIN_LONGITUDE_DEG = 220.
MAX_LATITUDE_DEG = 80.
MAX_LONGITUDE_DEG = 290.
PARALLEL_SPACING_DEG = 10.
MERIDIAN_SPACING_DEG = 20.
# Front/border appearance.
FRONT_LINE_WIDTH = 8
BORDER_COLOUR = numpy.full(3, 0.)
WARM_FRONT_COLOUR = numpy.array([30, 120, 180], dtype=float) / 255
COLD_FRONT_COLOUR = numpy.array([166, 206, 227], dtype=float) / 255
# Wind barbs are drawn in a single grey colour; the one-colour colour map
# (with matching under/over colours) forces every wind speed to that colour.
WIND_COLOUR = numpy.full(3, 152. / 255)
MIN_COLOUR_WIND_SPEED_KT = -1.
MAX_COLOUR_WIND_SPEED_KT = 0.
WIND_COLOUR_MAP_OBJECT = matplotlib.colors.ListedColormap([WIND_COLOUR])
WIND_COLOUR_MAP_OBJECT.set_under(WIND_COLOUR)
WIND_COLOUR_MAP_OBJECT.set_over(WIND_COLOUR)
WIND_BARB_LENGTH = 8
EMPTY_WIND_BARB_RADIUS = 0.1
# Plot one wind barb for every k-th row and column, to avoid clutter.
PLOT_EVERY_KTH_WIND_BARB = 8
# Appearance of "H"/"L" labels for high/low-pressure centers.
PRESSURE_SYSTEM_FONT_SIZE = 50
PRESSURE_SYSTEM_COLOUR = numpy.full(3, 0.)
# FIGURE_RESOLUTION_DPI = 300
FIGURE_RESOLUTION_DPI = 600
# Names of command-line input arguments.
NARR_DIR_ARG_NAME = 'input_narr_dir_name'
FRONT_DIR_ARG_NAME = 'input_front_line_dir_name'
BULLETIN_DIR_ARG_NAME = 'input_wpc_bulletin_dir_name'
FIRST_TIME_ARG_NAME = 'first_time_string'
LAST_TIME_ARG_NAME = 'last_time_string'
PRESSURE_LEVEL_ARG_NAME = 'pressure_level_mb'
THERMAL_FIELD_ARG_NAME = 'thermal_field_name'
THERMAL_CMAP_ARG_NAME = 'thermal_colour_map_name'
MAX_PERCENTILE_ARG_NAME = 'max_thermal_prctile_for_colours'
FIRST_LETTER_ARG_NAME = 'first_letter_label'
LETTER_INTERVAL_ARG_NAME = 'letter_interval'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'
# Help strings for command-line input arguments.
NARR_DIR_HELP_STRING = (
    'Name of top-level directory with NARR files (containing predictors). '
    'Files therein will be found by `processed_narr_io.find_file_for_one_time` '
    'and read by `processed_narr_io.read_fields_from_file`.')
FRONT_DIR_HELP_STRING = (
    'Name of top-level directory with fronts (represented as polylines). Files'
    ' therein will be found by `fronts_io.find_file_for_one_time` and read by '
    '`fronts_io.read_polylines_from_file`.')
BULLETIN_DIR_HELP_STRING = (
    'Name of top-level directory with WPC bulletins. Files therein will be '
    'found by `wpc_bulletin_io.find_file` and read by '
    '`wpc_bulletin_io.read_highs_and_lows`. If you do not want to plot high- '
    'and low-pressure centers, leave this argument alone.')
TIME_HELP_STRING = (
    'Time (format "yyyymmddHH"). Predictors will be plotted for all NARR times'
    ' in the period `{0:s}`...`{1:s}`.'
).format(FIRST_TIME_ARG_NAME, LAST_TIME_ARG_NAME)
PRESSURE_LEVEL_HELP_STRING = 'Pressure level (millibars) for NARR predictors.'
THERMAL_FIELD_HELP_STRING = (
    'Name of thermal field (to be plotted with fronts and wind barbs). Valid '
    'options are listed below.\n{0:s}'
).format(str(VALID_THERMAL_FIELD_NAMES))
THERMAL_CMAP_HELP_STRING = (
    'Name of colour map for thermal field. For example, if name is "YlGn", the'
    ' colour map used will be `pyplot.cm.YlGn`. This argument supports only '
    'pyplot colour maps.')
MAX_PERCENTILE_HELP_STRING = (
    'Determines min/max values in colour scheme for thermal field. Max value '
    'at time t will be [q]th percentile of thermal field at time t, where '
    'q = `{0:s}`. Minimum value will be [100 - q]th percentile.'
).format(MAX_PERCENTILE_ARG_NAME)
FIRST_LETTER_HELP_STRING = (
    'Letter label for first time step. If this is "a", the label "(a)" will be'
    ' printed at the top left of the figure. If you do not want labels, leave '
    'this argument alone.')
LETTER_INTERVAL_HELP_STRING = (
    'Interval between letter labels for successive time steps.')
OUTPUT_DIR_HELP_STRING = (
    'Name of output directory. Figures will be saved here.')
# Default top-level input directories.
TOP_NARR_DIR_NAME_DEFAULT = '/condo/swatwork/ralager/narr_data/processed'
TOP_FRONT_DIR_NAME_DEFAULT = '/condo/swatwork/ralager/fronts/polylines'
# TOP_BULLETIN_DIR_NAME_DEFAULT = '/condo/swatwork/ralager/wpc_bulletins/hires'
# Command-line interface.
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
    '--' + NARR_DIR_ARG_NAME, type=str, required=False,
    default=TOP_NARR_DIR_NAME_DEFAULT, help=NARR_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + FRONT_DIR_ARG_NAME, type=str, required=False,
    default=TOP_FRONT_DIR_NAME_DEFAULT, help=FRONT_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + BULLETIN_DIR_ARG_NAME, type=str, required=False, default='',
    help=BULLETIN_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + FIRST_TIME_ARG_NAME, type=str, required=True, help=TIME_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + LAST_TIME_ARG_NAME, type=str, required=True, help=TIME_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + PRESSURE_LEVEL_ARG_NAME, type=int, required=False, default=1000,
    help=PRESSURE_LEVEL_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + THERMAL_FIELD_ARG_NAME, type=str, required=False,
    default=processed_narr_io.WET_BULB_THETA_NAME,
    help=THERMAL_FIELD_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + THERMAL_CMAP_ARG_NAME, type=str, required=False, default='YlOrRd',
    help=THERMAL_CMAP_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + MAX_PERCENTILE_ARG_NAME, type=float, required=False, default=99.,
    help=MAX_PERCENTILE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + FIRST_LETTER_ARG_NAME, type=str, required=False, default='',
    help=FIRST_LETTER_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + LETTER_INTERVAL_ARG_NAME, type=int, required=False, default=3,
    help=LETTER_INTERVAL_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
    help=OUTPUT_DIR_HELP_STRING)
def _plot_one_time(
        predictor_matrix, predictor_names, front_polyline_table, high_low_table,
        thermal_colour_map_object, max_thermal_prctile_for_colours,
        narr_row_limits, narr_column_limits, title_string, letter_label,
        output_file_name):
    """Plots predictors at one time.

    M = number of rows in grid
    N = number of columns in grid
    C = number of channels (predictors)

    :param predictor_matrix: M-by-N-by-C numpy array of predictor values.
    :param predictor_names: length-C list of predictor names.
    :param front_polyline_table: pandas DataFrame returned by
        `fronts_io.read_polylines_from_file`.
    :param high_low_table: pandas DataFrame returned by
        `wpc_bulletin_io.read_highs_and_lows`.  If None, pressure centers are
        not plotted.
    :param thermal_colour_map_object: See documentation at top of file.
    :param max_thermal_prctile_for_colours: Same.
    :param narr_row_limits: length-2 numpy array, indicating the first and last
        NARR rows in `predictor_matrix`.  If narr_row_limits = [i, k],
        `predictor_matrix` spans rows i...k of the full NARR grid.
    :param narr_column_limits: Same but for columns.
    :param title_string: Title (will be placed above figure).
    :param letter_label: Letter label.  If this is "a", the label "(a)" will be
        printed at the top left of the figure.  If None, no label is printed.
    :param output_file_name: Path to output file (figure will be saved here).
    """

    _, axes_object, basemap_object = nwp_plotting.init_basemap(
        model_name=nwp_model_utils.NARR_MODEL_NAME,
        first_row_in_full_grid=narr_row_limits[0],
        last_row_in_full_grid=narr_row_limits[1],
        first_column_in_full_grid=narr_column_limits[0],
        last_column_in_full_grid=narr_column_limits[1]
    )

    plotting_utils.plot_coastlines(
        basemap_object=basemap_object, axes_object=axes_object,
        line_colour=BORDER_COLOUR
    )
    plotting_utils.plot_countries(
        basemap_object=basemap_object, axes_object=axes_object,
        line_colour=BORDER_COLOUR
    )
    plotting_utils.plot_states_and_provinces(
        basemap_object=basemap_object, axes_object=axes_object,
        line_colour=BORDER_COLOUR
    )
    plotting_utils.plot_parallels(
        basemap_object=basemap_object, axes_object=axes_object,
        bottom_left_lat_deg=-90., upper_right_lat_deg=90.,
        parallel_spacing_deg=PARALLEL_SPACING_DEG
    )
    plotting_utils.plot_meridians(
        basemap_object=basemap_object, axes_object=axes_object,
        bottom_left_lng_deg=0., upper_right_lng_deg=360.,
        meridian_spacing_deg=MERIDIAN_SPACING_DEG
    )

    # Plot each non-wind predictor as a filled colour map; wind components are
    # plotted later as barbs.
    num_predictors = len(predictor_names)
    for j in range(num_predictors):
        if predictor_names[j] in WIND_FIELD_NAMES:
            continue

        # Colour limits are symmetric percentiles of the field at this time.
        min_colour_value = numpy.percentile(
            predictor_matrix[..., j], 100. - max_thermal_prctile_for_colours)
        max_colour_value = numpy.percentile(
            predictor_matrix[..., j], max_thermal_prctile_for_colours)

        nwp_plotting.plot_subgrid(
            field_matrix=predictor_matrix[..., j],
            model_name=nwp_model_utils.NARR_MODEL_NAME, axes_object=axes_object,
            basemap_object=basemap_object, colour_map=thermal_colour_map_object,
            min_value_in_colour_map=min_colour_value,
            max_value_in_colour_map=max_colour_value,
            first_row_in_full_grid=narr_row_limits[0],
            first_column_in_full_grid=narr_column_limits[0]
        )

        plotting_utils.add_linear_colour_bar(
            axes_object_or_list=axes_object,
            values_to_colour=predictor_matrix[..., j],
            colour_map=thermal_colour_map_object, colour_min=min_colour_value,
            colour_max=max_colour_value, orientation='horizontal',
            extend_min=True, extend_max=True, fraction_of_axis_length=0.9)

    u_wind_index = predictor_names.index(
        processed_narr_io.U_WIND_GRID_RELATIVE_NAME)
    v_wind_index = predictor_names.index(
        processed_narr_io.V_WIND_GRID_RELATIVE_NAME)

    nwp_plotting.plot_wind_barbs_on_subgrid(
        u_wind_matrix_m_s01=predictor_matrix[..., u_wind_index],
        v_wind_matrix_m_s01=predictor_matrix[..., v_wind_index],
        model_name=nwp_model_utils.NARR_MODEL_NAME,
        axes_object=axes_object, basemap_object=basemap_object,
        first_row_in_full_grid=narr_row_limits[0],
        first_column_in_full_grid=narr_column_limits[0],
        plot_every_k_rows=PLOT_EVERY_KTH_WIND_BARB,
        plot_every_k_columns=PLOT_EVERY_KTH_WIND_BARB,
        barb_length=WIND_BARB_LENGTH, empty_barb_radius=EMPTY_WIND_BARB_RADIUS,
        fill_empty_barb=False, colour_map=WIND_COLOUR_MAP_OBJECT,
        colour_minimum_kt=MIN_COLOUR_WIND_SPEED_KT,
        colour_maximum_kt=MAX_COLOUR_WIND_SPEED_KT)

    # Plot "H"/"L" labels for high/low-pressure centers, if provided.
    if high_low_table is None:
        num_pressure_systems = 0
    else:
        num_pressure_systems = len(high_low_table.index)

    for i in range(num_pressure_systems):
        this_system_type_string = high_low_table[
            wpc_bulletin_io.SYSTEM_TYPE_COLUMN].values[i]

        if this_system_type_string == wpc_bulletin_io.HIGH_PRESSURE_STRING:
            this_string = 'H'
        else:
            this_string = 'L'

        this_x_coord_metres, this_y_coord_metres = basemap_object(
            high_low_table[wpc_bulletin_io.LONGITUDE_COLUMN].values[i],
            high_low_table[wpc_bulletin_io.LATITUDE_COLUMN].values[i]
        )
        axes_object.text(
            this_x_coord_metres, this_y_coord_metres, this_string,
            fontsize=PRESSURE_SYSTEM_FONT_SIZE, color=PRESSURE_SYSTEM_COLOUR,
            fontweight='bold', horizontalalignment='center',
            verticalalignment='center')

    # Plot warm and cold fronts as polylines with markers.
    num_fronts = len(front_polyline_table.index)

    for i in range(num_fronts):
        this_front_type_string = front_polyline_table[
            front_utils.FRONT_TYPE_COLUMN].values[i]
        if this_front_type_string == front_utils.WARM_FRONT_STRING_ID:
            this_colour = WARM_FRONT_COLOUR
        else:
            this_colour = COLD_FRONT_COLOUR

        front_plotting.plot_front_with_markers(
            line_latitudes_deg=front_polyline_table[
                front_utils.LATITUDES_COLUMN].values[i],
            line_longitudes_deg=front_polyline_table[
                front_utils.LONGITUDES_COLUMN].values[i],
            axes_object=axes_object, basemap_object=basemap_object,
            front_type_string=front_polyline_table[
                front_utils.FRONT_TYPE_COLUMN].values[i],
            marker_colour=this_colour)

    pyplot.title(title_string)
    if letter_label is not None:
        plotting_utils.annotate_axes(
            axes_object=axes_object,
            annotation_string='({0:s})'.format(letter_label)
        )

    # Bug fix: parenthesized print (the Python-2-only statement form breaks
    # on Python 3; with a single argument this form behaves identically on 2).
    print('Saving figure to: "{0:s}"...'.format(output_file_name))
    pyplot.savefig(output_file_name, dpi=FIGURE_RESOLUTION_DPI)
    pyplot.close()

    imagemagick_utils.trim_whitespace(input_file_name=output_file_name,
                                      output_file_name=output_file_name)
def _run(top_narr_dir_name, top_front_line_dir_name, top_wpc_bulletin_dir_name,
         first_time_string, last_time_string, pressure_level_mb,
         thermal_field_name, thermal_colour_map_name,
         max_thermal_prctile_for_colours, first_letter_label, letter_interval,
         output_dir_name):
    """Plots predictors on full NARR grid.

    This is effectively the main method.

    :param top_narr_dir_name: See documentation at top of file.
    :param top_front_line_dir_name: Same.
    :param top_wpc_bulletin_dir_name: Same.
    :param first_time_string: Same.
    :param last_time_string: Same.
    :param pressure_level_mb: Same.
    :param thermal_field_name: Same.
    :param thermal_colour_map_name: Same.
    :param max_thermal_prctile_for_colours: Same.
    :param first_letter_label: Same.
    :param letter_interval: Same.
    :param output_dir_name: Same.
    :raises: ValueError: if
        `thermal_field_name not in VALID_THERMAL_FIELD_NAMES`.
    """

    # Check input args.  Empty/"None" strings mean "feature disabled".
    if top_wpc_bulletin_dir_name in ['', 'None']:
        top_wpc_bulletin_dir_name = None
    if first_letter_label in ['', 'None']:
        first_letter_label = None

    if thermal_field_name not in VALID_THERMAL_FIELD_NAMES:
        error_string = (
            '\n{0:s}\nValid thermal fields (listed above) do not include '
            '"{1:s}".'
        ).format(str(VALID_THERMAL_FIELD_NAMES), thermal_field_name)

        raise ValueError(error_string)

    thermal_colour_map_object = pyplot.cm.get_cmap(thermal_colour_map_name)
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name)

    first_time_unix_sec = time_conversion.string_to_unix_sec(
        first_time_string, DEFAULT_TIME_FORMAT)
    last_time_unix_sec = time_conversion.string_to_unix_sec(
        last_time_string, DEFAULT_TIME_FORMAT)
    valid_times_unix_sec = time_periods.range_and_interval_to_list(
        start_time_unix_sec=first_time_unix_sec,
        end_time_unix_sec=last_time_unix_sec,
        time_interval_sec=NARR_TIME_INTERVAL_SEC, include_endpoint=True)

    # Read metadata for NARR grid.
    narr_latitude_matrix_deg, narr_longitude_matrix_deg = (
        nwp_model_utils.get_latlng_grid_point_matrices(
            model_name=nwp_model_utils.NARR_MODEL_NAME)
    )
    narr_rotation_cos_matrix, narr_rotation_sin_matrix = (
        nwp_model_utils.get_wind_rotation_angles(
            latitudes_deg=narr_latitude_matrix_deg,
            longitudes_deg=narr_longitude_matrix_deg,
            model_name=nwp_model_utils.NARR_MODEL_NAME)
    )

    narr_row_limits, narr_column_limits = (
        nwp_plotting.latlng_limits_to_rowcol_limits(
            min_latitude_deg=MIN_LATITUDE_DEG,
            max_latitude_deg=MAX_LATITUDE_DEG,
            min_longitude_deg=MIN_LONGITUDE_DEG,
            max_longitude_deg=MAX_LONGITUDE_DEG,
            model_name=nwp_model_utils.NARR_MODEL_NAME)
    )

    # Subset the rotation matrices to the plotting domain.
    narr_rotation_cos_matrix = narr_rotation_cos_matrix[
        narr_row_limits[0]:(narr_row_limits[1] + 1),
        narr_column_limits[0]:(narr_column_limits[1] + 1)
    ]
    narr_rotation_sin_matrix = narr_rotation_sin_matrix[
        narr_row_limits[0]:(narr_row_limits[1] + 1),
        narr_column_limits[0]:(narr_column_limits[1] + 1)
    ]

    # Do plotting.
    narr_field_names = [
        processed_narr_io.U_WIND_GRID_RELATIVE_NAME,
        processed_narr_io.V_WIND_GRID_RELATIVE_NAME,
        thermal_field_name
    ]

    this_letter_label = None

    for this_time_unix_sec in valid_times_unix_sec:
        this_file_name = fronts_io.find_file_for_one_time(
            top_directory_name=top_front_line_dir_name,
            file_type=fronts_io.POLYLINE_FILE_TYPE,
            valid_time_unix_sec=this_time_unix_sec)

        # Bug fix (here and below): parenthesized print — the Python-2-only
        # statement form breaks on Python 3; with a single argument this form
        # behaves identically on 2.
        print('Reading data from: "{0:s}"...'.format(this_file_name))
        this_polyline_table = fronts_io.read_polylines_from_file(this_file_name)

        if top_wpc_bulletin_dir_name is None:
            this_high_low_table = None
        else:
            this_file_name = wpc_bulletin_io.find_file(
                top_directory_name=top_wpc_bulletin_dir_name,
                valid_time_unix_sec=this_time_unix_sec)

            print('Reading data from: "{0:s}"...'.format(this_file_name))
            this_high_low_table = wpc_bulletin_io.read_highs_and_lows(
                this_file_name)

        # Stack all predictor fields along the channel axis.
        this_predictor_matrix = None

        for this_field_name in narr_field_names:
            this_file_name = processed_narr_io.find_file_for_one_time(
                top_directory_name=top_narr_dir_name,
                field_name=this_field_name,
                pressure_level_mb=pressure_level_mb,
                valid_time_unix_sec=this_time_unix_sec)

            print('Reading data from: "{0:s}"...'.format(this_file_name))
            this_field_matrix = processed_narr_io.read_fields_from_file(
                this_file_name
            )[0][0, ...]

            this_field_matrix = utils.fill_nans(this_field_matrix)
            this_field_matrix = this_field_matrix[
                narr_row_limits[0]:(narr_row_limits[1] + 1),
                narr_column_limits[0]:(narr_column_limits[1] + 1)
            ]

            # Convert units for display (Kelvins -> Celsius, kg/kg -> g/kg).
            if this_field_name in [processed_narr_io.TEMPERATURE_NAME,
                                   processed_narr_io.WET_BULB_THETA_NAME]:
                this_field_matrix -= ZERO_CELSIUS_IN_KELVINS
            if this_field_name == processed_narr_io.SPECIFIC_HUMIDITY_NAME:
                this_field_matrix = this_field_matrix * KG_TO_GRAMS

            this_field_matrix = numpy.expand_dims(this_field_matrix, axis=-1)
            if this_predictor_matrix is None:
                this_predictor_matrix = this_field_matrix + 0.
            else:
                this_predictor_matrix = numpy.concatenate(
                    (this_predictor_matrix, this_field_matrix), axis=-1)

        u_wind_index = narr_field_names.index(
            processed_narr_io.U_WIND_GRID_RELATIVE_NAME)
        v_wind_index = narr_field_names.index(
            processed_narr_io.V_WIND_GRID_RELATIVE_NAME)

        # Rotate winds from grid-relative to earth-relative for barbs.
        (this_predictor_matrix[..., u_wind_index],
         this_predictor_matrix[..., v_wind_index]
        ) = nwp_model_utils.rotate_winds_to_earth_relative(
            u_winds_grid_relative_m_s01=this_predictor_matrix[
                ..., u_wind_index],
            v_winds_grid_relative_m_s01=this_predictor_matrix[
                ..., v_wind_index],
            rotation_angle_cosines=narr_rotation_cos_matrix,
            rotation_angle_sines=narr_rotation_sin_matrix)

        this_title_string = time_conversion.unix_sec_to_string(
            this_time_unix_sec, NICE_TIME_FORMAT)

        if pressure_level_mb == 1013:
            this_title_string += ' at surface'
        else:
            this_title_string += ' at {0:d} mb'.format(pressure_level_mb)

        this_default_time_string = time_conversion.unix_sec_to_string(
            this_time_unix_sec, DEFAULT_TIME_FORMAT)
        this_output_file_name = '{0:s}/predictors_{1:s}.jpg'.format(
            output_dir_name, this_default_time_string)

        # Advance the panel letter ("a", then "a"+interval, ...).
        if first_letter_label is not None:
            if this_letter_label is None:
                this_letter_label = first_letter_label
            else:
                this_letter_label = chr(
                    ord(this_letter_label) + letter_interval
                )

        _plot_one_time(
            predictor_matrix=this_predictor_matrix,
            predictor_names=narr_field_names,
            front_polyline_table=this_polyline_table,
            high_low_table=this_high_low_table,
            thermal_colour_map_object=thermal_colour_map_object,
            max_thermal_prctile_for_colours=max_thermal_prctile_for_colours,
            narr_row_limits=narr_row_limits,
            narr_column_limits=narr_column_limits,
            title_string=this_title_string, letter_label=this_letter_label,
            output_file_name=this_output_file_name)

        print('\n')
if __name__ | |
<reponame>JinGyeSetBirdsFree/FudanOCR
import os
import cv2
import numpy as np
from math import ceil
import matplotlib.pyplot as plt
from scipy import signal, misc
class BlurImage(object):
    """Applies motion-blur PSF kernels to square RGB images."""

    def blur_image_path(self, img_path, PSFs=None, part=None, path_to_save=None, show=False):
        """Load an image from disk and blur it with the given kernels.

        :param img_path: path to square, RGB image.
        :param PSFs: array of Kernels.
        :param part: int number of kernel to use.
        :param path_to_save: folder to save results.
        :param show: if True, display the blurred results.
        :return: list of blurred images (see `blur_image`).
        """
        if os.path.isfile(img_path):
            # Bug fix: the original referenced the undefined names
            # `image_path` and `img`, passed `self` twice, and discarded
            # the result.
            original = misc.imread(img_path)
            return self.blur_image(original, PSFs=PSFs, part=part,
                                   path_to_save=path_to_save, show=show)
        else:
            raise Exception('Not correct path to image.')

    def blur_image(self, img, PSFs=None, part=None, path_to_save=None, show=False):
        """Blur `img` with one kernel (`part`) or all kernels.

        :param img: square, RGB image.
        :param PSFs: array of Kernels; generated on the fly if None.
        :param part: int number of kernel to use; all kernels if None.
        :param path_to_save: folder to save results.
        :param show: if True, display the blurred results.
        :return: list of blurred BGR float32 images, or None on invalid input.
        """
        img_shape = img.shape
        if len(img_shape) < 3:
            # raise Exception('We support only RGB images yet.')
            print('We support only RGB images yet.')
            return None
        elif img_shape[0] != img_shape[1]:
            # raise Exception('We support only square images yet.')
            print('We support only square images yet.')
            return None

        if PSFs is None:
            if path_to_save is None:
                PSFs = PSF(canvas=img_shape[0]).fit()
            else:
                PSFs = PSF(canvas=img_shape[0],
                           path_to_save=os.path.join(path_to_save,
                                                     'PSFs.png')).fit(save=True)

        if part is None:
            psf = PSFs
        else:
            psf = [PSFs[part]]

        yN, xN, channel = img_shape
        key, kex = PSFs[0].shape
        delta = yN - key
        if delta < 0:
            print('resolution of image should be higher than kernel')
            return None

        result = []
        # The original duplicated this body for the single-kernel case; one
        # loop over `psf` covers both cases identically.
        for p in psf:
            # Pad the kernel up to the image size and normalize to [0, 1].
            tmp = np.pad(p, delta // 2, 'constant')
            cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
            blured = cv2.normalize(img, img, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,
                                   dtype=cv2.CV_32F)
            # Convolve each colour channel with the kernel.
            for channel_index in range(3):
                blured[:, :, channel_index] = np.array(
                    signal.fftconvolve(blured[:, :, channel_index], tmp, 'same'))
            blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
            blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
            result.append(np.abs(blured))

        if show:
            self.__plot_canvas(result)
        return result

    def __plot_canvas(self, result):
        # Display all blurred variants side by side.
        if len(result) == 0:
            raise Exception('Please run blur_image() method first.')
        else:
            plt.close()
            plt.axis('off')
            fig, axes = plt.subplots(1, len(result), figsize=(10, 10))
            if len(result) > 1:
                for i in range(len(result)):
                    axes[i].imshow(result[i])
            else:
                plt.axis('off')
                plt.imshow(result[0])
            plt.show()
class Trajectory(object):
    def __init__(self, canvas=64, iters=2000, max_len=60, expl=None, path_to_save=None):
        """
        Generates a variety of random motion trajectories in continuous domain as in [Boracchi and Foi 2012]. Each
        trajectory consists of a complex-valued vector determining the discrete positions of a particle following a
        2-D random motion in continuous domain. The particle has an initial velocity vector which, at each iteration,
        is affected by a Gaussian perturbation and by a deterministic inertial component, directed toward the
        previous particle position. In addition, with a small probability, an impulsive (abrupt) perturbation aiming
        at inverting the particle velocity may arise, mimicking a sudden movement that occurs when the user presses
        the camera button or tries to compensate the camera shake. At each step, the velocity is normalized to
        guarantee that trajectories corresponding to equal exposures have the same length. Each perturbation (
        Gaussian, inertial, and impulsive) is ruled by its own parameter. Rectilinear Blur as in [Boracchi and Foi
        2011] can be obtained by setting anxiety to 0 (when no impulsive changes occur).
        :param canvas: size of domain where our trajectory is defined.
        :param iters: number of iterations for definition of our trajectory.
        :param max_len: maximum length of our trajectory.
        :param expl: this param helps to define probability of big shake. Recommended expl = 0.005.
        :param path_to_save: where to save if you need.
        """
        self.canvas = canvas
        self.iters = iters
        self.max_len = max_len
        if expl is None:
            self.expl = 0.1 * np.random.uniform(0, 1)
        else:
            self.expl = expl
        # Bug fix: always define the attribute.  Previously it was only set
        # when a path was given, so __plot_canvas() raised AttributeError
        # instead of its intended, descriptive Exception.
        self.path_to_save = path_to_save
        self.tot_length = None
        self.big_expl_count = None
        self.x = None

    def fit(self, show=False, save=False):
        """
        Generate motion; you can save or plot it.  Coordinates of the motion
        are stored in the `x` property; you can also find the properties
        `tot_length` and `big_expl_count`.
        :param show: default False.
        :param save: default False.
        :return: self (vector of motion is in self.x).
        """
        tot_length = 0
        big_expl_count = 0
        # how strongly to be pulled back toward the previous position
        # TODO: I can change this parameter for 0.1 and make kernel at all image
        centripetal = 0.7 * np.random.uniform(0, 1)
        # probability of big shake
        prob_big_shake = 0.2 * np.random.uniform(0, 1)
        # term determining, at each sample, the random component of the new direction
        gaussian_shake = 10 * np.random.uniform(0, 1)
        init_angle = 360 * np.random.uniform(0, 1)

        img_v0 = np.sin(np.deg2rad(init_angle))
        real_v0 = np.cos(np.deg2rad(init_angle))
        v0 = complex(real=real_v0, imag=img_v0)
        v = v0 * self.max_len / (self.iters - 1)

        if self.expl > 0:
            v = v0 * self.expl

        x = np.array([complex(real=0, imag=0)] * (self.iters))

        for t in range(0, self.iters - 1):
            # With a small probability, invert the velocity (big shake).
            if np.random.uniform() < prob_big_shake * self.expl:
                next_direction = 2 * v * (np.exp(complex(real=0, imag=np.pi + (np.random.uniform() - 0.5))))
                big_expl_count += 1
            else:
                next_direction = 0

            dv = next_direction + self.expl * (
                gaussian_shake * complex(real=np.random.randn(), imag=np.random.randn()) - centripetal * x[t]) * (
                self.max_len / (self.iters - 1))
            v += dv
            # Renormalize so equal exposures have the same trajectory length.
            v = (v / float(np.abs(v))) * (self.max_len / float((self.iters - 1)))
            x[t + 1] = x[t] + v
            tot_length = tot_length + abs(x[t + 1] - x[t])

        # center the motion within the canvas
        x += complex(real=-np.min(x.real), imag=-np.min(x.imag))
        x = x - complex(real=x[0].real % 1., imag=x[0].imag % 1.) + complex(1, 1)
        x += complex(real=ceil((self.canvas - max(x.real)) / 2), imag=ceil((self.canvas - max(x.imag)) / 2))

        self.tot_length = tot_length
        self.big_expl_count = big_expl_count
        self.x = x

        if show or save:
            self.__plot_canvas(show, save)
        return self

    def __plot_canvas(self, show, save):
        if self.x is None:
            raise Exception("Please run fit() method first")
        else:
            plt.close()
            plt.plot(self.x.real, self.x.imag, '-', color='blue')
            plt.xlim((0, self.canvas))
            plt.ylim((0, self.canvas))
            if show and save:
                plt.savefig(self.path_to_save)
                plt.show()
            elif save:
                if self.path_to_save is None:
                    raise Exception('Please create Trajectory instance with path_to_save')
                plt.savefig(self.path_to_save)
            elif show:
                plt.show()
class PSF(object):
def __init__(self, canvas=None, trajectory=None, fraction=None, path_to_save=None):
if canvas is None:
self.canvas = (canvas, canvas)
else:
self.canvas = (canvas, canvas)
if trajectory is None:
self.trajectory = Trajectory(canvas=canvas, expl=0.005).fit(show=False, save=False)
else:
self.trajectory = trajectory.x
if fraction is None:
self.fraction = [1/100, 1/10, 1/2, 1]
else:
self.fraction = fraction
self.path_to_save = path_to_save
self.PSFnumber = len(self.fraction)
self.iters = len(self.trajectory)
self.PSFs = []
    def fit(self, show=False, save=False):
        """Rasterize the trajectory into one PSF kernel per exposure fraction.

        NOTE: the accumulator `PSF` is shared across fractions, so each
        successive kernel contains all samples of the previous ones plus its
        own time slice (cumulative exposure).

        :param show: if True, plot the kernels.
        :param save: if True, save the plot.
        :return: list of 2-D numpy arrays (self.PSFs).
        """
        PSF = np.zeros(self.canvas)

        # Bilinear "hat" weights used to splat each continuous trajectory
        # sample onto the four surrounding integer pixels.
        triangle_fun = lambda x: np.maximum(0, (1 - np.abs(x)))
        triangle_fun_prod = lambda x, y: np.multiply(triangle_fun(x), triangle_fun(y))
        for j in range(self.PSFnumber):
            if j == 0:
                prevT = 0
            else:
                prevT = self.fraction[j - 1]

            for t in range(len(self.trajectory)):
                # print(j, t)
                # t_proportion = fraction of sample t that falls inside the
                # exposure window (prevT, fraction[j]]: full weight, partial
                # weight at either edge, or zero outside the window.
                if (self.fraction[j] * self.iters >= t) and (prevT * self.iters < t - 1):
                    t_proportion = 1
                elif (self.fraction[j] * self.iters >= t - 1) and (prevT * self.iters < t - 1):
                    t_proportion = self.fraction[j] * self.iters - (t - 1)
                elif (self.fraction[j] * self.iters >= t) and (prevT * self.iters < t):
                    t_proportion = t - (prevT * self.iters)
                elif (self.fraction[j] * self.iters >= t - 1) and (prevT * self.iters < t):
                    t_proportion = (self.fraction[j] - prevT) * self.iters
                else:
                    t_proportion = 0

                # Clamp the sample's integer pixel coordinates to the canvas;
                # (m, M) bracket the continuous position in each axis.
                # NOTE(review): np.math is deprecated in modern NumPy;
                # math.floor would be the equivalent replacement.
                m2 = int(np.minimum(self.canvas[1] - 1, np.maximum(1, np.math.floor(self.trajectory[t].real))))
                M2 = int(m2 + 1)
                m1 = int(np.minimum(self.canvas[0] - 1, np.maximum(1, np.math.floor(self.trajectory[t].imag))))
                M1 = int(m1 + 1)

                # Splat the weighted sample onto its four neighbouring pixels.
                PSF[m1, m2] += t_proportion * triangle_fun_prod(
                    self.trajectory[t].real - m2, self.trajectory[t].imag - m1
                )
                PSF[m1, M2] += t_proportion * triangle_fun_prod(
                    self.trajectory[t].real - M2, self.trajectory[t].imag - m1
                )
                PSF[M1, m2] += t_proportion * triangle_fun_prod(
                    self.trajectory[t].real - m2, self.trajectory[t].imag - M1
                )
                PSF[M1, M2] += t_proportion * triangle_fun_prod(
                    self.trajectory[t].real - M2, self.trajectory[t].imag - M1
                )

            self.PSFs.append(PSF / (self.iters))
        if show or save:
            self.__plot_canvas(show, save)

        return self.PSFs
def __plot_canvas(self, show, save):
if len(self.PSFs) == 0:
raise Exception("Please run fit() method first.")
else:
| |
profile.get('cache-insert-age-header', 'disabled')
if age_header == 'enabled':
cache_config['age_header'] = True
else:
cache_config['age_header'] = False
cache_config['enabled'] = True
cache_config['default_expire'] = \
profile.get('cache-max-age', final.DEFAULT_CACHE_MAX_AGE)
max_entities = profile.get('cache-max-entries', 0)
cache_config['max_cache_size'] = \
(int(max_entities) * int(cache_config['max_object_size']))
exclude_uri = profile.get("cache-uri-exclude", None)
include_uri = profile.get("cache-uri-include", None)
if exclude_uri and isinstance(exclude_uri, dict):
exclude_uri = exclude_uri.keys() + exclude_uri.values()
if None in exclude_uri:
exclude_uri.remove(None)
cache_config['mime_types_black_list'] = exclude_uri
if include_uri and isinstance(include_uri, dict):
include_uri = include_uri.keys() + include_uri.values()
if None in include_uri:
include_uri.remove(None)
cache_config['mime_types_list'] = include_uri
http_profile = dict()
http_profile["cache_config"] = cache_config
app_profile["http_profile"] = http_profile
# code to merge application profile count.
if self.object_merge_check:
conv_utils.update_skip_duplicates(
app_profile, avi_config['ApplicationProfile'],
'app_profile', converted_objs, name, default_profile_name,
merge_object_mapping, profile_type, self.prefix,
sys_dict['ApplicationProfile'])
self.app_count += 1
else:
converted_objs.append({'app_profile': app_profile})
avi_config['ApplicationProfile'].append(app_profile)
elif profile_type == 'http-compression':
supported_attr = self.supported_hc
u_ignore = user_ignore.get('http-compression', [])
na_list = self.na_hc
indirect = self.indirect_hc
skipped = [attr for attr in profile.keys()
if attr not in supported_attr]
app_profile = dict()
app_profile['name'] = name
app_profile['tenant_ref'] = conv_utils.get_object_ref(
tenant, 'tenant')
app_profile['type'] = 'APPLICATION_PROFILE_TYPE_HTTP'
app_profile['description'] = profile.get('description', None)
compression_profile = dict()
compression_profile["type"] = "AUTO_COMPRESSION"
compression_profile["compression"] = True
encoding = profile.get("keep-accept-encoding", "disable")
if encoding == "disable":
encoding = True
else:
encoding = False
compression_profile["remove_accept_encoding_header"] = encoding
content_type = profile.get("content-type-include", "")
ct_exclude = profile.get("content-type-exclude", "")
ct_exclude = None if ct_exclude == 'none' else ct_exclude
http_profile = dict()
if content_type:
content_type = content_type.keys()+content_type.values()
elif ct_exclude:
content_type = final.DEFAULT_CONTENT_TYPE
if ct_exclude:
ct_exclude = ct_exclude.keys() + ct_exclude.values()
content_type = [ct for ct in content_type
if ct not in ct_exclude]
if content_type:
sg_obj = conv_utils.get_content_string_group(
name, content_type, tenant_ref)
avi_config['StringGroup'].append(sg_obj)
converted_objs.append({'string_group': sg_obj})
cc_ref = name + "-content_type"
cc_ref = conv_utils.get_object_ref(
cc_ref, 'stringgroup', tenant=tenant)
compression_profile["compressible_content_ref"] = cc_ref
http_profile["compression_profile"] = compression_profile
app_profile["http_profile"] = http_profile
# code to merge application profile count.
if self.object_merge_check:
conv_utils.update_skip_duplicates(
app_profile, avi_config['ApplicationProfile'],
'app_profile', converted_objs, name, default_profile_name,
merge_object_mapping, profile_type, self.prefix,
sys_dict['ApplicationProfile'])
self.app_count +=1
else:
converted_objs.append({'app_profile': app_profile})
avi_config['ApplicationProfile'].append(app_profile)
elif profile_type == 'fastl4':
supported_attr = self.supported_l4
indirect = self.indirect_l4
na_list = self.na_l4
u_ignore = user_ignore.get('fastl4', [])
skipped = [attr for attr in profile.keys()
if attr not in supported_attr]
hw_syn_protection = (profile.get("hardware-syn-cookie",
None) == 'enabled')
sw_syn_protection = (profile.get("software-syn-cookie",
None) == 'enabled')
syn_protection = (hw_syn_protection or sw_syn_protection)
timeout = profile.get("idle-timeout", final.MIN_SESSION_TIMEOUT)
if timeout < 60:
timeout = final.MIN_SESSION_TIMEOUT
LOG.warn("idle-timeout for profile: %s is less" % name +
" than minimum, changed to Avis minimum value")
elif timeout > final.MAX_SESSION_TIMEOUT:
timeout = final.MAX_SESSION_TIMEOUT
LOG.warn("idle-timeout for profile: %s is grater" % name +
" than maximum, changed to Avis maximum value")
description = profile.get('description', None)
ntwk_profile = {
"profile": {
"tcp_fast_path_profile": {
"session_idle_timeout": timeout,
"enable_syn_protection": syn_protection
},
"type": "PROTOCOL_TYPE_TCP_FAST_PATH"
},
"name": name,
'tenant_ref': conv_utils.get_object_ref(tenant, 'tenant'),
"description": description
}
app_profile = dict()
app_profile['name'] = name
app_profile['type'] = 'APPLICATION_PROFILE_TYPE_L4'
app_profile['description'] = description
app_profile['tenant_ref'] = conv_utils.get_object_ref(
tenant, 'tenant')
explicit_tracking = profile.get("explicit-flow-migration", None)
l4_profile = {"rl_profile": {
"client_ip_connections_rate_limit": {
"explicit_tracking": (explicit_tracking == 'enabled'),
"action": {
"type": "RL_ACTION_NONE"
}
}}
}
app_profile['dos_rl_profile'] = l4_profile
# code to merge application profile count.
if self.object_merge_check:
conv_utils.update_skip_duplicates(
app_profile, avi_config['ApplicationProfile'],
'app_profile', converted_objs, name, default_profile_name,
merge_object_mapping, profile_type, self.prefix,
sys_dict['ApplicationProfile'])
self.app_count +=1
else:
converted_objs.append({'app_profile': app_profile})
avi_config['ApplicationProfile'].append(app_profile)
# code to get merge count of network profile.
if self.object_merge_check:
conv_utils.update_skip_duplicates(
ntwk_profile, avi_config['NetworkProfile'],
'network_profile', converted_objs, name,
default_profile_name, merge_object_mapping, profile_type,
self.prefix, sys_dict['NetworkProfile'])
self.net_count +=1
else:
converted_objs.append({'network_profile': ntwk_profile})
avi_config['NetworkProfile'].append(ntwk_profile)
elif profile_type == 'fasthttp':
supported_attr = self.supported_fh
indirect = self.indirect_fh
na_list = self.na_fh
u_ignore = user_ignore.get('fasthttp', [])
skipped = [attr for attr in f5_config['profile'][key].keys()
if attr not in supported_attr]
app_profile = dict()
app_profile['name'] = name
app_profile['type'] = 'APPLICATION_PROFILE_TYPE_HTTP'
app_profile['description'] = profile.get('description', None)
http_profile = dict()
insert_xff = profile.get('insert-xforwarded-for', 'disabled')
insert_xff = True if insert_xff == 'enabled' else False
http_profile['x_forwarded_proto_enabled'] = insert_xff
http_profile['xff_enabled'] = insert_xff
header_size = profile.get('max-header-size',
final.DEFAULT_MAX_HEADER)
http_profile['client_max_header_size'] = \
int(header_size)/final.BYTES_IN_KB
app_profile["http_profile"] = http_profile
# code to merge application profile count.
if self.object_merge_check:
conv_utils.update_skip_duplicates(
app_profile, avi_config['ApplicationProfile'],
'app_profile', converted_objs, name, default_profile_name,
merge_object_mapping, profile_type, self.prefix,
sys_dict['ApplicationProfile'])
self.app_count +=1
else:
converted_objs.append({'app_profile': app_profile})
avi_config['ApplicationProfile'].append(app_profile)
receive_window = profile.get("receive-window-size",
final.DEFAULT_RECV_WIN)
if not (final.MIN_RECV_WIN <= int(receive_window) <=
final.MAX_RECV_WIN):
receive_window = final.DEFAULT_RECV_WIN
timeout = profile.get("idle-timeout", 0)
ntwk_profile = {
"profile": {
"tcp_proxy_profile": {
"receive_window": receive_window,
"idle_connection_timeout": timeout,
'automatic': False
},
"type": "PROTOCOL_TYPE_TCP_PROXY"
},
"name": name
}
app_profile['tenant_ref'] = conv_utils.get_object_ref(
tenant, 'tenant')
ntwk_profile['tenant_ref'] = conv_utils.get_object_ref(
tenant, 'tenant')
# code to get merge count of network profile.
if self.object_merge_check:
conv_utils.update_skip_duplicates(
ntwk_profile, avi_config['NetworkProfile'],
'network_profile', converted_objs, name,
default_profile_name, merge_object_mapping, profile_type,
self.prefix, sys_dict['NetworkProfile'])
self.net_count +=1
else:
converted_objs.append({'network_profile': ntwk_profile})
avi_config['NetworkProfile'].append(ntwk_profile)
elif profile_type == 'one-connect':
supported_attr = self.supported_oc
indirect = []
u_ignore = user_ignore.get('one-connect', [])
skipped = [attr for attr in profile.keys()
if attr not in supported_attr]
mask = profile.get('source-mask', 'any')
if not mask == 'any':
skipped.append('source-mask')
converted_objs = \
'Maps Indirectly to : HTTP Profile -> Connection Multiplex'
LOG.warn('one-connect profile %s will be mapped indirectly to HTTP '
'Profile -> Connection Multiplex of the same VS if '
'oneconnect-transformations is enabled' % name)
avi_config['OneConnect'].append(name)
elif profile_type == 'tcp':
supported_attr = self.supported_tcp
indirect = self.indirect_tcp
na_list = self.na_tcp
u_ignore = user_ignore.get('tcp', [])
skipped = [attr for attr in profile.keys()
if attr not in supported_attr]
timeout = profile.get("idle-timeout", 0)
nagle = profile.get("nagle", 'disabled')
nagle = False if nagle == 'disabled' else True
retrans = profile.get("max-retrans", final.MIN_SYN_RETRANS)
retrans = final.MIN_SYN_RETRANS if \
int(retrans) < final.MIN_SYN_RETRANS else retrans
retrans = final.MAX_SYN_RETRANS if \
int(retrans) > final.MAX_SYN_RETRANS else retrans
syn_retrans = profile.get("syn-max-retrans",
final.MIN_SYN_RETRANS)
syn_retrans = final.MIN_SYN_RETRANS if \
int(syn_retrans) < final.MIN_SYN_RETRANS else syn_retrans
syn_retrans = final.MAX_SYN_RETRANS if \
int(syn_retrans) > final.MAX_SYN_RETRANS else syn_retrans
conn_type = profile.get("time-wait-recycle", "disabled")
conn_type = "CLOSE_IDLE" if \
conn_type == "disabled" else "KEEP_ALIVE"
delay = profile.get("time-wait-timeout", 0)
window = profile.get("receive-window-size",
(final.MIN_RECV_WIN * final.BYTES_IN_KB))
window = int(int(window)/final.BYTES_IN_KB)
cc_algo = profile.get("congestion-control", "")
cc_algo = conv_utils.get_cc_algo_val(cc_algo)
ip_dscp = profile.get("ip-tos-to-client", None)
ntwk_profile = {
"profile": {
"tcp_proxy_profile": {
"idle_connection_timeout": timeout,
"nagles_algorithm": nagle,
"max_syn_retransmissions": syn_retrans,
"max_retransmissions": retrans,
"idle_connection_type": conn_type,
"time_wait_delay": delay,
"receive_window": window,
"cc_algo": cc_algo,
'automatic': False
},
"type": "PROTOCOL_TYPE_TCP_PROXY"
},
"name": name
}
if ip_dscp:
is_ip_dscp = True
if ip_dscp == 'pass-through':
ip_dscp = 2147483647
elif ip_dscp == 'mimic':
is_ip_dscp = False
skipped.append('ip-tos-to-client')
if is_ip_dscp:
ntwk_profile["profile"]["tcp_proxy_profile"]["ip_dscp"] = \
ip_dscp
ntwk_profile['tenant_ref'] = conv_utils.get_object_ref(
tenant, 'tenant')
# code to get merge count of network profile.
if self.object_merge_check:
conv_utils.update_skip_duplicates(
ntwk_profile, avi_config['NetworkProfile'],
'network_profile', converted_objs, name,
default_profile_name, merge_object_mapping, profile_type,
self.prefix, sys_dict['NetworkProfile'])
self.net_count +=1
else:
converted_objs.append({'network_profile': ntwk_profile})
avi_config['NetworkProfile'].append(ntwk_profile)
elif profile_type == 'udp':
supported_attr = self.supported_udp
indirect = self.indirect_udp
u_ignore = user_ignore.get('udp', [])
skipped = [attr for attr in profile.keys()
if attr not in supported_attr]
per_pkt = profile.get("datagram-load-balancing", 'disabled')
timeout = str(profile.get("idle-timeout", 0))
if not timeout.isdigit():
timeout = 0
ntwk_profile = {
"profile": {
"type": "PROTOCOL_TYPE_UDP_FAST_PATH",
"udp_fast_path_profile": {
"per_pkt_loadbalance": (per_pkt == 'enabled'),
"session_idle_timeout": timeout
}
},
"name": name
}
ntwk_profile['tenant_ref'] = conv_utils.get_object_ref(
tenant, 'tenant')
# code to get merge count of network profile.
if self.object_merge_check:
conv_utils.update_skip_duplicates(
ntwk_profile, avi_config['NetworkProfile'],
'network_profile', converted_objs, name,
default_profile_name, merge_object_mapping, profile_type,
self.prefix, sys_dict['NetworkProfile'])
self.net_count +=1
else:
converted_objs.append({'network_profile': ntwk_profile})
avi_config['NetworkProfile'].append(ntwk_profile)
conv_status = conv_utils.get_conv_status(
skipped, indirect, default_ignore, profile, u_ignore, na_list)
conv_utils.add_conv_status('profile', profile_type, name, conv_status,
converted_objs)
class ProfileConfigConvV10(ProfileConfigConv):
def __init__(self, f5_profile_attributes, object_merge_check, prefix,
keypassphrase):
"""
:param f5_profile_attributes: f5 profile attributes from yaml file.
:param object_merge_check: flag for merging objects
:param prefix: prefix for objects
:param keypassphrase: keypassphrase yaml file location
"""
self.supported_types = f5_profile_attributes['Profile_supported_types']
self.default_key = "defaults from"
self.supported_ssl = f5_profile_attributes['Profile_supported_ssl']
self.na_ssl = f5_profile_attributes['Profile_na_ssl']
self.indirect_ssl = f5_profile_attributes['Profile_indirect_ssl']
self.ignore_for_defaults = \
f5_profile_attributes['Profile_ignore_for_defaults']
self.na_http = f5_profile_attributes['Profile_na_http']
self.supported_http = f5_profile_attributes['Profile_supported_http']
self.indirect_http = f5_profile_attributes['Profile_indirect_http']
self.na_dns = []
self.supported_dns = f5_profile_attributes['Profile_supported_dns']
self.indirect_dns = []
self.supported_l4 = f5_profile_attributes['Profile_supported_l4']
self.indirect_l4 = f5_profile_attributes['Profile_indirect_l4']
self.supported_fh = f5_profile_attributes['Profile_supported_fh']
self.indirect_fh = f5_profile_attributes['Profile_indirect_fh']
self.supported_tcp = f5_profile_attributes['Profile_supported_tcp']
self.indirect_tcp = f5_profile_attributes['Profile_indirect_tcp']
self.supported_udp = f5_profile_attributes['Profile_supported_udp']
self.indirect_udp = []
self.supported_oc = f5_profile_attributes['Profile_supported_oc']
if keypassphrase:
self.f5_passphrase_keys = yaml.safe_load(open(keypassphrase))
else:
self.f5_passphrase_keys = None
self.object_merge_check = object_merge_check
# code to get count to merge objects
self.app_count = 0
self.net_count = 0
self.pki_count = 0
self.certkey_count = 0
# Added prefix for objects
self.prefix = prefix
def convert_profile(self, profile, key, f5_config, profile_config,
avi_config, input_dir, user_ignore, tenant_ref,
key_and_cert_mapping_list, merge_object_mapping,
sys_dict):
"""
:param profile: parsed dict of profile
:param key: key which contain combination of profile type and name
:param f5_config: parsed f5 config dict
:param profile_config: dict of profile config
:param avi_config: dict for avi config | |
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from ..configure.default_config import CINRAD_COLORMAP, CINRAD_field_bins, \
CINRAD_field_normvar, CINRAD_field_mapping
import numpy as np
from ..configure.location_config import CN_shp_info
import cartopy.feature as cfeature
from ..core.transforms import geographic_to_cartesian_aeqd, cartesian_to_geographic_aeqd, antenna_vectors_to_cartesian
from .VerticalSectionPlot import VerticalSection
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import cartopy, matplotlib
class Graph(object):
"""Improved mapping function, cartesian coords, recommended"""
def __init__(self, NRadar):
self.Radar = NRadar
def plot_ppi(self, ax, sweep_num, field_name, cmap=None, min_max=None, cmap_bins=None, cbar=True,
orientation="vertical",cbar_ticks=None, cbar_ticklabels=None, clabel=None, **kwargs):
"""
:param ax: axes.Axes object or array of Axes objects., eg: fig, ax = plt.subplots
:param sweep_num: The sweep_num volume scan to draw, from 0 start!
:param field_name: field dict to select data, eg: "dBZ" "V"
:param cmap: str or Colormap, optional, A Colormap instance or registered colormap name. to see cm.py!
:param min_max: The colorbar range(vmin, vmax). If None, suitable min/max values are automatically chosen by min max of data!
:param cmap_bins: bins of colormaps
:param cbar: if True, plot with colorbar, else not!
:param orientation: vertical or horizontal, if cbar is True , this is vaild!, colorbar oriention!
:param cbar_ticks: Set the locations of the tick marks from sequence ticks
:param cbar_ticklabels: Set the text values of the tick labels.
:param kwargs: other arguments for pcolormesh!
:return:
"""
assert isinstance(ax, matplotlib.axes._axes.Axes), "axes should be matplotlib axes not cartopy axes!"
if field_name == "V":
vmax = self.Radar.scan_info.nyquist_velocity[sweep_num].values
vmin = -1 * vmax
elif min_max is not None:
vmin, vmax = min_max
elif CINRAD_field_normvar[CINRAD_field_mapping[field_name]] == -1:
vmax = np.nanmax(self.Radar.fields[sweep_num][field_name])
vmin = np.nanmin(self.Radar.fields[sweep_num][field_name])
else:
vmin, vmax = CINRAD_field_normvar[CINRAD_field_mapping[field_name]]
if cmap is None:
cmap = CINRAD_COLORMAP[CINRAD_field_mapping[field_name]]
if cmap_bins is None:
cmap_bins = CINRAD_field_bins[CINRAD_field_mapping[field_name]]
ax.set_aspect("equal")
radar_data = self.Radar.fields[sweep_num][field_name]
x, y = radar_data.x, radar_data.y
cmaps = plt.get_cmap(cmap)
levels = MaxNLocator(nbins=cmap_bins).tick_values(vmin, vmax)
norm = BoundaryNorm(levels, ncolors=cmaps.N, clip=True)
gci = ax.pcolormesh(x / 1000., y / 1000., radar_data, cmap=cmaps, \
zorder=0, norm=norm, shading='auto', **kwargs)
if cbar:
cb=plt.colorbar(mappable=gci, ax=ax, orientation=orientation)
if cbar_ticks is None:
ticks = levels
else:
ticks = cbar_ticks
cb.set_ticks(ticks)
if cbar_ticklabels is not None:
if orientation == "vertical":
cb.ax.set_yticklabels(cbar_ticklabels)
else:
cb.ax.set_xticklabels(cbar_ticklabels)
if clabel is not None:
cb.set_label(clabel)
return gci
def plot_rhi(self, ax, sweep_num, field_name, cmap=None, min_max=None,cmap_bins=None, cbar=True,
orientation="vertical",cbar_ticks=None, cbar_ticklabels=None, clabel=None, **kwargs):
assert isinstance(ax, matplotlib.axes._axes.Axes), "axes should be matplotlib axes not cartopy axes!"
if field_name == "V":
vmax = self.Radar.scan_info.nyquist_velocity[0].values
vmin = -1 * vmax
elif min_max is not None:
vmin, vmax = min_max
elif CINRAD_field_normvar[CINRAD_field_mapping[field_name]] == -1:
vmax = np.nanmax(self.Radar.fields[0][field_name])
vmin = np.nanmin(self.Radar.fields[0][field_name])
else:
vmin, vmax = CINRAD_field_normvar[CINRAD_field_mapping[field_name]]
if cmap is None:
cmap = CINRAD_COLORMAP[CINRAD_field_mapping[field_name]]
if cmap_bins is None:
cmap_bins = CINRAD_field_bins[CINRAD_field_mapping[field_name]]
mesh_xy = (self.Radar.fields[sweep_num].x ** 2 + self.Radar.fields[sweep_num].y ** 2) ** 0.5
mesh_z = self.Radar.fields[sweep_num].z
field_data = self.Radar.fields[sweep_num][field_name]
cmaps = plt.get_cmap(cmap)
levels = MaxNLocator(nbins=cmap_bins).tick_values(vmin, vmax)
norm = BoundaryNorm(levels, ncolors=cmaps.N, clip=True)
gci = ax.pcolormesh(mesh_xy/1000., mesh_z/1000., field_data, cmap=cmaps,
norm=norm, shading='auto', **kwargs)
if cbar:
cb = plt.colorbar(mappable=gci, ax=ax, orientation=orientation)
if cbar_ticks is None:
ticks = levels
else:
ticks = cbar_ticks
cb.set_ticks(ticks)
if cbar_ticklabels is not None:
if orientation == "vertical":
cb.ax.set_yticklabels(cbar_ticklabels)
else:
cb.ax.set_xticklabels(cbar_ticklabels)
if clabel is not None:
cb.set_label(clabel)
return gci
    def plot_vcs(self, ax, start_xy, end_xy, field_name, cmap=None, min_max=None,cmap_bins=None, cbar=True,
                 orientation="vertical",cbar_ticks=None, cbar_ticklabels=None, clabel=None, **kwargs):
        """Draw a vertical cross-section (VCS) between two ground positions.

        :param ax: axes.Axes object or array of Axes objects., eg: fig, ax = plt.subplots
        :param start_xy: (start_x, start_y) units:km, VCS start position!
        :param end_xy: (end_x, end_y) units:km, VCS end position!
        :param field_name: field dict to select data, eg: "dBZ" "V"
        :param cmap: str or Colormap, optional, A Colormap instance or registered colormap name. to see cm.py!
        :param min_max: The colorbar range(vmin, vmax). If None, suitable min/max values are automatically chosen by min max of data!
        :param cmap_bins: bins of colormap
        :param cbar: bool, if True, plot with colorbar,
        :param orientation: vertical or horizontal, it is vaild when cbar is True
        :param cbar_ticks: Set the locations of the tick marks from sequence ticks
        :param cbar_ticklabels: Set the text values of the tick labels.
        :param clabel: colorbar label text
        :return: the QuadMesh of the last sweep drawn
        """
        assert isinstance(ax, matplotlib.axes._axes.Axes), "axes should be matplotlib axes not cartopy axes!"
        # NOTE(review): vmin/vmax are derived from sweep 0 even though the
        # section spans all sweeps — confirm this is intended
        if field_name == "V":
            vmax = self.Radar.scan_info.nyquist_velocity[0].values
            vmin = -1 * vmax
        elif min_max is not None:
            vmin, vmax = min_max
        elif CINRAD_field_normvar[CINRAD_field_mapping[field_name]] == -1:
            # -1 marks fields whose display range comes from the data itself
            vmax = np.nanmax(self.Radar.fields[0][field_name])
            vmin = np.nanmin(self.Radar.fields[0][field_name])
        else:
            vmin, vmax = CINRAD_field_normvar[CINRAD_field_mapping[field_name]]
        if cmap is None:
            cmap = CINRAD_COLORMAP[CINRAD_field_mapping[field_name]]
        if cmap_bins is None:
            cmap_bins = CINRAD_field_bins[CINRAD_field_mapping[field_name]]
        start_point = (start_xy[0] * 1000., start_xy[1] * 1000) ## km to meters
        end_point = (end_xy[0] * 1000., end_xy[1] * 1000) ## km to meters
        mesh_xy, mesh_z, field_data = self.Radar.get_vcs_data(start_point, end_point, field_name)
        cmaps = plt.get_cmap(cmap)
        levels = MaxNLocator(nbins=cmap_bins).tick_values(vmin, vmax)
        norm = BoundaryNorm(levels, ncolors=cmaps.N, clip=True)
        # one mesh per sweep; `gci` keeps the last mesh for the colorbar/return
        for isweep, _ in enumerate(mesh_xy):
            gci = ax.pcolormesh(mesh_xy[isweep] / 1000., mesh_z[isweep] / 1000., field_data[isweep], cmap=cmaps,
                                norm=norm, shading='auto', **kwargs)
        if cbar:
            cb = plt.colorbar(mappable=gci, ax=ax, orientation=orientation)
            if cbar_ticks is None:
                ticks = levels
            else:
                ticks = cbar_ticks
            cb.set_ticks(ticks)
            if clabel is not None:
                cb.set_label(clabel)
            if cbar_ticklabels is not None:
                if orientation == "vertical":
                    cb.ax.set_yticklabels(cbar_ticklabels)
                else:
                    cb.ax.set_xticklabels(cbar_ticklabels)
        return gci
def plot_crf(self, ax, cmap=CINRAD_COLORMAP[CINRAD_field_mapping["dBZ"]],
min_max=CINRAD_field_normvar[CINRAD_field_mapping["dBZ"]], cmap_bins=CINRAD_field_bins[CINRAD_field_mapping["dBZ"]],
cbar=True, orientation="vertical",cbar_ticks=None, cbar_ticklabels=None, clabel=None, **kwargs):
"""
显示组合反射率因子
:param ax: axes.Axes object or array of Axes objects., eg: fig, ax = plt.subplots
:param XRange: np.ndarray, 1d, units:meters
:param YRange: np.ndarray, 1d, units:meters
:param cmap: str or Colormap, optional, A Colormap instance or registered colormap name. to see cm.py!
:param min_max: The colorbar range(vmin, vmax). If None, suitable min/max values are automatically chosen by min max of data!
:param cmap_bins: bins of colormaps
:param cbar: if True, plot with colorbar, else not!
:param orientation: vertical or horizontal, if cbar is True , this is vaild!, colorbar oriention!
:param kwargs: other arguments for pcolormesh!
:param cbar_ticks: Set the locations of the tick marks from sequence ticks
:param cbar_ticklabels: Set the text values of the tick labels.
:return:
"""
max_range = int(self.Radar.fields[0].range.max().values)
XRange = np.arange(-1 * max_range, max_range+1, 1000.)
YRange = XRange
self.Radar.add_product_CR_xy(XRange, YRange)
assert isinstance(ax, matplotlib.axes._axes.Axes), "axes should be matplotlib axes not cartopy axes!"
vmin, vmax = min_max
ax.set_aspect("equal")
radar_data = self.Radar.product['CR'].values
x, y = np.meshgrid(self.Radar.product['CR'].x_cr.values,
self.Radar.product['CR'].y_cr.values, indexing="ij")
cmaps = plt.get_cmap(cmap)
levels = MaxNLocator(nbins=cmap_bins).tick_values(vmin, vmax)
norm = BoundaryNorm(levels, ncolors=cmaps.N, clip=True)
gci = ax.pcolormesh(x / 1000., y / 1000., radar_data, cmap=cmaps, \
zorder=0, norm=norm, shading='auto', **kwargs)
if cbar:
cb = plt.colorbar(mappable=gci, ax=ax, orientation=orientation)
if cbar_ticks is None:
ticks = levels
else:
ticks = cbar_ticks
cb.set_ticks(ticks)
if clabel is not None:
cb.set_label(clabel)
if cbar_ticklabels is not None:
if orientation == "vertical":
cb.ax.set_yticklabels(cbar_ticklabels)
else:
cb.ax.set_xticklabels(cbar_ticklabels)
return gci
    def plot_cappi(self, ax, level_height=3000, cmap=CINRAD_COLORMAP[CINRAD_field_mapping["dBZ"]],
                   min_max=CINRAD_field_normvar[CINRAD_field_mapping["dBZ"]], cmap_bins=CINRAD_field_bins[CINRAD_field_mapping["dBZ"]],
                   cbar=True, orientation="vertical", cbar_ticks=None, cbar_ticklabels=None, clabel=None, **kwargs):
        """Plot a CAPPI (constant altitude PPI) image at the given height.

        :param ax: axes.Axes object or array of Axes objects., eg: fig, ax = plt.subplots
        :param level_height: height of cappi, units:meters, default, 3000m
        :param cmap: str or Colormap, optional, A Colormap instance or registered colormap name. to see cm.py!
        :param min_max: The colorbar range(vmin, vmax). If None, suitable min/max values are automatically chosen by min max of data!
        :param cmap_bins: bins of colormaps
        :param cbar: if True, plot with colorbar, else not!
        :param orientation: vertical or horizontal, if cbar is True , this is vaild!, colorbar oriention!
        :param cbar_ticks: Set the locations of the tick marks from sequence ticks
        :param cbar_ticklabels: Set the text values of the tick labels.
        :param clabel: colorbar label text
        :param kwargs: other arguments for pcolormesh!
        :return: the QuadMesh returned by pcolormesh
        """
        # symmetric 1 km grid covering the radar's maximum range; the CAPPI
        # product is computed on it before plotting
        max_range = int(self.Radar.fields[0].range.max().values)
        XRange = np.arange(-1 * max_range, max_range + 1, 1000.)
        YRange = XRange
        self.Radar.add_product_CAPPI_xy(XRange, YRange, level_height)
        assert isinstance(ax, matplotlib.axes._axes.Axes), "axes should be matplotlib axes not cartopy axes!"
        vmin, vmax = min_max
        ax.set_aspect("equal")
        # product key/coordinate names embed the height, eg "CAPPI_3000"
        radar_data = self.Radar.product["CAPPI_%d"%level_height].values
        x, y = np.meshgrid(self.Radar.product["CAPPI_%d"%level_height]['x_cappi_%d'%level_height].values,
                           self.Radar.product["CAPPI_%d"%level_height]['y_cappi_%d'%level_height].values, indexing="ij")
        cmaps = plt.get_cmap(cmap)
        levels = MaxNLocator(nbins=cmap_bins).tick_values(vmin, vmax)
        norm = BoundaryNorm(levels, ncolors=cmaps.N, clip=True)
        # coordinates are in meters; plot in kilometers
        gci = ax.pcolormesh(x / 1000., y / 1000., radar_data, cmap=cmaps, \
                            zorder=0, norm=norm,shading='auto', **kwargs)
        if cbar:
            cb = plt.colorbar(mappable=gci, ax=ax, orientation=orientation)
            if cbar_ticks is None:
                ticks = levels
            else:
                ticks = cbar_ticks
            cb.set_ticks(ticks)
            if clabel is not None:
                cb.set_label(clabel)
            if cbar_ticklabels is not None:
                if orientation == "vertical":
                    cb.ax.set_yticklabels(cbar_ticklabels)
                else:
                    cb.ax.set_xticklabels(cbar_ticklabels)
        return gci
def add_rings(self, ax, rings, color="#5B5B5B", linestyle='-', linewidth=0.6, **kwargs):
"""
:param ax: axes.Axes object or array of Axes objects., eg: fig, ax = plt.subplots
:param rings: distance from | |
<reponame>21vcloud/Controller
# -*- coding:utf-8 -*-
from django.db import models
from rest_framework import serializers
from rest_api.models import AccessKey
class ResponseNoneMeta(models.Model):
    """Stand-in model letting response serializers declare a ``Meta.model``
    without being backed by a real database table."""

    class Meta:
        managed = False  # Django never creates or migrates a table for this
        db_table = 'NoneMeta'
class FlavorListInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for the flavor (machine type) list API.

    Documentation-only serializer: each CharField mirrors one key of the
    flavor dict returned by the backend (labels/help_text are in Chinese).
    """
    base_price = serializers.CharField(label="基础价格", help_text="示例: 3100")
    count = serializers.CharField(label="机器数量", help_text="示例: 1")
    cpu_core = serializers.CharField(label="内核数", help_text="示例: 6*2")
    cpu_hz = serializers.CharField(label="主频", help_text="示例: 2.4GHz")
    cpu_model = serializers.CharField(label="CPU", help_text="示例: Intel Xeon E5-2620 v3*2")
    disk = serializers.CharField(label="磁盘规格", help_text="示例: 2*600G SAS System Disk RAID 1+ 6*600G SAS RAID 5")
    flavor_info = serializers.CharField(label="机器类型描述", help_text="示例: 2*Intel Xeon E5-2620 v3 | 128G | 8*600G SAS")
    id = serializers.CharField(label="机器id", help_text="示例: 11a2c533-73cc-4f95-8e7b-0055b7ec18a7")
    material_number = serializers.CharField(label="物料编码", help_text="示例:CBMS-S-B111")
    name = serializers.CharField(label="物料名称", help_text="示例: 戴尔")
    openstack_flavor_name = serializers.CharField(label="openstack端的名称", help_text="示例:DELL_VBS_RAID")
    ram = serializers.CharField(label="内存大小", help_text="示例: 128G")
    resource_class = serializers.CharField(label="资源类型", help_text="示例: dell_baremetal")
    status = serializers.CharField(label="状态", help_text="示例: active")
    type = serializers.CharField(label="机器类型", help_text="示例: 基础")
    type_name = serializers.CharField(label="机器类型名称", help_text="示例: base")

    class Meta:
        model = ResponseNoneMeta
        fields = ["base_price", "count", "cpu_core", "cpu_hz", "cpu_model", "disk",
                  "flavor_info", "id", "material_number", "name", "openstack_flavor_name", "ram", "resource_class",
                  "status", "type", "type_name"]
class InstImageListInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for the instance image list API (doc-only serializer)."""
    id = serializers.CharField(label="镜像id", help_text="示例: 92277119-5647-4525-8670-26460b91e357")
    image_name = serializers.CharField(label="镜像名称", help_text="示例: Ubuntu14.04")
    openstack_image_name = serializers.CharField(label="openstack端的镜像名称",
                                                 help_text="示例: uat-ubuntu-trusty-zabbix-agent.qcow2")
    status = serializers.CharField(label="镜像状态", help_text="示例: active")
    type = serializers.CharField(label="镜像类型", help_text="示例: Ubuntu")

    class Meta:
        model = ResponseNoneMeta
        fields = ["id", "image_name", "openstack_image_name", "status", "type"]
class InstQosPolicyDictInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for one QoS (bandwidth) policy entry (doc-only serializer)."""
    id = serializers.CharField(label="带宽id", help_text="示例: 2425feb3-f324-44f6-b161-69bcebbb6cb6")
    qos_policy_count = serializers.CharField(label="带宽大小", help_text="示例: 1")
    qos_policy_name = serializers.CharField(label="带宽大小名称", help_text="示例: 1M")

    class Meta:
        model = ResponseNoneMeta
        fields = ["id", "qos_policy_count", "qos_policy_name"]
class InstQosPolicyListInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for the QoS policy list: nests one policy dict plus range end."""
    end = serializers.CharField(label="带宽范围", help_text="示例: 300(代表1-300)")
    qos_policy_dict = InstQosPolicyDictInfoResponsesSerializer(label="带宽字典信息")

    class Meta:
        model = ResponseNoneMeta
        fields = ["qos_policy_dict", "end"]
class InstFeedBackInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for the instance status feedback API (doc-only serializer)."""
    status = serializers.CharField(label="更新后的实例状态", help_text="示例: active")
    task = serializers.CharField(label="更新后的任务执行结果", help_text="示例:success")
    update_at = serializers.CharField(label="更新时间", help_text="2019-10-11 13:54:37.220506")

    class Meta:
        model = ResponseNoneMeta
        fields = ["status", "task", "update_at"]
class InstMachineGetnfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for the machine-detail API (doc-only serializer).

    FIX: the Meta.fields list previously contained "flavor_id" twice; the
    duplicate entry is removed (the declared field set is unchanged).
    """
    create_at = serializers.CharField(label="创建时间", help_text="示例:2019-08-23 16:00:13 ")
    deleted = serializers.CharField(label="删除时间", help_text="示例:2019-08-23 16:00:13 ")
    disk_info = serializers.CharField(label="磁盘信息", help_text="示例: []")
    firewall_id = serializers.CharField(label="防火墙id", help_text="示例: null")
    firewall_name = serializers.CharField(label="防火墙名称", help_text="示例: null")
    flavor_id = serializers.CharField(label="flavor id", help_text="示例: 11a2c533-73cc-4f95-8e7b-0055b7ec18a7")
    flavor_name = serializers.CharField(label="flavor 名称", help_text="示例: 戴尔")
    floating_ip_allocation = serializers.CharField(label="弹性公网IP可用量", help_text="示例:null ")
    floating_ip_bandwidth = serializers.CharField(label="弹性公网IP带宽", help_text="示例:null ")
    floating_ip_info = serializers.CharField(label="弹性公网IP信息", help_text="示例: []")
    floating_ip_line = serializers.CharField(label="弹性公网IP类型", help_text="示例: null")
    id = serializers.CharField(label="machine id", help_text="示例:5d5f9d0c419049990ddd19d2 ")
    image_id = serializers.CharField(label="镜像id", help_text="示例:198e0048-c8b2-4db9-9f08-395ea005af21 ")
    image_name = serializers.CharField(label="镜像名称", help_text="示例:Windows2016 ")
    login_method = serializers.CharField(label="登陆方式", help_text="示例:user_password ")
    monitoring = serializers.CharField(label="是否携带监控", help_text="示例:False/True ")
    network = serializers.CharField(label="网络名称", help_text="示例:zx_vpc2_net ")
    network_id = serializers.CharField(label="网络id", help_text="示例:2a9b9e14-e9f8-4a04-819f-d9f68d4bfbe9 ")
    network_path_type = serializers.CharField(label="网络类型", help_text="示例:private ")
    order_id = serializers.CharField(label="订单id", help_text="示例:BMS201908231600122765752 ")
    public_key = serializers.CharField(label="公钥信息", help_text="示例: null")
    service_name = serializers.CharField(label="所生成服务器名称", help_text="示例:zhouxiao ")
    service_password = serializers.CharField(label="所生成服务器密码", help_text="示例:<PASSWORD> ")
    service_username = serializers.CharField(label="所生成服务器登录名", help_text="示例:root ")
    status = serializers.CharField(label="机器状态", help_text="示例:null ")
    update_at = serializers.CharField(label="更新时间", help_text="示例:2019-08-26 09:57:29 ")
    uuid = serializers.CharField(label="实例所绑定的实例id", help_text="示例:c4130c54-bc4b-4249-928d-c014827653db ")
    vulnerability_scanning = serializers.CharField(label="是否携带漏洞扫描", help_text="示例:False/True ")

    class Meta:
        model = ResponseNoneMeta
        fields = ["create_at", "deleted", "disk_info", "firewall_id", "firewall_name", "flavor_id",
                  "flavor_name", "floating_ip_allocation", "floating_ip_bandwidth", "floating_ip_info", "id",
                  "network_id", "image_id", "image_name", "login_method", "monitoring", "network",
                  "network_path_type", "order_id", "public_key", "service_name", "service_password",
                  "service_username", "status", "update_at", "uuid", "vulnerability_scanning",
                  "floating_ip_line"]
class InstMachineInfoFeedbackInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for machine-info feedback (documentation-only serializer).

    NOTE(review): backed by the ``ResponseNoneMeta`` placeholder model, so this
    class appears to exist only to render API docs -- confirm against callers.
    """
    create_at = serializers.CharField(label="创建时间", help_text="示例:2019-08-23 16:00:13 ")
    deleted = serializers.CharField(label="删除时间", help_text="示例:2019-08-23 16:00:13 ")
    disk_info = serializers.CharField(label="磁盘信息", help_text="示例: []")
    firewall_id = serializers.CharField(label="防火墙id", help_text="示例: null")
    firewall_name = serializers.CharField(label="防火墙名称", help_text="示例: null")
    flavor_id = serializers.CharField(label="flavor id", help_text="示例: 11a2c533-73cc-4f95-8e7b-0055b7ec18a7")
    flavor_name = serializers.CharField(label="flavor 名称", help_text="示例: 戴尔")
    floating_ip_allocation = serializers.CharField(label="弹性公网IP可用量", help_text="示例:null ")
    floating_ip_bandwidth = serializers.CharField(label="弹性公网IP带宽", help_text="示例:null ")
    floating_ip_info = serializers.CharField(label="弹性公网IP信息", help_text="示例: []")
    floating_ip_line = serializers.CharField(label="弹性公网IP类型", help_text="示例: null")
    id = serializers.CharField(label="machine id", help_text="示例:5d5f9d0c419049990ddd19d2 ")
    image_id = serializers.CharField(label="镜像id", help_text="示例:198e0048-c8b2-4db9-9f08-395ea005af21 ")
    image_name = serializers.CharField(label="镜像名称", help_text="示例:Windows2016 ")
    login_method = serializers.CharField(label="登陆方式", help_text="示例:user_password ")
    monitoring = serializers.CharField(label="是否携带监控", help_text="示例:False/True ")
    network = serializers.CharField(label="网络名称", help_text="示例:zx_vpc2_net ")
    network_id = serializers.CharField(label="网络id", help_text="示例:2a9b9e14-e9f8-4a04-819f-d9f68d4bfbe9 ")
    network_path_type = serializers.CharField(label="网络类型", help_text="示例:private ")
    order_id = serializers.CharField(label="订单id", help_text="示例:BMS201908231600122765752 ")
    public_key = serializers.CharField(label="公钥信息", help_text="示例: null")
    service_name = serializers.CharField(label="所生成服务器名称", help_text="示例:zhouxiao ")
    service_password = serializers.CharField(label="所生成服务器密码", help_text="示例:<PASSWORD> ")
    service_username = serializers.CharField(label="所生成服务器登录名", help_text="示例:root ")
    status = serializers.CharField(label="机器状态", help_text="示例:null ")
    update_at = serializers.CharField(label="更新时间", help_text="示例:2019-08-26 09:57:29 ")
    uuid = serializers.CharField(label="实例所绑定的实例id", help_text="示例:c4130c54-bc4b-4249-928d-c014827653db ")
    vulnerability_scanning = serializers.CharField(label="是否携带漏洞扫描", help_text="示例:False/True ")
    class Meta:
        model = ResponseNoneMeta
        # Fix: "flavor_id" was listed twice in this list; deduplicated.
        fields = ["create_at", "deleted", "disk_info", "firewall_id", "firewall_name", "flavor_id",
                  "flavor_name", "floating_ip_allocation", "floating_ip_bandwidth", "floating_ip_info", "id",
                  "network_id", "image_id", "image_name", "login_method", "monitoring", "network",
                  "network_path_type", "order_id", "public_key", "service_name", "service_password",
                  "service_username", "status", "update_at", "uuid", "vulnerability_scanning",
                  "floating_ip_line"]
class InstAllOrderInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema describing one order in the all-orders listing (docs-only serializer)."""
    account_id = serializers.CharField(label="用户id", help_text="示例:589d0d6781314a8b8c28cf3982ce344c")
    billing_model = serializers.CharField(label="合同类型", help_text="示例:框架合同")
    contract_number = serializers.CharField(label="合同编码", help_text="示例:wer234234324")
    create_at = serializers.CharField(label="合同创建时间", help_text="示例: 2019-10-10 17:06:08")
    deleted = serializers.CharField(label="合同删除时间", help_text="示例: null")
    delivery_status = serializers.CharField(label="订单交付状态", help_text="示例: rejected/delivered")
    id = serializers.CharField(label="订单id", help_text="示例: BMS201910101706051447155")
    order_price = serializers.CharField(label="订单价格", help_text="示例:1")
    order_type = serializers.CharField(label="订单类型", help_text="示例:新购 ")
    product_info = serializers.CharField(label="订单所购买产品信息", help_text="示例: []")
    product_type = serializers.CharField(label="产品类型", help_text="示例: 云硬盘")
    project_id = serializers.CharField(label="项目 id", help_text="示例:55e40f6dbb6549a19e235e1a7f2d88cf ")
    project_name = serializers.CharField(label="项目名称", help_text="示例:wei_forbidden22")
    region = serializers.CharField(label="可用区", help_text="示例:regionOne ")
    service_count = serializers.CharField(label="订单所购买服务个数", help_text="示例:1 ")
    update_at = serializers.CharField(label="更新时间", help_text="示例:null")
    class Meta:
        model = ResponseNoneMeta
        fields = ["account_id", "billing_model", "contract_number", "create_at", "deleted", "delivery_status",
                  "id", "order_price", "order_type", "product_info", "product_type", "project_id",
                  "project_name", "region", "service_count", "update_at"]
class InstOrderDeriveryUpdateInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for an order after a delivery-status update (docs-only serializer).

    NOTE(review): "Derivery" is a typo for "Delivery"; the name is kept because
    it is referenced elsewhere (e.g. InstOrderDeliveryUpdateResponsesSerializer).
    """
    account_id = serializers.CharField(label="用户id", help_text="示例:589d0d6781314a8b8c28cf3982ce344c")
    billing_model = serializers.CharField(label="合同类型", help_text="示例:框架合同")
    contract_number = serializers.CharField(label="合同编码", help_text="示例:wer234234324")
    create_at = serializers.CharField(label="合同创建时间", help_text="示例: 2019-10-10 17:06:08")
    deleted = serializers.CharField(label="合同删除时间", help_text="示例: null")
    delivery_status = serializers.CharField(label="订单交付状态", help_text="示例: rejected/delivered")
    id = serializers.CharField(label="订单id", help_text="示例: BMS201910101706051447155")
    order_price = serializers.CharField(label="订单价格", help_text="示例:1")
    order_type = serializers.CharField(label="订单类型", help_text="示例:新购 ")
    product_info = serializers.CharField(label="订单所购买产品信息", help_text="示例: []")
    product_type = serializers.CharField(label="产品类型", help_text="示例: 云硬盘")
    project_id = serializers.CharField(label="项目 id", help_text="示例:55e40f6dbb6549a19e235e1a7f2d88cf")
    region = serializers.CharField(label="可用区", help_text="示例:regionOne ")
    service_count = serializers.CharField(label="订单所购买服务个数", help_text="示例:1 ")
    update_at = serializers.CharField(label="更新时间", help_text="示例:null")
    class Meta:
        model = ResponseNoneMeta
        fields = ["account_id", "billing_model", "contract_number", "create_at", "deleted", "delivery_status",
                  "id", "order_price", "order_type", "product_info", "product_type", "project_id",
                  "region", "service_count", "update_at"]
class InstOrderByAccInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for one order when listing orders by account (docs-only serializer)."""
    account_id = serializers.CharField(label="用户id", help_text="示例:589d0d6781314a8b8c28cf3982ce344c")
    billing_model = serializers.CharField(label="合同类型", help_text="示例:框架合同")
    contract_number = serializers.CharField(label="合同编码", help_text="示例:wer234234324")
    create_at = serializers.CharField(label="合同创建时间", help_text="示例: 2019-10-10 17:06:08")
    deleted = serializers.CharField(label="合同删除时间", help_text="示例: null")
    delivery_status = serializers.CharField(label="订单交付状态", help_text="示例: rejected/delivered")
    id = serializers.CharField(label="订单id", help_text="示例: BMS201910101706051447155")
    order_price = serializers.CharField(label="订单价格", help_text="示例:1")
    order_type = serializers.CharField(label="订单类型", help_text="示例:新购 ")
    product_info = serializers.CharField(label="订单所购买产品信息", help_text="示例: []")
    product_type = serializers.CharField(label="产品类型", help_text="示例: 云硬盘")
    project_id = serializers.CharField(label="项目 id", help_text="示例:55e40f6dbb6549a19e235e1a7f2d88cf")
    region = serializers.CharField(label="可用区", help_text="示例:regionOne ")
    service_count = serializers.CharField(label="订单所购买服务个数", help_text="示例:1 ")
    update_at = serializers.CharField(label="更新时间", help_text="示例:null")
    class Meta:
        model = ResponseNoneMeta
        fields = ["account_id", "billing_model", "contract_number", "create_at", "deleted", "delivery_status",
                  "id", "order_price", "order_type", "product_info", "product_type", "project_id",
                  "region", "service_count", "update_at"]
class InstUpdateFlavorCountInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for per-flavor machine counts after an update (docs-only serializer)."""
    DELL_VBS_RAID = serializers.CharField(label="基础型")
    DELL_VGS_RAID = serializers.CharField(label="计算型")
    DELL_VSS_RAID = serializers.CharField(label="存储型")
    class Meta:
        model = ResponseNoneMeta
        fields = ["DELL_VBS_RAID", "DELL_VGS_RAID", "DELL_VSS_RAID"]
class FlavorListResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the flavor-list response: payload plus status/code fields."""
    content = FlavorListInfoResponsesSerializer(label="查询到的机器信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息", help_text="示例:Erro info")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class InstImageListResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the image-list response: payload plus status/code fields."""
    content = InstImageListInfoResponsesSerializer(label="查询到的机器信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息", help_text="示例:Erro info")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class InstQosPolicyListResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the QoS-policy-list response: payload plus status/code fields."""
    content = InstQosPolicyListInfoResponsesSerializer(label="获取到的带宽规则列表信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息", help_text="示例:Erro info")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class InstFeedBackResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the instance-feedback response: payload plus status/code fields."""
    content = InstFeedBackInfoResponsesSerializer(label="更新实例后的返回信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息", help_text="示例:Erro info")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class InstMachineGetResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the machine-get response: payload plus status/code fields."""
    # NOTE(review): "Getnfo" in the referenced serializer name looks like a typo
    # for "GetInfo"; defined elsewhere, so it is used as-is here.
    content = InstMachineGetnfoResponsesSerializer(label="查询到的machine信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息", help_text="示例:Erro info")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class InstMachineInfoFeedbackResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the machine-info-feedback response: payload plus status/code fields."""
    content = InstMachineInfoFeedbackInfoResponsesSerializer(label="更新订单服务器信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息", help_text="示例:Erro info")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class InstAllOrderResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the all-orders response: payload plus status/code fields."""
    content = InstAllOrderInfoResponsesSerializer(label="查询到的所有订单信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息", help_text="示例:Erro info")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class InstOrderDeliveryUpdateResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the order-delivery-update response: payload plus status/code fields."""
    content = InstOrderDeriveryUpdateInfoResponsesSerializer(label="查询到的所有订单信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息", help_text="示例:该订单 BMS201908231116166874034 无对应服务器交付信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class OrderByAccoInfoResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the orders-by-account response: payload plus status/code fields."""
    content = InstOrderByAccInfoResponsesSerializer(label="查询到的订单信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息", help_text="示例:该订单 BMS201908231116166874034 无对应服务器交付信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class InstUpdateFlavorCountResponsesSerializer(serializers.ModelSerializer):
    """Envelope for the flavor-count-update response: payload plus status/code fields."""
    content = InstUpdateFlavorCountInfoResponsesSerializer(label="更新后的Flavor个数信息")
    is_ok = serializers.BooleanField(label="成功标识", help_text="示例:{成功:True、失败:False}")
    message = serializers.CharField(label="错误信息", help_text="示例:该订单 BMS201908231116166874034 无对应服务器交付信息")
    no = serializers.IntegerField(label="返回码", help_text="示例:200、400、500、505")
    class Meta:
        model = ResponseNoneMeta
        fields = ["content", "is_ok", "message", "no"]
class FipFirewallInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema describing a firewall attached to a floating IP (docs-only serializer)."""
    create_at = serializers.CharField(label="防火墙创建时间", help_text="示例:")
    deleted_at = serializers.CharField(label="防火墙删除时间", help_text="示例:")
    description = serializers.CharField(label="描述信息")
    enabled = serializers.CharField(label="是否启动防火墙", help_text="示例:1/0")
    id = serializers.CharField(label="防火墙id", help_text="示例:5d5f59d7af7b14da70048a6f")
    name = serializers.CharField(label="防火墙名称", help_text="示例:默认防火墙")
    # NOTE(review): help_text below lacks the "示例:" prefix used by the sibling
    # fields; left unchanged because help_text is runtime-visible API doc text.
    project_id = serializers.CharField(label="项目id", help_text="592a7b130b0c4b48ba3a1f9da70fc86e")
    region = serializers.CharField(label="可用区", help_text="示例:regionOne")
    update_at = serializers.CharField(label="更新时间", help_text="示例:2019-09-24 14:39:14")
    class Meta:
        model = ResponseNoneMeta
        fields = ["create_at", "deleted_at", "description", "enabled", "id", "name", "project_id",
                  "region", "update_at"]
class FipFloatingIpAssociateFirewallInfoResponsesSerializer(serializers.ModelSerializer):
    """Response schema for a floating-IP/firewall association (docs-only serializer)."""
    firewall = FipFirewallInfoResponsesSerializer(label="弹性公网IP成功关联后的防火墙返回信息")
    floating_ip_id = serializers.CharField(label="弹性公网IP id",
                                           help_text="示例:1458e4d9-c0f0-46ae-96f6-3eac0cd34d33")
    class Meta:
        model = ResponseNoneMeta
        fields = ["firewall", "floating_ip_id"]
class FipFloatingIpAssociateFirewallResponsesSerializer(serializers.ModelSerializer):
# --- file boundary: the script below originates from treymer/openshift-tools ---
#!/usr/bin/env python
""" Create application check for v3 """
# We just want to see any exception that happens
# don't want the script to die under any cicumstances
# script must try to clean itself up
# pylint: disable=broad-except
# main() function has a lot of setup and error handling
# pylint: disable=too-many-statements
# main() function raises a captured exception if there is one
# pylint: disable=raising-bad-type
# Adding the ignore because it does not like the naming of the script
# to be different than the class name
# pylint: disable=invalid-name
# test() and main() have a lot of branches
# pylint: disable=too-many-branches
import argparse
import datetime
import json
import logging
import random
import string
import time
import urllib2
# Our jenkins server does not include these rpms.
# In the future we might move this to a container where these
# libs might exist
#pylint: disable=import-error
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
# Module-level setup: one shared logger, one shared OCUtil client, and the
# pacing constants used by the polling loops below.
logging.basicConfig(
    format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ocutil = OCUtil()  # shared oc wrapper used by runOCcmd/runOCcmd_yaml/getPod
commandDelay = 5 # seconds
# use parsed arg instead
#testLoopCountMax = 180 # * commandDelay = 15min
testCurlCountMax = 18 # * commandDelay = 1min30s
testNoPodCountMax = 18 # * commandDelay = 1min30s
def runOCcmd(cmd, base_cmd='oc'):
    """Run an oc command through ocutil, logging the command and how long it took."""
    logger.info("%s %s", base_cmd, cmd)
    started = time.time()
    result = ocutil.run_user_cmd(cmd, base_cmd=base_cmd, )
    logger.info("oc command took %s seconds", str(time.time() - started))
    return result
def runOCcmd_yaml(cmd, base_cmd='oc'):
    """Run an oc command through ocutil and parse its YAML output, with timing logs."""
    logger.info("%s %s", base_cmd, cmd)
    started = time.time()
    parsed = ocutil.run_user_cmd_yaml(cmd, base_cmd=base_cmd, )
    logger.info("oc command took %s seconds", str(time.time() - started))
    return parsed
def parse_args():
    """Build the CLI parser for the end-to-end test and parse sys.argv."""
    logger.debug("parse_args()")
    ap = argparse.ArgumentParser(description='OpenShift app create end-to-end test')
    ap.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
    ap.add_argument('--source', default="quay.io/openshift-sre/hello-openshift:v1.0.6",
                    help='source application to use')
    ap.add_argument('--basename', default="test", help='base name, added to via openshift')
    ap.add_argument('--loopcount', default="36",
                    help="how many 5 second loops before giving up on app creation")
    ap.add_argument('--cpulimit', help='override default CPU limits in the app namespace')
    ap.add_argument('--memlimit', help='override default Memory limits in the app namespace')
    return ap.parse_args()
def send_metrics(build_ran, create_app, route_http_failed, service_http_failed, run_time):
    """Report the test outcome to Zabbix through MetricSender."""
    started = time.time()
    sender = MetricSender()
    sender.add_metric({'openshift.master.app.create.route_http_failed': route_http_failed})
    sender.add_metric({'openshift.master.app.create.service_http_failed': service_http_failed})
    # build runs report under a separate key family so build and plain
    # app-create results can be alerted on independently
    if build_ran == 1:
        prefix = 'openshift.master.app.build.create'
    else:
        prefix = 'openshift.master.app.create'
    sender.add_metric({prefix: create_app})
    sender.add_metric({prefix + '.time': run_time})
    logger.debug("Metrics being sent to zabbix:")
    logger.debug(sender.print_unique_metrics())
    sender.send_metrics()
    logger.info("Data sent to Zagg in %s seconds", str(time.time() - started))
def writeTmpFile(data, filename=None, outdir="/tmp"):
    """Write *data* to ``outdir/filename`` and log the resulting path.

    Args:
        data (str): Content to write.
        filename (str): File name; required in practice. The parameter stays
            optional for backward compatibility, but passing None previously
            crashed with an opaque ``TypeError`` inside ``str.join``.
        outdir (str): Destination directory; defaults to /tmp.

    Raises:
        ValueError: If *filename* is None.
    """
    if filename is None:
        raise ValueError("writeTmpFile() requires a filename")
    filepath = ''.join([
        outdir, '/',
        filename,
    ])
    with open(filepath, 'w') as f:
        f.write(data)
    logger.info("wrote file: %s", filepath)
def curl(ip_addr, port, timeout=30):
    """Fetch http://ip_addr:port and return the HTTP status code (0 if unreachable)."""
    url = 'http://{}:{}'.format(ip_addr, port)
    logger.debug("curl(%s timeout=%ss)", url, timeout)
    try:
        return urllib2.urlopen(url, timeout=timeout).getcode()
    except urllib2.HTTPError as err:
        # the server answered with an HTTP error -- that is still a status code
        return err.fp.getcode()
    except Exception as err:
        logger.exception("Curl failed to connect to host")
        return 0
def get_all_limitranges(namespace):
    """Return the names of all limitranges in *namespace* (empty list if none found)."""
    lr_info = runOCcmd_yaml("get limitrange -n {}".format(namespace))
    try:
        return [entry['metadata']['name'] for entry in lr_info['items']]
    except KeyError:
        # missing keys mean no usable limitranges -- just ignore them
        return []
def getPodStatus(pod):
    """Render a short human-readable status line for *pod* (name + phase)."""
    if not pod:
        return "no pod"
    status = pod['status']
    if not status:
        return "no pod status"
    return "{} {}".format(pod['metadata']['name'], status['phase'])
def getPod(name):
    """Return the most relevant pod whose name starts with *name*, or None."""
    chosen = None
    for candidate in ocutil.get_pods()['items']:
        if not (candidate and candidate['metadata']['name']):
            continue
        pod_name = candidate['metadata']['name']
        if not pod_name.startswith(name):
            continue
        # prefer the app pod (podname-xyz12): once some pod is held, never
        # let a build/deploy helper pod replace it; if nothing is held yet,
        # any matching pod will do for now
        if chosen and pod_name.endswith(("build", "deploy")):
            continue
        chosen = candidate
    return chosen
def setup(config):
    """ global setup for tests: create the project, apply limitranges, create the app """
    logger.info('setup()')
    logger.debug(config)
    project = None
    try:
        project = runOCcmd_yaml("get project {}".format(config.namespace))
        logger.debug(project)
    except Exception:
        pass # don't want exception if project not found
    # Create a new project using 'oc' instead of 'oc adm'.
    # This will test the project-request template as part of project creation.
    # Skip writing to kubeconfig, since we don't need to keep a record of every project created.
    if not project:
        try:
            runOCcmd("new-project {} --skip-config-write=true".format(config.namespace),
                     base_cmd='oc')
            time.sleep(commandDelay)
        except Exception:
            logger.exception('error creating new project')
    # Apply limitrange defaults, if limitranges aren't being ignored
    # (supplying only one of the two limit flags is treated as a user error)
    if len([x for x in (config.cpulimit, config.memlimit) if x is not None]) == 1:
        logger.warning('--cpulimit and --memlimit must both be supplied '
                       ' no limitrange change will be applied')
    if config.cpulimit and config.memlimit:
        logger.debug('Applying limitrange defaults cpu=%s,memory=%s',
                     config.cpulimit, config.memlimit)
        limitranges = get_all_limitranges(config.namespace)
        for limitrange in limitranges:
            try:
                # Create the patch in JSON form
                limitpatch = {
                    "spec": {
                        "limits": [
                            {
                                "default": {
                                    "cpu": config.cpulimit,
                                    "memory": config.memlimit
                                },
                                "defaultRequest": {
                                    "cpu": config.cpulimit,
                                    "memory": config.memlimit
                                },
                                "type": "Container"
                            }
                        ]
                    }
                }
                # Convert patch to string for supplying to oc
                limitpatch_str = json.dumps(limitpatch)
                occmd = "patch limitrange {} -n {} -p '{}'".format(limitrange, config.namespace,
                                                                  limitpatch_str)
                runOCcmd(occmd, base_cmd='oc')
            except Exception:
                logger.exception('error patching project limitrange')
    # Create and build the application
    runOCcmd("new-app {} --name={} -n {}".format(
        config.source,
        config.podname,
        config.namespace,
    ))
    # Expose the service to test route creation.
    runOCcmd("expose svc {} -n {}".format(
        config.podname,
        config.namespace,
    ))
def testCurl(config):
    """ run curl and return service_http_code and route_http_code, have retries """
    logger.info('testCurl()')
    logger.debug(config)
    route_http_code = 0
    service_http_code = 0
    # attempt retries: keep polling until both the service and the route
    # answer 200, or the retry budget (testCurlCountMax) runs out
    for curlCount in range(testCurlCountMax):
        # introduce small delay to give time for route to establish
        time.sleep(commandDelay)
        service = ocutil.get_service(config.podname)
        if service:
            logger.debug("service")
            logger.debug(service)
            # hit the service directly via its clusterIP on the first exposed port
            service_http_code = curl(
                service['spec']['clusterIP'],
                service['spec']['ports'][0]['port']
            )
            logger.debug("service http code %s", service_http_code)
        route = ocutil.get_route(config.podname)
        if route:
            logger.debug("route")
            logger.debug(route)
            # hit the route by hostname on port 80 (router's external port)
            route_http_code = curl(
                route['spec']['host'],
                80
            )
            logger.debug("route http code %s", route_http_code)
        if route_http_code == 200 and service_http_code == 200:
            logger.debug("route and service curl completed in %d tries", curlCount)
            break
    return route_http_code, service_http_code
def test(config):
    """Poll the test pod to completion and return a result dict for send_metrics().

    Returns a dict with keys: build_ran, create_app (0=success, 1=failure),
    route_http_code, service_http_code, route_http_failed, service_http_failed,
    failed, pod.
    """
    logger.info('test()')
    logger.debug(config)
    build_ran = 0
    pod = None
    noPodCount = 0
    route_http_code = 0
    service_http_code = 0
    # assume these have failed, until we see a success.
    route_http_failed = 1
    service_http_failed = 1
    for _ in range(int(config.loopcount)):
        time.sleep(commandDelay)
        pod = getPod(config.podname)
        if not pod:
            noPodCount = noPodCount + 1
            if noPodCount > testNoPodCountMax:
                logger.critical("cannot find pod, fail early")
                break
            logger.debug("cannot find pod")
            continue # cannot test pod further
        noPodCount = 0
        if not pod['status']:
            logger.error("no pod status")
            continue # cannot test pod further
        logger.info(getPodStatus(pod))
        if pod['status']['phase']:
            if pod['status']['phase'] == "Failed":
                logger.error("Pod Failed")
                break
            if pod['status']['phase'] == "Error":
                logger.error("Pod Error")
                break
            # build/deploy helper pods only tell us a build happened; keep waiting
            # for the app pod itself
            if pod['metadata']['name'].endswith("build"):
                build_ran = 1
                continue
            if pod['metadata']['name'].endswith("deploy"):
                continue
            # Fix: dict.has_key() was removed in Python 3; the `in` operator is
            # equivalent and works on both Python 2 and 3.
            if pod['status']['phase'] == 'Running' \
              and 'podIP' in pod['status'] \
              and not pod['metadata']['name'].endswith("build"):
                route_http_code, service_http_code = testCurl(config)
                # For the purpose of Zabbix alerting, it's easiest to do a
                # pass/fail numeric value here. This helps when totaling
                # the number of failures per hour.
                if route_http_code == 200:
                    route_http_failed = 0
                if service_http_code == 200:
                    service_http_failed = 0
                return {
                    'build_ran': build_ran,
                    'create_app': 0, # app create succeeded
                    'route_http_code': route_http_code,
                    'service_http_code': service_http_code,
                    'route_http_failed': route_http_failed,
                    'service_http_failed': service_http_failed,
                    # route/service curl failures are reported independently of app-create now
                    'failed': False,
                    'pod': pod,
                }
            logger.info("Not running HTTP status checks because pod isn't running")
    if build_ran:
        logger.critical("build timed out, please check build log for last messages")
    else:
        logger.critical("app create timed out, please check event log for information")
    return {
        'build_ran': build_ran,
        'create_app': 1, # app create failed
        'route_http_code': route_http_code,
        'service_http_code': service_http_code,
        'route_http_failed': route_http_failed,
        'service_http_failed': service_http_failed,
        'failed': True,
        'pod': pod,
    }
def teardown(config):
    """Remove the test project (and with it every object the test created)."""
    logger.info('teardown()')
    logger.debug(config)
    # small grace period so in-flight oc operations settle before deletion
    time.sleep(commandDelay)
    runOCcmd("delete project {}".format(config.namespace))
def main():
""" setup / test / teardown with exceptions to ensure teardown """
exception = None
logger.info('################################################################################')
logger.info(' Starting App Create - %s', datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
logger.info('################################################################################')
logger.debug("main()")
args = parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
############# generate unique podname and namespace #############
args.uid = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(2))
args.timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
args.podname = '-'.join([args.basename, args.timestamp, args.uid]).lower()
args.namespace = '-'.join(["sre-app-check", args.timestamp, args.uid]).lower()
# This number is based on a few different facts:
# 1. The FQDN for a route must be no longer than 63 characters.
# 2. Namespaces must be no longer than 63 characters.
# 3. Pod | |
summary."""
if not self.pragma.comp_summary:
return
if len(self.comp_hist.mem_iter) == 0:
print('No computational summary to display')
return
mem = {
'min': Size.bytes2human(min (self.comp_hist.mem_iter)),
'max': Size.bytes2human(max (self.comp_hist.mem_iter)),
'mean': Size.bytes2human(statistics.mean (self.comp_hist.mem_iter)),
'median': Size.bytes2human(statistics.median (self.comp_hist.mem_iter)),
'stdev': Size.bytes2human(statistics.stdev (self.comp_hist.mem_iter)),
'sum': Size.bytes2human(sum (self.comp_hist.mem_iter))
}
t = {
'min': Time.tsdiff2human(min (self.comp_hist.t_iter)),
'max': Time.tsdiff2human(max (self.comp_hist.t_iter)),
'mean': Time.tsdiff2human(statistics.mean (self.comp_hist.t_iter)),
'median': Time.tsdiff2human(statistics.median (self.comp_hist.t_iter)),
'stdev': Time.tsdiff2human(statistics.stdev (self.comp_hist.t_iter))
}
print('Computational summary')
print(f' Iteration memory : Range: [{mem["min"]}, {mem["max"]}] Mean (SD): {mem["mean"]} ({mem["stdev"]}) Median: {mem["median"]} Sum: {mem["sum"]}')
print(f' Iteration time : Range: [{t ["min"]}, {t ["max"]}] Mean (SD): {t ["mean"]} ({t ["stdev"]}) Median: {t ["median"]}')
print(f' Simulation time : {Time.tsdiff2human(self.comp_hist.t_sim)}')
    def _save(self, fpath, fn):
        """Pickle this simulation to *fpath* using file-opener *fn* (e.g. open, bz2.BZ2File)."""
        with fn(fpath, 'wb') as f:
            pickle.dump(self, f)
def save(self, fpath):
"""Serialize simulation to a file.
Args:
fpath (str): Destination file path.
Returns:
``self``
"""
self._pickle(fpath, open)
return self
def save_bz2(self, fpath):
"""Serialize simulation to a bzip2-compressed file.
Args:
fpath (str): Destination file path.
Returns:
``self``
"""
self._pickle(fpath, bz2.BZ2File)
return self
def save_gz(self, fpath):
"""Serialize simulation to a gzip-compressed file.
Args:
fpath (str): Destination file path.
Returns:
``self``
"""
self._pickle(fpath, gzip.GzipFile)
return self
def save_state(self, mass_flow_specs=None):
"""Call the save simulation state callback function.
Args:
mass_flow_specs(MassFlowSpecs, optional): Mass flow specs.
Returns:
``self``
"""
# # if self.cb.save_state and mass_flow_specs:
# # self.cb.save_state(mass_flow_specs)
#
# if self.cb.save_state:
# self.cb.save_state(mass_flow_specs)
#
# if self.traj is None:
# return self
#
# if self.timer.i > 0: # we check timer not to save initial state of a simulation that's been run before
# self.traj.save_state(None)
# else:
# self.traj.save_state(mass_flow_specs)
if self.cb.save_state:
self.cb.save_state([{
'type' : 'state',
'host_name' : None,
'host_ip' : None,
'traj_id' : self.traj_id, # self.traj.id if self.traj else None,
'iter' : self.timer.i if self.timer.is_running else -1,
'pop_m' : self.pop.get_mass(),
'groups' : [{ 'hash': g.get_hash(), 'm': g.m, 'attr': g.attr, 'rel': g.rel } for g in self.pop.groups.values()], # self.pop.get_groups()
# 'mass_flow_specs' : mass_flow_specs if self.timer.i > 0 else None
'mass_flow_specs' : mass_flow_specs
}])
return self
    def set(self):
        """Simulation element setter (entry point of the fluent configuration API).

        Returns:
            SimulationSetter
        """
        return SimulationSetter(self)
    def set_cb_after_iter(self, fn):
        """Set the after-iteration callback function.
        See :meth:`~pram.sim.Simulation.reset_cb`.
        Args:
            fn (Callable): The function.
        Returns:
            ``self``
        """
        self.cb.after_iter = fn
        return self
    def set_cb_before_iter(self, fn):
        """Set the before-iteration callback function.
        See :meth:`~pram.sim.Simulation.reset_cb`.
        Args:
            fn (Callable): The function.
        Returns:
            ``self``
        """
        self.cb.before_iter = fn
        return self
    def set_cb_check_work(self, fn):
        """Set the check-work callback function.
        See :meth:`~pram.sim.Simulation.reset_cb`.
        Args:
            fn (Callable): The function.
        Returns:
            ``self``
        """
        self.cb.check_work = fn
        return self
    def set_cb_save_state(self, fn):
        """Set the save-state callback function (invoked by :meth:`save_state`).
        See :meth:`~pram.sim.Simulation.reset_cb`.
        Args:
            fn (Callable): The function.
        Returns:
            ``self``
        """
        self.cb.save_state = fn
        return self
    def set_cb_upd_progress(self, fn):
        """Set the progress-update callback function.
        See :meth:`~pram.sim.Simulation.reset_cb`.
        Args:
            fn (Callable): The function.
        Returns:
            ``self``
        """
        self.cb.upd_progress = fn
        return self
    def set_fn_group_setup(self, fn):
        """Set the group setup function.
        The group setup function is the last function that is called before the simulatin is deemed ready for
        execution.  An example of how that function could be used is to make a certain proportion of population
        infected with a disease in a epidemiological modeling setting.
        Args:
            fn (Callable): The function.
        Returns:
            ``self``
        """
        self.fn.group_setup = fn
        return self
def set_pragmas(self, analyze=None, autocompact=None, autoprune_groups=None, autostop=None, autostop_n=None, autostop_p=None, autostop_t=None, comp_summary=None, fractional_mass=None, live_info=None, live_info_ts=None, probe_capture_init=None, rule_analysis_for_db_gen=None):
"""Sets values of multiple pragmas.
See :meth:`~pram.sim.Simulation.get_pragma`.
Args:
Names of all pragmas; values set only when not None (which is also the default value).
Returns:
``self``
"""
if analyze is not None: self.set_pragma_analyze(analyze),
if autocompact is not None: self.set_pragma_autocompact(autocompact),
if autoprune_groups is not None: self.set_pragma_autoprune_groups(autoprune_groups),
if autostop is not None: self.set_pragma_autostop(autostop),
if autostop_n is not None: self.set_pragma_autostop_n(autostop_n),
if autostop_p is not None: self.set_pragma_autostop_p(autostop_p),
if autostop_t is not None: self.set_pragma_autostop_t(autostop_t),
if comp_summary is not None: self.set_pragma_comp_summary(comp_summary),
if fractional_mass is not None: self.set_pragma_fractional_mass(fractional_mass),
if live_info is not None: self.set_pragma_live_info(live_info),
if live_info_ts is not None: self.set_pragma_live_info_ts(live_info_ts),
if probe_capture_init is not None: self.set_pragma_probe_capture_init(probe_capture_init),
if rule_analysis_for_db_gen is not None: self.set_pragma_rule_analysis_for_db_gen(rule_analysis_for_db_gen)
return self
def set_pragma(self, name, value):
"""Set value of the designated pragma.
See :meth:`~pram.sim.Simulation.get_pragma`.
Args:
name(str): Pragma.
value(Any): Value.
Returns:
``self``
"""
fn = {
'analyze' : self.set_pragma_analyze,
'autocompact' : self.set_pragma_autocompact,
'autoprune_groups' : self.set_pragma_autoprune_groups,
'autostop' : self.set_pragma_autostop,
'autostop_n' : self.set_pragma_autostop_n,
'autostop_p' : self.set_pragma_autostop_p,
'autostop_t' : self.set_pragma_autostop_t,
'live_info' : self.set_pragma_live_info,
'live_info_ts' : self.set_pragma_live_info_ts,
'probe_capture_init' : self.set_pragma_probe_capture_init,
'rule_analysis_for_db_gen' : self.set_pragma_rule_analysis_for_db_gen
}.get(name, None)
if fn is None:
raise TypeError(f"Pragma '{name}' does not exist.")
fn(value)
return self
def set_pragma_analyze(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.analyze = value
return self
def set_pragma_autocompact(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.autocompact = value
return self
def set_pragma_autoprune_groups(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.autoprune_groups = value
return self
def set_pragma_autostop(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.autostop = value
return self
def set_pragma_autostop_n(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.autostop_n = value
return self
def set_pragma_autostop_p(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.autostop_p = value
return self
def set_pragma_autostop_t(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.autostop_t = value
return self
def set_pragma_fractional_mass(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.fractional_mass = value
return self
def set_pragma_comp_summary(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.comp_summary = value
return self
def set_pragma_live_info(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.live_info = value
return self
def set_pragma_live_info_ts(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.live_info_ts = value
return self
def set_pragma_probe_capture_init(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.probe_capture_init = value
return self
def set_pragma_rule_analysis_for_db_gen(self, value):
"""See :meth:`~pram.sim.Simulation.get_pragma`.
Returns:
``self``
"""
self.pragma.rule_analysis_for_db_gen = value
return self
def set_rand_seed(self, rand_seed=None):
"""Set pseudo-random generator seed.
Both the ``random`` and ``numpy`` generators' seeds are set.
Args:
rand_seed (int, optional): The seed.
Returns:
``self``
"""
self.rand_seed = rand_seed
if self.rand_seed is not None:
random.seed(self.rand_seed)
np.random.seed(self.rand_seed)
return self
def set_var(self, name, val):
"""Set value of a simulation variable.
Args:
name (str): The variable.
val (any): The value.
Returns:
``self``
"""
self.vars[name] = val
return self
def set_vars(self, vars):
"""Set values of multiple simulation variables.
Args:
vars (Mapping[str, Any]): A dictionary of variable name-value pairs.
Returns:
``self``
"""
for k,v in vars.items():
self.vars[k] = v
return self
def show_rule_analysis(self):
"""Display the results of both static and dynamic rule analyses.
See :class:`~pram.sim.StaticRuleAnalyzer` and :class:`~pram.sim.DynamicRuleAnalyzer`.
Returns:
``self``
"""
self.show_static_rule_analysis()
self.show_dynamic_rule_analysis()
return self
    def show_rule_analysis_dynamic(self):
        """Display the results of dynamic rule analyses.

        Prints, for the most recent simulation run, the attributes and
        relations actually used, the ones carried by groups, and the
        superfluous (unused) ones.

        See :class:`~pram.sim.DynamicRuleAnalyzer`.

        Returns:
            ``self``
        """
        rd = self.analysis.rule_dynamic
        # The exact spacing inside the literals produces an aligned console report.
        print( 'Most recent simulation run')
        print( ' Used')
        print(f' Attributes : {list(rd.attr_used)}')
        print(f' Relations : {list(rd.rel_used)}')
        print( ' Groups')
        print(f' Attributes : {list(rd.attr_groups)}')
        print(f' Relations : {list(rd.rel_groups)}')
        print( ' Superfluous')
        print(f' Attributes : {list(rd.attr_unused)}')
        print(f' Relations : {list(rd.rel_unused)}')
        return self
    def show_rule_analysis_static(self):
        """Display the results of static rule analyses.

        Prints the attributes and relations the static analyzer found to be
        used by rules, followed by the superfluous (unused) ones.

        See :class:`~pram.sim.StaticRuleAnalyzer`.

        Returns:
            ``self``
        """
        ra = self.analysis.rule_static
        # The exact spacing inside the literals produces an aligned console report.
        print( 'Rule analyzer')
        print( ' Used')
        print(f' Attributes : {list(ra.attr_used)}')
        print(f' Relations : {list(ra.rel_used)}')
        print( ' Superfluous')
        print(f' Attributes : {list(ra.attr_unused)}')
        print(f' Relations : {list(ra.rel_unused)}')
        # print( ' Counts')
        # print(f' Recognized : get_attr:{ra.cnt_rec["get_attr"]} get_rel:{ra.cnt_rec["get_rel"]} has_attr:{ra.cnt_rec["has_attr"]} has_rel:{ra.cnt_rec["has_rel"]}')
        # print(f' Unrecognized : get_attr:{ra.cnt_unrec["get_attr"]} get_rel:{ra.cnt_unrec["get_rel"]} has_attr:{ra.cnt_unrec["has_attr"]} has_rel:{ra.cnt_unrec["has_rel"]}')
        return self
def show_summary(self, do_header=True, n_groups=8, n_sites=8, n_rules=8, n_probes=8, end_line_cnt=(0,0)):
"""Display simulation summary.
Args:
do_header (bool): Display header?
n_groups (int): Maximum number of groups to be displayed.
n_sites (int): Maximum number of groups to be displayed.
n_rules (int): Maximum number of groups to be displayed.
n_probes (int): Maximum number of groups to be displayed.
end_line_cnt (tuple[int,int]): The number of endline characters before and after the summary.
Returns:
``self``
"""
print('\n' * end_line_cnt[0], end='')
if do_header:
print( 'Simulation')
print(f' Random seed: {self.rand_seed}')
print( ' Timing')
print(f' Timer: {self.timer}')
print( ' Population')
print(f' Mass : {"{:,.2f}".format(round(self.pop.get_mass(), 1))}')
print(f' Groups : {"{:,}".format(self.pop.get_group_cnt())}')
print(f' Groups (ne) : {"{:,}".format(self.pop.get_group_cnt(True))}')
print(f' Sites : {"{:,}".format(self.pop.get_site_cnt())}')
print(f' Rules : {"{:,}".format(len(self.rules))}')
print(f' Probes : {"{:,}".format(len(self.probes))}')
if self.pragma.analyze:
print( ' Static rule analysis')
print( ' Used')
print(f' Attributes : {list(self.analysis.rule_static.attr_used)}')
print(f' Relations : {list(self.analysis.rule_static.rel_used)}')
print( ' Superfluous')
print(f' Attributes : {list(self.analysis.rule_static.attr_unused)}')
print(f' Relations : {list(self.analysis.rule_static.rel_unused)}')
print( ' Dynamic rule analysis')
print( ' Used')
print(f' Attributes : {list(self.analysis.rule_dynamic.attr_used)}')
print(f' Relations : {list(self.analysis.rule_dynamic.rel_used)}')
# print(' Dynamic - Groups')
# print(f' Attributes : {list(self.analysis.rule_dynamic.attr_groups)}')
# print(f' Relations : {list(self.analysis.rule_dynamic.rel_groups)}')
print( ' Superfluous')
print(f' Attributes : {list(self.analysis.rule_dynamic.attr_unused)}')
print(f' Relations : {list(self.analysis.rule_dynamic.rel_unused)}')
| |
#!/usr/bin/env python
######################################################
# GUI to visualize ROMS input/output files
# Sep 2021
# <EMAIL>
######################################################
import os
import wx
import datetime as dt
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as Navbar
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import scipy.io as sp
import netCDF4 as nc
from lib import *
# TO-DO LIST: ====================================================
# - correct bug with date selection: somehow the times re-start
# every 00z
# - need to decide which x-axis to use, lon or lat
# ================================================================
# NICE TIP TO DEBUG THIS PROGRAM: ================================
# - comment out app.MainLoop at the last line of this script
# - ipython --gui=wx
# - run pyromsgui.py
# - trigger the events and check out the objects in the shell
# ================================================================
global currentDirectory
currentDirectory = os.getcwd()
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
DEFAULT_VMIN = 0
DEFAULT_VMAX = 1.5
DEFAULT_CMAP = plt.cm.BrBG
DEFAULT_DEPTH_FOR_LAND = -50
class App(wx.App):
    """wx application bootstrap: builds and shows the main window."""

    def OnInit(self):
        frame = Interface("PyRomsGUI 0.1.0", size=(1024, 800))
        frame.Show()
        self.frame = frame
        return True
class Interface(wx.Frame):
    """Main application window.

    Hosts a control panel (variable and time-record selectors) on the left
    and a matplotlib canvas on the right.
    """

    def __init__(self, title=wx.EmptyString, pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,
                 *args, **kwargs):
        # Fix: the `title` argument used to be silently ignored (the window
        # title was hard-coded); pass the caller-supplied title through.
        # The only existing caller (App.OnInit) passes the same string, so
        # behavior is unchanged for current code.
        wx.Frame.__init__(self, None, -1, title, pos=pos,
                          size=size, style=style, *args, **kwargs)
        # Initializing toolbar
        self.toolbar = MainToolBar(self)
        # BASIC LAYOUT OF THE NESTED SIZERS ======================
        panel1 = wx.Panel(self, wx.ID_ANY, style=wx.SUNKEN_BORDER)
        mplpanel = wx.Panel(self, wx.ID_ANY, style=wx.SUNKEN_BORDER)
        mplpanel.SetBackgroundColour("WHITE")
        # BOX 1 is the main sizer
        box1 = wx.BoxSizer(wx.HORIZONTAL)
        box1.Add(panel1, 1, wx.EXPAND)
        box1.Add(mplpanel, 4, wx.EXPAND)
        # BOX 2 is the inner sizer of the left big control panel
        box2 = wx.BoxSizer(wx.VERTICAL)
        # BOX 3 is the sizer of the right big parent panel (panel1), the one
        # that serves as base for the matplotlib canvas panel.
        box3 = wx.BoxSizer(wx.VERTICAL)
        # panel 1 content ========================================
        variable = wx.StaticText(panel1, label="Variable")
        box2.Add(variable, proportion=0, flag=wx.CENTER)
        self.var_select = wx.ComboBox(panel1, value='Choose variable')
        box2.Add(self.var_select, proportion=0, flag=wx.CENTER)
        self.var_select.Bind(wx.EVT_COMBOBOX, self.toolbar.OnUpdateHslice)
        time = wx.StaticText(panel1, label="Time record")
        box2.Add(time, proportion=0, flag=wx.CENTER)
        self.time_select = wx.ComboBox(panel1, value='Choose time step')
        box2.Add(self.time_select, proportion=0, flag=wx.CENTER)
        self.time_select.Bind(wx.EVT_COMBOBOX, self.toolbar.OnUpdateHslice)
        # mplpanel content ========================================
        self.mplpanel = SimpleMPLCanvas(mplpanel)
        box3.Add(self.mplpanel.canvas, 1, flag=wx.CENTER)
        # FINAL LAYOUT CONFIGURATIONS ============================
        self.SetAutoLayout(True)
        panel1.SetSizer(box2)
        mplpanel.SetSizer(box3)
        self.SetSizer(box1)
        self.InitMenu()
        self.Layout()
        self.Centre()

    def InitMenu(self):
        """Build the menu bar.

        NOTE(review): several menu entries share wx.ID_OPEN / wx.ID_SAVE, so
        routing by id is ambiguous; the explicit per-item Bind calls below
        are what disambiguate the handlers.
        """
        menubar = wx.MenuBar()
        fileMenu = wx.Menu()
        fileMenu.Append(wx.ID_OPEN, u'&Open ROMS grid file')
        fileMenu.Append(wx.ID_OPEN, u'&Open coastline file')
        fileMenu.Append(wx.ID_SAVE, '&Save grid')
        fileMenu.AppendSeparator()
        qmi = wx.MenuItem(fileMenu, wx.ID_EXIT, '&Quit\tCtrl+W')
        opf = wx.MenuItem(fileMenu, wx.ID_OPEN, '&Open\tCtrl+O')
        opc = wx.MenuItem(fileMenu, wx.ID_OPEN, '&Open\tCtrl+O+C')
        svf = wx.MenuItem(fileMenu, wx.ID_SAVE, '&Save\tCtrl+S')
        fileMenu.AppendItem(qmi)
        # fileMenu.AppendItem(svf)
        self.Bind(wx.EVT_MENU, self.OnQuit, qmi)
        self.Bind(wx.EVT_MENU, self.toolbar.OnLoadFile, opf)
        self.Bind(wx.EVT_MENU, self.toolbar.OnLoadCoastline, opc)
        self.Bind(wx.EVT_MENU, self.toolbar.OnPlotVslice, svf)
        menubar.Append(fileMenu, u'&PyRomsGUI')
        self.SetMenuBar(menubar)

    def OnQuit(self, e):
        """Close the program."""
        self.Close()
        self.Destroy()

    def OnCloseWindow(self, e):
        """Destroy the frame when the window is closed."""
        self.Destroy()
class SimpleMPLCanvas(object):
    """A matplotlib Figure embedded in a wx canvas, plus a navigation bar."""

    def __init__(self, parent):
        super(SimpleMPLCanvas, self).__init__()
        self.parent = parent
        # plot_properties() must run first: it creates self.canvas, which
        # make_navbar() wraps.
        self.plot_properties()
        self.make_navbar()

    def make_navbar(self):
        """Attach the matplotlib navigation toolbar to the canvas."""
        self.navbar = Navbar(self.canvas)
        self.navbar.SetPosition(wx.Point(0, 0))  # this is not working !!

    def plot_properties(self):
        """Create the matplotlib figure, wx canvas and a single axes."""
        self.fig = Figure(facecolor='w', figsize=(12, 8))
        self.canvas = FigureCanvas(self.parent, -1, self.fig)
        self.ax = self.fig.add_subplot(111)
class MainToolBar(object):
    """Toolbar for the main window; also hosts all file-loading and
    plotting event handlers.

    NOTE(review): handlers reference the module-level ``app`` object to get
    at the main frame's widgets; this class therefore only works once the
    wx App has been created.
    """

    def __init__(self, parent):
        # parent is the Interface frame; the toolbar is created on it.
        self.currentDirectory = os.getcwd()
        self.parent = parent
        self.toolbar = parent.CreateToolBar(style=1, id=1,
                                            name="Toolbar")
        # (bitmap, short help, long help) per tool id.
        self.tools_params = {
            'load_file': (load_bitmap('grid.png'), u"Load ROMS netcdf file",
                          "Load ocean_???.nc ROMS netcdf file"),
            'load_coastline': (load_bitmap('coast.png'), u"Load coastline",
                               "Load *.mat coastline file [lon / lat poligons]"),
            'plot_vslice': (load_bitmap('save.png'), u"Plot vertical slice",
                            "Plot vertical slice of some variable"),
            'settings': (load_bitmap('settings.png'), u"PyRomsGUI settings",
                         "PyRomsGUI configurations"),
            'quit': (load_bitmap('exit.png'), u"Quit",
                     "Quit PyRomsGUI"),
        }
        self.createTool(self.toolbar, self.tools_params['load_file'],
                        self.OnLoadFile)
        self.createTool(self.toolbar, self.tools_params['load_coastline'],
                        self.OnLoadCoastline)
        self.toolbar.AddSeparator()
        # from IPython import embed; embed()
        self.plot_vslice = self.createTool(self.toolbar,
                                           self.tools_params['plot_vslice'],
                                           self.OnPlotVslice)
        self.toolbar.AddSeparator()
        self.createTool(self.toolbar, self.tools_params['settings'],
                        self.OnSettings)
        self.createTool(self.toolbar, self.tools_params['quit'],
                        self.parent.OnQuit)
        self.toolbar.Realize()

    def createTool(self, parent, params, evt, isToggle=False):
        """Add one tool to *parent* and bind *evt* as its handler.

        NOTE(review): wx.NewId() is deprecated in wxPython 4; wx.NewIdRef()
        is the modern replacement — confirm against the wx version in use.
        """
        tool = parent.AddTool(wx.NewId(), 'a', params[0], shortHelp=params[1])
        self.parent.Bind(wx.EVT_TOOL, evt, id=tool.GetId())
        return tool

    def OnLoadFile(self, evt):
        """Prompt for a ROMS output file and then its grid file; plot bathymetry."""
        openFileDialog = wx.FileDialog(self.parent, "Open roms netcdf file [*.nc]",
                                       "/ops/hindcast/roms/", " ",
                                       "netcdf files (*.nc)|*.nc",
                                       wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        if openFileDialog.ShowModal() == wx.ID_CANCEL:
            return   # the user changed idea...
        filename = openFileDialog.GetPath()
        self.ncfile = nc.Dataset(filename)
        # this function is intended to return relevant information on the file
        varlist, axeslist, time = taste_ncfile(self.ncfile)
        timelist = romsTime2string(time)
        # Populate the selectors on the main frame.
        app.frame.var_select.SetItems(varlist)
        app.frame.time_select.SetItems(timelist)
        app.frame.time_select.SetValue(timelist[0])
        # opening ROMS grid
        openFileDialog = wx.FileDialog(self.parent, "Open roms GRID netcdf file [*_grd.nc]",
                                       "/ops/hindcast/roms/", " ",
                                       "netcdf files (*.nc)|*.nc",
                                       wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        if openFileDialog.ShowModal() == wx.ID_CANCEL:
            return   # the user changed idea...
        grdname = openFileDialog.GetPath()
        self.grd = nc.Dataset(grdname)
        lon = self.grd.variables['lon_rho'][:]
        lat = self.grd.variables['lat_rho'][:]
        h = self.grd.variables['h'][:]
        mplpanel = app.frame.mplpanel
        ax = mplpanel.ax
        # Initial display: bathymetry on the rho grid.
        self.pcolor = ax.pcolormesh(lon, lat, h, cmap=plt.cm.terrain_r)
        ax.set_xlim([lon.min(), lon.max()])
        ax.set_ylim([lat.min(), lat.max()])
        ax.set_aspect('equal')
        mplpanel.canvas.draw()

    def OnUpdateHslice(self, evt):
        """Redraw the horizontal slice for the currently selected variable/time.

        For 4-D variables the surface (last vertical) level is shown.
        """
        # from IPython import embed; embed()
        varname = app.frame.var_select.GetValue()
        var = self.ncfile.variables[varname]
        dimensions = var.dimensions
        # The last dimension name encodes the staggered grid ('rho', 'u', 'v').
        grid = dimensions[-1].split('_')[-1]
        lon = self.grd.variables['lon_'+grid][:]
        lat = self.grd.variables['lat_'+grid][:]
        # time index
        varlist, axeslist, time = taste_ncfile(self.ncfile)
        timestr = app.frame.time_select.GetValue()
        selected_time = string2romsTime(timestr, self.ncfile)
        # from IPython import embed; embed()
        tindex = np.where(time[:] == selected_time)[0][0]
        if len(dimensions) == 3:
            arr = var[tindex, ...]
        if len(dimensions) == 4:
            arr = var[tindex, -1, ...]
        mplpanel = app.frame.mplpanel
        ax = mplpanel.ax
        ax.clear()
        ax.pcolormesh(lon, lat, arr, cmap=plt.cm.jet)
        ax.set_xlim([lon.min(), lon.max()])
        ax.set_ylim([lat.min(), lat.max()])
        ax.set_title("%s %s" % (varname, timestr))
        ax.set_aspect('equal')
        mplpanel.canvas.draw()

    def OnLoadCoastline(self, evt):
        """Overlay a MATLAB Seagrid-style coastline (.mat) on the map."""
        openFileDialog = wx.FileDialog(self.parent, "Open coastline file - MATLAB Seagrid-like format",
                                       "/home/rsoutelino/metocean/projects/mermaid", " ",
                                       "MAT files (*.mat)|*.mat",
                                       wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        if openFileDialog.ShowModal() == wx.ID_CANCEL:
            return   # the user changed idea...
        filename = openFileDialog.GetPath()
        coast = sp.loadmat(filename)
        lon, lat = coast['lon'], coast['lat']
        mplpanel = app.frame.mplpanel
        ax = mplpanel.ax
        ax.plot(lon, lat, 'k')
        try:
            ax.set_xlim([self.grd.lonr.min(), self.grd.lonr.max()])
            ax.set_ylim([self.grd.latr.min(), self.grd.latr.max()])
        except AttributeError:   # just in case a grid was not loaded before
            ax.set_xlim([np.nanmin(lon), np.nanmax(lon)])
            ax.set_ylim([np.nanmin(lat), np.nanmax(lat)])
        ax.set_aspect('equal')
        mplpanel.canvas.draw()

    def OnPlotVslice(self, evt):
        """Arm the canvas: the next mouse clicks define the slice endpoints."""
        mplpanel = app.frame.mplpanel
        self.cid = mplpanel.canvas.mpl_connect(
            'button_press_event', self.vslice)

    def OnSettings(self, evt):
        # Not implemented yet.
        pass

    def vslice(self, evt):
        """Mouse handler: collect two points, then extract and show the
        vertical slice between them in a VsliceDialog.
        """
        if evt.inaxes != app.frame.mplpanel.ax:
            return
        mplpanel = app.frame.mplpanel
        ax = mplpanel.ax
        x, y = evt.xdata, evt.ydata
        button = evt.button
        p = ax.plot(x, y, 'wo', markeredgecolor='k')
        # First click initializes the point lists; later clicks append.
        try:
            self.points.append(p)
            self.area.append((x, y))
        except AttributeError:
            self.points = [p]
            self.area = [(x, y)]
        if len(self.points) == 2:
            ax.plot([self.area[0][0], self.area[1][0]],
                    [self.area[0][1], self.area[1][1]], 'k')
            p1, p2 = self.area[0], self.area[1]
        mplpanel.canvas.draw()
        if len(self.points) == 2:
            # assigning relevant variables
            varname = app.frame.var_select.GetValue()
            var = self.ncfile.variables[varname]
            dimensions = var.dimensions
            grid = dimensions[-1].split('_')[-1]
            lon = self.grd.variables['lon_'+grid][:]
            lat = self.grd.variables['lat_'+grid][:]
            # NOTE(review): ts and tb are read but never used below, and hc is
            # read while a literal 5 is passed to get_zlev() — presumably hc
            # should be passed instead; confirm against get_zlev's signature.
            ts = self.ncfile.variables['theta_s'][:]
            tb = self.ncfile.variables['theta_b'][:]
            hc = self.ncfile.variables['hc'][:]
            nlev = var.shape[1]
            sc = (np.arange(1, nlev + 1) - nlev - 0.5) / nlev
            sigma = self.ncfile.variables['Cs_r'][:]
            # Sample the transect at roughly one point per grid cell.
            dl = (np.gradient(lon)[1].mean() + np.gradient(lat)[0].mean()) / 2
            siz = int(np.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2) / dl)
            xs = np.linspace(p1[0], p2[0], siz)
            ys = np.linspace(p1[1], p2[1], siz)
            # time index
            varlist, axeslist, time = taste_ncfile(self.ncfile)
            timestr = app.frame.time_select.GetValue()
            selected_time = string2romsTime(timestr, self.ncfile)
            tindex = np.where(time[:] == selected_time)[0][0]
            # getting nearest values
            hsec, zeta, vsec = [], [], []
            for ind in range(xs.size):
                line, col = near2d(lon, lat, xs[ind], ys[ind])
                vsec.append(var[tindex, :, line, col])
                hsec.append(self.grd.variables['h'][line, col])
                zeta.append(self.ncfile.variables['zeta'][tindex, line, col])
            vsec = np.array(vsec).transpose()
            hsec, zeta = np.array(hsec), np.array(zeta)
            xs = xs.reshape(1, xs.size).repeat(nlev, axis=0)
            ys = ys.reshape(1, ys.size).repeat(nlev, axis=0)
            zsec = get_zlev(hsec, sigma, 5, sc, ssh=zeta, Vtransform=2)
            # Mask fill values (> 1e20) everywhere before plotting.
            xs = np.ma.masked_where(vsec > 1e20, xs)
            ys = np.ma.masked_where(vsec > 1e20, ys)
            zsec = np.ma.masked_where(vsec > 1e20, zsec)
            vsec = np.ma.masked_where(vsec > 1e20, vsec)
            self.vslice_dialog = VsliceDialog(app.frame, xs, ys, zsec, vsec)
            # Reset click state so a new slice can be drawn.
            del self.points, self.area
            mplpanel.canvas.draw()
class VsliceDialog(wx.Dialog):
def __init__(self, parent, xs, ys, zsec, vsec, *args, **kwargs):
wx.Dialog.__init__(self, parent, -1, "VARIABLE Vertical Slice, TIMERECORD", pos=(0, 0),
size=(1200, 600), style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
self.xs, self.ys, self.zsec, self.vsec = xs, ys, zsec, vsec
# BASIC LAYOUT OF THE NESTED SIZERS ======================
panel1 = wx.Panel(self, wx.ID_ANY, style=wx.SUNKEN_BORDER)
mplpanel = wx.Panel(self, wx.ID_ANY, style=wx.SUNKEN_BORDER)
mplpanel.SetBackgroundColour("WHITE")
# BOX 1 is the main sizer
box1 = wx.BoxSizer(wx.HORIZONTAL)
box1.Add(panel1, 1, wx.EXPAND)
box1.Add(mplpanel, 4, wx.EXPAND)
# BOX 2 is the inner sizer of the left control panel
box2 = wx.BoxSizer(wx.VERTICAL)
# BOX 3 is the sizer of the panel1
box3 = wx.BoxSizer(wx.VERTICAL)
# panel 1 content ========================================
plot_type = wx.StaticText(panel1, label="Plot type")
box2.Add(plot_type, proportion=0, flag=wx.CENTER)
self.plot_select = wx.ComboBox(panel1, value='scatter')
box2.Add(self.plot_select, proportion=0, flag=wx.CENTER)
self.plot_select.Bind(wx.EVT_COMBOBOX, self.OnUpdatePlot)
self.plot_select.SetItems(['scatter', 'pcolormesh',
'contourf', 'contour'])
minmax = wx.StaticText(panel1, label="Range")
box2.Add(minmax, proportion=0, flag=wx.CENTER)
self.max = wx.TextCtrl(panel1, value=str(vsec.max()))
self.min = wx.TextCtrl(panel1, value=str(vsec.min()))
box2.Add(self.max, proportion=0, flag=wx.CENTER)
box2.Add(self.min, proportion=0, flag=wx.CENTER)
scale = wx.StaticText(panel1, label="Scatter scale")
box2.Add(scale, proportion=0, flag=wx.CENTER)
self.scatter_scale = wx.SpinCtrl(panel1, value='50')
box2.Add(self.scatter_scale, proportion=0, flag=wx.CENTER)
# mplpanel content ========================================
self.mplpanel = SimpleMPLCanvas(mplpanel)
box3.Add(self.mplpanel.canvas, 1, flag=wx.CENTER)
ax = self.mplpanel.ax
pl = ax.scatter(xs.ravel(), zsec.ravel(), s=50, c=vsec.ravel(),
edgecolors='none', cmap=plt.cm.jet)
| |
key = 'scsi{0}.pciSlotNumber'.format(pvscsi.key -
key_offset)
slot = [cfg for cfg in vm.config.extraConfig \
if cfg.key == key]
# If the given controller exists
if slot:
return slot[0].value
else:
return None
def dev_info(unit_number, pci_slot_number):
    '''Return a dictionary with Unit/Bus for the vmdk (or error)'''
    info = {}
    info['Unit'] = str(unit_number)
    info['ControllerPciSlotNumber'] = pci_slot_number
    return info
def reset_vol_meta(vmdk_path):
    '''Clears metadata for vmdk_path. Returns err(msg) on save failure, else None.'''
    vol_meta = kv.getAll(vmdk_path)
    if not vol_meta:
        vol_meta = {}
    logging.debug("Reseting meta-data for disk=%s", vmdk_path)
    if set(vol_meta.keys()) & {kv.STATUS, kv.ATTACHED_VM_UUID, kv.ATTACHED_VM_NAME}:
        # Fix: use .get() here — the old code indexed all three keys whenever
        # ANY one of them was present, raising KeyError on partial metadata.
        logging.debug("Old meta-data for %s was (status=%s VM name=%s uuid=%s)",
                      vmdk_path, vol_meta.get(kv.STATUS),
                      vol_meta.get(kv.ATTACHED_VM_NAME),
                      vol_meta.get(kv.ATTACHED_VM_UUID))
    # Mark the volume detached and drop any VM attribution.
    vol_meta[kv.STATUS] = kv.DETACHED
    vol_meta[kv.ATTACHED_VM_UUID] = None
    vol_meta[kv.ATTACHED_VM_NAME] = None
    if not kv.setAll(vmdk_path, vol_meta):
        msg = "Failed to save volume metadata for {0}.".format(vmdk_path)
        logging.warning("reset_vol_meta: " + msg)
        return err(msg)
def setStatusAttached(vmdk_path, vm):
    '''Sets metadata for vmdk_path to (attached, attachedToVM=uuid'''
    logging.debug("Set status=attached disk=%s VM name=%s uuid=%s", vmdk_path,
                  vm.config.name, vm.config.uuid)
    # Start from existing metadata (or an empty dict) and overlay the
    # attachment markers.
    vol_meta = kv.getAll(vmdk_path) or {}
    vol_meta.update({
        kv.STATUS: kv.ATTACHED,
        kv.ATTACHED_VM_UUID: vm.config.uuid,
        kv.ATTACHED_VM_NAME: vm.config.name,
    })
    if not kv.setAll(vmdk_path, vol_meta):
        logging.warning("Attach: Failed to save Disk metadata for %s", vmdk_path)
def setStatusDetached(vmdk_path):
    '''Sets metadata for vmdk_path to "detached"'''
    logging.debug("Set status=detached disk=%s", vmdk_path)
    vol_meta = kv.getAll(vmdk_path)
    if not vol_meta:
        vol_meta = {}
    vol_meta[kv.STATUS] = kv.DETACHED
    # Fix: pop() each key independently. The old try/del with a bare except
    # skipped deleting the VM-name key whenever the UUID key was missing,
    # and the bare except also swallowed unrelated errors.
    vol_meta.pop(kv.ATTACHED_VM_UUID, None)
    vol_meta.pop(kv.ATTACHED_VM_NAME, None)
    if not kv.setAll(vmdk_path, vol_meta):
        logging.warning("Detach: Failed to save Disk metadata for %s", vmdk_path)
def getStatusAttached(vmdk_path):
    '''Returns (attached, uuid, attach_as) tuple. For 'detached' status uuid is None'''
    # Normalize a None/empty result to a dict so lookups below are safe.
    vol_meta = kv.getAll(vmdk_path) or {}
    try:
        attach_as = vol_meta[kv.VOL_OPTS][kv.ATTACH_AS]
    except (KeyError, TypeError):
        # Fix: narrowed from a bare except — only missing/ill-typed metadata
        # should fall back to the default attach mode.
        attach_as = kv.DEFAULT_ATTACH_AS
    if kv.STATUS not in vol_meta:
        return False, None, attach_as
    attached = (vol_meta[kv.STATUS] == kv.ATTACHED)
    # .get() replaces the old try/KeyError/bare-except pattern.
    uuid = vol_meta.get(kv.ATTACHED_VM_UUID)
    return attached, uuid, attach_as
def handle_stale_attach(vmdk_path, kv_uuid):
    '''
    Clear volume state for cases where the VM that attached the disk
    earlier is powered off or removed. Detach the disk from the VM
    if it's powered off.

    Returns err(msg) on failure or when the disk is legitimately attached
    to a running VM; returns None when the stale state was cleared.
    '''
    cur_vm = findVmByUuid(kv_uuid)
    if cur_vm:
        # Detach the disk only if VM is powered off
        if cur_vm.runtime.powerState == VM_POWERED_OFF:
            logging.info("Detaching disk %s from VM(powered off) - %s\n",
                         vmdk_path, cur_vm.config.name)
            device = findDeviceByPath(vmdk_path, cur_vm)
            if device:
                msg = disk_detach_int(vmdk_path, cur_vm, device)
                if msg:
                    msg += " failed to detach disk {0} from VM={1}.".format(vmdk_path,
                                                                            cur_vm.config.name)
                    return err(msg)
            else:
                logging.warning("Failed to find disk %s in powered off VM - %s, resetting volume metadata\n",
                                vmdk_path, cur_vm.config.name)
                ret = reset_vol_meta(vmdk_path)
                if ret:
                    return ret
        else:
            msg = "Disk {0} already attached to VM={1}".format(vmdk_path,
                                                               cur_vm.config.name)
            return err(msg)
    else:
        # Fix: cur_vm is None in this branch, so the old code's
        # cur_vm.config.name raised AttributeError; log the unresolved
        # UUID instead.
        logging.warning("Failed to find VM %s that attached the disk %s, resetting volume metadata",
                        kv_uuid, vmdk_path)
        ret = reset_vol_meta(vmdk_path)
        if ret:
            return ret
def add_pvscsi_controller(vm, controllers, max_scsi_controllers, offset_from_bus_number):
    '''
    Add a new PVSCSI controller, return (controller_key, err) pair
    '''
    # find empty bus slot for the controller:
    taken = set([c.busNumber for c in controllers])
    avail = set(range(0, max_scsi_controllers)) - taken
    key = avail.pop()  # bus slot; caller guarantees a slot is free
    # SCSI controller device keys are bus number + fixed offset (1000).
    controller_key = key + offset_from_bus_number
    controller_spec = vim.VirtualDeviceConfigSpec(
        operation='add',
        device=vim.ParaVirtualSCSIController(key=controller_key,
                                             busNumber=key,
                                             sharedBus='noSharing', ), )
    # changes spec content goes here
    pvscsi_change = []
    pvscsi_change.append(controller_spec)
    spec = vim.vm.ConfigSpec()
    spec.deviceChange = pvscsi_change
    try:
        wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
    except vim.fault.VimFault as ex:
        # Fix: the old code built a ("...%s", ex.msg) tuple instead of a
        # formatted string, so the error payload was never interpolated.
        msg = "Failed to add PVSCSI Controller: {0}".format(ex.msg)
        return None, err(msg)
    logging.debug("Added a PVSCSI controller, controller_id=%d", controller_key)
    return controller_key, None
def find_disk_slot_in_controller(vm, devices, pvsci, idx, offset_from_bus_number):
    '''
    Find an empty disk slot in the given controller, return disk_slot if an empty slot
    can be found, otherwise, return None
    '''
    disk_slot = None
    controller_key = pvsci[idx].key
    taken = set([dev.unitNumber
                 for dev in devices
                 if type(dev) == vim.VirtualDisk and dev.controllerKey ==
                 controller_key])
    # search in 15 slots, with unit_number 7 reserved for scsi controller
    avail_slots = (set(range(0, 7)) | set(range(8, PVSCSI_MAX_TARGETS))) - taken
    logging.debug("idx=%d controller_key=%d avail_slots=%d", idx, controller_key, len(avail_slots))
    if len(avail_slots) != 0:
        disk_slot = avail_slots.pop()
        # Fix: removed a dead get_controller_pci_slot() call whose result
        # was never used (the caller re-queries the PCI slot itself).
        logging.debug("Find an available slot: controller_key = %d slot = %d", controller_key, disk_slot)
    else:
        logging.warning("No available slot in this controller: controller_key = %d", controller_key)
    return disk_slot
def find_available_disk_slot(vm, devices, pvsci, offset_from_bus_number):
    '''
    Scan every existing PVSCSI controller attached to the VM for an empty
    disk slot. Returns (idx, disk_slot); disk_slot is None when no controller
    has room, in which case idx equals len(pvsci).
    '''
    for idx in range(len(pvsci)):
        disk_slot = find_disk_slot_in_controller(vm, devices, pvsci, idx,
                                                 offset_from_bus_number)
        if disk_slot is not None:
            return idx, disk_slot
    return len(pvsci), None
def disk_attach(vmdk_path, vm):
    '''
    Attaches *existing* disk to a vm on a PVSCI controller
    (we need PVSCSI to avoid SCSI rescans in the guest)
    return error or unit:bus numbers of newly attached disk.

    Returns either err(msg) or the dev_info() dictionary for the
    (possibly already) attached disk.
    '''
    kv_status_attached, kv_uuid, attach_mode = getStatusAttached(vmdk_path)
    logging.info("Attaching {0} as {1}".format(vmdk_path, attach_mode))
    # If the volume is attached then check if the attach is stale (VM is powered off).
    # Otherwise, detach the disk from the VM it's attached to.
    if kv_status_attached and kv_uuid != vm.config.uuid:
        ret_err = handle_stale_attach(vmdk_path, kv_uuid)
        if ret_err:
            return ret_err
    # NOTE: vSphere is very picky about unit numbers and controllers of virtual
    # disks. Every controller supports 15 virtual disks, and the unit
    # numbers need to be unique within the controller and range from
    # 0 to 15 with 7 being reserved (for older SCSI controllers).
    # It is up to the API client to add controllers as needed.
    # SCSI Controller keys are in the range of 1000 to 1003 (1000 + bus_number).
    offset_from_bus_number = 1000
    max_scsi_controllers = 4
    devices = vm.config.hardware.device
    # get all scsi controllers (pvsci, lsi logic, whatever)
    controllers = [d for d in devices
                   if isinstance(d, vim.VirtualSCSIController)]
    # Check if this disk is already attached, and if it is - skip the disk
    # attach and the checks on attaching a controller if needed.
    device = findDeviceByPath(vmdk_path, vm)
    if device:
        # Disk is already attached.
        logging.warning("Disk %s already attached. VM=%s",
                        vmdk_path, vm.config.uuid)
        setStatusAttached(vmdk_path, vm)
        # Get that controller to which the device is configured for
        pvsci = [d for d in controllers
                 if type(d) == vim.ParaVirtualSCSIController and
                 d.key == device.controllerKey]
        return dev_info(device.unitNumber,
                        get_controller_pci_slot(vm, pvsci[0],
                                                offset_from_bus_number))
    # Disk isn't attached, make sure we have a PVSCI and add it if we don't
    # check if we already have a pvsci one
    pvsci = [d for d in controllers
             if type(d) == vim.ParaVirtualSCSIController]
    disk_slot = None
    if len(pvsci) > 0:
        idx, disk_slot = find_available_disk_slot(vm, devices, pvsci, offset_from_bus_number);
        if (disk_slot is not None):
            controller_key = pvsci[idx].key
            pci_slot_number = get_controller_pci_slot(vm, pvsci[idx],
                                                      offset_from_bus_number)
            logging.debug("Find an available disk slot, controller_key=%d, slot_id=%d",
                          controller_key, disk_slot)
    # No slot on any existing PVSCSI controller: add a new controller
    # (subject to the 4-controller limit) and use its slot 0.
    if (disk_slot is None):
        disk_slot = 0  # starting on a fresh controller
        if len(controllers) >= max_scsi_controllers:
            msg = "Failed to place new disk - The maximum number of supported volumes has been reached."
            logging.error(msg + " VM=%s", vm.config.uuid)
            return err(msg)
        logging.info("Adding a PVSCSI controller")
        controller_key, ret_err = add_pvscsi_controller(vm, controllers, max_scsi_controllers,
                                                        offset_from_bus_number)
        if (ret_err):
            return ret_err
        # Find the controller just added
        devices = vm.config.hardware.device
        pvsci = [d for d in devices
                 if type(d) == vim.ParaVirtualSCSIController and
                 d.key == controller_key]
        pci_slot_number = get_controller_pci_slot(vm, pvsci[0],
                                                  offset_from_bus_number)
        logging.info("Added a PVSCSI controller, controller_key=%d pci_slot_number=%s",
                     controller_key, pci_slot_number)
    # add disk as independent, so it won't be snapshotted with the Docker VM
    # NOTE(review): the "[] " prefix appears to make the path datastore-relative
    # for vSphere — confirm against the VirtualDiskFlatVer2BackingInfo docs.
    disk_spec = vim.VirtualDeviceConfigSpec(
        operation='add',
        device=
        vim.VirtualDisk(backing=vim.VirtualDiskFlatVer2BackingInfo(
            fileName="[] " + vmdk_path,
            diskMode=attach_mode, ),
                        deviceInfo=vim.Description(
                            # TODO: use docker volume name here. Issue #292
                            label="dockerDataVolume",
                            summary="dockerDataVolume", ),
                        unitNumber=disk_slot,
                        controllerKey=controller_key, ), )
    disk_changes = []
    disk_changes.append(disk_spec)
    spec = vim.vm.ConfigSpec()
    spec.deviceChange = disk_changes
    try:
        wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
    except vim.fault.VimFault as ex:
        msg = ex.msg
        # Use metadata (KV) for extra logging
        if kv_status_attached:
            # KV claims we are attached to a different VM'.
            msg += " disk {0} already attached to VM={1}".format(vmdk_path,
                                                                 kv_uuid)
            if kv_uuid == vm.config.uuid:
                msg += "(Current VM)"
        return err(msg)
    setStatusAttached(vmdk_path, vm)
    logging.info("Disk %s successfully attached. controller pci_slot_number=%s, disk_slot=%d",
                 vmdk_path, pci_slot_number, disk_slot)
    return dev_info(disk_slot, pci_slot_number)
def err(string):
    """Wrap *string* in the standard error-response dictionary."""
    response = {u'Error': string}
    return response
def disk_detach(vmdk_path, vm):
"""detach disk (by full path) from a vm amd return None or err(msg)"""
device = findDeviceByPath(vmdk_path, | |
# Copyright (C) 2019-2020, TomTom (http://tomtom.com).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Collecting source files from different locations.
Sources are collected as packages. These can be local directories, or are downloaded from a remote
server. Each package can contain XML files containing the API reference documentation and/or
other files that can be directly included in the documentation.
"""
import aiohttp
import asyncio
import csv
import io
import logging
import os
import tarfile
import toml
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Type, TypeVar, Union
from tqdm import tqdm
logger = logging.getLogger(__name__)
class CollectError(Exception):
    """Root exception for failures while collecting packages.

    Attributes:
        name: Package for which the error occurred.
        message: Human-readable details of the failure.
    """
    name: str
    message: str

    def __init__(self, name: str, message: str):
        self.name, self.message = name, message
class DownloadError(CollectError):
    """Raised when a package could not be downloaded."""

    def __str__(self) -> str:
        # Keep the exact message format; tooling may match on it.
        return f"Failed to download package: {self.name}:\n{self.message}"
class InvalidPackageError(CollectError):
    """Raised when the contents of a collected package are not valid."""

    def __str__(self) -> str:
        return f"Invalid package: {self.name}:\n{self.message}"
class SpecificationError(Exception):
    """Signals an invalid specification in the configuration files.

    Attributes:
        message: Details of the error.
    """
    message: str

    def __init__(self, message: str):
        self.message = message

    def __str__(self) -> str:
        return f"Invalid specification: {self.message}"
class Package:
    """A package ready for use by AsciiDoxy.

    Attributes:
        name: Name of the package.
        xml_dirs: Directories holding XML descriptions of the API.
        include_dirs: Directories holding files for direct inclusion.
    """
    name: str
    xml_dirs: List[Path]
    include_dirs: List[Path]

    def __init__(self, name: str):
        self.name = name
        # Fresh lists per instance; populated later by the collectors.
        self.xml_dirs = []
        self.include_dirs = []
PackageSpecT = TypeVar("PackageSpecT", bound="PackageSpec")
class PackageSpec(ABC):
    """Base class for package specifications.
    Attributes:
        name: Name of the package.
        xml_subdir: Subdirectory in the package containing XML descriptions of the API.
        include_subdir: Subdirectory in the package containing files for inclusion.
    """
    name: str
    xml_subdir: Optional[str] = None
    include_subdir: Optional[str] = None
    def __init__(self, name: str, **kwargs):
        # Extra keyword arguments are accepted and deliberately ignored here;
        # subclasses consume their own arguments before delegating.
        self.name = name
    @abstractmethod
    async def collect(self, download_dir: Path, session: aiohttp.ClientSession) -> Package:
        """Collect the package.
        Args:
            download_dir: Directory to store downloaded packages.
            session: HTTP session to use for content that needs to be downloaded.
        Returns:
            Information about the package.
        """
        pass
    @classmethod
    def from_toml(cls: Type[PackageSpecT], name: str, raw_spec: Mapping[str, Any],
                  **init_args) -> PackageSpecT:
        """Create a package specification from TOML file contents.
        Args:
            name: Name of the specification entry in TOML.
            raw_spec: Specification entry from TOML.
            init_args: Internal.
        Returns:
            A package specification based on the TOML contents.
        Raises:
            SpecificationError: The specification in TOML is invalid.
        """
        get = cls._make_getter(name, raw_spec)
        spec = cls(name, **init_args)
        # Both subdirectories are mandatory in the raw spec; `get` raises
        # SpecificationError when either key is missing.
        spec.xml_subdir = get("xml_subdir")
        spec.include_subdir = get("include_subdir")
        return spec
    @staticmethod
    def _make_getter(name, raw_spec):
        # Build a lookup helper that turns a missing key into a
        # SpecificationError whose message names the offending package.
        def get(attr_name):
            value = raw_spec.get(attr_name, None)
            if value is None:
                raise SpecificationError(f"Package {name}, or its source, has no `{attr_name}`.")
            return value
        return get
    def _make_package(self, package_dir: Path) -> Package:
        # Assemble the Package, keeping only subdirectories that actually
        # exist on disk; a package with neither is rejected.
        pkg = Package(self.name)
        if self.xml_subdir:
            xml_dir = package_dir / self.xml_subdir
            if xml_dir.is_dir():
                logger.debug(f"{self.name} has XML subdirectory")
                pkg.xml_dirs.append(xml_dir)
        if self.include_subdir:
            include_dir = package_dir / self.include_subdir
            if include_dir.is_dir():
                logger.debug(f"{self.name} has include subdirectory")
                pkg.include_dirs.append(include_dir)
        if not pkg.xml_dirs and not pkg.include_dirs:
            raise InvalidPackageError(self.name, "Package does not contain XML or include files.")
        return pkg
class LocalPackageSpec(PackageSpec):
    """Package specification backed by a directory on the local file system.

    Attributes:
        package_dir: Directory containing the package.
    """
    package_dir: Path

    def __init__(self, name: str, package_dir: Union[os.PathLike, str]):
        super().__init__(name)
        self.package_dir = Path(package_dir)

    async def collect(self, download_dir: Path, session: aiohttp.ClientSession) -> Package:
        """See PackageSpec.collect"""
        # Nothing to download; the files are already on disk.
        return self._make_package(self.package_dir)

    @classmethod
    def from_toml(cls, name: str, raw_spec: Mapping[str, Any], **init_args) -> "LocalPackageSpec":
        """See PackageSpec.from_toml"""
        get = cls._make_getter(name, raw_spec)
        return super().from_toml(name, raw_spec, package_dir=Path(get("package_dir")), **init_args)
class HttpPackageSpec(PackageSpec):
    """Specification of a package downloaded from a remote server.
    Expects to download (compressed) tar files. All tar files will be extracted to the same output
    directory.
    The `url_template` is used to create download URLs for all files from a generic template. You
    can use the following placeholders in the template:
    * `{name}`: Replaced with the name of the package.
    * `{version}`: Replaced with the version of the package.
    * `{file_name}`: Replaced with the file name.
    Attributes:
        version: Version number of the package.
        url_template: Template for creating the URL to download the tar files from.
        file_names: List of files to download.
    """
    version: str
    url_template: str
    file_names: List[str]
    def __init__(self, name: str, version: str, url_template: str):
        super().__init__(name)
        self.version = version
        self.url_template = url_template
        self.file_names = []
    async def collect(self, download_dir: Path, session: aiohttp.ClientSession) -> Package:
        """See PackageSpec.collect"""
        # Downloads are cached per name/version: if the directory already
        # exists, its contents are reused without re-downloading.
        package_dir = download_dir / self.name / self.version
        if not package_dir.is_dir():
            await self._download_files(package_dir, session)
        else:
            logger.debug(f"Using cached version of {self.name}:{self.version}")
        return self._make_package(package_dir)
    async def _download_files(self, package_dir: Path, session: aiohttp.ClientSession):
        # Fire off all file downloads concurrently; gather propagates the
        # first exception raised by any of them.
        package_dir.mkdir(parents=True, exist_ok=True)
        jobs = []
        for file_name in self.file_names:
            url = self.url_template.format(name=self.name,
                                           version=self.version,
                                           file_name=file_name)
            jobs.append(self._download(session, url, package_dir))
        await asyncio.gather(*jobs)
    async def _download(self, session: aiohttp.ClientSession, url: str, target_dir: Path):
        # Fetch one archive fully into memory, then unpack it in place.
        try:
            async with session.get(url) as response:
                data = await response.read()
            target_dir.mkdir(parents=True, exist_ok=True)
            tar_file = tarfile.open(fileobj=io.BytesIO(data))
            # NOTE(review): extractall trusts member paths in the archive;
            # if the server is not fully trusted, consider tarfile's
            # extraction filters to block path traversal — TODO confirm.
            tar_file.extractall(target_dir)
        except tarfile.ReadError as tar_error:
            raise DownloadError(self.name, f"Cannot read tar file from {url}.") from tar_error
        except aiohttp.client_exceptions.ClientResponseError as http_error:
            raise DownloadError(self.name, f"Failed to download: {http_error}.") from http_error
    @classmethod
    def from_toml(cls, name: str, raw_spec: Mapping[str, Any], **init_args) -> "HttpPackageSpec":
        """See PackageSpec.from_toml"""
        get = cls._make_getter(name, raw_spec)
        spec = super().from_toml(name,
                                 raw_spec,
                                 version=get("version"),
                                 url_template=get("url_template"),
                                 **init_args)
        spec.file_names = get("file_names")
        if not isinstance(spec.file_names, list):
            raise SpecificationError(f"Package {name} `file_names` must be a list.")
        return spec
async def collect(specs: Sequence[PackageSpec],
                  download_dir: Path,
                  progress: Optional[tqdm] = None) -> List[Package]:
    """Collect the packages based on the list of specifications.
    Args:
        specs: A list of package specifications to collect.
        download_dir: Directory to store downloaded packages.
        progress: Optional progress bar; ticked once per collected package.
    Returns:
        A list of packages matching the package specifications.
    Raises:
        InvalidPackageError: One of the collected packages is not valid. It does not contain the
            required directories.
        DownloadError: An error occurred while downloading a remote package.
    """
    if progress is not None:
        async def _progress_report(coro):
            # Update the progress bar as each package finishes collecting.
            ret = await coro
            progress.update()
            return ret
    else:
        async def _progress_report(coro):
            return await coro
    # Cap concurrent connections to avoid hammering the remote server.
    conn = aiohttp.TCPConnector(limit=4)
    async with aiohttp.ClientSession(connector=conn, raise_for_status=True) as session:
        jobs = []
        for spec in specs:
            jobs.append(_progress_report(spec.collect(download_dir, session)))
        # gather preserves the order of `specs` in the returned list.
        return await asyncio.gather(*jobs)
def versions_from_file(version_file: Union[os.PathLike, str]) -> Mapping[str, str]:
    """Read package versions from a CSV file.

    Used as a fallback when the package specification names no version.

    Args:
        version_file: Path of the CSV file containing the versions.

    Returns:
        Mapping from package name to its version.
    """
    csv_path = Path(version_file)
    versions = {}
    with csv_path.open(mode="r", encoding="utf-8") as handle:
        for row in csv.DictReader(handle):
            versions[row['Component name']] = row['Version']
    return versions
def _combine_dict(a: Dict[Any, Any], b: Dict[Any, Any]):
a = a.copy()
a.update(b)
return a
def specs_from_file(
spec_file: Union[os.PathLike, str],
version_file: Optional[Union[os.PathLike, str]] = None) -> Sequence[PackageSpec]:
"""Load package specifications from a file.
Optionally a version CSV file is used to provide package versions.
Args:
spec_file: Path to a TOML file containing package specifications.
version_file: Optional. Path to a CSV file containing package versions.
Returns:
A list of package specifications as present in the TOML file.
Raises:
SpecificationError: The specifications in the file are not valid.
"""
if version_file:
versions = versions_from_file(version_file)
else:
versions = {}
raw_specs = toml.load(spec_file)
raw_sources = raw_specs.get("sources", {})
raw_packages = raw_specs.get("packages", None)
if not raw_packages:
raise SpecificationError("No packages defined in specification file.")
specs = []
for name, raw_package_spec in raw_packages.items():
if "source" in raw_package_spec:
source_spec = raw_sources.get(raw_package_spec["source"], None)
if source_spec is None:
raise SpecificationError(
f"Undefined source `{raw_package_spec['source']}` in package {name}")
raw_package_spec = _combine_dict(source_spec, raw_package_spec)
if "version" not in raw_package_spec:
version = versions.get(name, None)
if version is not None:
raw_package_spec["version"] = version
def _get(attr_name):
value = raw_package_spec.get(attr_name, None)
if value is None:
raise SpecificationError(f"Package {name}, or its source, has no `{attr_name}`.")
return value
| |
Move the input pointer to the next incoming token. The stream
must become active with LT(1) available. consume() simply
moves the input pointer so that LT(1) points at the next
input symbol. Consume at least one token.
Walk past any token not on the channel the parser is listening to.
"""
if self.p < len(self.tokens):
self.p += 1
self.p = self.skipOffTokenChannels(self.p) # leave p on valid token
def skipOffTokenChannels(self, i):
"""
Given a starting index, return the index of the first on-channel
token.
"""
try:
while self.tokens[i].channel != self.channel:
i += 1
except IndexError:
# hit the end of token stream
pass
return i
def skipOffTokenChannelsReverse(self, i):
while i >= 0 and self.tokens[i].channel != self.channel:
i -= 1
return i
def setTokenTypeChannel(self, ttype, channel):
"""
A simple filter mechanism whereby you can tell this token stream
to force all tokens of type ttype to be on channel. For example,
when interpreting, we cannot exec actions so we need to tell
the stream to force all WS and NEWLINE to be a different, ignored
channel.
"""
self.channelOverrideMap[ttype] = channel
def discardTokenType(self, ttype):
self.discardSet.add(ttype)
def getTokens(self, start=None, stop=None, types=None):
"""
Given a start and stop index, return a list of all tokens in
the token type set. Return None if no tokens were found. This
method looks at both on and off channel tokens.
"""
if self.p == -1:
self.fillBuffer()
if stop is None or stop >= len(self.tokens):
stop = len(self.tokens) - 1
if start is None or stop < 0:
start = 0
if start > stop:
return None
if isinstance(types, (int, long)):
# called with a single type, wrap into set
types = set([types])
filteredTokens = [
token for token in self.tokens[start:stop]
if types is None or token.type in types
]
if len(filteredTokens) == 0:
return None
return filteredTokens
def LT(self, k):
"""
Get the ith token from the current position 1..n where k=1 is the
first symbol of lookahead.
"""
if self.p == -1:
self.fillBuffer()
if k == 0:
return None
if k < 0:
return self.LB(-k)
i = self.p
n = 1
# find k good tokens
while n < k:
# skip off-channel tokens
i = self.skipOffTokenChannels(i + 1) # leave p on valid token
n += 1
try:
return self.tokens[i]
except IndexError:
return EOF_TOKEN
def LB(self, k):
"""Look backwards k tokens on-channel tokens"""
if self.p == -1:
self.fillBuffer()
if k == 0:
return None
if self.p - k < 0:
return None
i = self.p
n = 1
# find k good tokens looking backwards
while n <= k:
# skip off-channel tokens
i = self.skipOffTokenChannelsReverse(i - 1) # leave p on valid token
n += 1
if i < 0:
return None
return self.tokens[i]
def get(self, i):
"""
Return absolute token i; ignore which channel the tokens are on;
that is, count all tokens not just on-channel tokens.
"""
return self.tokens[i]
def LA(self, i):
return self.LT(i).type
def mark(self):
self.lastMarker = self.index()
return self.lastMarker
def release(self, marker=None):
# no resources to release
pass
def size(self):
return len(self.tokens)
def index(self):
return self.p
def rewind(self, marker=None):
if marker is None:
marker = self.lastMarker
self.seek(marker)
def seek(self, index):
self.p = index
def getTokenSource(self):
return self.tokenSource
def getSourceName(self):
return self.tokenSource.getSourceName()
def toString(self, start=None, stop=None):
if self.p == -1:
self.fillBuffer()
if start is None:
start = 0
elif not isinstance(start, int):
start = start.index
if stop is None:
stop = len(self.tokens) - 1
elif not isinstance(stop, int):
stop = stop.index
if stop >= len(self.tokens):
stop = len(self.tokens) - 1
return ''.join([t.text for t in self.tokens[start:stop + 1]])
class RewriteOperation(object):
    """@brief Internal helper class.

    Base class for queued rewrite instructions; by itself it rewrites
    nothing and leaves the cursor on its own token index.
    """

    def __init__(self, stream, index, text):
        self.stream = stream
        self.index = index
        self.text = text

    def execute(self, buf):
        """Execute the rewrite operation by possibly adding to the buffer.
        Return the index of the next token to operate on.
        """
        return self.index

    def toString(self):
        return '<%s@%d:"%s">' % (self.__class__.__name__, self.index, self.text)

    __str__ = toString
    __repr__ = toString
class InsertBeforeOp(RewriteOperation):
    """@brief Internal helper class.

    Writes its replacement text, then the original token at `index`.
    """

    def execute(self, buf):
        buf.write(self.text)
        buf.write(self.stream.tokens[self.index].text)
        return self.index + 1
class ReplaceOp(RewriteOperation):
    """
    @brief Internal helper class.

    Replaces the token range [index..lastIndex] with optional new text;
    one ReplaceOp stands in for the whole range.
    """

    def __init__(self, stream, first, last, text):
        RewriteOperation.__init__(self, stream, first, text)
        self.lastIndex = last

    def execute(self, buf):
        # A None text means pure deletion: emit nothing, skip the range.
        if self.text is not None:
            buf.write(self.text)
        return self.lastIndex + 1

    def toString(self):
        return '<ReplaceOp@%d..%d:"%s">' % (
            self.index, self.lastIndex, self.text)

    __str__ = toString
    __repr__ = toString
class DeleteOp(ReplaceOp):
    """
    @brief Internal helper class.

    A replace with no replacement text: drops the token range entirely.
    """

    def __init__(self, stream, first, last):
        ReplaceOp.__init__(self, stream, first, last, None)

    def toString(self):
        return '<DeleteOp@%d..%d>' % (self.index, self.lastIndex)

    __str__ = toString
    __repr__ = toString
class TokenRewriteStream(CommonTokenStream):
"""@brief CommonTokenStream that can be modified.
Useful for dumping out the input stream after doing some
augmentation or other manipulations.
You can insert stuff, replace, and delete chunks. Note that the
operations are done lazily--only if you convert the buffer to a
String. This is very efficient because you are not moving data around
all the time. As the buffer of tokens is converted to strings, the
toString() method(s) check to see if there is an operation at the
current index. If so, the operation is done and then normal String
rendering continues on the buffer. This is like having multiple Turing
machine instruction streams (programs) operating on a single input tape. :)
Since the operations are done lazily at toString-time, operations do not
screw up the token index values. That is, an insert operation at token
index i does not change the index values for tokens i+1..n-1.
Because operations never actually alter the buffer, you may always get
the original token stream back without undoing anything. Since
the instructions are queued up, you can easily simulate transactions and
roll back any changes if there is an error just by removing instructions.
For example,
CharStream input = new ANTLRFileStream("input");
TLexer lex = new TLexer(input);
TokenRewriteStream tokens = new TokenRewriteStream(lex);
T parser = new T(tokens);
parser.startRule();
Then in the rules, you can execute
Token t,u;
...
input.insertAfter(t, "text to put after t");}
input.insertAfter(u, "text after u");}
System.out.println(tokens.toString());
Actually, you have to cast the 'input' to a TokenRewriteStream. :(
You can also have multiple "instruction streams" and get multiple
rewrites from a single pass over the input. Just name the instruction
streams and use that name again when printing the buffer. This could be
useful for generating a C file and also its header file--all from the
same buffer:
tokens.insertAfter("pass1", t, "text to put after t");}
tokens.insertAfter("pass2", u, "text after u");}
System.out.println(tokens.toString("pass1"));
System.out.println(tokens.toString("pass2"));
If you don't use named rewrite streams, a "default" stream is used as
the first example shows.
"""
    # Name of the implicit instruction stream used when callers do not
    # name one explicitly.
    DEFAULT_PROGRAM_NAME = "default"
    # Lowest valid token/instruction index; used when truncating programs.
    MIN_TOKEN_INDEX = 0
    def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
        """Create a rewrite stream; arguments as for CommonTokenStream."""
        CommonTokenStream.__init__(self, tokenSource, channel)
        # You may have multiple, named streams of rewrite operations.
        # I'm calling these things "programs."
        # Maps String (name) -> rewrite (List)
        self.programs = {}
        self.programs[self.DEFAULT_PROGRAM_NAME] = []
        # Map String (program name) -> Integer index
        self.lastRewriteTokenIndexes = {}
def rollback(self, *args):
"""
Rollback the instruction stream for a program so that
the indicated instruction (via instructionIndex) is no
longer in the stream. UNTESTED!
"""
if len(args) == 2:
programName = args[0]
instructionIndex = args[1]
elif len(args) == 1:
programName = self.DEFAULT_PROGRAM_NAME
instructionIndex = args[0]
else:
raise TypeError("Invalid arguments")
p = self.programs.get(programName, None)
if p is not None:
self.programs[programName] = (
p[self.MIN_TOKEN_INDEX:instructionIndex])
def deleteProgram(self, programName=DEFAULT_PROGRAM_NAME):
"""Reset the program so that no instructions exist"""
self.rollback(programName, self.MIN_TOKEN_INDEX)
def insertAfter(self, *args):
if len(args) == 2:
programName = self.DEFAULT_PROGRAM_NAME
index = args[0]
text = args[1]
elif len(args) == 3:
programName = args[0]
index = args[1]
text = args[2]
else:
raise TypeError("Invalid arguments")
if isinstance(index, Token):
# index is a Token, grap the stream index from it
index = index.index
# to insert after, just insert before next index (even if past end)
self.insertBefore(programName, index + 1, text)
def | |
<gh_stars>1-10
# encoding: utf-8
"""
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Build CYTHON files
# In the main project directory :
# c:\>python setup_SpriteSheetStudio.py build_ext --inplace
# Make an executable file
#
import tkinter
from tkinter import ttk
from tkinter import filedialog
from tkinter import IntVar, DoubleVar, StringVar, Label, LabelFrame, Button, Checkbutton, DISABLED, \
NORMAL, RAISED, Entry, Scale, HORIZONTAL, FLAT, BooleanVar
import Pmw
try:
import numpy
except ImportError:
raise ImportError("Numpy library is not found on your system.\nplease try the following in a command prompt\n"
"C:>pip install numpy\n")
try:
import pygame
except ImportError:
raise ImportError("Pygame library is not found on your system.\nplease try the following in a command prompt\n"
"C:>pip install pygame\n")
from tkinter import colorchooser
import tkinter.font as tkFont
from tkinter import messagebox
import sys
import platform
try:
from SpriteTools import blend_texture_32c, blend_texture_24_alpha, \
swap_channels24_c, \
horizontal_glitch32, horizontal_glitch24, \
invert_surface_24bit, invert_surface_24bit_exclude, invert_surface_32bit, swap_channels32_c, vertical_glitch24_c, \
blend_to_textures_24c, blend_to_textures_32c, greyscale_lightness24_c, sobel24, median_filter24_c, \
color_reduction24_c, greyscale_luminosity24_c, greyscale24_c, vertical_glitch32_c, greyscale_lightness32_c, \
greyscale_luminosity32_c, greyscale32_c, median_filter32_c, color_reduction32_c, sobel32, dithering24_c, \
dithering32_c, pixelate24, pixelate32, create_pixel_blocks_rgba, sepia24_c, create_pixel_blocks_rgb, \
invert_surface_32bit_exclude
except ImportError:
raise ImportError("SpriteTools library is not found for on system.\n"
"Go into the project main directory and type the following in a command prompt \n"
"C:>python setup_SpriteSheetStudio.py build_ext --inplace\n")
try:
from GaussianBlur5x5 import canny_blur5x5_surface24_c, canny_blur5x5_surface32_c
except ImportError:
raise ImportError("GaussianBlur5x5 library is not found on your system.\n"
"Go into the project main directory and type the following in a command prompt \n"
"C:>python setup_SpriteSheetStudio.py build_ext --inplace\n")
from RGB_split import rgb_split_channels, rgb_split_channels_alpha
try:
from Saturation import saturation_array32, saturation_array24
except ImportError:
raise ImportError("Saturation library is not found on your system.\n"
"Go into the project main directory and type the following in a command prompt \n"
"C:>python setup_SpriteSheetStudio.py build_ext --inplace\n")
try:
from bloom import bloom_effect_array32, bloom_effect_array24, blur5x5_array32, blur5x5_array24
except ImportError:
raise ImportError("bloom library is not found on your system.\n"
"Go into the project main directory and type the following in a command prompt \n"
"C:>python setup_SpriteSheetStudio.py build_ext --inplace\n")
try:
from hsv_surface import hsv_surface24c, hsv_surface32c
except ImportError:
raise ImportError("hsv_surface library is not found on your system.\n"
"Go into the project main directory and type the following in a command prompt \n"
"C:>python setup_SpriteSheetStudio.py build_ext --inplace\n")
import os
from os.path import basename
try:
import PIL
from PIL import Image, ImageTk
except ImportError:
raise ImportError("pillow library is not found on your system.\nplease try the following in a command prompt\n"
"C:>pip install pygame\n")
try:
from SpriteSheet import sprite_sheet_per_pixel, sprite_sheet
except ImportError:
raise ImportError("SpriteSheet library is not found on your system.\n"
"Go into the project main directory and type the following in a command prompt \n"
"C:>python setup_SpriteSheetStudio.py build_ext --inplace\n")
# Module-level pygame bootstrap: the window icon must be set before the
# display surface is created.
icon = pygame.image.load("Assets\\magma.ico")
pygame.display.set_icon(icon)
# Pygame rendering surface (600x600) used for image processing/previews.
SCREEN = pygame.display.set_mode((600, 600))
VERSION = 1.00  # application version, shown in the window title
SCREEN_WIDTH = 1465   # main tkinter window width in pixels
SCREEN_HEIGHT = 800   # main tkinter window height in pixels
BUFFER_TKINTER_OPTIONS = {}   # presumably caches tkinter option-widget state — verify usage
BUFFER_TKINTER_CHECKBOX = {}  # presumably caches tkinter checkbox state — verify usage
LABEL = None  # placeholder for a Label widget, assigned later at runtime
class GL(tkinter.Frame):
"""
GLOBAL PROGRAM VARIABLE/CONSTANT
"""
def __init__(self):
tkinter.Frame.__init__(self)
# BACKGROUND COLOR
self.background_color = "#858585"
# DEFAULT LABEL TEXT BACKGROUND COLOR
self.bkcolor = "#858585"
# FILE PATH
self.path = "/"
# SPRITESHEET FILE EXTENSION ALLOWED
# JPG
# PNG
# GIF (non-animated)
# BMP
# PCX
# TGA (uncompressed)
# TIF
# LBM (and PBM)
# PBM (and PGM, PPM)
# XPM
self.file_format = (("PNG files", "*.png"), ("JPEG files", "*.jpg"),
("GIF (non animated)", "*.gif"), ("BMP files", "*.bmp"),
("PCX files", "*.pcx"), ("TGA uncompressed", "*.tga"),
("TIF files", "*.tif"), ("LBM (and PBM", "*.lbm"),
("PBM files", "*.pbm"), ("XPM files", "*.xpm"),
("all files", "*.*"))
global VERSION
self.title = "SpriteSheet Studio version %s" % VERSION
# CHECKER BACKGROUND IMAGE TO USE FOR 32-BIT SURFACE
# DEFAULT SIZE 600x600
self.checker_background = pygame.image.load("Assets\\background.png")
self.checker_background = pygame.transform.smoothscale(self.checker_background, (600, 600))
# LIST CONTAINING ALL THE PYGAME SURFACES (IMAGE LOADED AFTER PROCESSING THE
# SPRITESHEET. THE TRANSFORMATION (BLEND, BLUR ETC WILL BE APPLIED TO ALL TEXTURES
# IN THE LIST PYIMAGE)
self.pyimage = []
# LIST CONTAINING THE TKINTER IMAGES TO DISPLAY ON CANVAS
self.tkimage = []
# DEFAULT TKINTER IMAGE DISPLAY FOR THE EDIT COLOR BUTTON (COLOR GRADIENT)
# IMAGE SIZE 20x20
self.color_icon = Image.open(r"Assets\\EditColor.png")
self.color_icon = self.color_icon.resize((20, 20), Image.ANTIALIAS)
self.color_icon = ImageTk.PhotoImage(self.color_icon)
# DEFAULT TKINTER IMAGE DISPLAY BY THE LABEL PREVOEW (64x64)
self.preview_image = Image.open(r"Assets\\python logo.png")
self.preview_image = self.preview_image.resize((64, 64), Image.ANTIALIAS)
self.preview_image = ImageTk.PhotoImage(self.preview_image)
# PADLOCK TKINTER IMAGE TO USE WHEN SIZE WIDTH AND HEIGHT ARE INDENTICAL (LOCKED)
# DEFAULT SIZE 16x16
self.padlock_lock_image = Image.open(r"Assets\\Lock.png")
self.padlock_lock_image = self.padlock_lock_image.resize((16, 16), Image.ANTIALIAS)
self.padlock_lock_image = ImageTk.PhotoImage(self.padlock_lock_image)
# UNLOCK IMAGE
self.padlock_unlock_image = Image.open(r"Assets\\Lock-Unlock-icon.png")
self.padlock_unlock_image = self.padlock_unlock_image.resize((16, 16), Image.ANTIALIAS)
self.padlock_unlock_image = ImageTk.PhotoImage(self.padlock_unlock_image)
# GLARE EFFECT TEXTURE DEFAULT (IMAGE TO DISPLAY ON LABEL DEFAULT SIZE 64x64
self.glow_shape = Image.open(r"Assets\\icon_glareFx_red.png")
self.glow_shape = self.glow_shape.resize((64, 64), Image.ANTIALIAS)
self.glow_shape = ImageTk.PhotoImage(self.glow_shape)
# GLARE EFFECT TEXTURE TO BLEND WITH ALL PYGAME IMAGE (PYIMAGE_)
self.glow_shape_pygame = pygame.image.load(
"Assets\\icon_glareFx_red.png").convert()
self.glow_shape_pygame = pygame.transform.smoothscale(self.glow_shape_pygame, (64, 64))
# TRANSITION TEXTURE FOR TRANSITION EFFECT (EMPTY SURFACE SIZE 10x10)
self.transition_texture = pygame.Surface((10, 10), pygame.SRCALPHA)
# TKINTER VARIABLES
self.rows_value = IntVar()
self.columns_value = IntVar()
self.input_format_32bit = IntVar()
self.input_format_24bit = IntVar()
self.width_value = IntVar()
self.height_value = IntVar()
self.spritesheet_name_variable = StringVar()
self.red = IntVar()
self.green = IntVar()
self.blue = IntVar()
self.exclude_red = IntVar()
self.exclude_green = IntVar()
self.exclude_blue = IntVar()
self.colorkey_red = IntVar()
self.colorkey_green = IntVar()
self.colorkey_blue = IntVar()
self.padlock_status = "locked"
self.rgbsplitxoffset = StringVar()
self.rgbsplityoffset = StringVar()
self.blend_start_frame = IntVar()
self.blend_end_frame = IntVar()
self.hsvstart_frame = IntVar()
self.hsvend_frame = IntVar()
self.bloomstart_frame = IntVar()
self.bloomend_frame = IntVar()
self.rgbsplit_start_frame = IntVar()
self.rgbsplit_end_frame = IntVar()
self.transition_start_frame = IntVar()
self.transition_end_frame = IntVar()
self.glitch_start_frame = IntVar()
self.glitch_end_frame = IntVar()
self.blend_scale_percentage = DoubleVar()
self.output_format_24bit = BooleanVar()
self.output_format_32bit = BooleanVar()
self.rleaccel_value = BooleanVar()
self.set_alpha_value = BooleanVar()
self.colorkey = BooleanVar()
self.hsv_checkbox = BooleanVar()
self.hsv_scale_value = DoubleVar()
self.hsv_rotate = BooleanVar()
self.bloom_checkbox = BooleanVar()
self.highpass_filter_value = DoubleVar()
self.preview_scale_delay = IntVar()
self.checker_value = BooleanVar()
self.inverse_variable = BooleanVar()
self.saturation_checkbox = BooleanVar()
self.saturation_scale_value = DoubleVar()
self.cartoon_checkbox = BooleanVar()
self.blur_checkbox = BooleanVar()
self.blur_progressive = BooleanVar()
self.blurx2 = BooleanVar()
self.blurx4 = BooleanVar()
self.blurx6 = BooleanVar()
self.blurstart_frame = IntVar()
self.blurend_frame = IntVar()
self.glow_checkbox = BooleanVar()
self.glow_var = StringVar()
self.glow_scale_value = DoubleVar()
self.channel_checkbox = BooleanVar()
self.widget_var = StringVar()
self.red_channel = BooleanVar()
self.green_channel = BooleanVar()
self.blue_channel = BooleanVar()
self.split_blue_checkbox = BooleanVar()
self.split_green_checkbox = BooleanVar()
self.split_red_checkbox = BooleanVar()
self.rgbsplit_checkbox = BooleanVar()
self.transition_checkbox = BooleanVar()
self.transition_alpha1 = BooleanVar()
self.transition_alpha2 = BooleanVar()
self.glitch_checkbox = BooleanVar()
self.glitch_horizontal = BooleanVar()
self.glitch_vertical = BooleanVar()
self.exclude_red_inv = IntVar()
self.exclude_green_inv = IntVar()
self.exclude_blue_inv = IntVar()
self.inverse_exclude_variable = BooleanVar()
self.width_entry_variable = IntVar()
self.height_entry_variable = IntVar()
self.cartoon_lightness = BooleanVar()
self.cartoon_luminosity = BooleanVar()
self.cartoon_average = BooleanVar()
self.cartoon_threshold = IntVar()
self.cartoon_neightboors = IntVar()
self.cartoon_color = IntVar()
self.pixel = BooleanVar()
self.pixel_size = IntVar()
self.greyscale = BooleanVar()
self.sepia = BooleanVar()
self.dithering = BooleanVar()
self.dithering_value = IntVar()
self.output_width_value = IntVar()
self.output_height_value = IntVar()
self.output_rows_value = IntVar()
self.output_columns_value = IntVar()
self.file_format_value = StringVar()
self.glow_direction = None
self.file_format_value.set("PNG")
self.pixel_size.set(8)
self.cartoon_lightness.set(0)
self.cartoon_luminosity.set(0)
self.cartoon_average.set(1)
self.cartoon_threshold.set(20)
self.cartoon_neightboors.set(4)
self.cartoon_color.set(16)
self.green_channel.set(1)
self.blue_channel.set(1)
self.red_channel.set(1)
self.glow_scale_value.set(0.0)
self.blurend_frame.set(100)
self.blurstart_frame.set(0)
self.blurx6.set(0)
self.blurx4.set(0)
self.blurx2.set(0)
self.blur_progressive.set(0)
self.blur_checkbox.set(0)
self.cartoon_checkbox.set(0)
self.saturation_scale_value.set(0)
self.saturation_checkbox.set(0)
self.inverse_variable.set(0)
self.checker_value.set(1)
self.preview_scale_delay.set(60)
self.highpass_filter_value.set(128)
self.bloom_checkbox.set(0)
self.hsvstart_frame.set(0)
self.hsvend_frame.set(100)
self.hsv_rotate.set(0)
self.hsv_scale_value.set(0.0)
self.hsv_checkbox.set(0)
self.colorkey.set(0)
self.set_alpha_value.set(0)
self.rleaccel_value.set(0)
self.output_format_32bit.set(0)
self.output_format_24bit.set(0)
self.blend_scale_percentage.set(0.0)
self.glitch_end_frame.set(100)
self.glitch_start_frame.set(0)
self.transition_end_frame.set(100)
self.transition_start_frame.set(0)
self.rgbsplit_end_frame.set(100)
self.rgbsplit_start_frame.set(0)
self.bloomend_frame.set(100)
self.bloomstart_frame.set(0)
self.blurend_frame.set(100)
self.blurstart_frame.set(0)
self.hsvend_frame.set(100)
self.blend_end_frame.set(100)
self.blend_start_frame.set(0)
self.input_format_24bit.set(1)
self.width_value.set(512)
self.height_value.set(512)
self.spritesheet_name_variable.set("")
self.rgbsplitxoffset.set(10)
self.rgbsplityoffset.set(10)
self.split_red_checkbox.set(1)
self.split_green_checkbox.set(1)
self.split_blue_checkbox.set(1)
self.glitch_checkbox.set(0)
self.exclude_red_inv.set(0)
self.exclude_green_inv.set(0)
self.exclude_blue_inv.set(0)
self.inverse_exclude_variable.set(0)
# ROOT WINDOW GEOMETRY
self.x_offset = 100
self.y_offset = 100
global SCREEN_WIDTH, SCREEN_HEIGHT
self.geometry = str(SCREEN_WIDTH) + "x" + str(SCREEN_HEIGHT) + "+" \
+ str(self.x_offset) + "+" + str(self.y_offset)
self.cancel = False
def __copy__(self):
    """Return a shallow copy of this object.

    A new instance is allocated without running ``__init__`` (avoids
    re-running any widget/setup work), and its ``__dict__`` is filled with
    references to this instance's attributes.
    """
    cls = self.__class__
    result = cls.__new__(cls)  # allocate, bypassing __init__
    result.__dict__.update(self.__dict__)
    return result
def pygame_to_tkinter(pygame_surface_, width_: | |
# amd64-linux/lib/python/sun_vtoc_commands.py
from cli import *
#
# VTOC layout: (with unimportant fields removed)
#
# OFFSET SIZE NUM NAME
# 0 128 1 label
# Byte offsets into the 512-byte Sun disk label (VTOC) block.
# Comment columns: OFFSET  SIZE  COUNT  FIELD.
VTOC_VERSION = 128   # 128   4   1  version
#                      132   8   1  volume name
VTOC_NUMPART = 140   # 140   2   1  number of partitions
VTOC_PART_S2 = 142   # 142   4   8  partition headers, section 2
#                               2 bytes tag
#                               2 bytes permission flag
#                      174   2   1  <pad>
#                      176   4   3  bootinfo
VTOC_SANITY = 188    # 188   4   1  sanity
#                      192   4  10  <reserved>
#                      232   4   8  partition timestamp
#                      264   2   1  write reinstruct
#                      266   2   1  read reinstruct
#                      268 152   1  <pad>
VTOC_RPM = 420       # 420   2   1  rpm
VTOC_PHYS_CYL = 422  # 422   2   1  physical cylinders
VTOC_ALT_P_CYL = 424 # 424   2   1  alternates per cylinder
#                      426   2   1  <obsolete>
#                      428   2   1  <obsolete>
VTOC_INTRLV = 430    # 430   2   1  interleave
VTOC_DATA_CYL = 432  # 432   2   1  data cylinders
VTOC_ALT_CYL = 434   # 434   2   1  alt cylinders
VTOC_HEADS = 436     # 436   2   1  heads
VTOC_TRACKS = 438    # 438   2   1  sectors per track
#                      440   2   1  <obsolete>
#                      442   2   1  <obsolete>
VTOC_PART_S1 = 444   # 444   8   8  partition headers, section 1
#                               4 bytes start cylinder
#                               4 bytes number of blocks
VTOC_MAGIC = 508     # 508   2   1  magic = 0xDABE
VTOC_CHECKSUM = 510  # 510   2   1  checksum

# Partition-tag values -> human-readable names (subset; 130/131 are the
# Linux swap / Linux native tags that can also appear on these disks).
tag_list = { 0 : "unused",
             1 : "boot",
             2 : "root",
             3 : "swap",
             4 : "usr",
             5 : "backup",
             7 : "var",
             8 : "home",
             130 : "Linux swap",
             131 : "Linux" }

# Partition permission-flag values -> human-readable names.
flag_list = { 0 : "RW",
              1 : "unmountable",
              2 : "RO" }
def get_tag_str(tag):
    """Return the human-readable name of partition tag *tag* as "(name)",
    or "(unknown)" for unrecognised values.

    Fix: replaces the original bare ``except:`` (which silently swallowed
    every error, not just a missing key) with an explicit dict.get().
    """
    return "(" + tag_list.get(tag, "unknown") + ")"
def get_flag_str(flag):
    """Return the human-readable name of permission flag *flag* as
    "(name)", or "(unknown)" for unrecognised values.

    Fix: replaces the original bare ``except:`` (which silently swallowed
    every error, not just a missing key) with an explicit dict.get().
    """
    return "(" + flag_list.get(flag, "unknown") + ")"
def calculate_checksum(vtoc):
    """XOR together the 255 big-endian 16-bit words preceding the checksum
    field (bytes 0-509) and return the result."""
    checksum = 0
    for word_offset in range(0, 510, 2):
        word = (vtoc[word_offset] << 8) | vtoc[word_offset + 1]
        checksum ^= word
    return checksum
def get_vtoc_label(vtoc):
    """Return the ASCII disk label stored at the start of *vtoc*.

    Reads bytes up to (not including) the first NUL byte.  Fix: the
    original fell off the end of the loop and implicitly returned None
    when no NUL terminator was present; now the whole buffer is decoded.
    Also renamed the accumulator, which shadowed the builtin ``str``.
    """
    label = ""
    for byte in vtoc:
        if byte == 0:
            break
        label += chr(byte)
    return label
def set_vtoc_label(vtoc, str):
    """Write label *str* at the start of *vtoc*, zero-filling the rest of
    the 512-byte block.

    Fix: the original reused the loop variable after the loop, raising
    NameError for an empty label; the zero-fill range is now computed from
    len() instead.  NOTE: the parameter keeps its original name ``str``
    (shadowing the builtin) for interface compatibility with callers.
    """
    for i in range(len(str)):
        vtoc[i] = ord(str[i])
    # Zero-terminate and clear the remainder of the block.
    for j in range(len(str), 512):
        vtoc[j] = 0
def get_vtoc_int16(vtoc, offset):
    """Return the big-endian unsigned 16-bit integer stored at *offset*."""
    high_byte = vtoc[offset]
    low_byte = vtoc[offset + 1]
    return high_byte * 256 + low_byte
def set_vtoc_int16(vtoc, offset, value):
    """Store the low 16 bits of *value* at *offset*, big-endian."""
    high, low = (value >> 8) & 0xff, value & 0xff
    vtoc[offset] = high
    vtoc[offset + 1] = low
def get_vtoc_int32(vtoc, offset):
    """Return the big-endian unsigned 32-bit integer stored at *offset*."""
    value = 0
    for byte in vtoc[offset:offset + 4]:
        value = (value << 8) | byte
    return value
def set_vtoc_int32(vtoc, offset, value):
    """Store the low 32 bits of *value* at *offset* as four big-endian bytes."""
    for i, shift in enumerate((24, 16, 8, 0)):
        vtoc[offset + i] = (value >> shift) & 0xff
def read_block(obj, offset):
    """Read disk block number *offset* (512 bytes) from simulator object *obj*.

    Returns the block as a list of integer byte values.  "scsi-disk"
    objects expose bulk sector access; "ide-disk" objects are read one
    byte at a time through the underlying image.  Any other object type
    is an error.
    """
    if obj.classname == "scsi-disk":
        return list(obj.sector_data[offset * 512])
    elif obj.classname == "ide-disk":
        block = []
        for i in range(0, 512):
            block.append(obj.image.byte_access[offset * 512 + i])
        return block
    else:
        raise Exception, "Unknown disk type"  # Python 2 raise syntax
def write_block(obj, offset, block):
    """Write *block* (a 512-entry byte sequence) as disk block *offset*.

    Mirror of read_block(): bulk write for "scsi-disk", byte-wise write
    for "ide-disk"; any other object type is an error.
    """
    if obj.classname == "scsi-disk":
        obj.sector_data[offset * 512] = block
    elif obj.classname == "ide-disk":
        for i in range(0, 512):
            obj.image.byte_access[offset * 512 + i] = block[i]
    else:
        raise Exception, "Unknown disk type"  # Python 2 raise syntax
def print_partitions(obj, vtoc):
    """Print every VTOC partition slot (0-7) that has a non-zero size.

    Start cylinders (section 1 of the partition headers) are converted
    into absolute block numbers using the heads / sectors-per-track
    geometry recorded in the label.
    """
    heads = get_vtoc_int16(vtoc, VTOC_HEADS)
    s_per_t = get_vtoc_int16(vtoc, VTOC_TRACKS)
    print "Partition Table:"
    print "Number Tag Flag Start End Size"
    for i in range(0, 8):
        tag = get_vtoc_int16(vtoc, VTOC_PART_S2 + 4 * i + 0)
        flag = get_vtoc_int16(vtoc, VTOC_PART_S2 + 4 * i + 2)
        start = get_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * i + 0)
        blocks = get_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * i + 4)
        if blocks == 0:
            # Empty slot -- nothing to report.
            continue
        start *= heads * s_per_t  # start cylinder -> absolute block number
        print " %d %d %-12s %d %-13s %9d %9d %9d" % (
            i, tag, get_tag_str(tag), flag, get_flag_str(flag),
            start, start + blocks - 1, blocks)
def print_sun_vtoc_cmd(obj):
    """Pretty-print the Sun VTOC stored in block 0 of disk object *obj*.

    Bails out early unless the 0xDABE magic is present.  After printing
    geometry and the partition table it also warns about an unexpected
    partition count or a checksum mismatch.
    """
    vtoc = read_block(obj, 0)
    if get_vtoc_int16(vtoc, VTOC_MAGIC) != 0xDABE:
        print "This does not appear to be a Sun Disk."
        print "The magic is %x, expected 0xDABE" % get_vtoc_int16(vtoc, VTOC_MAGIC)
        print
        return
    data_cyl = get_vtoc_int16(vtoc, VTOC_DATA_CYL)
    phys_cyl = get_vtoc_int16(vtoc, VTOC_PHYS_CYL)
    heads = get_vtoc_int16(vtoc, VTOC_HEADS)
    s_per_t = get_vtoc_int16(vtoc, VTOC_TRACKS)
    print
    print " Label : %s" % get_vtoc_label(vtoc)
    print " RPM : %s" % get_vtoc_int16(vtoc, VTOC_RPM)
    print " Data cylinders : %d" % data_cyl
    print " Alt cylinders : %d" % get_vtoc_int16(vtoc, VTOC_ALT_CYL)
    print "Physical cylinders : %d" % phys_cyl
    print " Heads : %d" % heads
    print " Sectors per Track : %d" % s_per_t
    print
    print " Number of data blocks : %d" % (data_cyl * s_per_t * heads)
    print
    print_partitions(obj, vtoc)
    # Sanity checks: the label always describes 8 slots, and the checksum
    # must cover bytes 0-509.
    num_part = get_vtoc_int16(vtoc, VTOC_NUMPART)
    chk_sum = get_vtoc_int16(vtoc, VTOC_CHECKSUM)
    if num_part != 8:
        print
        print "### Illegal number of partitions set (%d), only 8 supported" % num_part
    if calculate_checksum(vtoc) != chk_sum:
        print "### Incorrect checksum: %d. Expected: %d" % (chk_sum, calculate_checksum(vtoc))
    print
def write_sun_vtoc_cmd(obj, C, H, S, quiet):
    """Build a fresh Sun VTOC and write it to block 0 of disk object *obj*.

    C/H/S is the geometry (cylinders, heads, sectors per track).  Passing
    -1, -1, -1 uses the geometry stored on an IDE disk object; SCSI disks
    carry no such attributes, so explicit geometry is mandatory there.
    Two cylinders are reserved as alternates.  The conventional "backup"
    slice 2 covering the whole data area is created as well.
    """
    vtoc = [0] * 512
    # Geometry must be given completely or not at all.
    if -1 in [C, H, S] and [C, H, S] != [-1, -1, -1]:
        print "Only Partial geometry specified."
        SIM_command_has_problem()
        return
    alt = 2  # cylinders reserved as alternates
    if [C, H, S] != [-1, -1, -1]:
        cyl = C - alt
        heads = H
        s_per_t = S
    elif obj.classname == "scsi-disk":
        print "No geometry specified for SCSI disk VTOC."
        SIM_command_has_problem()
        return
    elif obj.classname == "ide-disk":
        cyl = obj.disk_cylinders - alt
        heads = obj.disk_heads
        s_per_t = obj.disk_sectors_per_track
        pass
    else:
        raise Exception, "Unknown disk type"  # Python 2 raise syntax
    set_vtoc_label(vtoc, "SIMDISK cyl %d alt %d hd %d sec %d" % (cyl, alt, heads, s_per_t))
    set_vtoc_int32(vtoc, VTOC_VERSION, 1)
    set_vtoc_int16(vtoc, VTOC_MAGIC, 0xDABE)
    set_vtoc_int16(vtoc, VTOC_DATA_CYL, cyl)
    set_vtoc_int16(vtoc, VTOC_ALT_CYL, alt)
    set_vtoc_int16(vtoc, VTOC_INTRLV, 1)
    set_vtoc_int16(vtoc, VTOC_PHYS_CYL, cyl + alt)
    set_vtoc_int16(vtoc, VTOC_HEADS, heads)
    set_vtoc_int16(vtoc, VTOC_TRACKS, s_per_t)
    set_vtoc_int16(vtoc, VTOC_NUMPART, 8)
    set_vtoc_int16(vtoc, VTOC_RPM, 7200)
    set_vtoc_int32(vtoc, VTOC_SANITY, 0x600ddeee)
    # set checksum last!
    set_vtoc_int16(vtoc, VTOC_CHECKSUM, calculate_checksum(vtoc))
    write_block(obj, 0, tuple(vtoc))
    # create the backup slice
    write_sun_vtoc_partition_cmd(obj, 2, "backup", "unmountable", 0, cyl * heads * s_per_t, 1)
    if not quiet and SIM_get_quiet() == 0:
        print "New VTOC written to disk:"
        print_sun_vtoc_cmd(obj)
def write_sun_vtoc_partition_cmd(obj, nbr, tag_str, flag_str, start, blocks, quiet):
    """Fill in VTOC partition slot *nbr* (0-7) and write the label back.

    *tag_str* / *flag_str* are the human-readable names from tag_list /
    flag_list (reverse lookup).  *start* is an absolute block number and
    is converted to a start cylinder; *blocks* is the size in 512-byte
    blocks.  The label checksum is recomputed before the write.
    """
    if nbr < 0 or nbr > 7:
        print "Partitions are numbered 0 ..7\n"
        return
    # Reverse lookup name -> numeric tag (Python 2: keys()/values() are
    # parallel lists).
    try:
        tag = tag_list.keys()[tag_list.values().index(tag_str)]
    except:
        print "Unknown tag type '%s'" % tag_str
        print "Try one of:"
        for i in tag_list.values():
            print " " + i
        print
        return
    try:
        flag = flag_list.keys()[flag_list.values().index(flag_str)]
    except:
        print "Unknown flag '%s'" % flag_str
        print "Try one of:"
        for i in flag_list.values():
            print " " + i
        print
        return
    vtoc = read_block(obj, 0)
    heads = get_vtoc_int16(vtoc, VTOC_HEADS)
    s_per_t = get_vtoc_int16(vtoc, VTOC_TRACKS)
    set_vtoc_int16(vtoc, VTOC_PART_S2 + 4 * nbr + 0, tag)
    set_vtoc_int16(vtoc, VTOC_PART_S2 + 4 * nbr + 2, flag)
    # Absolute block number -> start cylinder.
    set_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * nbr + 0, start / (heads * s_per_t))
    set_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * nbr + 4, blocks)
    # set checksum last!
    set_vtoc_int16(vtoc, VTOC_CHECKSUM, calculate_checksum(vtoc))
    write_block(obj, 0, tuple(vtoc))
    if not quiet and SIM_get_quiet() == 0:
        print_partitions(obj, vtoc)
        print
def delete_sun_vtoc_partition_cmd(obj, nbr, quiet):
    """Clear VTOC partition slot *nbr* (0-7): zero its tag and block count,
    recompute the checksum and write the label back."""
    if nbr < 0 or nbr > 7:
        print "Partitions are numbered 0 ..7\n"
        return
    vtoc = read_block(obj, 0)
    set_vtoc_int16(vtoc, VTOC_PART_S2 + 4 * nbr + 0, 0)
    set_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * nbr + 4, 0)
    # set checksum last!
    set_vtoc_int16(vtoc, VTOC_CHECKSUM, calculate_checksum(vtoc))
    write_block(obj, 0, tuple(vtoc))
    if not quiet and SIM_get_quiet() == 0:
        print_partitions(obj, vtoc)
        print
def dump_sun_partition_cmd(obj, nbr, file):
    """Dump the raw contents of partition *nbr* (0-7) to *file* via the
    disk image's dump interface.

    The partition's start cylinder is converted to an absolute byte offset
    using the label geometry; an empty slot (0 blocks) aborts the dump.
    """
    if nbr < 0 or nbr > 7:
        print "Partitions are numbered 0 ..7\n"
        return
    vtoc = read_block(obj, 0)
    heads = get_vtoc_int16(vtoc, VTOC_HEADS)
    s_per_t = get_vtoc_int16(vtoc, VTOC_TRACKS)
    start = get_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * nbr) * heads * s_per_t
    blocks = get_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * nbr + 4)
    if blocks == 0:
        print "No partition %d.\n" % nbr
        return
    print "Dumping partition %d. Start block %d. Size in blocks: %d" % (nbr, start, blocks)
    # index with list, since python doesn't have 4 bit indexes
    try:
        obj.image.dump[[file, start * 512, blocks * 512]]
    except Exception, msg:  # Python 2 except syntax
        print "Failed getting a dump from the disk image."
        print "Error message was: %s\n" % msg
        return
    print "Partition dumped successfully.\n"
def add_sun_partition_cmd(obj, nbr, file):
if nbr < 0 or nbr > 7:
print "Partitions are numbered 0 ..7\n"
return
vtoc = read_block(obj, 0)
heads = get_vtoc_int16(vtoc, VTOC_HEADS)
s_per_t = get_vtoc_int16(vtoc, VTOC_TRACKS)
start = get_vtoc_int32(vtoc, VTOC_PART_S1 | |
# Source repository: yujing1997/CTcnn
import matplotlib
matplotlib.use('Agg') #to actually be able to use matplotlib
import numpy as np
from scipy.misc import imsave,imread
import time
from keras import backend as K
from vis.visualization import visualize_activation,visualize_saliency,visualize_cam,get_num_filters
from vis.utils import utils
from sklearn.metrics import roc_auc_score, classification_report, roc_curve, confusion_matrix
from matplotlib import pyplot as plt
import itertools
import os
from keras.models import load_model, Model
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras import applications, optimizers, activations
from vis.input_modifiers import Jitter
from PIL import Image
import matplotlib.gridspec as gridspec
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Convert a raw gradient/activation tensor into displayable values.

    Centers *x* on zero, rescales it to a standard deviation of ~0.1,
    shifts it to 0.5 and clips to [0, 1].  Note: the first three steps
    mutate *x* in place; the clipped result is returned as a new array.
    """
    x -= x.mean()
    x /= x.std() + 1e-5
    x *= 0.1
    x += 0.5
    return np.clip(x, 0, 1)
def plot_confusion_matrix(cm, classes,
                          normalize=True,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render confusion matrix *cm* with per-cell value labels and save it
    to 'confusionmatrix.eps'.

    With normalize=True (the default) each row is divided by its sum, so
    cells show per-class fractions; otherwise raw counts are shown.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    #print(cmnormalized)
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    # Label each cell; white text on dark cells, black on light ones.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig('confusionmatrix.eps')
def get_activations(model, model_inputs, print_shape_only=True, layer_name=None):
    """Return (and print) the activations of *model* for *model_inputs*.

    If *layer_name* is given, only that layer's output is evaluated;
    otherwise every layer is included.  Evaluation runs in test mode
    (learning phase 0, so dropout / batch-norm use inference behaviour).
    Returns a list of numpy arrays, one per selected layer.
    """
    print('----- activations -----')
    activations = []
    inp = model.input
    model_multi_inputs_cond = True
    if not isinstance(inp, list):
        # only one input! let's wrap it in a list.
        inp = [inp]
        model_multi_inputs_cond = False
    outputs = [layer.output for layer in model.layers if
               layer.name == layer_name or layer_name is None]  # all layer outputs
    funcs = [K.function(inp + [K.learning_phase()], [out]) for out in outputs]  # evaluation functions
    if model_multi_inputs_cond:
        list_inputs = []
        list_inputs.extend(model_inputs)
        list_inputs.append(0.)
    else:
        list_inputs = [model_inputs, 0.]
    # Learning phase. 0 = Test mode (no dropout or batch normalization)
    # layer_outputs = [func([model_inputs, 0.])[0] for func in funcs]
    layer_outputs = [func(list_inputs)[0] for func in funcs]
    for layer_activations in layer_outputs:
        activations.append(layer_activations)
        if print_shape_only:
            print(layer_activations.shape)
        else:
            print(layer_activations)
    return activations
def display_activations(activation_maps):
    """Plot each activation map of a single-image batch and save the last
    figure as 'activations_noDMcase_C15.png' (hard-coded filename).

    4-D maps (batch, h, w, channels) are laid out as a horizontal strip of
    channels; 2-D maps (dense layers) are reshaped into a near-square grid
    when small enough, otherwise shown as a single row.
    """
    import numpy as np
    batch_size = activation_maps[0].shape[0]
    assert batch_size == 1, 'One image at a time to visualize.'
    for i, activation_map in enumerate(activation_maps):
        print('Displaying activation map {}'.format(i))
        shape = activation_map.shape
        if len(shape) == 4:
            # Stack channels side by side: (h, w, c) -> (h, w*c).
            activations = np.hstack(np.transpose(activation_map[0], (2, 0, 1)))
        elif len(shape) == 2:
            # try to make it square as much as possible. we can skip some activations.
            activations = activation_map[0]
            num_activations = len(activations)
            if num_activations < 1024:  # too hard to display it on the screen.
                square_param = int(np.floor(np.sqrt(num_activations)))
                activations = activations[0: square_param * square_param]
                activations = np.reshape(activations, (square_param, square_param))
            else:
                activations = np.expand_dims(activations, axis=0)
        else:
            raise Exception('len(shape) = 3 has not been implemented.')
        plt.figure()
        plt.imshow(activations, interpolation='None', cmap='gray')
        #plt.show()
        plt.savefig('activations_noDMcase_C15.png')
def generate_max_activation(model, gradmod, backprop):
    """Synthesise (via keras-vis activation maximisation with Jitter(16))
    the input that maximally activates unit 0 of the model's final layer,
    then save it under five colormaps as activations_<cmap>.eps."""
    activations = visualize_activation(model, -1, filter_indices=[0], verbose=True, input_modifiers=[Jitter(16)], backprop_modifier=backprop, grad_modifier=gradmod, act_max_weight=1, lp_norm_weight=10, tv_weight=10)
    plt.imsave('activations_inferno.eps', activations[:,:,0], cmap='inferno')
    plt.imsave('activations_plasma.eps', activations[:,:,0], cmap='plasma')
    plt.imsave('activations_magma.eps', activations[:,:,0], cmap='magma')
    plt.imsave('activations_gray.eps', activations[:,:,0], cmap='gray')
    plt.imsave('activations_viridis.eps', activations[:,:,0], cmap='viridis')
def generate_saliency_cam_maps(model, testimage, camlayerofinterest, gradmod, backprop, imagename):
    """Save a saliency map and a Grad-CAM heatmap for *testimage*, plus a
    blend of the CAM over the CT image.

    Writes saliency-<imagename>.png and cam_conv3-<imagename>.png, then
    blends the CAM (60%) over "currentCT.png" -- which must already exist
    from a previous step -- into "mergedimage.png".
    """
    grads = visualize_saliency(model, -1, filter_indices=None, seed_input=testimage, backprop_modifier=backprop, grad_modifier=gradmod)
    plt.imsave('saliency-' + imagename +'.png', grads, cmap='gray')
    cam = visualize_cam(model, camlayerofinterest, filter_indices=None, seed_input=testimage, backprop_modifier=backprop, grad_modifier=gradmod, penultimate_layer_idx=None)  #0, 3, 6 are the conv layers. (so 1, 4 7 are what we want)
    plt.imsave('cam_conv3-' + imagename +'.png', cam)
    background = Image.open("currentCT.png")
    overlay = Image.open('cam_conv3-' + imagename +'.png')
    mergedimage = Image.blend(background, overlay, 0.60)
    mergedimage.save("mergedimage.png", "PNG")
def brute_force_montage(desiredslice, imagedir, model, camlayerofinterest, gradmod, backprop):
    """Build a 4x3 montage of (CT crop, Grad-CAM, blended overlay) rows for
    four hard-coded patients at axial slice *desiredslice*, and save it as
    "fullmontage.eps".

    Rows 1-2 are DM (distant-metastasis) cases, rows 3-4 are no-DM cases.
    The per-patient pixel offsets added to *padding* are hand-tuned crops
    for these specific scans.  "currentCT.png" and cam_conv3-<image>.png
    are (re)written repeatedly as a side effect.
    """
    padding = 30
    fig = plt.figure(figsize=(3, 3))
    gs1 = gridspec.GridSpec(4, 3, wspace=0.05, hspace=0)
    # --- Row 1: DM case HN-HMR-027 ---
    imagefile = "HN-HMR-027-" + desiredslice
    ax = plt.subplot(gs1[0])
    ax.set_title("(a)", weight='bold', family='sans-serif')
    ax.set_ylabel("DM", size='large')
    plt.axis('off')
    testimage = imread(imagedir + "TEST/dm/" + imagefile + '.png')
    indcolumns = np.nonzero(testimage.any(axis=0))[0]  # indices of non empty columns
    indrows = np.nonzero(testimage.any(axis=1))[0]  # indices of non empty rows
    plt.imshow(testimage[indrows[0]-padding-2:indrows[-1]+padding+3, indcolumns[0]-padding-35:indcolumns[-1]+padding+36], cmap='gray')
    plt.imsave('currentCT.png', testimage[indrows[0]-padding-2:indrows[-1]+padding+3, indcolumns[0]-padding-35:indcolumns[-1]+padding+36], cmap='gray')
    testimage = testimage.reshape(1, 512, 512, 1)
    cam = visualize_cam(model, camlayerofinterest, filter_indices=None, seed_input=testimage, backprop_modifier=backprop, grad_modifier=gradmod, penultimate_layer_idx=None)  #0, 3, 6 are the conv layers. (so 1, 4 7 are what we want)
    plt.imsave('cam_conv3-' + imagefile +'.png', cam[indrows[0]-padding-2:indrows[-1]+padding+3, indcolumns[0]-padding-35:indcolumns[-1]+padding+36])
    ax = plt.subplot(gs1[1])
    ax.set_title("(b)", weight='bold', family='sans-serif')
    plt.axis('off')
    plt.imshow(cam[indrows[0]-padding-2:indrows[-1]+padding+3, indcolumns[0]-padding-35:indcolumns[-1]+padding+36])
    background = Image.open("currentCT.png")
    overlay = Image.open('cam_conv3-' + imagefile +'.png')
    mergedimage = Image.blend(background, overlay, 0.60)
    ax = plt.subplot(gs1[2])
    ax.set_title("(c)", weight='bold', family='sans-serif')
    plt.axis('off')
    plt.imshow(mergedimage)
    # --- Row 2: DM case HN-HMR-011 ---
    imagefile = "HN-HMR-011-" + desiredslice
    plt.subplot(gs1[3])
    plt.axis('off')
    testimage = imread(imagedir + "TEST/dm/" + imagefile + '.png')
    indcolumns = np.nonzero(testimage.any(axis=0))[0]  # indices of non empty columns
    indrows = np.nonzero(testimage.any(axis=1))[0]  # indices of non empty rows
    plt.imshow(testimage[indrows[0]-padding:indrows[-1]+padding, indcolumns[0]-padding:indcolumns[-1]+padding], cmap='gray')
    plt.imsave('currentCT.png', testimage[indrows[0]-padding:indrows[-1]+padding, indcolumns[0]-padding:indcolumns[-1]+padding], cmap='gray')
    testimage = testimage.reshape(1, 512, 512, 1)
    cam = visualize_cam(model, camlayerofinterest, filter_indices=None, seed_input=testimage, backprop_modifier=backprop, grad_modifier=gradmod, penultimate_layer_idx=None)  #0, 3, 6 are the conv layers. (so 1, 4 7 are what we want)
    plt.imsave('cam_conv3-' + imagefile +'.png', cam[indrows[0]-padding:indrows[-1]+padding, indcolumns[0]-padding:indcolumns[-1]+padding])
    plt.subplot(gs1[4])
    plt.axis('off')
    plt.imshow(cam[indrows[0]-padding:indrows[-1]+padding, indcolumns[0]-padding:indcolumns[-1]+padding])
    background = Image.open("currentCT.png")
    overlay = Image.open('cam_conv3-' + imagefile +'.png')
    mergedimage = Image.blend(background, overlay, 0.60)
    plt.subplot(gs1[5])
    plt.axis('off')
    plt.imshow(mergedimage)
    # --- Row 3: no-DM case HN-CHUM-015 ---
    imagefile = "HN-CHUM-015-" + desiredslice
    plt.subplot(gs1[6])
    plt.axis('off')
    testimage = imread(imagedir + "TEST/nodm/" + imagefile + '.png')
    indcolumns = np.nonzero(testimage.any(axis=0))[0]  # indices of non empty columns
    indrows = np.nonzero(testimage.any(axis=1))[0]  # indices of non empty rows
    plt.imshow(testimage[indrows[0]-padding-9:indrows[-1]+padding+9, indcolumns[0]-padding-37:indcolumns[-1]+padding+37], cmap='gray')
    plt.imsave('currentCT.png', testimage[indrows[0]-padding-9:indrows[-1]+padding+9, indcolumns[0]-padding-37:indcolumns[-1]+padding+37], cmap='gray')
    testimage = testimage.reshape(1, 512, 512, 1)
    cam = visualize_cam(model, camlayerofinterest, filter_indices=None, seed_input=testimage, backprop_modifier=backprop, grad_modifier=gradmod, penultimate_layer_idx=None)  #0, 3, 6 are the conv layers. (so 1, 4 7 are what we want)
    plt.imsave('cam_conv3-' + imagefile +'.png', cam[indrows[0]-padding-9:indrows[-1]+padding+9, indcolumns[0]-padding-37:indcolumns[-1]+padding+37])
    plt.subplot(gs1[7])
    plt.axis('off')
    plt.imshow(cam[indrows[0]-padding-9:indrows[-1]+padding+9, indcolumns[0]-padding-37:indcolumns[-1]+padding+37])
    background = Image.open("currentCT.png")
    overlay = Image.open('cam_conv3-' + imagefile +'.png')
    mergedimage = Image.blend(background, overlay, 0.60)
    plt.subplot(gs1[8])
    plt.axis('off')
    plt.imshow(mergedimage)
    # --- Row 4: no-DM case HN-HMR-010 ---
    imagefile = "HN-HMR-010-" + desiredslice
    plt.subplot(gs1[9])
    plt.axis('off')
    testimage = imread(imagedir + "TEST/nodm/" + imagefile + '.png')
    indcolumns = np.nonzero(testimage.any(axis=0))[0]  # indices of non empty columns
    indrows = np.nonzero(testimage.any(axis=1))[0]  # indices of non empty rows
    plt.imshow(testimage[indrows[0]-padding-16:indrows[-1]+padding+17, indcolumns[0]-padding-45:indcolumns[-1]+padding+46], cmap='gray')
    plt.imsave('currentCT.png', testimage[indrows[0]-padding-16:indrows[-1]+padding+17, indcolumns[0]-padding-45:indcolumns[-1]+padding+46], cmap='gray')
    testimage = testimage.reshape(1, 512, 512, 1)
    cam = visualize_cam(model, camlayerofinterest, filter_indices=None, seed_input=testimage, backprop_modifier=backprop, grad_modifier=gradmod, penultimate_layer_idx=None)  #0, 3, 6 are the conv layers. (so 1, 4 7 are what we want)
    plt.imsave('cam_conv3-' + imagefile +'.png', cam[indrows[0]-padding-16:indrows[-1]+padding+17, indcolumns[0]-padding-45:indcolumns[-1]+padding+46])
    plt.subplot(gs1[10])
    plt.axis('off')
    plt.imshow(cam[indrows[0]-padding-16:indrows[-1]+padding+17, indcolumns[0]-padding-45:indcolumns[-1]+padding+46])
    background = Image.open("currentCT.png")
    overlay = Image.open('cam_conv3-' + imagefile +'.png')
    mergedimage = Image.blend(background, overlay, 0.60)
    plt.subplot(gs1[11])
    plt.axis('off')
    plt.imshow(mergedimage)
    plt.savefig("fullmontage.eps", dpi=300)
def thorough_numerical_evaluation(model, validation_generator, training_generator, threshold):
    """Print train/test ROC AUC, plot the ROC curve (ROCcurve.eps) with a
    secondary threshold axis, and show a confusion matrix obtained by
    thresholding the validation predictions at *threshold*.

    NOTE(review): predictions are min-max normalised to [0, 1] before
    thresholding, so *threshold* applies to the rescaled scores rather
    than raw model outputs -- confirm this is intended.
    """
    probabilities = model.predict_generator(validation_generator, 1, verbose=1)
    score = model.evaluate_generator(validation_generator)
    # Training-set AUC.
    true_classes = training_generator.classes
    predictions = model.predict_generator(generator=training_generator, steps=2, workers=1)
    roc_auc = roc_auc_score(y_true=true_classes, y_score=np.ravel(predictions))
    print('Training AUC')
    print(roc_auc)
    # Validation-set AUC.
    true_classes = validation_generator.classes
    predictions = model.predict_generator(generator=validation_generator, steps=2, workers=1)
    roc_auc = roc_auc_score(y_true=true_classes, y_score=np.ravel(predictions))
    print('Testing AUC')
    print(roc_auc)
    predictions = (predictions-min(predictions))/(max(predictions)-min(predictions))  #Normalize between 0 and 1.
    print(predictions)
    print(validation_generator.filenames)
    fpr, tpr, thresholds = roc_curve(true_classes, np.ravel(predictions))
    print('Specificity (1-FPR)')
    specificity = 1 - fpr
    print(specificity)
    print('TPR (sensitivity)')
    print(tpr)
    print('Thresholds')
    print(thresholds)
    # ROC curve with a secondary (red, dashed) threshold axis.
    plt.figure()
    plt.tight_layout()
    plt.plot(fpr, tpr, color='darkorange', label='ROC curve (area = %0.2f)' % roc_auc)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic curve')
    plt.legend(loc="lower right")
    ax2 = plt.gca().twinx()
    ax2.plot(fpr, thresholds, markeredgecolor='r', linestyle='dashed', color='r')
    ax2.set_ylabel('Threshold', color='r')
    ax2.set_ylim([thresholds[-1], thresholds[0]])
    ax2.set_xlim([fpr[0], fpr[-1]])
    plt.savefig('ROCcurve.eps')
    # Binarise: scores <= threshold -> class 0 ('dm'), else class 1 ('nodm').
    dmindex = predictions <= threshold
    nodmindex = predictions > threshold
    predictions[dmindex] = 0
    predictions[nodmindex] = 1
    print(predictions)
    cnf = confusion_matrix(true_classes, predictions)
    plot_confusion_matrix(cnf, ['dm', 'nodm'], normalize=False)
    print(validation_generator.class_indices)
    print(validation_generator.classes)
    print(model.metrics_names)
    print(score)
def get_weights(layer_name, model):
    """Plot a 12x12 grid of one 2-D slice of each filter in *layer_name*
    and save it as weights.png.

    NOTE(review): only the [:, :, 0, :] slice of the 4-D conv kernel is
    plotted, and the loop starts at index 1, so filter 0 is never shown --
    this reads as quick exploratory code; confirm before reuse.
    """
    layer_idx = utils.find_layer_idx(model, layer_name)
    print(np.shape(model.layers[layer_idx].get_weights()[0]))
    weights = model.layers[layer_idx].get_weights()[0][:,:,0,:]  #For CONV layers, NOTE THIS IS ONLY LOOKING AT A SINGLE SLICE OF THE FILTERS WEIGHTS, just to show it's possible, I have never explicitly used this.
    #3weights= model.layers[layer_idx].get_weights()[0] #FOR PRELU layers
    plt.figure()
    print(np.shape(weights))
    for i in range(1, np.shape(weights)[2]):
        plt.subplot(12, 12, i)
        plt.imshow(weights[:,:,i], interpolation="nearest", cmap="gray")
        plt.axis('off')
    plt.savefig('weights.png')
def generate_filter_max_activation(model, layer_name):
    """Run gradient-ascent activation maximisation for the first 128
    filters of *layer_name* and return the synthesised images.

    For each filter, a noisy 512x512 input is optimised for up to 400
    steps to maximise the filter's mean activation; filters whose loss
    collapses to <= 0 are skipped.

    Fix: the original built ``kept_filters`` and then discarded it (only
    its shape was printed), although the companion
    ``brute_force_filter_stich(kept_filters)`` expects this list.  The
    list of (image, loss) tuples is now returned (backward compatible:
    callers that ignored the previous None return are unaffected).
    """
    input_img = model.input
    img_width = 512
    img_height = 512
    layer_dict = dict([(layer.name, layer) for layer in model.layers])
    kept_filters = []

    def normalize(x):
        # utility function to normalize a tensor by its L2 norm
        return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())

    for filter_index in range(128):  # Set this to the number of filters in the layer you are interested in.
        print('Processing filter %d' % filter_index)
        start_time = time.time()
        # we build a loss function that maximizes the activation
        # of the nth filter of the layer considered
        layer_output = layer_dict[layer_name].output
        if K.image_data_format() == 'channels_first':
            loss = K.mean(layer_output[:, filter_index, :, :])
        else:
            loss = K.mean(layer_output[:, :, :, filter_index])
        # we compute the gradient of the input picture wrt this loss
        grads = K.gradients(loss, input_img)[0]
        # normalization trick: we normalize the gradient
        grads = normalize(grads)
        # this function returns the loss and grads given the input picture
        iterate = K.function([input_img], [loss, grads])
        # step size for gradient ascent
        step = 1
        # we start from a gray image with some random noise
        if K.image_data_format() == 'channels_first':
            input_img_data = np.random.random((1, 1, img_width, img_height))
        else:
            input_img_data = np.random.random((1, img_width, img_height, 1))
        input_img_data = (input_img_data - 0.5) * 20
        # we run gradient ascent for up to 400 steps
        for i in range(400):
            loss_value, grads_value = iterate([input_img_data])
            input_img_data += grads_value * step
            if i == 399:
                print('Current loss value:', loss_value)
            if loss_value <= 0.:
                # some filters get stuck to 0, we can skip them
                print('break')
                break
        # decode the resulting input image
        if loss_value > 0:
            img = deprocess_image(input_img_data[0])
            kept_filters.append((img, loss_value))
        end_time = time.time()
        print('Filter %d processed in %ds' % (filter_index, end_time - start_time))
    print(np.shape(kept_filters))
    return kept_filters
def brute_force_filter_stich(kept_filters):
# we will stich the best 64 filters on a 8 x 8 grid.
n = 2
kept_filters.sort(key=lambda x: x[1], reverse=True)
#kept_filters = kept_filters[:n * n]
print(np.shape(kept_filters))
# build a black picture with enough space for
# our 8 x 8 filters of size 128 x 128, with a 5px margin in between
margin = 5
#width = (n+1) * img_width + (n) * margin #HEIGHT MODIFIED
width = n * img_width + (n-1) * margin
height = n * img_height + (n-1) * margin
stitched_filters = np.zeros((width, height, 1))
#fill the picture with our saved filters
# for i in range(n):
# for j in range(n):
# print(i*n+j)
# img, loss = kept_filters[i * n + j]
# stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,
# (img_height + margin) * j: (img_height + margin) * j + img_height,:] = img
# plt.imsave('filter_%03d.png' % (i*n+j),img[:,:,0],cmap='gist_ncar')
# for i in range(7):
# print(i+121)
# img, loss = kept_filters[i+121]
# stitched_filters[(img_width + margin) * 11: (img_width + margin) * 11 + img_width,
# (img_height + margin) * i: (img_height + margin) * i + img_height,:] = img
# plt.imsave('filter_%03d.png' % (i+121),img[:,:,0],cmap='gray')
img, loss = kept_filters[1]
stitched_filters[(img_width + margin) * 0: (img_width + margin) * 0 + img_width,
(img_height + margin) * 0: (img_height + margin) * 0 + img_height,:] = img
img, loss = kept_filters[4]
stitched_filters[(img_width + margin) * 0: (img_width + margin) * 0 + img_width,
(img_height + margin) * 1: (img_height + margin) * 1 + img_height,:] = img
img, loss = kept_filters[11]
stitched_filters[(img_width + margin) * 1: (img_width + margin) * 1 + img_width,
(img_height + margin) * 0: (img_height + margin) * 0 + img_height,:] = img
img, loss = kept_filters[16]
stitched_filters[(img_width + margin) * 1: (img_width + margin) * 1 + img_width,
(img_height + margin) * 1: (img_height + margin) * 1 + img_height,:] = img
stitched_filters=stitched_filters[:,:,0]
# save the result to disk
plt.imsave('stitched_filters_%dx%d_gist.eps' % | |
must be 'same', 'valid' or 'causal'. Got "
+ self.padding
)
wx = self.conv(x)
if self.unsqueeze:
wx = wx.squeeze(1)
if not self.skip_transpose:
wx = wx.transpose(1, -1)
return wx
def _manage_padding(
    self, x, kernel_size: int, dilation: int, stride: int,
):
    """This function performs zero-padding on the time axis
    such that the length is unchanged after the convolution.

    Arguments
    ---------
    x : torch.Tensor
        Input tensor.
    kernel_size : int
        Size of kernel.
    dilation : int
        Dilation used.
    stride : int
        Stride.

    Returns
    -------
    torch.Tensor
        *x* padded on its last (time) axis using ``self.padding_mode``.
    """
    # Detecting input shape
    L_in = x.shape[-1]

    # Time padding
    padding = get_padding_elem(L_in, stride, kernel_size, dilation)

    # Applying padding
    x = F.pad(x, padding, mode=self.padding_mode)

    return x
def _check_input_shape(self, shape):
    """Checks the input shape and returns the number of input channels.

    2d inputs (batch, time) are treated as single-channel and flagged via
    ``self.unsqueeze`` so forward() can add the channel axis; 3d inputs
    use the last axis as channels unless ``self.skip_transpose`` is set,
    in which case axis 1 holds the channels.
    """
    if len(shape) == 2:
        self.unsqueeze = True
        in_channels = 1
    elif self.skip_transpose:
        in_channels = shape[1]
    elif len(shape) == 3:
        in_channels = shape[2]
    else:
        raise ValueError(
            "conv1d expects 2d, 3d inputs. Got " + str(len(shape))
        )
    # Kernel size must be odd (required for symmetric "same" padding).
    if self.kernel_size % 2 == 0:
        raise ValueError(
            "The field kernel size must be an odd number. Got %s."
            % (self.kernel_size)
        )
    return in_channels
class Conv2d(nn.Module):
"""This function implements 2d convolution.
Arguments
---------
out_channels : int
It is the number of output channels.
kernel_size : tuple
Kernel size of the 2d convolutional filters over time and frequency
axis.
input_shape : tuple
The shape of the input. Alternatively use ``in_channels``.
in_channels : int
The number of input channels. Alternatively use ``input_shape``.
stride: int
Stride factor of the 2d convolutional filters over time and frequency
axis.
dilation : int
Dilation factor of the 2d convolutional filters over time and
frequency axis.
padding : str
(same, valid). If "valid", no padding is performed.
If "same" and stride is 1, output shape is same as input shape.
padding_mode : str
This flag specifies the type of padding. See torch.nn documentation
for more information.
groups : int
This option specifies the convolutional groups. See torch.nn
documentation for more information.
bias : bool
If True, the additive bias b is adopted.
Example
-------
>>> inp_tensor = torch.rand([10, 40, 16, 8])
>>> cnn_2d = Conv2d(
... input_shape=inp_tensor.shape, out_channels=5, kernel_size=(7, 3)
... )
>>> out_tensor = cnn_2d(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 40, 16, 5])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape=None,
in_channels=None,
stride=(1, 1),
dilation=(1, 1),
padding="same",
groups=1,
bias=True,
padding_mode="reflect",
):
super().__init__()
# handle the case if some parameter is int
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(stride, int):
stride = (stride, stride)
if isinstance(dilation, int):
dilation = (dilation, dilation)
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.padding_mode = padding_mode
self.unsqueeze = False
if input_shape is None and in_channels is None:
raise ValueError("Must provide one of input_shape or in_channels")
if in_channels is None:
in_channels = self._check_input(input_shape)
# Weights are initialized following pytorch approach
self.conv = nn.Conv2d(
in_channels,
out_channels,
self.kernel_size,
stride=self.stride,
padding=0,
dilation=self.dilation,
groups=groups,
bias=bias,
)
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor (batch, time, channel)
input to convolve. 2d or 4d tensors are expected.
"""
x = x.transpose(1, -1)
if self.unsqueeze:
x = x.unsqueeze(1)
if self.padding == "same":
x = self._manage_padding(
x, self.kernel_size, self.dilation, self.stride
)
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding must be 'same' or 'valid'. Got " + self.padding
)
wx = self.conv(x)
if self.unsqueeze:
wx = wx.squeeze(1)
wx = wx.transpose(1, -1)
return wx
def _manage_padding(
self,
x,
kernel_size: Tuple[int, int],
dilation: Tuple[int, int],
stride: Tuple[int, int],
):
"""This function performs zero-padding on the time and frequency axes
such that their lengths is unchanged after the convolution.
Arguments
---------
x : torch.Tensor
kernel_size : int
dilation : int
stride: int
"""
# Detecting input shape
L_in = x.shape[-1]
# Time padding
padding_time = get_padding_elem(
L_in, stride[-1], kernel_size[-1], dilation[-1]
)
padding_freq = get_padding_elem(
L_in, stride[-2], kernel_size[-2], dilation[-2]
)
padding = padding_time + padding_freq
# Applying padding
x = nn.functional.pad(x, padding, mode=self.padding_mode)
return x
def _check_input(self, shape):
"""Checks the input shape and returns the number of input channels.
"""
if len(shape) == 3:
self.unsqueeze = True
in_channels = 1
elif len(shape) == 4:
in_channels = shape[3]
else:
raise ValueError("Expected 3d or 4d inputs. Got " + len(shape))
# Kernel size must be odd
if self.kernel_size[0] % 2 == 0 or self.kernel_size[1] % 2 == 0:
raise ValueError(
"The field kernel size must be an odd number. Got %s."
% (self.kernel_size)
)
return in_channels
class Conv2dWithConstraint(Conv2d):
    """2d convolution whose kernels are kept inside an L2 max-norm ball.

    Before every forward pass each kernel is renormalized with
    ``torch.renorm`` so its L2 norm never exceeds ``max_norm``; all other
    behavior is inherited from ``Conv2d``.

    Arguments
    ---------
    *args : tuple
        Positional arguments forwarded to ``Conv2d``.
    max_norm : float
        kernel max-norm
    **kwargs : dict
        Keyword arguments forwarded to ``Conv2d``.

    Example
    -------
    >>> inp_tensor = torch.rand([10, 40, 16, 8])
    >>> max_norm = 1
    >>> cnn_2d_constrained = Conv2dWithConstraint(
    ...     in_channels=inp_tensor.shape[-1], out_channels=5, kernel_size=(7, 3)
    ... )
    >>> out_tensor = cnn_2d_constrained(inp_tensor)
    >>> torch.any(torch.norm(cnn_2d_constrained.conv.weight.data, p=2, dim=0)>max_norm)
    tensor(False)
    """

    def __init__(self, *args, max_norm=1, **kwargs):
        # The bound must be stored before the parent builds the conv layer.
        self.max_norm = max_norm
        super().__init__(*args, **kwargs)

    def forward(self, x):
        """Projects the kernels onto the norm ball, then convolves.

        Arguments
        ---------
        x : torch.Tensor (batch, time, channel)
            input to convolve. 2d or 4d tensors are expected.
        """
        constrained = torch.renorm(
            self.conv.weight.data, p=2, dim=0, maxnorm=self.max_norm
        )
        self.conv.weight.data = constrained
        return super().forward(x)
class ConvTranspose1d(nn.Module):
"""This class implements 1d transposed convolution with speechbrain.
Transpose convolution is normally used to perform upsampling.
Arguments
---------
out_channels : int
It is the number of output channels.
kernel_size : int
Kernel size of the convolutional filters.
input_shape : tuple
The shape of the input. Alternatively use ``in_channels``.
in_channels : int
The number of input channels. Alternatively use ``input_shape``.
stride : int
Stride factor of the convolutional filters. When the stride factor > 1,
upsampling in time is performed.
dilation : int
Dilation factor of the convolutional filters.
padding : str or int
To have in output the target dimension, we suggest tuning the kernel
size and the padding properly. We also support the following function
to have some control over the padding and the corresponding ouput
dimensionality.
if "valid", no padding is applied
if "same", padding amount is inferred so that the output size is closest
to possible to input size. Note that for some kernel_size / stride combinations
it is not possible to obtain the exact same size, but we return the closest
possible size.
if "factor", padding amount is inferred so that the output size is closest
to inputsize*stride. Note that for some kernel_size / stride combinations
it is not possible to obtain the exact size, but we return the closest
possible size.
if an integer value is entered, a custom padding is used.
output_padding : int,
Additional size added to one side of the output shape
groups: int
Number of blocked connections from input channels to output channels.
Default: 1
bias: bool
If True, adds a learnable bias to the output
skip_transpose : bool
If False, uses batch x time x channel convention of speechbrain.
If True, uses batch x channel x time convention.
Example
-------
>>> from speechbrain.nnet.CNN import Conv1d, ConvTranspose1d
>>> inp_tensor = torch.rand([10, 12, 40]) #[batch, time, fea]
| |
= df.groupby(['Date', aggregation]).sum()[columns].reset_index()
df_by_date = df1[(df1['Date'] >= start_date_string) & (df1['Date'] <= end_date_string)].groupby([aggregation]).sum()[columns].reset_index()
df_by_date_prior = df1[(df1['Date'] >= prior_start_date_string) & (df1['Date'] <= prior_end_date_string)].groupby([aggregation]).sum()[['Spend TY', 'Sessions - TY', 'Bookings - TY', 'Revenue - TY']].reset_index()
df_by_date_prior.rename(columns={'Spend TY' : 'Spend - LP', 'Sessions - TY' : 'Sessions - LP', 'Bookings - TY' : 'Bookings - LP','Revenue - TY' : 'Revenue - LP'}, inplace=True)
df_by_date_combined = pd.merge(df_by_date, df_by_date_prior, on=[aggregation])
df_by_date_combined.rename(columns={'Birst Category':'Placement type'}, inplace=True)
# Calculate Differences on-the-fly
df_by_date_combined['Spend PoP (%)'] = np.nan
df_by_date_combined['Spend YoY (%)'] = np.nan
df_by_date_combined['Sessions PoP (%)'] = np.nan
df_by_date_combined['Sessions YoY (%)'] = np.nan
df_by_date_combined['Bookings PoP (%)'] = np.nan
df_by_date_combined['Bookings YoY (%)'] = np.nan
df_by_date_combined['Revenue PoP (%)'] = np.nan
df_by_date_combined['Revenue YoY (%)'] = np.nan
df_by_date_combined['Spend PoP (Abs)'] = ((df_by_date_combined['Spend TY'] - df_by_date_combined['Spend - LP']))
df_by_date_combined['Spend PoP (%)'] = np.where((df_by_date_combined['Spend TY'] != 0) & (df_by_date_combined['Spend - LP'] != 0),\
(((df_by_date_combined['Spend TY'] - df_by_date_combined['Spend - LP'])/df_by_date_combined['Spend - LP']) * 100), df_by_date_combined['Spend PoP (%)'])
df_by_date_combined['Spend YoY (%)'] = np.where((df_by_date_combined['Spend TY'] != 0) & (df_by_date_combined['Spend LY'] != 0),\
((df_by_date_combined['Spend TY'] - df_by_date_combined['Spend LY'])/df_by_date_combined['Spend LY']) * 100, df_by_date_combined['Spend YoY (%)'])
df_by_date_combined['Sessions PoP (%)'] = np.where((df_by_date_combined['Sessions - TY'] != 0) & (df_by_date_combined['Sessions - LP'] != 0),\
((df_by_date_combined['Sessions - TY'] - df_by_date_combined['Sessions - LP'])/df_by_date_combined['Sessions - LP']) * 100, df_by_date_combined['Sessions PoP (%)'])
df_by_date_combined['Sessions YoY (%)'] = np.where((df_by_date_combined['Sessions - TY'] != 0) & (df_by_date_combined['Sessions - LY'] != 0),\
((df_by_date_combined['Sessions - TY'] - df_by_date_combined['Sessions - LY'])/df_by_date_combined['Sessions - LY']) * 100, df_by_date_combined['Sessions YoY (%)'])
df_by_date_combined['Bookings PoP (Abs)'] = (df_by_date_combined['Bookings - TY'] - df_by_date_combined['Bookings - LP'])
df_by_date_combined['Bookings YoY (Abs)'] = (df_by_date_combined['Bookings - TY'] - df_by_date_combined['Bookings - LY'])
df_by_date_combined['Bookings PoP (%)'] = np.where((df_by_date_combined['Bookings - TY'] != 0) & (df_by_date_combined['Bookings - LP'] != 0),\
(df_by_date_combined['Bookings - TY'] - df_by_date_combined['Bookings - LP'])/df_by_date_combined['Bookings - LP'] * 100, df_by_date_combined['Bookings PoP (%)'])
df_by_date_combined['Bookings YoY (%)'] = np.where((df_by_date_combined['Bookings - TY'] != 0) & (df_by_date_combined['Bookings - LY'] != 0),\
(df_by_date_combined['Bookings - TY'] - df_by_date_combined['Bookings - LY'])/df_by_date_combined['Bookings - LY'] * 100, df_by_date_combined['Bookings YoY (%)'])
df_by_date_combined['Revenue PoP (Abs)'] = (df_by_date_combined['Revenue - TY'] - df_by_date_combined['Revenue - LP'])
df_by_date_combined['Revenue YoY (Abs)'] = (df_by_date_combined['Revenue - TY'] - df_by_date_combined['Revenue - LY'])
df_by_date_combined['Revenue PoP (%)'] = np.where((df_by_date_combined['Revenue - LP'] != 0) & (df_by_date_combined['Revenue - LP'] != 0),\
(df_by_date_combined['Revenue - TY'] - df_by_date_combined['Revenue - LP'])/df_by_date_combined['Revenue - LP'] * 100, df_by_date_combined['Revenue PoP (%)'])
df_by_date_combined['Revenue YoY (%)'] = np.where((df_by_date_combined['Revenue - TY'] != 0) & (df_by_date_combined['Revenue - LY'] != 0),\
(df_by_date_combined['Revenue - TY'] - df_by_date_combined['Revenue - LY'])/df_by_date_combined['Revenue - LY'] * 100, df_by_date_combined['Revenue YoY (%)'])
# Calculate CPS, CR, CPA
df_by_date_combined['CPS - TY'] = np.nan
df_by_date_combined['CPS - LP'] = np.nan
df_by_date_combined['CPS - LY'] = np.nan
df_by_date_combined['CPS PoP (Abs)'] = np.nan
df_by_date_combined['CPS YoY (Abs)'] = np.nan
df_by_date_combined['CVR - TY'] = np.nan
df_by_date_combined['CVR - LP'] = np.nan
df_by_date_combined['CVR - LY'] = np.nan
df_by_date_combined['CVR PoP (Abs)'] = np.nan
df_by_date_combined['CVR YoY (Abs)'] = np.nan
df_by_date_combined['CPA - TY'] = np.nan
df_by_date_combined['CPA - LP'] = np.nan
df_by_date_combined['CPA - LY'] = np.nan
df_by_date_combined['CPA PoP (Abs)'] = np.nan
df_by_date_combined['CPA YoY (Abs)'] = np.nan
df_by_date_combined['CPS PoP (%)'] = np.nan
df_by_date_combined['CPS YoY (%)'] = np.nan
df_by_date_combined['CVR PoP (%)'] = np.nan
df_by_date_combined['CVR YoY (%)'] = np.nan
df_by_date_combined['CPA PoP (%)' ] = np.nan
df_by_date_combined['CPA YoY (%)'] = np.nan
df_by_date_combined['CPS - TY'] = np.where((df_by_date_combined['Spend TY'] != 0) & (df_by_date_combined['Sessions - TY'] != 0),\
(df_by_date_combined['Spend TY']/df_by_date_combined['Sessions - TY']), df_by_date_combined['CPS - TY'])
df_by_date_combined['CPS - LP'] = np.where((df_by_date_combined['Spend - LP'] != 0) & (df_by_date_combined['Sessions - LP'] != 0),\
(df_by_date_combined['Spend - LP']/df_by_date_combined['Sessions - LP']), df_by_date_combined['CPS - LP'])
df_by_date_combined['CPS PoP (Abs)'] = (df_by_date_combined['CPS - TY'] - df_by_date_combined['CPS - LP'])
df_by_date_combined['CPS PoP (%)'] = np.where((df_by_date_combined['CPS - TY'] != 0) & (df_by_date_combined['CPS - LP'] != 0),\
((df_by_date_combined['CPS - TY'] - df_by_date_combined['CPS - LP'])/df_by_date_combined['CPS - LP']), df_by_date_combined['CPS PoP (%)'])
df_by_date_combined['CPS - LY'] = np.where((df_by_date_combined['Spend LY'] != 0) & (df_by_date_combined['Sessions - LY'] != 0),\
(df_by_date_combined['Spend LY']/df_by_date_combined['Sessions - LY']), df_by_date_combined['CPS - LY'])
df_by_date_combined['CPS YoY (Abs)'] = (df_by_date_combined['CPS - TY'] - df_by_date_combined['CPS - LY'])
df_by_date_combined['CPS YoY (%)'] = np.where((df_by_date_combined['CPS - TY'] != 0) & (df_by_date_combined['CPS - LY'] != 0),\
((df_by_date_combined['CPS - TY'] - df_by_date_combined['CPS - LY'])/df_by_date_combined['CPS - LY']), df_by_date_combined['CPS YoY (%)'] )
df_by_date_combined['CVR - TY'] = np.where(((df_by_date_combined['Bookings - TY'] != 0) & (df_by_date_combined['Sessions - TY'] != 0)), \
(df_by_date_combined['Bookings - TY']/df_by_date_combined['Sessions - TY'] * 100), df_by_date_combined['CVR - TY'])
df_by_date_combined['CVR - LP'] = np.where(((df_by_date_combined['Bookings - LP'] != 0) & (df_by_date_combined['Sessions - LP'] != 0)), \
(df_by_date_combined['Bookings - LP']/df_by_date_combined['Sessions - LP'] * 100), df_by_date_combined['CVR - LP'])
df_by_date_combined['CVR PoP (Abs)'] = np.where((df_by_date_combined['CVR - TY'].notnull() & df_by_date_combined['CVR - LP'].notnull()), \
((df_by_date_combined['CVR - TY'] - df_by_date_combined['CVR - LP'])), df_by_date_combined['CVR PoP (Abs)'])
df_by_date_combined['CVR PoP (%)'] = np.where(((df_by_date_combined['CVR - TY'] != 0) & (df_by_date_combined['CVR - LP'] != 0)), \
((df_by_date_combined['CVR - TY'] - df_by_date_combined['CVR - LP'])/df_by_date_combined['CVR - LP']), df_by_date_combined['CVR PoP (%)'])
df_by_date_combined['CVR - LY'] = np.where(((df_by_date_combined['Bookings - LY'] != 0) & (df_by_date_combined['Sessions - LY'] != 0)), \
(df_by_date_combined['Bookings - LY']/df_by_date_combined['Sessions - LY'] * 100), df_by_date_combined['CVR - LY'])
df_by_date_combined['CVR YoY (Abs)'] = np.where((df_by_date_combined['CVR - TY'].notnull() & df_by_date_combined['CVR - LY'].notnull()), \
((df_by_date_combined['CVR - TY'] - df_by_date_combined['CVR - LY'])), df_by_date_combined['CVR YoY (Abs)'])
df_by_date_combined['CVR YoY (%)'] = np.where(((df_by_date_combined['CVR - TY'] != 0) & (df_by_date_combined['CVR - LY'] != 0)), \
((df_by_date_combined['CVR - TY'] - df_by_date_combined['CVR - LY'])/df_by_date_combined['CVR - LY']), df_by_date_combined['CVR YoY (%)'])
df_by_date_combined['CPA - TY'] = np.where((df_by_date_combined['Spend TY'] != 0) & (df_by_date_combined['Bookings - TY'] != 0), \
(df_by_date_combined['Spend TY']/df_by_date_combined['Bookings - TY']), df_by_date_combined['CPA - TY'])
df_by_date_combined['CPA - LP'] = np.where((df_by_date_combined['Spend - LP'] != 0) & (df_by_date_combined['Bookings - LP'] != 0), \
(df_by_date_combined['Spend - LP']/df_by_date_combined['Bookings - LP']), df_by_date_combined['CPA - LP'])
df_by_date_combined['CPA PoP (Abs)'] = np.where((df_by_date_combined['CPA - TY'] != 0) & (df_by_date_combined['CPA - LP'] != 0), \
(df_by_date_combined['CPA - TY'] - df_by_date_combined['CPA - LP']), df_by_date_combined['CPA PoP (Abs)'])
df_by_date_combined['CPA PoP (%)' ] = np.where((df_by_date_combined['CPA - TY'] != 0) & (df_by_date_combined['CPA - LP'] != 0), \
((df_by_date_combined['CPA - TY'] - df_by_date_combined['CPA - LP'])/df_by_date_combined['CPA - LP']), df_by_date_combined['CPA PoP (%)' ] )
df_by_date_combined['CPA - LY'] = np.where((df_by_date_combined['Spend LY'] != 0) & (df_by_date_combined['Bookings - LY'] != 0), \
(df_by_date_combined['Spend LY']/df_by_date_combined['Bookings - LY']), df_by_date_combined['CPA - LY'])
df_by_date_combined['CPA YoY (Abs)'] = np.where((df_by_date_combined['CPA - TY'] != 0) & (df_by_date_combined['CPA - LY'] != 0), \
(df_by_date_combined['CPA - TY'] - df_by_date_combined['CPA - LY']), df_by_date_combined['CPA YoY (Abs)'])
df_by_date_combined['CPA YoY (%)'] = np.where((df_by_date_combined['CPA - TY'] != 0) & (df_by_date_combined['CPA - LY'] != 0), \
(df_by_date_combined['CPA - TY'] - df_by_date_combined['CPA - LY'])/df_by_date_combined['CPA - LY'], df_by_date_combined['CPA YoY (%)'])
df_by_date_combined['TY Start Date'] = start_date_string
df_by_date_combined['TY End Date'] = end_date_string
df_by_date_combined['LP Start Date'] = prior_start_date_string
df_by_date_combined['LP End Date'] = prior_end_date_string
last_years_start_date = start_date - timedelta(364)
last_years_start_date_string = datetime.strftime(last_years_start_date, '%Y-%m-%d')
last_years_end_date = end_date - timedelta(364)
last_years_end_date_string = datetime.strftime(last_years_end_date, '%Y-%m-%d')
df_by_date_combined['LY Start Date'] = last_years_start_date_string
df_by_date_combined['LY End Date'] = last_years_end_date_string
# Rearrange the columns
df_by_date_combined_dt = df_by_date_combined[[
'Placement type', 'TY Start Date', 'TY End Date', 'LP Start Date', 'LP End Date', 'LY Start Date', 'LY End Date',
'Spend TY', 'Spend - LP', 'Spend PoP (Abs)', 'Spend PoP (%)', 'Spend LY', 'Spend YoY (%)',
'Sessions - TY', 'Sessions - LP', 'Sessions PoP (%)', 'Sessions - LY', 'Sessions YoY (%)',
'Bookings - TY', 'Bookings - LP', 'Bookings PoP (%)', 'Bookings PoP (Abs)', 'Bookings - LY', 'Bookings YoY (%)', 'Bookings YoY (Abs)',
'Revenue - TY', 'Revenue - LP', 'Revenue PoP (Abs)', 'Revenue PoP (%)', 'Revenue - LY', 'Revenue YoY (%)', 'Revenue YoY (Abs)',
'CPS - TY',
'CPS - LP', 'CPS PoP (Abs)', 'CPS PoP (%)',
'CPS - LY', 'CPS YoY (Abs)', 'CPS YoY (%)',
'CVR - TY',
'CVR - LP', 'CVR PoP (Abs)', 'CVR PoP (%)',
'CVR - LY', 'CVR YoY (Abs)', 'CVR YoY (%)',
'CPA - TY',
'CPA - LP', 'CPA PoP (Abs)', 'CPA PoP (%)',
'CPA - LY', 'CPA YoY (Abs)', 'CPA YoY (%)'
]]
download_df_1 = df_by_date_combined_dt
return download_df_1
# Second Data Table Update Function
def update_second_datatable(start_date, end_date, category, aggregation):
if start_date is not None:
start_date = dt.strptime(start_date, '%Y-%m-%d')
start_date_string = start_date.strftime('%Y-%m-%d')
if end_date is not None:
end_date = dt.strptime(end_date, '%Y-%m-%d')
end_date_string = end_date.strftime('%Y-%m-%d')
days_selected = (end_date - start_date).days
prior_start_date = start_date - timedelta(days_selected + 1)
prior_start_date_string = datetime.strftime(prior_start_date, '%Y-%m-%d')
prior_end_date = end_date - timedelta(days_selected + 1)
prior_end_date_string = datetime.strftime(prior_end_date, '%Y-%m-%d')
if aggregation == 'Placement type':
df1 = df[(df['Category'] == category)].groupby(['Date', aggregation]).sum()[columns].reset_index()
df_by_date = df1[(df1['Date'] >= start_date_string) & (df1['Date'] <= end_date_string)].groupby([aggregation]).sum()[columns].reset_index()
df_by_date_prior = df1[(df1['Date'] >= prior_start_date_string) & (df1['Date'] <= prior_end_date_string)].groupby([aggregation]).sum()[['Spend TY', 'Sessions - TY', 'Bookings - TY', 'Revenue - TY']].reset_index()
df_by_date_prior.rename(columns={'Spend TY' : 'Spend - LP', 'Sessions - TY' : 'Sessions - LP', 'Bookings - TY' : 'Bookings - LP','Revenue - TY' : 'Revenue - LP'}, inplace=True)
df_by_date_combined = pd.merge(df_by_date, df_by_date_prior, on=[aggregation])
elif aggregation == 'GA Category':
df1 = df.groupby(['Date', aggregation]).sum()[columns].reset_index()
df_by_date = df1[(df1['Date'] >= start_date_string) & (df1['Date'] <= end_date_string)].groupby([aggregation]).sum()[columns].reset_index()
df_by_date_prior = df1[(df1['Date'] >= prior_start_date_string) & (df1['Date'] <= prior_end_date_string)].groupby([aggregation]).sum()[['Spend TY', 'Sessions - TY', 'Bookings - TY', 'Revenue - TY']].reset_index()
df_by_date_prior.rename(columns={'Spend TY' : 'Spend | |
164, 365, 205, 548, 270, 256, 82, 26,
227, 69, 387, 633, 762, 694, 385, 92, 542, 608, 571,
825, 541, 533, 421, 666, 332, 113, 684, 892, 28, 979,
976, 706, 457, 185, 895, 310, 106, 142, 45, 230, 65,
67, 201, 738, 910, 523, 893, 189, 97, 466, 258, 382,
61, 105, 774, 572, 620, 737, 871, 900, 799, 516, 203,
294, 616, 223, 90, 715, 888, 274, 276, 82, 814, 827,
110, 436, 254, 61, 398, 300, 14, 365, 970, 238, 860,
431, 794, 301, 832, 331, 401, 375, 939, 181])
def test_snail_024(self):
self.assertEqual(snail([[733]]), [733])
def test_snail_025(self):
self.assertEqual(snail([[776, 298, 262, 318, 957, 178, 428, 566, 345,
169, 434, 817, 494, 398, 648, 512, 314, 465],
[843, 563, 885, 994, 556, 571, 786, 143, 731,
828, 992, 701, 211, 989, 361, 904, 168, 175],
[153, 906, 802, 413, 532, 445, 864, 275, 891,
169, 899, 36, 278, 126, 691, 437, 199, 30],
[449, 454, 466, 728, 660, 493, 312, 492, 198,
771, 359, 787, 302, 121, 292, 282, 739, 958],
[798, 332, 106, 365, 874, 905, 831, 462, 88,
380, 443, 602, 925, 421, 564, 986, 446, 580],
[78, 187, 603, 551, 283, 789, 262, 542, 551,
422, 581, 100, 108, 574, 249, 473, 606, 83],
[359, 14, 876, 400, 826, 868, 779, 67, 946, 568,
826, 561, 582, 815, 72, 771, 851, 21],
[41, 860, 746, 556, 979, 831, 335, 126, 212,
701, 18, 318, 725, 944, 65, 802, 182, 433],
[746, 66, 844, 140, 842, 49, 547, 451, 436, 434,
72, 973, 2, 212, 311, 691, 546, 176],
[630, 510, 740, 7, 888, 439, 231, 788, 524, 270,
126, 558, 969, 576, 166, 393, 856, 548],
[538, 867, 432, 194, 149, 678, 379, 801, 182,
738, 209, 161, 950, 810, 869, 627, 395, 1000],
[523, 863, 18, 340, 416, 658, 734, 699, 538, 62,
740, 808, 202, 69, 895, 785, 882, 368],
[997, 453, 658, 870, 438, 799, 870, 257, 681,
887, 109, 40, 178, 475, 550, 283, 90, 167],
[243, 774, 470, 223, 518, 660, 730, 117, 885,
377, 305, 744, 622, 484, 789, 498, 464, 837],
[753, 492, 372, 529, 47, 461, 160, 259, 282,
983, 73, 192, 366, 101, 307, 257, 89, 968],
[135, 25, 644, 83, 479, 794, 845, 60, 310, 821,
239, 247, 713, 343, 405, 407, 308, 63],
[297, 590, 149, 649, 317, 843, 23, 652, 69, 819,
886, 381, 411, 781, 477, 672, 822, 185],
[642, 274, 676, 957, 888, 269, 954, 78, 8, 944,
730, 846, 83, 218, 865, 327, 705, 629]]),
[776, 298, 262, 318, 957, 178, 428, 566, 345, 169, 434,
817, 494, 398, 648, 512, 314, 465, 175, 30, 958, 580,
83, 21, 433, 176, 548, 1000, 368, 167, 837, 968, 63,
185, 629, 705, 327, 865, 218, 83, 846, 730, 944, 8,
78, 954, 269, 888, 957, 676, 274, 642, 297, 135, 753,
243, 997, 523, 538, 630, 746, 41, 359, 78, 798, 449,
153, 843, 563, 885, 994, 556, 571, 786, 143, 731, 828,
992, 701, 211, 989, 361, 904, 168, 199, 739, 446, 606,
851, 182, 546, 856, 395, 882, 90, 464, 89, 308, 822,
672, 477, 781, 411, 381, 886, 819, 69, 652, 23, 843,
317, 649, 149, 590, 25, 492, 774, 453, 863, 867, 510,
66, 860, 14, 187, 332, 454, 906, 802, 413, 532, 445,
864, 275, 891, 169, 899, 36, 278, 126, 691, 437, 282,
986, 473, 771, 802, 691, 393, 627, 785, 283, 498, 257,
407, 405, 343, 713, 247, 239, 821, 310, 60, 845, 794,
479, 83, 644, 372, 470, 658, 18, 432, 740, 844, 746,
876, 603, 106, 466, 728, 660, 493, 312, 492, 198, 771,
359, 787, 302, 121, 292, 564, 249, 72, 65, 311, 166,
869, 895, 550, 789, 307, 101, 366, 192, 73, 983, 282,
259, 160, 461, 47, 529, 223, 870, 340, 194, 7, 140,
556, 400, 551, 365, 874, 905, 831, 462, 88, 380, 443,
602, 925, 421, 574, 815, 944, 212, 576, 810, 69, 475,
484, 622, 744, 305, 377, 885, 117, 730, 660, 518, 438,
416, 149, 888, 842, 979, 826, 283, 789, 262, 542, 551,
422, 581, 100, 108, 582, 725, 2, 969, 950, 202, 178,
40, 109, 887, 681, 257, 870, 799, 658, 678, 439, 49,
831, 868, 779, 67, 946, 568, 826, 561, 318, 973, 558,
161, 808, 740, 62, 538, 699, 734, 379, 231, 547, 335,
126, 212, 701, 18, 72, 126, 209, 738, 182, 801, 788,
451, 436, 434, 270, 524])
def test_snail_026(self):
self.assertEqual(snail(
[[348, 421, 186, 172, 681, 428, 955, 583, 1000, 631, 543],
[751, 963, 968, 739, 248, 380, 307, 61, 874, 248, 908],
[803, 186, 336, 83, 196, 775, 898, 148, 43, 24, 993],
[274, 904, 695, 140, 582, 766, 810, 824, 717, 591, 136],
[632, 95, 397, 516, 457, 937, 220, 150, 971, 391, 283],
[157, 543, 946, 629, 703, 392, 816, 292, 935, 107, 289],
[794, 824, 923, 134, 486, 165, 956, 714, 775, 265, 654],
[261, 551, 238, 976, 460, 921, 501, 439, 811, 202, 916],
[817, 671, 357, 391, 181, 639, 191, 534, 945, 204, 249],
[761, 208, 763, 142, 330, 832, 998, 706, 301, 117, 615],
[977, 386, 105, 274, 166, 993, 248, 316, 340, 378, 886]]),
[348, 421, 186, 172, 681, 428, 955, 583, 1000, 631,
543, 908, 993, 136, 283, 289, 654, 916, 249, 615, 886,
378, 340, 316, 248, 993, 166, 274, 105, 386, 977, 761,
817, 261, 794, 157, 632, 274, 803, 751, 963, 968, 739,
248, 380, 307, 61, 874, 248, 24, 591, 391, 107, 265,
202, 204, 117, 301, 706, 998, 832, 330, 142, 763, 208,
671, 551, 824, 543, 95, 904, 186, 336, 83, 196, 775,
898, 148, 43, 717, 971, 935, 775, 811, 945, 534, 191,
639, 181, 391, 357, 238, 923, 946, 397, 695, 140, 582,
766, 810, 824, 150, 292, 714, 439, 501, 921, 460, 976,
134, 629, 516, 457, 937, 220, 816, 956, 165, 486, 703,
392])
def test_snail_027(self):
self.assertEqual(snail([[279, 149, 635, 162, 437, 751, 73, 382, 918,
994, 660, 832, 818, 312, 381, 306, 375, 87,
245],
[54, 599, 406, 599, 951, 888, 231, 723, 287,
692, 617, 275, 719, 445, 361, 954, 583, 951,
162],
[966, 522, 282, 502, 739, 889, 323, 635, 486,
477, 231, 502, 471, 524, 566, 189, 91, 694,
768],
[164, 463, 961, 850, 665, 898, 53, 331, 507, 69,
164, 99, 435, 418, 104, 868, 998, 186, 161],
[138, 179, 498, 106, 803, 338, 361, 631, 370,
805, 156, 583, 102, 486, 989, 468, 772, 491,
656],
[450, 129, 723, 662, 665, 9, 227, 23, 222, 199,
111, 556, 897, 4, 81, 665, 108, 906, 457],
[442, 235, 249, 838, 26, 861, 927, 55, 260, 9,
140, 495, 478, 544, 693, 849, 727, 448, 421],
[812, 736, 968, 113, 205, 680, 936, 699, 733,
830, 760, 301, 891, 701, 530, 34, 234, 764,
136],
[191, 591, 992, 189, 987, 162, 784, 566, 788,
983, 584, 919, 410, 408, 225, 778, 200, 854,
852],
[424, 5, 610, 711, 796, 952, 899, 192, 643, 399,
953, 720, 406, 324, 706, 943, 139, 87, 668],
[412, 431, 428, 777, 880, 971, 931, 966, 281,
510, 63, 1000, 115, 833, 746, 390, 333, 636,
671],
[249, 695, 992, 731, 15, 843, 567, 332, 762,
942, 804, 601, 83, 738, 165, 517, 258, 171,
227],
[976, 808, 967, 898, 78, 231, 563, 182, 696,
611, 421, 809, 6, 954, 656, 338, 422, 777,
172],
[839, 795, 83, 698, 557, 584, 452, 382, 89, 858,
886, 514, 671, 669, 827, 78, 160, 694, 784],
[1000, 249, 558, 794, 891, 668, 564, 399, 18,
452, 938, 516, 359, 2, 140, | |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Menu, Panel, UIList
from bl_ui.utils import PresetPanel
class RENDER_PT_presets(PresetPanel, Panel):
    """Preset popover for the render properties, with an add operator."""

    preset_subdir = "render"
    preset_operator = "script.execute_preset"
    preset_add_operator = "render.preset_add"
    bl_label = "Render Presets"
class RENDER_PT_ffmpeg_presets(PresetPanel, Panel):
    """Preset popover listing FFMPEG encoding presets (run as scripts)."""

    preset_operator = "script.python_file_run"
    preset_subdir = "ffmpeg"
    bl_label = "FFMPEG Presets"
class RENDER_MT_framerate_presets(Menu):
    """Menu exposing the frame-rate presets via the standard preset draw."""

    draw = Menu.draw_preset
    preset_subdir = "framerate"
    preset_operator = "script.execute_preset"
    bl_label = "Frame Rate Presets"
class RenderOutputButtonsPanel:
    """Mixin placing panels in the Properties editor's Output context.

    COMPAT_ENGINES must be defined in each subclass; external engines can
    add themselves there to opt in.
    """

    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "output"

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES
class RENDER_PT_dimensions(RenderOutputButtonsPanel, Panel):
    """Dimensions panel: resolution, aspect, border, frame range and rate."""
    bl_label = "Dimensions"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}

    # Class-level cache for _draw_framerate_label: the last args tuple seen
    # and the (label, show) result computed from it.
    _frame_rate_args_prev = None
    # Lazily-resolved reference to bpy.types.RENDER_MT_framerate_presets.
    _preset_class = None

    def draw_header_preset(self, _context):
        """Adds the render-preset popover to the panel header."""
        RENDER_PT_presets.draw_panel_header(self.layout)

    @staticmethod
    def _draw_framerate_label(*args):
        """Returns (label_text, show_framerate) for (fps, fps_base, preset_label)."""
        # avoids re-creating text string each draw
        if RENDER_PT_dimensions._frame_rate_args_prev == args:
            return RENDER_PT_dimensions._frame_rate_ret

        fps, fps_base, preset_label = args
        if fps_base == 1.0:
            fps_rate = round(fps)
        else:
            fps_rate = round(fps / fps_base, 2)

        # TODO: Change the following to iterate over existing presets
        custom_framerate = (fps_rate not in {23.98, 24, 25, 29.97, 30, 50, 59.94, 60})

        if custom_framerate is True:
            fps_label_text = f"Custom ({fps_rate!r} fps)"
            show_framerate = True
        else:
            fps_label_text = f"{fps_rate!r} fps"
            show_framerate = (preset_label == "Custom")

        # Store both the args and the result so the next identical draw
        # returns the cached tuple without recomputing.
        RENDER_PT_dimensions._frame_rate_args_prev = args
        RENDER_PT_dimensions._frame_rate_ret = args = (fps_label_text, show_framerate)
        return args

    @staticmethod
    def draw_framerate(layout, sub, rd):
        """Draws the frame-rate preset menu; for custom rates also the
        fps / fps_base number fields."""
        if RENDER_PT_dimensions._preset_class is None:
            RENDER_PT_dimensions._preset_class = bpy.types.RENDER_MT_framerate_presets

        args = rd.fps, rd.fps_base, RENDER_PT_dimensions._preset_class.bl_label
        fps_label_text, show_framerate = RENDER_PT_dimensions._draw_framerate_label(*args)

        sub.menu("RENDER_MT_framerate_presets", text=fps_label_text)

        if show_framerate:
            col = layout.column(align=True)
            col.prop(rd, "fps")
            col.prop(rd, "fps_base", text="Base")

    def draw(self, context):
        """Draws resolution, pixel aspect, border, frame range and rate."""
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        scene = context.scene
        rd = scene.render

        col = layout.column(align=True)
        col.prop(rd, "resolution_x", text="Resolution X")
        col.prop(rd, "resolution_y", text="Y")
        col.prop(rd, "resolution_percentage", text="%")

        col = layout.column(align=True)
        col.prop(rd, "pixel_aspect_x", text="Aspect X")
        col.prop(rd, "pixel_aspect_y", text="Y")

        row = layout.row(align=False)
        row.use_property_split = False
        row.prop(rd, "use_border")
        if rd.use_border:
            # Crop option only makes sense once a border region is enabled.
            row.prop(rd, "use_crop_to_border")

        col = layout.column(align=True)
        col.prop(scene, "frame_start", text="Frame Start")
        col.prop(scene, "frame_end", text="End")
        col.prop(scene, "frame_step", text="Step")

        col = layout.split()
        col.alignment = 'RIGHT'
        col.label(text="Frame Rate")
        self.draw_framerate(layout, col, rd)
class RENDER_PT_frame_remapping(RenderOutputButtonsPanel, Panel):
    """Sub-panel remapping old frame counts to new ones (time stretching)."""

    bl_label = "Time Remapping"
    bl_parent_id = "RENDER_PT_dimensions"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        render = context.scene.render
        column = layout.column(align=True)
        column.prop(render, "frame_map_old", text="Old")
        column.prop(render, "frame_map_new", text="New")
class RENDER_PT_post_processing(RenderOutputButtonsPanel, Panel):
    """Post-processing toggles: compositing, sequencer, and dithering."""

    bl_label = "Post Processing"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True

        render = context.scene.render
        grid = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=False)
        grid.column().prop(render, "use_compositing")
        grid.column().prop(render, "use_sequencer")

        layout.prop(render, "dither_intensity", text="Dither", slider=True)
class RENDER_PT_stamp(RenderOutputButtonsPanel, Panel):
    """Metadata panel: toggles for each field stamped into render output."""

    bl_label = "Metadata"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = False
        layout.use_property_decorate = False  # No animation.

        rd = context.scene.render
        grid = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=False)

        # Each toggle gets its own column in the grid, in this order.
        toggles = [
            ("use_stamp_date", "Date"),
            ("use_stamp_time", "Time"),
            ("use_stamp_render_time", "Render Time"),
            ("use_stamp_frame", "Frame"),
            ("use_stamp_frame_range", "Frame Range"),
            ("use_stamp_memory", "Memory"),
            ("use_stamp_hostname", "Hostname"),
            ("use_stamp_camera", "Camera"),
            ("use_stamp_lens", "Lens"),
            ("use_stamp_scene", "Scene"),
            ("use_stamp_marker", "Marker"),
            ("use_stamp_filename", "Filename"),
            ("use_stamp_sequencer_strip", "Strip Name"),
        ]
        # Strip metadata pass-through is only relevant with the sequencer on.
        if rd.use_sequencer:
            toggles.append(("use_stamp_strip_meta", "Use Strip Metadata"))

        for prop_name, label in toggles:
            grid.column().prop(rd, prop_name, text=label)
class RENDER_PT_stamp_note(RenderOutputButtonsPanel, Panel):
    """Sub-panel holding the free-form metadata note text."""

    bl_label = "Note"
    bl_parent_id = "RENDER_PT_stamp"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}

    def draw_header(self, context):
        render = context.scene.render
        self.layout.prop(render, "use_stamp_note", text="")

    def draw(self, context):
        render = context.scene.render
        layout = self.layout
        # Grey out the text field while the note toggle is off.
        layout.active = render.use_stamp_note
        layout.prop(render, "stamp_note_text", text="")
class RENDER_PT_stamp_burn(RenderOutputButtonsPanel, Panel):
    """Sub-panel controlling burning the metadata text into the image."""

    bl_label = "Burn Into Image"
    bl_parent_id = "RENDER_PT_stamp"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}

    def draw_header(self, context):
        render = context.scene.render
        self.layout.prop(render, "use_stamp", text="")

    def draw(self, context):
        render = context.scene.render
        layout = self.layout
        layout.use_property_split = True

        body = layout.column()
        # Grey out the settings while burn-in is disabled.
        body.active = render.use_stamp
        body.prop(render, "stamp_font_size", text="Font Size")
        body.column().prop(render, "stamp_foreground", slider=True)
        body.column().prop(render, "stamp_background", slider=True)
        body.prop(render, "use_stamp_labels", text="Include Labels")
class RENDER_PT_output(RenderOutputButtonsPanel, Panel):
    """Output panel: destination path and image format settings."""

    bl_label = "Output"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = False
        layout.use_property_decorate = False  # No animation.

        render = context.scene.render
        layout.prop(render, "filepath", text="")
        # Color management lives in its own panel, so it is hidden here.
        layout.template_image_settings(render.image_settings, color_management=False)
# Options subpanel for the output panel
class RENDER_PT_output_options(RenderOutputButtonsPanel, Panel):
    """Options sub-panel of the Output panel (overwrite/placeholder/etc.)."""
    bl_label = "Options"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    bl_parent_id = "RENDER_PT_output"
    bl_options = {'DEFAULT_CLOSED'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = False
        layout.use_property_decorate = False  # No animation.
        rd = context.scene.render
        image_settings = rd.image_settings
        flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=False)
        col = flow.column()
        # Overwrite/placeholder only make sense for per-frame image output,
        # not single movie containers.
        col.active = not rd.is_movie_format
        col.prop(rd, "use_overwrite")
        col = flow.column()
        col.active = not rd.is_movie_format
        col.prop(rd, "use_placeholder")
        col = flow.column()
        col.prop(rd, "use_file_extension")
        col = flow.column()
        col.prop(rd, "use_render_cache")
class RENDER_PT_output_views(RenderOutputButtonsPanel, Panel):
    """Views sub-panel; only shown when multiview (stereo) rendering is on."""
    bl_label = "Views"
    bl_parent_id = "RENDER_PT_output"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    @classmethod
    def poll(cls, context):
        # Hide the panel entirely unless multiview is enabled.
        rd = context.scene.render
        return rd.use_multiview
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = False
        layout.use_property_decorate = False  # No animation.
        rd = context.scene.render
        layout.template_image_views(rd.image_settings)
class RENDER_PT_encoding(RenderOutputButtonsPanel, Panel):
    """FFmpeg encoding sub-panel; only shown for movie file formats."""
    bl_label = "Encoding"
    bl_parent_id = "RENDER_PT_output"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw_header_preset(self, _context):
        RENDER_PT_ffmpeg_presets.draw_panel_header(self.layout)
    @classmethod
    def poll(cls, context):
        # Only relevant for FFmpeg-backed movie container formats.
        rd = context.scene.render
        return rd.image_settings.file_format in {'FFMPEG', 'XVID', 'H264', 'THEORA'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        rd = context.scene.render
        ffmpeg = rd.ffmpeg
        # Consistency fix: use the local alias (was `rd.ffmpeg` for this one call).
        layout.prop(ffmpeg, "format")
        layout.prop(ffmpeg, "use_autosplit")
class RENDER_PT_encoding_video(RenderOutputButtonsPanel, Panel):
    """Video codec sub-panel of the Encoding panel."""
    bl_label = "Video"
    bl_parent_id = "RENDER_PT_encoding"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    @classmethod
    def poll(cls, context):
        # Same visibility condition as the parent Encoding panel.
        rd = context.scene.render
        return rd.image_settings.file_format in {'FFMPEG', 'XVID', 'H264', 'THEORA'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        self.draw_vcodec(context)
    def draw_vcodec(self, context):
        """Video codec options."""
        layout = self.layout
        ffmpeg = context.scene.render.ffmpeg
        # Container formats that allow choosing a codec separately.
        needs_codec = ffmpeg.format in {'AVI', 'QUICKTIME', 'MKV', 'OGG', 'MPEG4'}
        if needs_codec:
            layout.prop(ffmpeg, "codec")
        if needs_codec and ffmpeg.codec == 'NONE':
            return
        if ffmpeg.codec in {'DNXHD'}:
            layout.prop(ffmpeg, "use_lossless_output")
        # Output quality: CRF is only meaningful for these codecs.
        use_crf = needs_codec and ffmpeg.codec in {'H264', 'MPEG4', 'WEBM'}
        if use_crf:
            layout.prop(ffmpeg, "constant_rate_factor")
        # Encoding speed
        layout.prop(ffmpeg, "ffmpeg_preset")
        # I-frames
        layout.prop(ffmpeg, "gopsize")
        # B-Frames: value field is only editable when the toggle is on.
        split = layout.split(factor=0.5)
        split.prop(ffmpeg, "use_max_b_frames", text="Max B-frames")
        pbox = split.column()
        pbox.prop(ffmpeg, "max_b_frames", text="")
        pbox.enabled = ffmpeg.use_max_b_frames
        # Explicit bitrate/mux settings apply when CRF mode is unavailable or off.
        if not use_crf or ffmpeg.constant_rate_factor == 'NONE':
            col = layout.column()
            sub = col.column(align=True)
            sub.prop(ffmpeg, "video_bitrate")
            sub.prop(ffmpeg, "minrate", text="Minimum")
            sub.prop(ffmpeg, "maxrate", text="Maximum")
            col.prop(ffmpeg, "buffersize", text="Buffer")
            col.separator()
            col.prop(ffmpeg, "muxrate", text="Mux Rate")
            col.prop(ffmpeg, "packetsize", text="Mux Packet Size")
class RENDER_PT_encoding_audio(RenderOutputButtonsPanel, Panel):
    """Audio codec sub-panel of the Encoding panel."""
    bl_label = "Audio"
    bl_parent_id = "RENDER_PT_encoding"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    @classmethod
    def poll(cls, context):
        rd = context.scene.render
        return rd.image_settings.file_format in {'FFMPEG', 'XVID', 'H264', 'THEORA'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        rd = context.scene.render
        ffmpeg = rd.ffmpeg
        # MP3 implies its codec, so the selector is hidden for it.
        if ffmpeg.format != 'MP3':
            layout.prop(ffmpeg, "audio_codec", text="Audio Codec")
        if ffmpeg.audio_codec != 'NONE':
            layout.prop(ffmpeg, "audio_bitrate")
            layout.prop(ffmpeg, "audio_volume", slider=True)
class RENDER_UL_renderviews(UIList):
    """UIList drawing one row per stereo/multiview render view."""
    def draw_item(self, _context, layout, _data, item, icon, _active_data, _active_propname, index):
        view = item
        if self.layout_type in {'DEFAULT', 'COMPACT'}:
            # The built-in "left"/"right" views are fixed: name is not editable.
            # `icon + (not view.use)` picks the greyed-out variant when disabled.
            if view.name in {"left", "right"}:
                layout.label(text=view.name, icon_value=icon + (not view.use))
            else:
                layout.prop(view, "name", text="", index=index, icon_value=icon, emboss=False)
            layout.prop(view, "use", text="", index=index)
        elif self.layout_type == 'GRID':
            layout.alignment = 'CENTER'
            layout.label(text="", icon_value=icon + (not view.use))
class RENDER_PT_stereoscopy(RenderOutputButtonsPanel, Panel):
bl_label = "Stereoscopy"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
bl_options = {'DEFAULT_CLOSED'}
def draw_header(self, context):
rd = context.scene.render
self.layout.prop(rd, "use_multiview", text="")
def draw(self, context):
layout = self.layout
scene = context.scene
rd = scene.render
rv = rd.views.active
layout.active = rd.use_multiview
basic_stereo = rd.views_format == | |
<gh_stars>0
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import torch
from torch import nn
from torch.nn import functional as F
from einops import rearrange
from nle import nethack
from .util import id_pairs_table
import numpy as np
# Vocabulary sizes used by the embedding layers below.
NUM_GLYPHS = nethack.MAX_GLYPH  # number of distinct NetHack glyph ids
NUM_FEATURES = nethack.BLSTATS_SHAPE[0]  # number of bottom-line stats
PAD_CHAR = 0  # padding value for message characters
NUM_CHARS = 256  # message characters are raw bytes
def get_action_space_mask(action_space, reduced_action_space):
    """Return a float tensor with 1.0 where the action is in the reduced set."""
    allowed = set(reduced_action_space)
    flags = np.array([1 if act in allowed else 0 for act in action_space])
    return torch.Tensor(flags)
def conv_outdim(i_dim, k, padding=0, stride=1, dilation=1):
    """Return the dimension after applying a convolution along one axis."""
    # Effective extent of a dilated kernel: dilation * (k - 1) + 1.
    span = dilation * (k - 1) + 1
    return int(1 + (i_dim + 2 * padding - span) / stride)
def select(embedding_layer, x, use_index_select):
    """Use index select instead of default forward to possible speed up embedding."""
    if not use_index_select:
        return embedding_layer(x)
    # Flatten x to 1-D for index_select, then restore the leading dims of x
    # with the embedding dimension appended.
    flat = embedding_layer.weight.index_select(0, x.reshape(-1))
    return flat.reshape(x.shape + (-1,))
class NetHackNet(nn.Module):
    """This base class simply provides a skeleton for running with torchbeast."""

    AgentOutput = collections.namedtuple("AgentOutput", "action policy_logits baseline")

    def __init__(self):
        super(NetHackNet, self).__init__()
        # Running reward statistics (count starts near zero to avoid division
        # by zero before the first batch arrives).
        self.register_buffer("reward_sum", torch.zeros(()))
        self.register_buffer("reward_m2", torch.zeros(()))
        self.register_buffer("reward_count", torch.zeros(()).fill_(1e-8))

    def forward(self, inputs, core_state):
        raise NotImplementedError

    def initial_state(self, batch_size=1):
        """Recurrent state for a fresh episode; stateless by default."""
        return ()

    @torch.no_grad()
    def update_running_moments(self, reward_batch):
        """Fold a batch of rewards into the running mean/variance buffers."""
        batch_count = len(reward_batch)
        batch_sum = torch.sum(reward_batch)
        batch_mean = batch_sum / batch_count
        prev_mean = self.reward_sum / self.reward_count
        # Parallel-variance correction term for merging two sample sets.
        correction = (
            (self.reward_count * batch_count)
            / (self.reward_count + batch_count)
            * (batch_mean - prev_mean) ** 2
        )
        batch_m2 = torch.sum((reward_batch - batch_mean) ** 2) + correction
        self.reward_count += batch_count
        self.reward_sum += batch_sum
        self.reward_m2 += batch_m2

    @torch.no_grad()
    def get_running_std(self):
        """Returns standard deviation of the running mean of the reward."""
        return torch.sqrt(self.reward_m2 / self.reward_count)
class BaselineNet(NetHackNet):
    """This model combines the encodings of the glyphs, top line message and
    blstats into a single fixed-size representation, which is then passed to
    an LSTM core before generating a policy and value head for use in an IMPALA
    like architecture.
    This model was based on 'neurips2020release' tag on the NLE repo, itself
    based on Kuttler et al, 2020
    The NetHack Learning Environment
    https://arxiv.org/abs/2006.13760
    """

    def __init__(self, observation_shape, action_space, flags, device):
        super(BaselineNet, self).__init__()
        self.flags = flags
        self.corpus_attention = flags.corpus_attention
        self.attention_dim = flags.attention_dim
        # NOTE(review): hardcoded absolute path ties the model to one machine;
        # consider sourcing this from flags.
        self.register_buffer('doc_embeddings', torch.load("/home/ekarais/nethack/nlp4nethack/nethack_baselines/torchbeast/models/reduced_doc_embeddings.pt"))
        embed_size = 128  # dimensionality of the loaded document embeddings
        self.attention = nn.MultiheadAttention(self.attention_dim, 4, kdim=embed_size, vdim=embed_size)
        self.observation_shape = observation_shape
        self.num_actions = len(action_space)
        self.H = observation_shape[0]
        self.W = observation_shape[1]
        self.use_lstm = flags.use_lstm
        self.h_dim = flags.hidden_dim
        # GLYPH + CROP MODEL
        self.glyph_model = GlyphEncoder(flags, self.H, self.W, flags.crop_dim, device)
        # MESSAGING MODEL
        self.msg_model = MessageEncoder(
            flags.msg.hidden_dim, flags.msg.embedding_dim, device
        )
        # BLSTATS MODEL
        self.blstats_model = BLStatsEncoder(NUM_FEATURES, flags.embedding_dim)
        out_dim = (
            self.blstats_model.hidden_dim
            + self.glyph_model.hidden_dim
            + self.msg_model.hidden_dim
        )
        self.fc = nn.Sequential(
            nn.Linear(out_dim, self.h_dim),
            nn.ReLU(),
            nn.Linear(self.h_dim, self.h_dim),
            nn.ReLU(),
        )
        if self.use_lstm:
            self.core = nn.LSTM(self.h_dim, self.h_dim, num_layers=1)
        if self.corpus_attention:
            # Heads consume the core output concatenated with attention output.
            self.policy = nn.Linear(self.h_dim + self.attention_dim, self.num_actions)
            self.baseline = nn.Linear(self.h_dim + self.attention_dim, 1)
        else:
            self.policy = nn.Linear(self.h_dim, self.num_actions)
            self.baseline = nn.Linear(self.h_dim, 1)
        if flags.restrict_action_space:
            reduced_space = nethack.USEFUL_ACTIONS
            logits_mask = get_action_space_mask(action_space, reduced_space)
            # Buffer rather than Parameter: moves with the module, never trained.
            self.register_buffer('policy_logits_mask', logits_mask)

    def initial_state(self, batch_size=1):
        """Zeroed (h, c) LSTM state for `batch_size` environments.

        NOTE(review): assumes use_lstm; `self.core` does not exist otherwise.
        """
        return tuple(
            torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)
            for _ in range(2)
        )

    def forward(self, glyphs, chars, colors, specials, blstats, message, done, core_state, learning=False):
        """Encode observations and produce (output dict, new core state, blstats embedding).

        `output` holds policy_logits [T, B, A], baseline [T, B] and action [T, B].
        """
        T, B, H, W = glyphs.shape
        reps = []
        # -- [B' x K] ; B' == (T x B)
        glyphs_rep = self.glyph_model(glyphs, chars, colors, specials, blstats)
        reps.append(glyphs_rep)
        # -- [B' x K]
        char_rep = self.msg_model(message)
        reps.append(char_rep)
        # -- [B' x K]
        features_emb = self.blstats_model(blstats)
        reps.append(features_emb)
        # -- [B' x K]
        st = torch.cat(reps, dim=1)
        # -- [B' x K]
        st = self.fc(st)
        if self.use_lstm:
            core_input = st.reshape(T, B, -1)
            core_output_list = []
            notdone = (~done).float()
            for input, nd in zip(core_input.unbind(), notdone.unbind()):
                # Reset core state to zero whenever an episode ended.
                # Make `done` broadcastable with (num_layers, B, hidden_size)
                # states:
                nd = nd.reshape(1, -1, 1)
                int_core_state = tuple(nd * t for t in core_state)
                output, new_core_state = self.core(input.unsqueeze(0), int_core_state)
                core_output_list.append(output)
            core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
        else:
            core_output = st
            # Fix: without an LSTM the recurrent state passes through unchanged;
            # previously `new_core_state` was undefined here, so the return
            # statement below raised NameError.
            new_core_state = core_state
        if self.corpus_attention:
            self.doc_embeddings = self.doc_embeddings.to(core_output.get_device())
            batch_size = core_output.shape[0]
            # Query: current state; keys/values: the fixed document embeddings,
            # broadcast across the batch.
            attention_out, _ = self.attention(
                torch.unsqueeze(core_output, 0),
                torch.unsqueeze(self.doc_embeddings, 1).expand(-1, batch_size, -1),
                torch.unsqueeze(self.doc_embeddings, 1).expand(-1, batch_size, -1),
            )
            core_output = torch.cat([core_output, attention_out[0]], dim=1)
        # -- [B' x A]
        policy_logits = self.policy(core_output)
        # -- [B' x 1]
        baseline = self.baseline(core_output)
        if self.flags.restrict_action_space:
            # Push disallowed actions' logits to -1e10 so softmax gives ~0 mass.
            policy_logits = policy_logits * self.policy_logits_mask + (
                (1 - self.policy_logits_mask) * -1e10
            )
        if self.training:
            action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
        else:
            # Don't sample when testing.
            action = torch.argmax(policy_logits, dim=1)
        policy_logits = policy_logits.reshape(T, B, -1)
        baseline = baseline.reshape(T, B)
        action = action.reshape(T, B)
        output = dict(policy_logits=policy_logits, baseline=baseline, action=action)
        return (output, new_core_state, features_emb)
class GlyphEncoder(nn.Module):
"""This glyph encoder first breaks the glyphs (integers up to 6000) to a
more structured representation based on the qualities of the glyph: chars,
colors, specials, groups and subgroup ids..
Eg: invisible hell-hound: char (d), color (red), specials (invisible),
group (monster) subgroup id (type of monster)
Eg: lit dungeon floor: char (.), color (white), specials (none),
group (dungeon) subgroup id (type of dungeon)
An embedding is provided for each of these, and the embeddings are
concatenated, before encoding with a number of CNN layers. This operation
is repeated with a crop of the structured reprentations taken around the
characters position, and the two representations are concatenated
before returning.
"""
    def __init__(self, flags, rows, cols, crop_dim, device=None):
        """Build per-quality embeddings and two convnets (full map + crop).

        rows/cols: glyph map size; crop_dim: side of the square crop taken
        around the agent's position.
        """
        super(GlyphEncoder, self).__init__()
        self.crop = Crop(rows, cols, crop_dim, crop_dim, device)
        K = flags.embedding_dim  # number of input filters
        L = flags.layers  # number of convnet layers
        assert (
            K % 8 == 0
        ), "This glyph embedding format needs embedding dim to be multiple of 8"
        # The K channels are split among the five glyph qualities:
        # chars 2/8, colors 1/8, specials 1/8, groups 1/8, ids 3/8.
        unit = K // 8
        self.chars_embedding = nn.Embedding(256, 2 * unit)
        self.colors_embedding = nn.Embedding(16, unit)
        self.specials_embedding = nn.Embedding(256, unit)
        # Buffer (not Parameter): static lookup table, moves with the module.
        self.register_buffer('id_pairs_table', torch.from_numpy(id_pairs_table()))
        num_groups = self.id_pairs_table.select(1, 1).max().item() + 1
        num_ids = self.id_pairs_table.select(1, 0).max().item() + 1
        self.groups_embedding = nn.Embedding(num_groups, unit)
        self.ids_embedding = nn.Embedding(num_ids, 3 * unit)
        F = 3  # filter dimensions
        S = 1  # stride
        P = 1  # padding
        M = 16  # number of intermediate filters
        self.output_filters = 8
        # First layer maps K embedding channels to M; last maps to output_filters.
        in_channels = [K] + [M] * (L - 1)
        out_channels = [M] * (L - 1) + [self.output_filters]
        h, w, c = rows, cols, crop_dim
        conv_extract, conv_extract_crop = [], []
        for i in range(L):
            conv_extract.append(
                nn.Conv2d(
                    in_channels=in_channels[i],
                    out_channels=out_channels[i],
                    kernel_size=(F, F),
                    stride=S,
                    padding=P,
                )
            )
            conv_extract.append(nn.ELU())
            conv_extract_crop.append(
                nn.Conv2d(
                    in_channels=in_channels[i],
                    out_channels=out_channels[i],
                    kernel_size=(F, F),
                    stride=S,
                    padding=P,
                )
            )
            conv_extract_crop.append(nn.ELU())
            # Keep track of output shapes
            h = conv_outdim(h, F, P, S)
            w = conv_outdim(w, F, P, S)
            c = conv_outdim(c, F, P, S)
        # Flattened size of full-map features plus crop features.
        self.hidden_dim = (h * w + c * c) * self.output_filters
        self.extract_representation = nn.Sequential(*conv_extract)
        self.extract_crop_representation = nn.Sequential(*conv_extract_crop)
        self.select = lambda emb, x: select(emb, x, flags.use_index_select)
    def glyphs_to_ids_groups(self, glyphs):
        """Map raw glyph values to (subgroup id, group) via the lookup table.

        Returns [ids, groups], each shaped like `glyphs` (T, B, H, W).
        """
        T, B, H, W = glyphs.shape
        # One (id, group) row per flattened glyph.
        ids_groups = self.id_pairs_table.index_select(0, glyphs.reshape(-1).long())
        ids = ids_groups.select(1, 0).reshape(T, B, H, W).long()
        groups = ids_groups.select(1, 1).reshape(T, B, H, W).long()
        return [ids, groups]
def forward(self, glyphs, chars, colors, specials, blstats):
T, B, H, W = glyphs.shape
ids, groups = self.glyphs_to_ids_groups(glyphs)
glyph_tensors = [
self.select(self.chars_embedding, chars.long()),
self.select(self.colors_embedding, colors.long()),
self.select(self.specials_embedding, specials.long()),
self.select(self.groups_embedding, groups),
self.select(self.ids_embedding, ids),
]
glyphs_emb = torch.cat(glyph_tensors, dim=-1)
glyphs_emb = rearrange(glyphs_emb, "T B H W K -> (T B) K H W")
#coordinates = blstats.reshape(T * B, -1).float()[:, :2]
coordinates = blstats.reshape(T * B, -1).float()[:, :2]
crop_emb = self.crop(glyphs_emb, coordinates)
glyphs_rep = self.extract_representation(glyphs_emb)
glyphs_rep = rearrange(glyphs_rep, "B C H W -> B (C H W)")
assert glyphs_rep.shape[0] == T * B
crop_rep | |
from collections import OrderedDict
import numpy as np
import pytorch_lightning as pl
import torch
import torchvision
from nowcasting_utils.models.loss import get_loss
from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
from torch.optim import lr_scheduler
from satflow.models import ConvLSTM, R2U_Net
from satflow.models.gan import GANLoss, define_discriminator, define_generator
from satflow.models.layers import ConditionTime
class CloudGAN(pl.LightningModule):
def __init__(
self,
forecast_steps: int = 48,
input_channels: int = 12,
lr: float = 0.0002,
beta1: float = 0.5,
beta2: float = 0.999,
num_filters: int = 64,
generator_model: str = "runet",
norm: str = "batch",
use_dropout: bool = False,
discriminator_model: str = "enhanced",
discriminator_layers: int = 0,
loss: str = "vanilla",
scheduler: str = "plateau",
lr_epochs: int = 10,
lambda_l1: float = 100.0,
l1_loss: str = "l1",
channels_per_timestep: int = 12,
condition_time: bool = False,
pretrained: bool = False,
):
"""
Creates CloudGAN, based off of https://www.climatechange.ai/papers/icml2021/54
Changes include allowing outputs for all timesteps, optionally conditioning on time
for single timestep output
Args:
forecast_steps: Number of timesteps to forecast
input_channels: Number of input channels
lr: Learning Rate
beta1: optimizer beta1
beta2: optimizer beta2 value
num_filters: Number of filters in generator
generator_model: Generator name
norm: Norm type
use_dropout: Whether to use dropout
discriminator_model: model for discriminator, one of options in define_discriminator
discriminator_layers: Number of layers in discriminator, only for NLayerDiscriminator
loss: Loss function, described in GANLoss
scheduler: LR scheduler name
lr_epochs: Epochs for LR scheduler
lambda_l1: Lambda for L1 loss, from slides recommended between 5-200
l1_loss: Loss to use for the L1 in the slides, default is L1, also SSIM is available
channels_per_timestep: Channels per input timestep
condition_time: Whether to condition on a future timestep, similar to MetNet
"""
super().__init__()
self.lr = lr
self.b1 = beta1
self.b2 = beta2
self.loss = loss
self.lambda_l1 = lambda_l1
self.lr_epochs = lr_epochs
self.lr_method = scheduler
self.forecast_steps = forecast_steps
self.input_channels = input_channels
self.output_channels = forecast_steps * channels_per_timestep
self.channels_per_timestep = channels_per_timestep
self.condition_time = condition_time
if condition_time:
self.ct = ConditionTime(forecast_steps)
# define networks (both generator and discriminator)
gen_input_channels = (
input_channels # + forecast_steps if condition_time else input_channels
)
self.recurrent = (
False # Does the generator generate all timesteps at once, or a single one at a time?
)
if generator_model == "runet":
generator_model = R2U_Net(gen_input_channels, self.output_channels, t=3)
elif generator_model == "convlstm":
self.recurrent = True # ConvLSTM makes a list of output timesteps
generator_model = ConvLSTM(
gen_input_channels, hidden_dim=num_filters, out_channels=self.channels_per_timestep
)
self.generator = define_generator(
gen_input_channels,
self.output_channels,
num_filters,
generator_model,
norm,
use_dropout,
)
if generator_model == "convlstm":
# Timestep x C x H x W inputs/outputs, need to flatten for discriminator
# TODO Add Discriminator that can use timesteps
self.flatten_generator = True
else:
self.flatten_generator = False
self.discriminator = define_discriminator(
self.channels_per_timestep if condition_time else self.output_channels,
num_filters,
discriminator_model,
discriminator_layers,
norm,
)
# define loss functions
self.criterionGAN = GANLoss(loss)
self.criterionL1 = get_loss(l1_loss, channels=self.channels_per_timestep)
self.save_hyperparameters()
    def train_per_timestep(
        self, images: torch.Tensor, future_images: torch.Tensor, optimizer_idx: int, batch_idx: int
    ):
        """
        For training with conditioning on time, so when the model is giving a single output
        This goes through every timestep in forecast_steps and runs the training
        Args:
            images: (Batch, Timestep, Channels, Width, Height)
            future_images: (Batch, Timestep, Channels, Width, Height)
            optimizer_idx: int, the optimizer to use (0 = generator, 1 = discriminator)
            batch_idx: index of the current batch, used for visualization logging
        Returns:
            OrderedDict with the averaged loss plus progress-bar/log entries
        """
        if optimizer_idx == 0:
            # Generator update: adversarial + L1 loss averaged over timesteps.
            total_loss = 0
            # Visualize roughly 1% of batches.
            vis_step = True if np.random.random() < 0.01 else False
            generated_images = self(
                images, forecast_steps=self.forecast_steps
            )  # (Batch, Channel, Width, Height)
            for i in range(self.forecast_steps):
                # x = self.ct.forward(images, i)  # Condition on future timestep
                # fake = self(x, forecast_steps=i + 1)  # (Batch, Channel, Width, Height)
                fake = generated_images[:, :, i, :, :]  # Only take the one at the end
                if vis_step:
                    self.visualize_step(
                        images, future_images[:, i, :, :], fake, batch_idx, step=f"train_frame_{i}"
                    )
                # adversarial loss is binary cross-entropy
                gan_loss = self.criterionGAN(self.discriminator(fake), True)
                # Only L1 loss on the given timestep
                l1_loss = self.criterionL1(fake, future_images[:, i, :, :]) * self.lambda_l1
                self.log(f"train/frame_{i}_l1_loss", l1_loss)
                g_loss = gan_loss + l1_loss
                total_loss += g_loss
            g_loss = total_loss / self.forecast_steps  # Get the mean loss over all timesteps
            tqdm_dict = {"g_loss": g_loss}
            output = OrderedDict({"loss": g_loss, "progress_bar": tqdm_dict, "log": tqdm_dict})
            self.log_dict({"train/g_loss": g_loss})
            return output
        # train discriminator
        if optimizer_idx == 1:
            # Measure discriminator's ability to classify real from generated samples
            total_loss = 0
            generated_images = self(
                images, forecast_steps=self.forecast_steps
            )  # (Batch, Channel, Width, Height)
            for i in range(self.forecast_steps):
                # x = self.ct.forward(images, i)  # Condition on future timestep
                # fake = self(x, forecast_steps=i + 1)  # (Batch, Channel, Width, Height)
                fake = generated_images[:, :, i, :, :]  # Only take the one at the end
                # Real frames should score True, generated ones False.
                real_loss = self.criterionGAN(self.discriminator(future_images[:, i, :, :]), True)
                # adversarial loss is binary cross-entropy
                fake_loss = self.criterionGAN(self.discriminator(fake), False)
                # discriminator loss is the average of these
                d_loss = (real_loss + fake_loss) / 2
                self.log(f"train/frame_{i}_d_loss", d_loss)
                total_loss += d_loss
            d_loss = total_loss / self.forecast_steps  # Average of the per-timestep loss
            tqdm_dict = {"d_loss": d_loss}
            output = OrderedDict({"loss": d_loss, "progress_bar": tqdm_dict, "log": tqdm_dict})
            self.log_dict({"train/d_loss": d_loss})
            return output
    def train_all_timestep(
        self, images: torch.Tensor, future_images: torch.Tensor, optimizer_idx: int, batch_idx: int
    ):
        """
        Train on all timesteps, instead of single timestep at a time. No conditioning on future timestep
        Args:
            images: input frames (Batch, Timestep, Channels, Width, Height)
            future_images: target frames to predict
            optimizer_idx: 0 = generator step, 1 = discriminator step
            batch_idx: index of the current batch, used for visualization logging
        Returns:
            OrderedDict with the loss plus progress-bar/log entries
        """
        if optimizer_idx == 0:
            # Generator update.
            generated_images = self(images)
            # Discriminator sees input frames concatenated with the prediction.
            fake = torch.cat((images, generated_images), 1)
            # log sampled images for roughly 1% of batches
            if np.random.random() < 0.01:
                self.visualize_step(
                    images, future_images, generated_images, batch_idx, step="train"
                )
            # adversarial loss is binary cross-entropy
            gan_loss = self.criterionGAN(self.discriminator(fake), True)
            l1_loss = self.criterionL1(generated_images, future_images) * self.lambda_l1
            g_loss = gan_loss + l1_loss
            tqdm_dict = {"g_loss": g_loss}
            output = OrderedDict({"loss": g_loss, "progress_bar": tqdm_dict, "log": tqdm_dict})
            self.log_dict({"train/g_loss": g_loss})
            return output
        # train discriminator
        if optimizer_idx == 1:
            # Measure discriminator's ability to classify real from generated samples
            # how well can it label as real?
            real = torch.cat((images, future_images), 1)
            real_loss = self.criterionGAN(self.discriminator(real), True)
            # how well can it label as fake?
            gen_output = self(images)
            fake = torch.cat((images, gen_output), 1)
            fake_loss = self.criterionGAN(self.discriminator(fake), False)
            # discriminator loss is the average of these
            d_loss = (real_loss + fake_loss) / 2
            tqdm_dict = {"d_loss": d_loss}
            output = OrderedDict({"loss": d_loss, "progress_bar": tqdm_dict, "log": tqdm_dict})
            self.log_dict({"train/d_loss": d_loss})
            return output
def training_step(self, batch, batch_idx, optimizer_idx):
images, future_images = batch
if self.condition_time:
return self.train_per_timestep(images, future_images, optimizer_idx, batch_idx)
return self.train_all_timestep(images, future_images, optimizer_idx, batch_idx)
def val_all_timestep(self, images, future_images, batch_idx):
# generate images
generated_images = self(images)
fake = torch.cat((images, generated_images), 1)
# log sampled images
if np.random.random() < 0.01:
self.visualize_step(images, future_images, generated_images, batch_idx, step="val")
# adversarial loss is binary cross-entropy
gan_loss = self.criterionGAN(self.discriminator(fake), True)
l1_loss = self.criterionL1(generated_images, future_images) * self.lambda_l1
g_loss = gan_loss + l1_loss
# how well can it label as real?
real = torch.cat((images, future_images), 1)
real_loss = self.criterionGAN(self.discriminator(real), True)
# how well can it label as fake?
fake_loss = self.criterionGAN(self.discriminator(fake), True)
# discriminator loss is the average of these
d_loss = (real_loss + fake_loss) / 2
tqdm_dict = {"d_loss": d_loss}
output = OrderedDict(
{
"val/discriminator_loss": d_loss,
"val/generator_loss": g_loss,
"progress_bar": tqdm_dict,
"log": tqdm_dict,
}
)
self.log_dict({"val/d_loss": d_loss, "val/g_loss": g_loss, "val/loss": d_loss + g_loss})
return output
def val_per_timestep(self, images, future_images, batch_idx):
total_g_loss = 0
total_d_loss = 0
vis_step = True if np.random.random() < 0.01 else False
generated_images = self(
images, forecast_steps=self.forecast_steps
) # (Batch, Channel, Width, Height)
for i in range(self.forecast_steps):
# x = self.ct.forward(images, i) # Condition on future timestep
fake = generated_images[:, :, i, :, :] # Only take the one at the end
if vis_step:
self.visualize_step(
images, future_images[:, i, :, :], fake, batch_idx, step=f"val_frame_{i}"
)
# adversarial loss is binary cross-entropy
gan_loss = self.criterionGAN(self.discriminator(fake), True)
# Only L1 loss on the given timestep
l1_loss = self.criterionL1(fake, future_images[:, i, :, :]) * self.lambda_l1
real_loss = self.criterionGAN(self.discriminator(future_images[:, i, :, :]), True)
# adversarial loss is binary cross-entropy
fake_loss = self.criterionGAN(self.discriminator(fake), False)
# Only L1 loss on the given timestep
# discriminator loss is the average of these
d_loss = (real_loss + fake_loss) / 2
self.log(f"val/frame_{i}_d_loss", d_loss)
total_d_loss += d_loss
self.log(f"val/frame_{i}_l1_loss", l1_loss)
g_loss = gan_loss + l1_loss
total_g_loss += g_loss
g_loss = total_g_loss / | |
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.im_func
if inspect.isfunction(obj): obj = obj.func_code
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
######################################################################
## 5. CmdTest Runner
######################################################################
class CmdTestRunner:
"""
A class used to run CmdTest test cases, and accumulate statistics.
The `run` method is used to process a single CmdTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
TODO: this is wrong
>>> tests = CmdTestFinder().find("foo.py")
>>> runner = CmdTestRunner(verbose=False)
>>> for test in tests:
... print runner.run(test)
(0, 1)
(0, 1)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
2 items passed all tests:
1 tests in foo
1 tests in foo.bar
2 tests in 2 items.
2 passed and 0 failed.
Test passed.
(0, 2)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
2
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
First, an output function (`out`) can be passed to
`TestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing CmdTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.
        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of cmdtest examples.
        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.
        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        # Current flags; option directives may modify these per example,
        # so the originals are kept separately for restoration.
        self.optionflags = optionflags
        self.original_optionflags = optionflags
        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        # Per-test aggregate; presumably name -> (failures, tries) -- confirm
        # against summarize().
        self._name2ft = {}
        ## # Create a fake output target for capturing cmdtest output.
        ## #TODO: probably don't need this for process running
        ## self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# CmdTest Running
#/////////////////////////////////////////////////////////////////
    def __run(self, test, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `CmdTestRunner.report_*` methods, using the
        writer function `out`.  Return a tuple `(f, t)`, where `t` is
        the number of examples tried, and `f` is the number of examples
        that failed.

        NOTE(review): this method is visibly unfinished.  The loop body
        hits an unconditional `continue` right after report_start(), so
        everything below that point is unreachable dead code carried
        over from doctest's DocTestRunner.  The dead code still uses
        Python-2 `exec ... in` statement syntax and references names
        that do not exist in this class (`filename`, `compileflags`,
        `self.debugger`, `self._fakeout`).
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0
        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags
        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
        check = self._checker.check_output
        # Process each example.
        for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then supress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)
            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag
            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)
            log.debug("run %r", example)
            # - look at pexpect, Expect really *is* the right way to do
            #   this
            # NOTE(review): unconditional `continue` -- the rest of this
            # loop body is never executed.
            continue
            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            raise "TODO: this has got to change"
            try:
                # Don't blink!  This is where the user's code gets run.
                exec compile(example.source, filename, "single",
                             compileflags, 1) in test.globs
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====
            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE   # guilty until proved innocent or insane
            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS
            # The example raised an exception: check if it was expected.
            else:
                exc_info = sys.exc_info()
                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exc_info)
                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM
                # We expected an exception: see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS
                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    m1 = re.match(r'[^:]*:', example.exc_msg)
                    m2 = re.match(r'[^:]*:', exc_msg)
                    if m1 and m2 and check(m1.group(0), m2.group(0),
                                           self.optionflags):
                        outcome = SUCCESS
            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exc_info)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)
        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags
        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return failures, tries
def __record_outcome(self, test, f, t):
"""
Record the fact that the given CmdTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
def run(self, test, out=None):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The output of each example is checked using
`CmdTestRunner.check_output`, and the results are formatted by
the `CmdTestRunner.report_*` | |
#!/usr/bin/env python3
import json
from pathlib import Path
import platform
import os
import subprocess
from time import time, sleep, monotonic
import cv2
import numpy as np
import depthai
import consts.resource_paths
from depthai_helpers import utils
from depthai_helpers.cli_utils import cli_print, parse_args, PrintColors
import socket
import socketserver
import threading
from PIL import Image
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from io import StringIO,BytesIO
class Streamer:
    """Tiny holder for the most recent detection payload, able to
    serialize it to JSON for the TCP stream."""

    def __init__(self):
        # Latest payload handed to get_data(); None until the first frame.
        self.data_json = None

    def get_data(self, data_input):
        """Store `data_input` as the current payload and return it."""
        self.data_json = data_input
        return self.data_json

    def pass_data(self):
        """Return the current payload serialized as a JSON string."""
        return json.dumps(self.data_json)
# TCPServer
class TCPServerRequest(socketserver.BaseRequestHandler):
    """Streams the latest detection JSON to a connected TCP client as an
    endless JSON array (OpenDataCam-style endpoint)."""

    def handle(self):
        # Handle is called each time a client is connected
        # When OpenDataCam connects, do not return - instead keep the connection open and keep streaming data
        # First send HTTP header
        header = 'HTTP/1.0 200 OK\r\nServer: Mozarella/2.2\r\nAccept-Range: bytes\r\nConnection: close\r\nMax-Age: 0\r\nExpires: 0\r\nCache-Control: no-cache, private\r\nPragma: no-cache\r\nContent-Type: application/json\r\n\r\n['
        self.request.send(header.encode())
        while True:
            # Poll roughly 10x per second; `mycustomadata` is assigned
            # on the server object from json_stream().
            sleep(0.1)
            json_string = self.server.mycustomadata
            json_separator = ',\n'
            json_to_send = json_string + json_separator
            self.request.send(json_to_send.encode())
# HTTPServer MJPEG
class VideoStreamHandler(BaseHTTPRequestHandler):
    """Serves frames as a multipart MJPEG HTTP stream."""

    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-type','multipart/x-mixed-replace; boundary=--jpgboundary')
        self.end_headers()
        while True:
            # NOTE(review): MJPEG_frame is a module-level global that is
            # presumably set by the capture loop elsewhere in the file --
            # this handler raises NameError until it exists; confirm.
            MJPEG_frame_RGB=cv2.cvtColor(MJPEG_frame,cv2.COLOR_BGR2RGB)
            JPG = Image.fromarray(MJPEG_frame_RGB)
            stream_file = BytesIO()
            # Encode once into a buffer to learn the JPEG byte size for
            # the Content-length header.
            JPG.save(stream_file,'JPEG')
            self.wfile.write("--jpgboundary".encode())
            self.send_header('Content-type','image/jpeg')
            self.send_header('Content-length',str(stream_file.getbuffer().nbytes))
            self.end_headers()
            # Encode a second time, directly into the response stream.
            JPG.save(self.wfile,'JPEG')
            # sleep(0.01)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTP server that handles each request in a separate thread, so a
    long-lived MJPEG stream does not block other clients."""
def decode_mobilenet_ssd(nnet_packet):
    """
    Collect detection rectangles from a MobileNet-SSD NN packet.

    Entries are sorted by confidence; an entry with id == -1,
    confidence == 0, or a label index beyond the label table acts as
    the stopper (an OpenVINO / MobileSSD convention).
    """
    detections = []
    for entry in nnet_packet.entries():
        stop = (entry[0]['id'] == -1.0
                or entry[0]['confidence'] == 0.0
                or entry[0]['label'] > len(labels))
        if stop:
            break
        # Keep the entry for later use: the matching image packet may
        # arrive at a different time than the NN packet.
        detections.append(entry)
    return detections
def nn_to_depth_coord(x, y):
    """Map normalized NN coordinates (0..1) onto depth-frame pixel
    coordinates using the global `nn2depth` offset/scale mapping."""
    depth_x = int(nn2depth['off_x'] + x * nn2depth['max_w'])
    depth_y = int(nn2depth['off_y'] + y * nn2depth['max_h'])
    return depth_x, depth_y
def average_depth_coord(pt1, pt2):
    """Shrink the rectangle (pt1, pt2) symmetrically toward its center
    by the configured depth padding factor; the smaller rectangle is
    the region used for depth averaging."""
    shrink = 1 - config['depth']['padding_factor']
    dx = int((pt2[0] - pt1[0]) * shrink / 2)
    dy = int((pt2[1] - pt1[1]) * shrink / 2)
    inner_pt1 = (pt1[0] + dx), (pt1[1] + dy)
    inner_pt2 = (pt2[0] - dx), (pt2[1] - dy)
    return inner_pt1, inner_pt2
def show_mobilenet_ssd(entries_prev, frame, is_depth=0):
    """
    Draw MobileNet-SSD detections onto `frame` and return it.

    `entries_prev` are the entries saved by decode_mobilenet_ssd().
    When `is_depth` is truthy the normalized coordinates are mapped
    into the depth frame and a shrunken depth-averaging rectangle is
    drawn as well.
    """
    img_h = frame.shape[0]
    img_w = frame.shape[1]
    global config
    # iterate through pre-saved entries & draw rectangle & text on image:
    for e in entries_prev:
        # the lower confidence threshold - the more we get false positives
        if e[0]['confidence'] > config['depth']['confidence_threshold']:
            if is_depth:
                pt1 = nn_to_depth_coord(e[0]['left'], e[0]['top'])
                pt2 = nn_to_depth_coord(e[0]['right'], e[0]['bottom'])
                color = (255, 0, 0) # bgr
                avg_pt1, avg_pt2 = average_depth_coord(pt1, pt2)
                cv2.rectangle(frame, avg_pt1, avg_pt2, color)
                color = (255, 255, 255) # bgr
            else:
                pt1 = int(e[0]['left'] * img_w), int(e[0]['top'] * img_h)
                pt2 = int(e[0]['right'] * img_w), int(e[0]['bottom'] * img_h)
                color = (0, 0, 255) # bgr
            x1, y1 = pt1
            cv2.rectangle(frame, pt1, pt2, color)
            # Valid indices into `labels` are 0..len(labels)-1.  The
            # original check used '>', which let label == len(labels)
            # through and raised IndexError on labels[...] below.
            if e[0]['label'] >= len(labels):
                print("Label index=",e[0]['label'], "is out of range. Not applying text to rectangle.")
            else:
                pt_t1 = x1, y1 + 20
                cv2.putText(frame, labels[int(e[0]['label'])], pt_t1, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
                pt_t2 = x1, y1 + 40
                cv2.putText(frame, '{:.2f}'.format(100*e[0]['confidence']) + ' %', pt_t2, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
                if config['ai']['calc_dist_to_bb']:
                    # Estimated 3D position relative to the camera, meters.
                    pt_t3 = x1, y1 + 60
                    cv2.putText(frame, 'x:' '{:7.3f}'.format(e[0]['distance_x']) + ' m', pt_t3, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
                    pt_t4 = x1, y1 + 80
                    cv2.putText(frame, 'y:' '{:7.3f}'.format(e[0]['distance_y']) + ' m', pt_t4, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
                    pt_t5 = x1, y1 + 100
                    cv2.putText(frame, 'z:' '{:7.3f}'.format(e[0]['distance_z']) + ' m', pt_t5, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
    return frame
def decode_age_gender_recognition(nnet_packet):
    """
    Extract (age, gender) results from an age/gender NN packet.

    For each entry whose female or male confidence exceeds 0.8, the raw
    age value is appended followed by the string "female" or "male"
    (whichever confidence is larger).
    """
    detections = []
    for entry in nnet_packet.entries():
        female_conf = entry[1]["female"]
        male_conf = entry[1]["male"]
        if female_conf > 0.8 or male_conf > 0.8:
            detections.append(entry[0]["age"])
            detections.append("female" if female_conf > male_conf else "male")
    return detections
def show_age_gender_recognition(entries_prev, frame):
    """Overlay decoded age and gender text on `frame`; when results are
    present the frame is also resized to 300x300 before returning."""
    if entries_prev:
        # The model reports age as a fraction of 100 years.
        age = int(entries_prev[0] * 100)
        cv2.putText(frame, "Age: " + str(age), (0, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        gender = entries_prev[1]
        cv2.putText(frame, "G: " + str(gender), (0, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        frame = cv2.resize(frame, (300, 300))
    return frame
def decode_emotion_recognition(nnet_packet):
    """
    Return the per-emotion confidence vector from an emotion NN packet.

    The packet layout puts one confidence per emotion class in
    entries()[0][0] (class order documented in
    show_emotion_recognition).
    """
    # Hoist the packet access out of the loop: the original re-called
    # nnet_packet.entries() twice per element.
    scores = nnet_packet.entries()[0][0]
    return [scores[i] for i in range(len(scores))]
def show_emotion_recognition(entries_prev, frame):
    """Overlay the most confident emotion label (when above 0.7) on
    `frame`; non-empty results also trigger a resize to 300x300."""
    emotion_names = {
        0: "neutral",
        1: "happy",
        2: "sad",
        3: "surprise",
        4: "anger",
    }
    if entries_prev:
        if max(entries_prev) > 0.7:
            label = emotion_names[np.argmax(entries_prev)]
            cv2.putText(frame, label, (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        frame = cv2.resize(frame, (300, 300))
    return frame
def decode_landmarks_recognition(nnet_packet):
    """
    Return facial landmarks as a list of (x, y) tuples (normalized
    0..1 coordinates) from a landmarks NN packet.

    The packet exposes a flat sequence x0, y0, x1, y1, ... in
    entries()[0][0]; consecutive values are paired up.
    """
    # Hoist the packet access out of the loop: the original re-called
    # nnet_packet.entries() twice per coordinate.
    flat = nnet_packet.entries()[0][0]
    coords = [flat[i] for i in range(len(flat))]
    # Pair consecutive values: [x0, y0, x1, y1] -> [(x0, y0), (x1, y1)].
    return list(zip(*[iter(coords)] * 2))
def show_landmarks_recognition(entries_prev, frame):
    """Draw decoded facial landmark points on `frame`; when landmarks
    are present the frame is also resized to 300x300 before return."""
    img_h = frame.shape[0]
    img_w = frame.shape[1]
    if len(entries_prev) != 0:
        for i in entries_prev:
            try:
                # NOTE(review): x is scaled by the image *height* and y by
                # the *width*; for non-square frames this looks swapped --
                # confirm the intended axis order against the model output.
                x = int(i[0]*img_h)
                y = int(i[1]*img_w)
            except:
                # Skip malformed landmark entries rather than aborting.
                continue
            # # print(x,y)
            cv2.circle(frame, (x,y), 3, (0, 0, 255))
        frame = cv2.resize(frame, (300, 300))
    return frame
# TCPServer initialize: listens on localhost:8070 and streams the latest
# detection JSON to connected clients via TCPServerRequest.
server_TCP = socketserver.TCPServer(('127.0.0.1', 8070), TCPServerRequest)
# Thread object is created here but started later (from json_stream()).
th = threading.Thread(target=server_TCP.serve_forever)
def json_stream(frame_id, entries_prev):
    """
    Publish detections for one frame as JSON over the TCP stream.

    Builds an OpenDataCam-style dict for `frame_id` from the saved NN
    entries, stores its JSON form on the TCP server, and ensures the
    server thread is running.
    """
    json_dic = {"frame_id": frame_id, "object": []}
    global config
    # Start from the empty frame dict so a frame with no confident
    # detections still publishes valid JSON (the original left
    # `data_json` unbound in that case -> NameError).
    data_json = json_dic
    # iterate through pre-saved entries:
    for e in entries_prev:
        # the lower confidence threshold - the more we get false positives
        if e[0]['confidence'] > config['depth']['confidence_threshold']:
            class_id = e[0]['label']
            label_name = labels[int(e[0]['label'])]
            center_x = 1  # replace with actual coordinates
            center_y = 2  # replace with actual coordinates
            width = 3  # replace with actual coordinates
            height = 4  # replace with actual coordinates
            confidence = e[0]['confidence']
            data_json = send_json(json_dic, class_id, label_name, center_x, center_y, width, height, confidence)
    # Publish once per frame (the original published and restarted the
    # thread inside the loop, per detection).
    streamer = Streamer()
    streamer.get_data(data_json)
    server_TCP.mycustomadata = streamer.pass_data()
    # Start the server thread exactly once; the original called
    # th.start() repeatedly, which raises RuntimeError on the second call.
    if not th.is_alive():
        th.daemon = True
        th.start()
def send_json(json_dic, class_id, label_name, center_x, center_y, width, height, confidence):
    """
    Append one detection record to `json_dic['object']` and return the
    same dict (mutated in place).
    """
    record = {
        'class_id': class_id,
        'name': label_name,
        'relative_coordinates': {
            'center_x': center_x,
            'center_y': center_y,
            'width': width,
            'height': height
        },
        'confidence': confidence
    }
    json_dic['object'].append(record)
    return json_dic
# --- Command-line / model / device setup (runs at import time) ---
# NOTE: `global` at module scope is a no-op; kept as written.
global args
try:
    args = vars(parse_args())
except:
    # Any argument-parsing failure exits immediately with status 2.
    os._exit(2)
stream_list = args['streams']
# -co/--config_overwrite arrives as a JSON string; decode it here.
if args['config_overwrite']:
    args['config_overwrite'] = json.loads(args['config_overwrite'])
print("Using Arguments=",args)
# Select device firmware: USB2-forced, default, or none (dev debug).
if args['force_usb2']:
    cli_print("FORCE USB2 MODE", PrintColors.WARNING)
    cmd_file = consts.resource_paths.device_usb2_cmd_fpath
else:
    cmd_file = consts.resource_paths.device_cmd_fpath
if args['dev_debug']:
    cmd_file = ''
    print('depthai will not load cmd file into device.')
# Depth-based distance estimation is on unless explicitly disabled.
calc_dist_to_bb = True
if args['disable_depth']:
    calc_dist_to_bb = False
# Pick decode/show handlers for the chosen CNN model; MobileNet-SSD is
# the default, and only the SSD pipeline supports distance calculation.
decode_nn=decode_mobilenet_ssd
show_nn=show_mobilenet_ssd
if args['cnn_model'] == 'age-gender-recognition-retail-0013':
    decode_nn=decode_age_gender_recognition
    show_nn=show_age_gender_recognition
    calc_dist_to_bb=False
if args['cnn_model'] == 'emotions-recognition-retail-0003':
    decode_nn=decode_emotion_recognition
    show_nn=show_emotion_recognition
    calc_dist_to_bb=False
if args['cnn_model'] in ['facial-landmarks-35-adas-0002', 'landmarks-regression-retail-0009']:
    decode_nn=decode_landmarks_recognition
    show_nn=show_landmarks_recognition
    calc_dist_to_bb=False
# Resolve the model blob and its JSON config ("_depth" variant when
# distance calculation is enabled), verify both exist, then load the
# label table used when drawing detections.
if args['cnn_model']:
    cnn_model_path = consts.resource_paths.nn_resource_path + args['cnn_model']+ "/" + args['cnn_model']
    blob_file = cnn_model_path + ".blob"
    suffix=""
    if calc_dist_to_bb:
        suffix="_depth"
    blob_file_config = cnn_model_path + suffix + ".json"
    blob_file_path = Path(blob_file)
    blob_file_config_path = Path(blob_file_config)
    if not blob_file_path.exists():
        cli_print("\nWARNING: NN blob not found in: " + blob_file, PrintColors.WARNING)
        os._exit(1)
    if not blob_file_config_path.exists():
        cli_print("\nWARNING: NN json not found in: " + blob_file_config, PrintColors.WARNING)
        os._exit(1)
    with open(blob_file_config) as f:
        data = json.load(f)
    try:
        labels = data['mappings']['labels']
    except:
        print("Labels not found in json!")
print('depthai.__version__ == %s' % depthai.__version__)
print('depthai.__dev_version__ == %s' % depthai.__dev_version__)
# On Linux, check that the Movidius (idVendor 03e7) udev rule exists so
# the device can be opened without root; otherwise print setup steps.
if platform.system() == 'Linux':
    ret = subprocess.call(['grep', '-irn', 'ATTRS{idVendor}=="03e7"', '/etc/udev/rules.d'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    if(ret != 0):
        cli_print("\nWARNING: Usb rules not found", PrintColors.WARNING)
        cli_print("\nSet rules: \n"
                  """echo 'SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666"' | sudo tee /etc/udev/rules.d/80-movidius.rules \n"""
                  "sudo udevadm control --reload-rules && udevadm trigger \n"
                  "Disconnect/connect usb cable on host! \n", PrintColors.RED)
        os._exit(1)
if not depthai.init_device(cmd_file, args['device_id']):
    print("Error initializing device. Try to reset it.")
    exit(1)
# NOTE(review): 'get_available_steams' (sic) is the name exposed by this
# depthai version -- do not "fix" the spelling without checking the API.
print('Available streams: ' + str(depthai.get_available_steams()))
# Do not modify the default values in the config Dict below directly. Instead, use the `-co` argument when running this script.
config = {
# Possible streams:
# ['left', 'right','previewout', 'metaout', 'depth_sipp', 'disparity', 'depth_color_h']
# If "left" is used, it must be in the first position.
# To test depth use:
# 'streams': [{'name': 'depth_sipp', "max_fps": 12.0}, {'name': 'previewout', "max_fps": 12.0}, ],
'streams': stream_list,
'depth':
{
'calibration_file': consts.resource_paths.calib_fpath,
'padding_factor': 0.3,
'depth_limit_m': 10.0, # In meters, for filtering purpose during x,y,z calc
'confidence_threshold' : 0.5, #Depth is calculated for bounding boxes with confidence higher than this number
},
'ai':
{
'blob_file': blob_file,
'blob_file_config': blob_file_config,
'calc_dist_to_bb': calc_dist_to_bb,
'keep_aspect_ratio': not args['full_fov_nn'],
},
'board_config':
{
'swap_left_and_right_cameras': args['swap_lr'], # True for 1097 (RPi Compute) and 1098OBC (USB w/onboard cameras)
'left_fov_deg': args['field_of_view'], # Same on 1097 and 1098OBC
'rgb_fov_deg': args['rgb_field_of_view'],
'left_to_right_distance_cm': args['baseline'], # Distance between stereo cameras
'left_to_rgb_distance_cm': args['rgb_baseline'], # Currently unused
'store_to_eeprom': args['store_eeprom'],
'clear_eeprom': args['clear_eeprom'],
'override_eeprom': args['override_eeprom'],
},
#'video_config':
#{
# 'rateCtrlMode': 'cbr',
# 'profile': 'h265_main', # Options: 'h264_baseline' / 'h264_main' / 'h264_high' / 'h265_main'
# 'bitrate': 8000000, # When using CBR
# 'maxBitrate': 8000000, # When using CBR
# 'keyframeFrequency': 30,
# 'numBFrames': 0,
# 'quality': 80 # (0 - 100%) When using | |
/ 2
ry = mask_diameters * dy / 2
# major and minor ellipse axes with center at (xc, yc)
x = np.array([-rx, rx, 0, 0, 0]) + xc
y = np.array([0, 0, 0, -ry, ry]) + yc
x_rot, y_rot = rotate_points(x, y, xc, yc, phi)
return np.array([x_rot, y_rot])
def beam_size(image, mask_diameters=3, corner_fraction=0.035, nT=3, max_iter=25):
    """
    Determine beam parameters in a noisy image (ISO 11146 procedure).

    A first elliptical estimate is made after subtracting the
    background level found in the image corners.  That estimate defines
    a rotated integration rectangle (`mask_diameters` times the spot
    size); the spot is re-estimated inside it, and the process repeats
    until two successive estimates agree to within one pixel, or
    `max_iter` passes have been made.

    `corner_fraction` sets the corner size used for the background
    estimate (ISO 11146-3 recommends 2-5%; 3.5% works well).
    `nT` scales the noise allowance: background = mean + nT*stdev
    (ISO 11146 suggests 2 < nT < 4).

    Args:
        image: 2D array of image of beam
        mask_diameters: size of the integration rectangle in diameters
        corner_fraction: fractional size of the corners
        nT: multiple of background noise to remove
        max_iter: maximum number of iterations
    Returns:
        elliptical beam parameters (xc, yc, dx, dy, phi) -- center,
        diameters, and tilt of the ellipse from the axis in radians
    """
    # Remove the constant background estimated from the image corners.
    background_free = corner_subtract(image, corner_fraction, nT)

    # Initial estimate over the entire image.
    xc, yc, dx, dy, phi = basic_beam_size(background_free)

    for _ in range(1, max_iter):
        previous = (xc, yc, dx, dy)
        # Keep only pixels inside the rotated integration rectangle.
        inside = rotated_rect_mask(image, xc, yc, dx, dy, phi, mask_diameters)
        clipped = np.copy(background_free)
        clipped[inside < 1] = 0  # zero all values outside mask
        xc, yc, dx, dy, phi = basic_beam_size(clipped)
        # Converged when center and diameters each move < 1 pixel.
        if all(abs(new - old) < 1
               for new, old in zip((xc, yc, dx, dy), previous)):
            break

    return xc, yc, dx, dy, phi
def beam_test_image(h, v, xc, yc, dx, dy, phi, noise=0, max_value=255):
    """
    Create a test image.

    Create a v x h image with an elliptical beam with specified center and
    beam dimensions. By default the values in the image will range from 0 to
    255. The default image will have no background and no noise.

    Args:
        h: number of columns in 2D test image
        v: number of rows in 2D test image
        xc: horizontal center of beam
        yc: vertical center of beam
        dx: ellipse diameter for axis closest to horizontal
        dy: ellipse diameter for axis closest to vertical
        phi: angle that elliptical beam is rotated [radians]
        noise: normally distributed pixel noise to add to image
        max_value: all values in image fall between 0 and `max_value`
    Returns:
        2D image of astigmatic spot is v x h pixels in size
    """
    rx = dx/2
    ry = dy/2
    y, x = np.ogrid[:v, :h]
    # Leave ~3 sigma of headroom so added noise rarely clips at max_value.
    scale = max_value - 3 * noise
    # (The original pre-allocated np.zeros([v, h]) here and immediately
    # overwrote it; that dead assignment has been removed.)
    image0 = scale * np.exp(-2*(x-xc)**2/rx**2 - 2*(y-yc)**2/ry**2)
    image1 = rotate_image(image0, xc, yc, phi)
    if noise > 0:
        image1 += np.random.poisson(noise, size=(v, h))
    # after adding noise, the signal may exceed the range 0 to max_value
    np.place(image1, image1 > max_value, max_value)
    np.place(image1, image1 < 0, 0)
    # Choose the smallest integer dtype that holds max_value.
    if max_value < 256:
        return image1.astype(np.uint8)
    if max_value < 65536:
        return image1.astype(np.uint16)
    return image1
def ellipse_arrays(xc, yc, dx, dy, phi, npoints=200):
    """
    Return x, y arrays tracing a rotated ellipse.

    Args:
        xc: horizontal center of beam
        yc: vertical center of beam
        dx: horizontal diameter of beam
        dy: vertical diameter of beam
        phi: angle that elliptical beam is rotated [radians]
        npoints: number of points along the ellipse
    Returns:
        x, y : two arrays of points on the ellipse
    """
    angles = np.linspace(0, 2 * np.pi, npoints)
    # Semi-axis components before rotation.
    major = dx / 2 * np.cos(angles)
    minor = dy / 2 * np.sin(angles)
    cos_phi = np.cos(phi)
    sin_phi = np.sin(phi)
    # Rotate by phi (note the image-style y orientation) and translate.
    xp = xc + major * cos_phi - minor * sin_phi
    yp = yc - major * sin_phi - minor * cos_phi
    return np.array([xp, yp])
def basic_beam_size_naive(image):
    """
    Slow but simple implementation of the ISO 11146 beam standard.

    Computes the same moments as `basic_beam_size()` using explicit
    Python loops; kept as a readable cross-check of the fast version.

    Args:
        image: 2D array of image with beam spot in it
    Returns:
        beam parameters [xc, yc, dx, dy, phi]
    """
    rows, cols = image.shape

    # Zeroth and first moments -> total power and centroid
    # (same result as ndimage.center_of_mass, truncated to int).
    total = 0.0
    sum_x = 0.0
    sum_y = 0.0
    for r in range(rows):
        for c in range(cols):
            pixel = image[r, c]
            total += pixel
            sum_x += pixel * c
            sum_y += pixel * r
    xc = int(sum_x / total)
    yc = int(sum_y / total)

    # Central second moments (variances about the centroid).
    xx = 0.0
    yy = 0.0
    xy = 0.0
    for r in range(rows):
        for c in range(cols):
            pixel = image[r, c]
            xx += pixel * (c - xc)**2
            xy += pixel * (c - xc) * (r - yc)
            yy += pixel * (r - yc)**2
    xx /= total
    xy /= total
    yy /= total

    # ISO 11146 major/minor diameters and ellipse rotation angle.
    disc = np.sqrt((xx - yy)**2 + 4 * xy**2)
    dx = 2 * np.sqrt(2) * np.sqrt(xx + yy + np.sign(xx - yy) * disc)
    dy = 2 * np.sqrt(2) * np.sqrt(xx + yy - np.sign(xx - yy) * disc)
    phi = 2 * np.arctan2(2 * xy, xx - yy)

    return xc, yc, dx, dy, phi
def draw_beam_figure():
    """Draw a simple astigmatic beam ellipse with labeled axes,
    diameters, and rotation angle (for documentation figures)."""
    theta = np.radians(30)
    xc = 0
    yc = 0
    dx = 50
    dy = 25
    plt.subplots(1, 1, figsize=(6, 6))
    # If the aspect ratio is not `equal` then the major and minor radii
    # do not appear to be orthogonal to each other!
    # NOTE(review): plt.axes() with no arguments is deprecated in newer
    # matplotlib releases -- confirm against the pinned version.
    plt.axes().set_aspect('equal')
    # Ellipse outline and its rotated integration rectangle.
    xp, yp = ellipse_arrays(xc, yc, dx, dy, theta)
    plt.plot(xp, yp, 'k', lw=2)
    xp, yp = rotated_rect_arrays(xc, yc, dx, dy, theta)
    plt.plot(xp, yp, ':b', lw=2)
    # Major (blue) and minor (red) diameters of the rotated ellipse.
    sint = np.sin(theta)/2
    cost = np.cos(theta)/2
    plt.plot([xc-dx*cost, xc+dx*cost], [yc+dx*sint, yc-dx*sint], ':b')
    plt.plot([xc+dy*sint, xc-dy*sint], [yc+dy*cost, yc-dy*cost], ':r')
    # draw axes
    plt.annotate("x'", xy=(-25, 0), xytext=(25, 0),
                 arrowprops=dict(arrowstyle="<-"), va='center', fontsize=16)
    plt.annotate("y'", xy=(0, 25), xytext=(0, -25),
                 arrowprops=dict(arrowstyle="<-"), ha='center', fontsize=16)
    # Rotation-angle arc and diameter labels.
    plt.annotate(r'$\phi$', xy=(13, -2.5), fontsize=16)
    plt.annotate('', xy=(15.5, 0), xytext=(
        14, -8.0), arrowprops=dict(arrowstyle="<-", connectionstyle="arc3, rad=-0.2"))
    plt.annotate(r'$d_x$', xy=(-17, 7), color='blue', fontsize=16)
    plt.annotate(r'$d_y$', xy=(-4, -8), color='red', fontsize=16)
    plt.xlim(-30, 30)
    plt.ylim(30, -30)  # inverted to match image coordinates!
    plt.axis('off')
def crop_image_to_rect(image, xc, yc, xmin, xmax, ymin, ymax):
    """
    Return the image cropped to the specified rectangle, together with
    the beam center expressed in the cropped coordinate system.

    Args:
        image: image of beam
        xc, yc: beam center (pixels)
        xmin, xmax: left and right edges (pixels)
        ymin, ymax: top and bottom edges (pixels)
    Returns:
        cropped_image: cropped image
        new_xc, new_yc: new beam center (pixels)
    """
    height, width = image.shape
    # Clamp the rectangle to the image bounds and force integer indices.
    left = max(0, int(xmin))
    right = min(width, int(xmax))
    top = max(0, int(ymin))
    bottom = min(height, int(ymax))
    return image[top:bottom, left:right], xc - left, yc - top
def crop_image_to_integration_rect(image, xc, yc, dx, dy, phi):
    """
    Crop the image to the ISO 11146 integration rectangle.

    The axis-aligned bounding box of the rotated rectangle (three
    diameters across) becomes the crop region; since cropping shifts
    the origin, the beam center in the cropped image is returned too.

    Args:
        image: image of beam
        xc: horizontal center of beam
        yc: vertical center of beam
        dx: horizontal diameter of beam
        dy: vertical diameter of beam
        phi: angle that elliptical beam is rotated [radians]
    Returns:
        cropped_image, new_xc, new_yc
    """
    corners_x, corners_y = rotated_rect_arrays(xc, yc, dx, dy, phi, mask_diameters=3)
    return crop_image_to_rect(image, xc, yc,
                              min(corners_x), max(corners_x),
                              min(corners_y), max(corners_y))
def luminance(value, cmap_name='gist_ncar', vmin=0, vmax=255):
    """Return the ITU-R BT.709 luminance of `value` after mapping it
    through the named colormap over the range [vmin, vmax]."""
    # Normalize into [0, 1] for the colormap lookup, clamping outliers.
    frac = (value - vmin) / (vmax - vmin)
    frac = min(max(0, frac), 1)
    # NOTE(review): matplotlib.cm.get_cmap is deprecated in newer
    # matplotlib -- confirm against the pinned version.
    rgba = matplotlib.cm.get_cmap(cmap_name)(frac)
    red = 255 * rgba[0]
    green = 255 * rgba[1]
    blue = 255 * rgba[2]
    # Luminance weights per ITU-R BT.709.
    return 0.2126 * red + 0.7152 * green + 0.0722 * blue
def draw_as_dotted_contrast_line(image, xpts, ypts, cmap='gist_ncar', vmax=None):
"""Draw lines in white or black depending on background image."""
| |
if self.document_isNew[modelType.modelDocument.uri]:
qnamesDerivedFrom = modelType.qnameDerivedFrom
if not isinstance(qnamesDerivedFrom, (list,tuple)): # list if a union
qnamesDerivedFrom = (qnamesDerivedFrom,)
for qnameDerivedFrom in qnamesDerivedFrom:
if modelType.qname in self.type_id and qnameDerivedFrom in self.type_id:
typeDerivationEdges.append({
'from_id': self.type_id[modelType.qname],
'to_id': self.type_id[qnameDerivedFrom],
'rel': "derived_from"})
### was ### g.addEdge(g.v(it.from_id), g.v(it.to_id), it.rel)
self.execute("Insert type derivation edges", """
e.each{
fromV = g.v(it.from_id)
toV = g.v(it.to_id)
vOutIt = fromV.out(it.rel).has('id',toV.id)
vOutIt.hasNext() ?: g.addEdge(fromV, toV, it.rel)
}
""",
params={'e': typeDerivationEdges})
aspectEdges = []
for modelConcept in self.modelXbrl.qnameConcepts.values():
if self.document_isNew[modelConcept.modelDocument.uri]:
if modelConcept.qname in self.aspect_id:
if modelConcept.typeQname in self.type_id:
aspectEdges.append({'from_id': self.aspect_id[modelConcept.qname],
'to_id': self.type_id[modelConcept.typeQname],
'rel': "data_type"})
if modelConcept.substitutesForQname in self.type_id:
aspectEdges.append({'from_id': self.aspect_id[modelConcept.qname],
'to_id': self.type_id[modelConcept.substitutesForQname.typeQname],
'rel': "substitutes_for"})
baseXbrliTypeQnames = modelConcept.baseXbrliTypeQname # may be union or single
if not isinstance(baseXbrliTypeQnames, (list,tuple)):
baseXbrliTypeQnames = (baseXbrliTypeQnames,) # was single base type
for baseXbrliTypeQname in baseXbrliTypeQnames:
if baseXbrliTypeQname in self.type_id:
aspectEdges.append({'from_id': self.aspect_id[modelConcept.qname],
'to_id': self.type_id[baseXbrliTypeQname],
'rel': "base_xbrli_type"})
self.execute("Insert aspect edges for data type, substitutes for, and base xbrli type", """
e.each{
fromV = g.v(it.from_id)
toV = g.v(it.to_id)
vOutIt = fromV.out(it.rel).has('id',toV.id)
vOutIt.hasNext() ?: g.addEdge(fromV, toV, it.rel)
}
""",
params={'e': aspectEdges})
'''
def insertValidCombinations(self):
# document-> validCombinationsSet-> cubes
self.showStatus("insert ValidCombinations")
drsELRs = set(ELR
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.values()
if arcrole == XbrlConst.all)
hasHcRels = self.modelXbrl.relationshipSet(XbrlConst.all).modelRelationships
hcConcepts = set(hasHcRel.toModelObject for hasHcRel in hasHcRels)
# one cube region per head pri item with multiple cube regions
for hcConcept in hcConcepts:
# include any other concepts in this pri item with clean inheritance
for drsELR in drsELRs:
# each ELR is another cube region
for allRel in val.modelXbrl.relationshipSet(XbrlConst.all, ELR)
drsPriItems(val, fromELR, fromPriItem
... this becomes an unweildly large model, don't see a use case for compiling it out
'''
def insertAspectProxies(self, qnames):
aspectQnames = [qname
for qname in qnames
if qname not in self.aspect_proxy_id and qname in self.aspect_id]
#print ("missing qnames: " + ", ".join(str(q) for q in aspectQnames if q not in self.aspect_id))
results = self.execute("Insert aspect proxies", """
reportV = g.v(report_id)
aspectProxyV_ids = []
aspect_ids.each{
aspectV = g.v(it)
aspectProxyV = g.addVertex(['_class':'aspect_proxy'])
aspectProxyV_ids << aspectProxyV.id
g.addEdge(aspectV, aspectProxyV, 'proxy')
g.addEdge(reportV, aspectProxyV, 'report_aspect_proxy')
}
aspectProxyV_ids
""",
params={'report_id': self.report_id,
'aspect_ids': [self.aspect_id[qname] for qname in aspectQnames]}
)["results"]
for i, proxy_id in enumerate(results):
self.aspect_proxy_id[aspectQnames[i]] = proxy_id
def periodAspectValue(self, context):
if context.isForeverPeriod:
return 'forever'
if context.isInstantPeriod:
return (str(context.instantDatetime),)
return (str(context.startDatetime),str(context.endDatetime))
def insertDataPoints(self):
    """Insert fact (data point) vertices and their aspect-value edges.

    For instance / inline-XBRL documents this inserts one data_point vertex
    per fact (grouped under a datapoints_set vertex of the document),
    de-duplicated vertices for entity identifiers, periods, units and
    dimension aspect-value selections, aspect proxies for every
    concept/dimension/member qname used, and finally the edges tying data
    points to their aspect values.

    Fix: the typed-dimension property key previously had a stray leading
    space (``' typed_value'``); it is now ``'typed_value'``, consistent with
    every other property key written by this module.
    """
    self.showStatus("insert DataPoints")
    dataPointObjectIndices = []
    # note these initial aspects Qnames used also must be in conceptsUsed above
    aspectQnamesUsed = {XbrlConst.qnXbrliIdentifier, XbrlConst.qnXbrliPeriod, XbrlConst.qnXbrliUnit}
    dimensions = []  # distinct (dimension qname, isExplicit, member-or-typed-value) keys
    if self.modelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL):
        instanceDocument = self.modelXbrl.modelDocument
        dataPoints = []
        entityIdentifiers = []  # index by (scheme, identifier)
        periods = []  # index by (instant,) or (start,end) dates
        units = []  # index by measures (qnames set)
        for fact in self.modelXbrl.factsInInstance:
            aspectQnamesUsed.add(fact.concept.qname)
            dataPointObjectIndices.append(fact.objectIndex)
            datapoint = {'_class': 'data_point',
                         #'name': str(fact.qname), not needed, get from aspect (concept)
                         'source_line': fact.sourceline}
            datapoint['xml_id'] = XmlUtil.elementFragmentIdentifier(fact)
            if fact.context is not None:
                datapoint['context'] = fact.contextID
                context = fact.context
                p = self.periodAspectValue(context)
                if p not in periods:
                    periods.append(p)
                e = fact.context.entityIdentifier
                if e not in entityIdentifiers:
                    entityIdentifiers.append(e)
                for dimVal in context.qnameDims.values():
                    aspectQnamesUsed.add(dimVal.dimensionQname)
                    if dimVal.isExplicit:
                        aspectQnamesUsed.add(dimVal.memberQname)
                        key = (dimVal.dimensionQname, True, dimVal.memberQname)
                    else:
                        key = (dimVal.dimensionQname, False, dimVal.typedMember.stringValue)
                    if key not in dimensions:
                        dimensions.append(key)
            if fact.isNumeric:
                datapoint['effective_value'] = str(fact.effectiveValue)
                if fact.unit is not None:
                    u = str(fact.unit.measures)  # string for now
                    if u not in units:
                        units.append(u)
                    datapoint['unit'] = fact.unitID
                if fact.precision:
                    datapoint['precision'] = fact.precision
                if fact.decimals:
                    datapoint['decimals'] = fact.decimals
            datapoint['value'] = dbString(str(fact.value))  # compress if very long
            dataPoints.append(datapoint)
        # create the document's datapoints_set vertex if absent, then one vertex per fact
        results = self.execute("Insert data points", """
            docV = g.v(document_id)
            dpIt = docV.out('data_points')
            datapointsV = (dpIt.hasNext() ? dpIt.next() : g.addVertex(datapoints_set) )
            dpE = docV.out('data_points').has('id', datapointsV.id)
            dpE.hasNext() ?: g.addEdge(docV, datapointsV, 'data_points')
            datapointV_ids = []
            datapoints.each{
                dpV = g.addVertex(it)
                datapointV_ids << dpV.id
                g.addEdge(datapointsV, dpV, 'data_point')
            }
            [datapointsV.id, datapointV_ids]
            """,
            params={'document_id': self.document_ids[instanceDocument.uri],
                    'datapoints_set': {
                        '_class': 'datapoints_set'},
                    'datapoints': dataPoints}
            )["results"]
        datapointsV_id, datapointVids_list = results
        # map each fact's object index -> its data_point vertex id (lists are parallel)
        dataPointVertexIds = dict((dataPointObjectIndices[i], int(id))
                                  for i, id in enumerate(datapointVids_list))
        results = self.execute("Insert entity identifiers", """
            entIdentV_ids = []
            entityIdentifiers.each{
                entIdentV = g.addVertex(it)
                entIdentV_ids << entIdentV.id
            }
            entIdentV_ids
            """,
            params={'entityIdentifiers': [{'_class': 'entity_identifier',
                                           'scheme': e[0],
                                           'identifier': e[1]}
                                          for e in entityIdentifiers]}
            )["results"]
        entityIdentifierVertexIds = [int(entIdent_id) for entIdent_id in results]
        # build period property dicts from the period keys collected above
        p = []
        for period in periods:
            if period == 'forever':
                p.append({'_class': 'period',
                          'forever': 'forever'})
            elif len(period) == 1:
                p.append({'_class': 'period',
                          'instant': period[0]})
            else:
                p.append({'_class': 'period',
                          'start_date': period[0],
                          'end_date': period[1]})
        results = self.execute("Insert periods", """
            periodV_ids = []
            periods.each{
                periodV = g.addVertex(it)
                periodV_ids << periodV.id
            }
            periodV_ids
            """,
            params={'periods': p}
            )["results"]
        periodVertexIds = [int(period_id) for period_id in results]
        results = self.execute("Insert units", """
            unitV_ids = []
            units.each{
                unitV = g.addVertex(it)
                unitV_ids << unitV.id
            }
            unitV_ids
            """,
            params={'units': [{'_class': 'unit',
                               'measures': u}
                              for u in units]}
            )["results"]
        unitVertexIds = [int(unit_id) for unit_id in results]
    if dimensions:
        self.showStatus("insert aspect value selection groups")
        aspValSels = []
        for dimQn, isExplicit, value in dimensions:
            if isExplicit:
                aspValSels.append({'_class': 'aspect_value_selection',
                                   'name': dimQn.localName + '-' + value.localName})
            else:
                # typed dimension: value is the typed member's string value
                # (key fixed: was ' typed_value' with a stray leading space)
                aspValSels.append({'_class': 'aspect_value_selection',
                                   'name': dimQn.localName + '-' + str(len(aspValSels)+1),
                                   'typed_value': value})
        results = self.execute("Insert aspect value selection groups", """
            aspectValSelGroupV = g.addVertex(aspect_val_sel_group)
            aspectValSelV_ids = []
            aspect_val_sels.each{
                aspectValSelV = g.addVertex(it)
                aspectValSelV_ids << aspectValSelV.id
                g.addEdge(aspectValSelGroupV, aspectValSelV, 'aspect_value_selection_group')
            }
            [aspectValSelGroupV.id, aspectValSelV_ids]
            """,
            params={'aspect_val_sel_group': {'_class': 'aspect_value_selection_group'},
                    'aspect_val_sels': aspValSels}
            )["results"]
        aspValSelGrpV_id, aspValSelV_ids_list = results
        aspValSelVertexIds = [int(aspValSel_id) for aspValSel_id in aspValSelV_ids_list]
    else:
        aspValSelVertexIds = []
    # map dimension key -> aspect_value_selection vertex id (lists are parallel)
    dimValAspValSelVertexIds = dict((dimensions[i], aspValSel_id)
                                    for i, aspValSel_id in enumerate(aspValSelVertexIds))
    self.showStatus("insert aspect proxies")
    self.insertAspectProxies(aspectQnamesUsed)
    if dimensions:
        self.showStatus("insert dimension member edges")
        # connect aspectValueSelection to concept dimension and member concepts
        self.execute("Insert dimension member edges", """
            aspects.each{
                g.addEdge(g.v(it.aspValSel_id), g.v(it.dimension_id), 'aspect')
            }
            aspect_values.each{
                g.addEdge(g.v(it.aspValSel_id), g.v(it.member_id), 'aspect_value')
            }
            []
            """,
            params={'aspects': [{
                        'aspValSel_id': aspValSel_id,
                        'dimension_id': self.aspect_proxy_id[dimQn]}
                    for i, aspValSel_id in enumerate(aspValSelVertexIds)
                    for dimQn, isExplicit, memQn in dimensions[i:i+1]],
                    'aspect_values': [{
                        'aspValSel_id': aspValSel_id,
                        'member_id': self.aspect_proxy_id[memQn]}
                    for i, aspValSel_id in enumerate(aspValSelVertexIds)
                    for dimQn, isExplicit, memQn in dimensions[i:i+1]
                    if isExplicit]}
            )["results"]
    # add aspect proxy relationships
    edges = []
    if self.modelXbrl.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL):
        # aspect value - aspect relationships
        for aspectProxyId, rel, aspectValueVertexIds in (
                (self.aspect_proxy_id[XbrlConst.qnXbrliIdentifier], 'entity_identifier_aspects', entityIdentifierVertexIds),
                (self.aspect_proxy_id[XbrlConst.qnXbrliPeriod], 'period_aspects', periodVertexIds),
                (self.aspect_proxy_id[XbrlConst.qnXbrliUnit], 'unit_aspects', unitVertexIds)):
            for aspectValueVertexId in aspectValueVertexIds:
                edges.append({'from_id': aspectValueVertexId,
                              'to_id': aspectProxyId,
                              'rel': rel})
        # fact - aspect relationships
        for i, factObjectIndex in enumerate(dataPointObjectIndices):
            fact = self.modelXbrl.modelObjects[factObjectIndex]
            dataPoint_id = dataPointVertexIds[factObjectIndex]
            # fact concept aspect
            edges.append({
                'from_id': dataPoint_id,
                'to_id': self.aspect_proxy_id[fact.qname],
                'rel': "base_item"})
            context = fact.context
            if context is not None:
                # entityIdentifier aspect
                edges.append({
                    'from_id': dataPoint_id,
                    'to_id': entityIdentifierVertexIds[entityIdentifiers.index(context.entityIdentifier)],
                    'rel': "entity_identifier"})
                # period aspect
                edges.append({
                    'from_id': dataPoint_id,
                    'to_id': periodVertexIds[periods.index(self.periodAspectValue(context))],
                    'rel': "period"})
                # dimension aspectValueSelections
                for dimVal in context.qnameDims.values():
                    key = (dimVal.dimensionQname, dimVal.isExplicit,
                           dimVal.memberQname if dimVal.isExplicit else dimVal.typedMember.stringValue)
                    edges.append({
                        'from_id': dataPoint_id,
                        'to_id': dimValAspValSelVertexIds[key],
                        'rel': "aspect_value_selection"})
            if fact.isNumeric and fact.unit is not None:
                # unit aspect
                u = str(fact.unit.measures)  # string for now
                edges.append({
                    'from_id': dataPoint_id,
                    'to_id': unitVertexIds[units.index(u)],
                    'rel': "_unit"})
            for tupleFact in fact.modelTupleFacts:
                # edge to tuple from item
                edges.append({
                    'from_id': dataPointVertexIds[tupleFact.objectIndex],
                    'to_id': dataPoint_id,
                    'rel': "tuple"})
        self.showStatus("insert aspect relationship edges")
        results = self.execute("Insert aspect relationship edges", """
            e.each{g.addEdge(g.v(it.from_id), g.v(it.to_id), it.rel)}
            []
            """,
            params={'e': edges})["results"]
def insertRelationshipSets(self):
self.showStatus("insert relationship sets")
results = self.execute("Insert relationship sets", """
reportV = g.v(report_id)
relSetsV = g.addVertex(relSets)
g.addEdge(reportV, relSetsV, 'relationship_sets')
relSetV_ids = []
relSet.each{
relSetV = g.addVertex(it)
relSetV_ids << relSetV.id
g.addEdge(relSetsV, relSetV, 'relationship_set')}
[relSetsV.id, relSetV_ids]
""",
params={
'report_id': self.report_id,
'relSets': {
'_class': 'relationship_sets'},
'relSet': [{
'_class': 'relationship_set',
'arcrole': arcrole,
'linkrole': linkrole,
'linkdefinition': self.modelXbrl.roleTypeDefinition(linkrole) or '',
'linkname': str(linkqname),
'arcname': str(arcqname)
} for arcrole, linkrole, linkqname, arcqname in self.relationshipSets]
})["results"]
relSetsV_id, relSetV_ids_list = results
relationshipSetIDs = [int(relSet_id) for relSet_id in relSetV_ids_list]
# do tree walk to build relationships with depth annotated, no targetRole navigation
relE = [] # fromV, toV, label
resources = set()
aspectQnamesUsed = set()
resourceIDs = {} # index by object
def walkTree(rels, seq, depth, relationshipSet, visited, relationshipSetId, doVertices):
for rel in rels:
if rel not in visited:
| |
and skip_no_changes_ is False:
# so we will try to merge it nevertheless
lgr.info("There was nothing to merge but we were instructed to merge due to skip_no_changes=False")
all_to_merge = [branch]
nmerges = 1
plmerges = "s" if nmerges > 1 else ""
lgr.info("Initiating %(nmerges)d merge%(plmerges)s of %(branch)s using strategy %(strategy)s", locals())
options = ['--no-commit'] if not commit else []
for to_merge in all_to_merge:
# we might have switched away to orig_branch
if self.repo.get_active_branch() != target_branch_:
self.repo.checkout(target_branch_)
if strategy is None:
self.repo.merge(to_merge, options=options, **merge_kwargs)
elif strategy == 'theirs':
self.repo.merge(to_merge, options=["-s", "ours", "--no-commit"],
expect_stderr=True, **merge_kwargs)
self.repo._git_custom_command([], "git read-tree -m -u %s" % to_merge)
self.repo.add('.', options=self.options) # so everything is staged to be committed
else:
raise NotImplementedError(strategy)
if commit:
if strategy is not None:
msg = branch if (nmerges == 1) else ("%s (%s)" % (branch, to_merge))
self._commit("Merged %s using strategy %s" % (msg, strategy), options=["-a"])
else:
# record into our activity stats
stats = data.get('datalad_stats', None)
if stats:
stats.merges.append([branch, target_branch_])
if orig_branch is not None:
self.repo.checkout(orig_branch)
yield data
return merge_branch
def _precommit(self):
    """Flush pending repository state before a commit.

    Stops all batched annex processes, saves the status DB (if one is
    attached), and — when the repo is dirty, is an AnnexRepo, and no
    ``annex.backend`` is recorded in .gitattributes yet — sets the first
    configured default backend (without committing).

    Fix: removed the unused local ``path = self.repo.path``.
    """
    self.repo.precommit()  # so that all batched annexes stop
    if self._statusdb:
        self._statusdb.save()
    # there is something to commit and backends were set but no .gitattributes yet
    if self.repo.dirty and \
       'annex.backend' not in self.repo.get_git_attributes() and \
       isinstance(self.repo, AnnexRepo):
        backends = self.repo.default_backends
        if backends:
            # TODO: at least use repo._git_custom_command
            self.repo.set_default_backend(backends[0], commit=False)
def _commit(self, msg=None, options=None):
    """Commit staged changes via a direct ``git commit`` invocation.

    A custom commit is needed because of "fancy" merges which GitPython
    does not support ATM
    (https://github.com/gitpython-developers/GitPython/issues/361)
    and it is apparently not actively developed.

    Parameters
    ----------
    msg : str, optional
      Commit message; a placeholder ("a commit") is used when empty or None.
      (Fix: previously ``msg=None`` became the literal message "None" via
      ``str(msg)``.)
    options : list, optional
      Extra options passed to ``git commit``.
      (Fix: replaced the mutable default argument ``options=[]``.)
    """
    options = [] if options is None else list(options)
    msg = str(msg).strip() if msg is not None else ""
    if not msg:
        # we need to provide some commit msg, could maybe be deduced from current status
        # TODO
        msg = "a commit"
    msg = GitRepo._get_prefixed_commit_msg(msg)
    if msg is not None:
        options = options + ["-m", msg]
    self._precommit()  # so that all batched annexes stop
    self.repo._git_custom_command([], ["git", "commit"] + options,
                                  check_fake_dates=True)
def _unstage(self, fpaths):
    """Remove *fpaths* from the git index (``git reset``) without touching the worktree."""
    self.repo._git_custom_command(fpaths, ["git", "reset"])
def _stage(self, fpaths):
    """Stage *fpaths* into git directly (``git add``, bypassing annex)."""
    self.repo.add(fpaths, git=True)
def _get_status(self, args=None):
    """Return ``(staged, notstaged, untracked, deleted)`` file-name lists.

    Custom parsing of ``git status --porcelain`` to see what files were
    staged, untracked etc, until
    https://github.com/gitpython-developers/GitPython/issues/379#issuecomment-180101921
    is resolved.

    Parameters
    ----------
    args : list, optional
      Additional arguments for the ``git status`` call.
      (Fix: replaced the mutable default argument ``args=[]``.)

    Raises
    ------
    RuntimeError
      For any porcelain status code not handled below (e.g. ``MM``).
    """
    cmd_args = ["git", "status", "--porcelain"] + (args or [])
    staged, notstaged, untracked, deleted = [], [], [], []
    # porcelain two-character code -> the list(s) the file name goes into
    statuses = {
        '??': untracked,
        'A ': staged,
        'M ': staged,
        ' M': notstaged,
        ' D': deleted,  # rm-ed smth committed before
        'D ': deleted,  # git rm-ed smth committed before
        'AD': (staged, deleted)  # so we added, but then removed before committing
        # generally shouldn't happen but in some tricky S3 cases crawling did happen :-/
        # TODO: handle "properly" by committing before D happens
    }
    if isinstance(self.repo, AnnexRepo) and self.repo.is_direct_mode():
        statuses['AD'] = staged
        out, err = self.repo.proxy(cmd_args)
    else:
        out, err = self.repo._git_custom_command([], cmd_args)
    assert not err
    for line in out.split('\n'):
        if not line:
            continue
        act = line[:2]  # first two characters encode what is happening to the file
        fname = line[3:]
        try:
            act_list = statuses[act]
            # Fix: target variable renamed — it previously shadowed the
            # outer loop variable holding the status line.
            if isinstance(act_list, tuple):  # like in case of AD
                for target in act_list:
                    target.append(fname)
            else:
                act_list.append(fname)
        # for the purpose of this use, we don't even want MM or anything else
        except KeyError:
            raise RuntimeError("git status %r not yet supported. TODO" % act)
    return staged, notstaged, untracked, deleted
def commit_versions(self,
regex,
dirs=True, # either match directory names
rename=False,
**kwargs):
"""Generate multiple commits if multiple versions were staged
Parameters
----------
TODO
**kwargs: dict, optional
Passed to get_versions
"""
def _commit_versions(data):
self._precommit() # so that all batched annexes stop
# figure out versions for all files (so we could dataset conflicts with existing
# non versioned)
# TODO: we need to care only about staged (and unstaged?) files ATM!
# So let's do it. And use separate/new Git repo since we are doing manual commits through
# calls to git. TODO: RF to avoid this
# Not usable for us ATM due to
# https://github.com/gitpython-developers/GitPython/issues/379
# repo = Repo(self.repo.path)
#
# def process_diff(diff):
# """returns full paths for files in the diff"""
# out = []
# for obj in diff:
# assert(not obj.renamed) # not handling atm
# assert(not obj.deleted_file) # not handling atm
# assert(obj.a_path == obj.b_path) # not handling atm
# out.append(opj(self.repo.path, obj.a_path))
# return out
#
# staged = process_diff(repo.index.diff('HEAD'))#repo.head.commit))
# notstaged = process_diff(repo.index.diff(None))
staged, notstaged, untracked, deleted = self._get_status()
# verify that everything is under control!
assert (not notstaged) # not handling atm, although should be safe I guess just needs logic
# to not unstage them
assert (not untracked) # not handling atm
assert (not deleted) # not handling atm
if not staged:
return # nothing to be done -- so we wash our hands off entirely
if not dirs:
raise NotImplementedError("ATM matching will happen to dirnames as well")
versions = get_versions(staged, regex, **kwargs)
if not versions:
# no versioned files were added, nothing to do really
for d in self.finalize()(data):
yield d
return
# we don't really care about unversioned ones... overlay and all that ;)
if None in versions:
versions.pop(None)
# take only new versions to deal with
versions_db = SingleVersionDB(self.repo)
prev_version = versions_db.version
if prev_version is None:
new_versions = versions # consider all!
else:
version_keys = list(versions.keys())
if prev_version not in versions_db.versions:
# shouldn't happen
raise RuntimeError(
"previous version %s not found among known to DB: %s" % (prev_version, versions_db.versions.keys()))
# all new versions must be greater than the previous version
# since otherwise it would mean that we are complementing previous version and it might be
# a sign of a problem
# Well -- so far in the single use-case with openfmri it was that they added
# derivatives for the same version, so I guess we will allow for that, thus allowing =
assert (all((LooseVersion(prev_version) <= LooseVersion(v)) for v in versions))
# old implementation when we didn't have entire versions db stored
# new_versions = OrderedDict(versions.items()[version_keys.index(prev_version) + 1:])
new_versions = versions
# if we have "new_versions" smallest one smaller than previous -- we got a problem!
# TODO: how to dataset ==? which could be legit if more stuff was added for the same
# version? but then if we already tagged with that -- we would need special handling
if new_versions:
smallest_new_version = next(iter(new_versions))
if prev_version:
if LooseVersion(smallest_new_version) < LooseVersion(prev_version):
raise ValueError("Smallest new version %s is < prev_version %s"
% (smallest_new_version, prev_version))
versions_db.update_versions(versions) # store all new known versions
# early return if no special treatment is needed
nnew_versions = len(new_versions)
if nnew_versions <= 1:
# if a single new version -- no special treatment is needed, but we need to
# inform db about this new version
if nnew_versions == 1:
_call(setattr, versions_db, 'version', smallest_new_version)
# we can't return a generator here
for d in self.finalize()(data):
yield d
return
# unstage all versioned files from the index
nunstaged = 0
for version, fpaths in iteritems(versions):
nfpaths = len(fpaths)
lgr.debug("Unstaging %d files for version %s", nfpaths, version)
nunstaged += nfpaths
_call(self._unstage, list(fpaths.values()))
stats = data.get('datalad_stats', None)
stats_str = ('\n\n' + stats.as_str(mode='full')) if stats else ''
for iversion, (version, fpaths) in enumerate(iteritems(new_versions)): # for all versions past previous
# stage/add files of that version to index
if rename:
# we need to rename and create a new vfpaths
vfpaths = []
for fpath, vfpath in iteritems(fpaths):
# ATM we do not allow unversioned -- should have failed earlier, if not HERE!
# assert(not lexists(fpath))
# nope! it must be there from previous commit of a versioned file!
# so rely on logic before
lgr.debug("Renaming %s into %s" % (vfpath, fpath))
os.rename(vfpath, fpath)
vfpaths.append(fpath)
else:
# so far we didn't bother about status, so just values would be sufficient
vfpaths = list(fpaths.values())
nfpaths = len(vfpaths)
lgr.debug("Staging %d files for version %s", nfpaths, version)
nunstaged -= nfpaths
assert (nfpaths >= 0)
| |
1,
call = lambda z: complex(*z[:2])
),
'ÆĿ': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: int(sympy.functions.combinatorial.numbers.lucas(z))
),
'Æl': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: overload((math.log, cmath.log), z)
),
'Æm': attrdict(
arity = 1,
ldepth = 1,
call = lambda z: div(sum(z), len(z))
),
'Æṁ': attrdict(
arity = 1,
ldepth = 1,
call = median
),
'Æṃ': attrdict(
arity = 1,
ldepth = 1,
call = mode
),
'ÆN': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: sympy.ntheory.generate.prime(z)
),
'Æn': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: sympy.ntheory.generate.nextprime(z)
),
'Æp': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: sympy.ntheory.generate.prevprime(z)
),
'ÆR': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: list(sympy.ntheory.generate.primerange(2, z + 1))
),
'Ær': attrdict(
arity = 1,
ldepth = 1,
call = lambda z: jambify(from_base(z[::-1], sympy.poly('x')).all_roots())
),
'Æṛ': attrdict(
arity = 1,
ldepth = 1,
call = lambda z: jambify(sympy.prod(map(sympy.poly('x').__sub__, z)).coeffs()[::-1])
),
'ÆT': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: overload((math.tan, cmath.tan), z)
),
'ÆṬ': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: overload((math.atan, cmath.atan), z)
),
'ÆṪ': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: sympy.ntheory.factor_.totient(z) if z > 0 else 0
),
'Æṭ': attrdict(
arity = 1,
ldepth = 2,
call = lambda z: sum(sum(r[i : i+1]) for i, r in enumerate(z))
),
'ÆS': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: overload((math.sin, cmath.sin), z)
),
'ÆṢ': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: overload((math.asin, cmath.asin), z)
),
'Æs': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: int(sympy.ntheory.factor_.divisor_sigma(z))
),
'Æṣ': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: int(sympy.ntheory.factor_.divisor_sigma(z) - z)
),
'Æv': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: len(sympy.ntheory.factor_.factorint(z))
),
'Æ+': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: sum(to_base(z, 10))
),
'Ʋ': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: int(isqrt(z) ** 2 == z)
),
'ƽ': attrdict(
arity = 1,
ldepth = 0,
call = isqrt
),
'ư': attrdict(
arity = 1,
ldepth = 0,
call = math.degrees
),
'Æ⁹': attrdict(
arity = 1,
ldepth = 0,
call = math.radians
),
'Æ!': attrdict(
arity = 1,
ldepth = 0,
call = to_factorial_base
),
'Æ¡': attrdict(
arity = 1,
ldepth = 1,
call = from_factorial_base
),
'Æ?': attrdict(
arity = 1,
ldepth = 0,
call = to_primorial_base
),
'Æ¿': attrdict(
arity = 1,
ldepth = 1,
call = from_primorial_base
),
'Æ‘': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: int(z) + 1
),
'Æ’': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: math.ceil(z) - 1
),
'Œ?': attrdict(
arity = 1,
ldepth = 0,
call = pemutation_at_index
),
'Œ¿': attrdict(
arity = 1,
call = permutation_index
),
'ŒB': attrdict(
arity = 1,
ldepth = 1,
call = lambda z: bounce(z)
),
'ŒḄ': attrdict(
arity = 1,
call = lambda z: bounce(iterable(z, make_range = True))
),
'ŒḂ': attrdict(
arity = 1,
call = is_palindrome
),
'Œc': attrdict(
arity = 1,
rdepth = 0,
call = lambda z: jambify(itertools.combinations(iterable(z, make_range = True), 2))
),
'Œċ': attrdict(
arity = 1,
rdepth = 0,
call = lambda z: jambify(itertools.combinations_with_replacement(iterable(z, make_range = True), 2))
),
'ŒD': attrdict(
arity = 1,
ldepth = 2,
call = diagonals
),
'ŒḌ': attrdict(
arity = 1,
ldepth = 2,
call = from_diagonals
),
'ŒḊ': attrdict(
arity = 1,
call = depth
),
'Œd': attrdict(
arity = 1,
ldepth = 2,
call = lambda z: diagonals([r[::-1] for r in z])
),
'Œḍ': attrdict(
arity = 1,
ldepth = 2,
call = lambda z: [r[::-1] for r in from_diagonals(z)]
),
'ŒĖ': attrdict(
arity = 1,
call = lambda z: list(enumerate_md(z))
),
'Œe': attrdict(
arity = 1,
call = lambda z: [t for t in iterable(z, make_range = True)[1::2]]
),
'ŒG': attrdict(
arity = 1,
ldepth = 1,
call = get_request
),
'ŒĠ': attrdict(
arity = 1,
call = group_md
),
'Œg': attrdict(
arity = 1,
call = group_equal
),
'ŒH': attrdict(
arity = 1,
call = lambda z: split_evenly(iterable(z, make_range = True), 2)
),
'ŒJ': attrdict(
arity = 1,
call = indices_md
),
'Œl': attrdict(
arity = 1,
ldepth = 1,
call = lambda z: to_case(z, lower = True)
),
'ŒM': attrdict(
arity = 1,
call = maximal_indices_md
),
'ŒṀ': attrdict(
arity = 1,
ldepth = 2,
call = lambda z: neighborhood(z, moore = True, wrap = True)
),
'ŒṂ': attrdict(
arity = 1,
ldepth = 2,
call = lambda z: neighborhood(z, moore = True)
),
'ŒṆ': attrdict(
arity = 1,
ldepth = 2,
call = lambda z: neighborhood(z)
),
'ŒṄ': attrdict(
arity = 1,
ldepth = 2,
call = lambda z: neighborhood(z, wrap = True)
),
'Œo': attrdict(
arity = 1,
call = lambda z: [t for t in iterable(z, make_range = True)[::2]]
),
'ŒP': attrdict(
arity = 1,
call = powerset
),
'ŒṖ': attrdict(
arity = 1,
call = partitions
),
'Œṗ': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: sorted(integer_partitions(z), key = len, reverse = True)
),
'Œp': attrdict(
arity = 1,
call = lambda z: jambify(itertools.product(*[iterable(t, make_range = True) for t in z]))
),
'ŒQ': attrdict(
arity = 1,
call = distinct_sieve
),
'ŒR': attrdict(
arity = 1,
ldepth = 0,
call = lambda z: list(range(-abs(int(z)), abs(int(z)) + 1))
),
'ŒṘ': attrdict(
arity = 1,
call = lambda z: jambify(repr(z))
),
'Œr': attrdict(
arity = 1,
call = rle
),
'Œṙ': attrdict(
arity = 1,
call = rld_zoom
),
'Œs': attrdict(
arity = 1,
ldepth = 1,
call = lambda z: to_case(z, swap = True)
),
'ŒT': attrdict(
arity = 1,
call = time_format
),
'ŒṬ': attrdict(
arity = 1,
ldepth = 2,
call = untruth_md
),
'ŒṪ': attrdict(
arity = 1,
call = lambda z: [t for t, u in enumerate_md(iterable(z)) if u]
),
'Œt': attrdict(
arity = 1,
ldepth = 1,
call = lambda z: to_case(z, title = True)
),
'ŒV': attrdict(
arity = 1,
ldepth = 1,
call = lambda z: python_eval(''.join(map(str, z)))
),
'ŒỤ': attrdict(
arity = 1,
call = lambda z: sorted(indices_md(iterable(z)), key = lambda t: at_index_md(t, iterable(z)))
),
'Œu': attrdict(
arity = 1,
ldepth = 1,
call = lambda z: to_case(z, upper = True)
),
'ŒẎ': attrdict(
arity = 1,
ldepth = 2,
call = lambda z: sum(map(iterable, iterable(z)), [])
),
'Œ:': attrdict(
arity = 1,
ldepth = 1,
call = lambda z: [0] + z + [0]
),
'Œœ': attrdict(
arity = 1,
call = odd_even
),
'Œɠ': attrdict(
arity = 1,
call = group_lengths
),
'œ?': attrdict(
arity = 2,
ldepth = 0,
call = pemutation_at_index
),
'œ¿': attrdict(
arity = 2,
call = lambda x, y: permutation_index([y.index(value) for value in x])
),
'æ.': attrdict(
arity = 2,
ldepth = 1,
rdepth = 1,
call = dot_product
),
'æ%': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = symmetric_mod
),
'æ*': attrdict(
arity = 2,
ldepth = 2,
rdepth = 0,
call = lambda x, y: matrix_to_list((sympy.Matrix(x) ** y))
),
'æ×': attrdict(
arity = 2,
ldepth = 2,
rdepth = 2,
call = lambda x, y: matrix_to_list((sympy.Matrix(x) * sympy.Matrix(y)))
),
'æA': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = math.atan2
),
'æḄ': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = lambda x, y: (x + y + 1) % 2
),
'æḂ': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = lambda x, y: (x + y) % 2
),
'æC': attrdict(
arity = 2,
ldepth = 1,
rdepth = 0,
call = convolve_power
),
'æc': attrdict(
arity = 2,
ldepth = 1,
rdepth = 1,
call = convolve
),
'æċ': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = lambda x, y: from_base([1] + [0] * len(to_base(x, y)), y)
),
'æḟ': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = lambda x, y: from_base([1] + [0] * (len(to_base(x, y)) - 1), y)
),
'æi': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = modinv
),
'æị': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = complex
),
'æl': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = lcm
),
'æR': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = primerange
),
'ær': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = round
),
'æp': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = lambda x, y: float('%%.%dg'%y%x)
),
'æ«': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = shift_left
),
'æ»': attrdict(
arity = 2,
ldepth = 0,
rdepth = 0,
call = shift_right
),
'œc': attrdict(
arity = 2,
rdepth = 0,
call = lambda x, y: jambify(itertools.combinations(iterable(x, make_range = True), y))
),
'œċ': attrdict(
arity = 2,
rdepth = 0,
call = lambda x, y: jambify(itertools.combinations_with_replacement(iterable(x, make_range = True), y))
),
'œẹ': attrdict(
arity = 2,
call = lambda x, y: [t for t, u in enumerate_md(iterable(x)) if u == y]
),
'œi': attrdict(
arity = 2,
call = index_of_md
),
'œị': attrdict(
arity = 2,
ldepth = 1,
call = at_index_md
),
'œl': attrdict(
arity = 2,
call = lambda x, y: trim(x, iterable(y), left = True)
),
'œP': attrdict(
arity = 2,
call = lambda x, y: partition_at([int(t + 1 in iterable(x)) for t in range(max(iterable(x) or [0]))], y, keep_border = False)
),
'œṖ': attrdict(
arity = 2,
call = lambda x, y: partition_at([int(t + 1 in iterable(x)) for t in range(max(iterable(x) or [0]))], y)
),
'œp': attrdict(
arity = 2,
call = lambda x, y: partition_at(x, y, border = 0)
),
'œṗ': attrdict(
arity = 2,
call = partition_at
),
'œr': attrdict(
arity = 2,
call = lambda x, y: trim(x, iterable(y), right = True)
),
'œS': attrdict(
arity = 2,
call = lambda x, y: time.sleep(overload((float, bool), y)) or x
),
'œs': attrdict(
arity = 2,
rdepth = 0,
call = lambda x, y: split_evenly(iterable(x, make_range = True), y)
),
'œṡ': attrdict(
arity = 2,
rdepth = 0,
call = split_once
),
'œṣ': attrdict(
arity = 2,
call = lambda x, y: jambify(split_around(iterable(x, make_digits = True), iterable(y)))
),
'œ:': attrdict(
arity = 2,
ldepth = 1,
call = lambda x, y: [y] + x + [y]
),
'œ&': attrdict(
arity = 2,
call = multiset_intersect
),
'œ-': attrdict(
arity = 2,
call = multiset_difference
),
'œ^': attrdict(
arity = 2,
call = multiset_symdif
),
'œ|': attrdict(
arity = 2,
call = multiset_union
),
'Ø0': attrdict(
arity = 0,
call = lambda: [0, 0]
),
'Ø1': attrdict(
arity = 0,
call = lambda: [1, 1]
),
'Ø2': attrdict(
arity = 0,
call = lambda: [2, 2]
),
'Ø.': attrdict(
arity = 0,
call = lambda: [0, 1]
),
'ؽ': attrdict(
arity = 0,
call = lambda: [1, 2]
),
'Ø+': attrdict(
arity = 0,
call = lambda: [1, -1]
),
'Ø-': attrdict(
arity = 0,
call = lambda: [-1, 1]
),
'ØỊ': attrdict(
arity = 0,
call = lambda: [-1, 0, 1]
),
'Øİ': attrdict(
arity = 0,
call = lambda: [1, 0, -1]
),
'Ø(': attrdict(
arity = 0,
call = lambda: list('()')
),
'Ø<': attrdict(
arity = 0,
call = lambda: list('<>')
),
'Ø[': attrdict(
arity = 0,
call = lambda: list('[]')
),
'Ø{': attrdict(
arity = | |
<reponame>YichenZhou113/Baxter-Teleoperation<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Hello World client in Python
# Connects REQ socket to tcp://localhost:5555
# Sends "Hello" to server, expects "World" back
#
import sys
import csv
import zmq
import time
from time import sleep
import numpy as np
from numpy import linalg as LA
import math
import argparse
import baxter_interface
import baxter_external_devices
from baxter_interface import CHECK_VERSION
import rospy
from rospy import Duration
"""setting the global variables"""
#left_current_
left_step = 0
left_dest_matrix = [[0] * 7 for i in range(500)]
right_dest_matrix = [[0] * 7 for i in range(100)]
joint_buffer = [0] * 14
left_joint_buffer = [0] * 7
right_joint_buffer = [0] * 7
left_amount_buffer = [0] * 7
right_amount_buffer = [0] * 7
left_dest_buffer = [0] * 7
right_dest_buffer = [0] * 7
mode_left = 'continuous mode'
mode_right = 'continuous mode' #set the global modes for each limb
ksphere_size = '1' #global size of how much further
plane = "m" #global plane choose
left_row_destination = {'left_s0':0, 'left_s1':0, 'left_e0':0, 'left_e1':0, 'left_w0':0, 'left_w1':0, 'left_w2':0}
right_row_destination = {'right_s0':0, 'right_s1':0, 'right_e0':0, 'right_e1':0, 'right_w0':0, 'right_w1':0, 'right_w2':0}
left_row_dictionary = {'left_s0':0, 'left_s1':0, 'left_e0':0, 'left_e1':0, 'left_w0':0, 'left_w1':0, 'left_w2':0}
right_row_dictionary = {'right_s0':0, 'right_s1':0, 'right_e0':0, 'right_e1':0, 'right_w0':0, 'right_w1':0, 'right_w2':0} #joint angles information for each limb
left_joints = ['left_s1', 'left_e1', 'left_w1']
right_joints = ['right_s1', 'right_e1', 'right_w1'] #joint selection for each limb
left_modes = ['continuous mode', 'direct mode']
right_modes = ['continuous mode', 'direct mode'] #mode selection for each limb
wanting_row = {} #stores the temporary row information
testcase = 'passed' #check for whether can reach specific position
left_count = 0
right_count = 0
def get_distance(distance_buffer):
    """Return the Euclidean (L2) norm of *distance_buffer*."""
    return np.linalg.norm(distance_buffer, ord=2)
def get_step(norm):
    """Scale a distance norm into a step count (factor of 10)."""
    return norm * 10
#def get_time(step):
# return
def set_ksphere_size(size):
    """Set the global extension ('how much further') size used for csv lookups."""
    global ksphere_size
    ksphere_size = size
def set_plane(level):
    """Set the global plane level used when building the movement key."""
    global plane
    plane = level
def rotate(thelist):
    """Rotate *thelist* one position to the left, in place.

    @param thelist: the list (mutated; echoed via print when non-empty)
    """
    if thelist:
        thelist[:] = thelist[1:] + thelist[:1]
        print(thelist)  # rotate the joints
def rotate_mode(thelist, limb):
    """Rotate *thelist* of modes one position to the left, in place, and
    make the new head the active global mode for *limb*.

    @param thelist: the mode list (mutated)
    @param limb: 'left' or 'right' — which limb's global mode to update
    """
    global mode_left
    global mode_right
    if thelist:
        thelist[:] = thelist[1:] + thelist[:1]
        # the rotated-in head becomes the current mode for the chosen limb
        if limb == 'left':
            mode_left = thelist[0]
        if limb == 'right':
            mode_right = thelist[0]
def set_j(cmd, limb, move):
    """Look up a stored joint-space pose for *limb* in the CSV databank and
    build a matrix of interpolated joint targets toward it.

    @param cmd: unused here; kept for the joystick-binding call signature
    @param limb: 'left' or 'right'
    @param move: direction keyword (e.g. 'fore', 'backleft', 'centre');
        combined with the global plane selector to form the CSV lookup key
    """
    global left_row_dictionary
    global right_row_dictionary
    global wanting_row
    global testcase
    global left_joints
    global right_joints
    global left_modes
    global right_modes
    global mode_left
    global mode_right
    global joint_buffer
    global left_dest_buffer
    global left_dest_matrix
    global right_dest_matrix
    global left_amount_buffer
    global right_amount_buffer
    global left_joint_buffer
    global right_joint_buffer
    global left_count
    global right_count
    global left_step
    left = baxter_interface.Limb('left')
    right = baxter_interface.Limb('right')
    # CSV lookup key: plane level + movement direction, e.g. "m_fore".
    movement = plane + '_' + move
    if limb == 'left':
        joint = left_joints[0]
        mode_left = left_modes[0]
        global_limb = 'left'
    if limb == 'right':
        joint = right_joints[0]
        mode_right = right_modes[0]
        global_limb = 'right'
    # Map the active joint to the databank's body-part label.
    if joint == 'left_s1':
        body_part = 'left_link123'
    if joint == 'left_e1':
        body_part = 'left_link23'
    if joint == 'left_w1':
        body_part = 'left_link3'
    if joint == 'right_s1':
        body_part = 'right_link123'
    if joint == 'right_e1':
        body_part = 'right_link23'
    if joint == 'right_w1':
        body_part = 'right_link3' #set the parameters to search in the csv file
    print(joint)
    # NOTE(review): opening in "rb" for csv.reader is Python 2 usage; under
    # Python 3 csv needs a text-mode file (newline='') -- confirm interpreter.
    # NOTE(review): the file handle is never closed; a "with" block would be
    # safer.
    csv_file = csv.reader(open('baxter_databank.csv', "rb"), delimiter=",")
    for row in csv_file:
        if movement == row[0] and body_part == row[1] and ksphere_size == row[3]:
            wanting_row = row
            testcase = 'passed' #store the row information
            break
        # Runs for every non-matching row; the break above preserves
        # 'passed' once a match is found.
        testcase = 'failed' #no such position in database
    if testcase == 'passed':
        if limb == 'left':
            # Columns 4-10 hold the left arm's seven destination angles.
            left_row_dictionary['left_s0'] = float(wanting_row[4])
            left_row_dictionary['left_s1'] = float(wanting_row[5])
            left_row_dictionary['left_e0'] = float(wanting_row[6])
            left_row_dictionary['left_e1'] = float(wanting_row[7])
            left_row_dictionary['left_w0'] = float(wanting_row[8])
            left_row_dictionary['left_w1'] = float(wanting_row[9])
            left_row_dictionary['left_w2'] = float(wanting_row[10])
            """for i in range(4,11):
                joint_buffer.insert(i-4, float(wanting_row[i]))
                #print(len(joint_buffer))
                if len(joint_buffer) > 7:
                    joint_buffer = joint_buffer[:7]"""
            # NOTE(review): insert() prepends on every call, so these buffers
            # grow each invocation; only indices 0-6 are consumed below.
            left_dest_buffer.insert(0,left_row_dictionary['left_s0'])
            left_dest_buffer.insert(1,left_row_dictionary['left_s1'])
            left_dest_buffer.insert(2,left_row_dictionary['left_e0'])
            left_dest_buffer.insert(3,left_row_dictionary['left_e1'])
            left_dest_buffer.insert(4,left_row_dictionary['left_w0'])
            left_dest_buffer.insert(5,left_row_dictionary['left_w1'])
            left_dest_buffer.insert(6,left_row_dictionary['left_w2'])
            #print('111111')
            #print(left_dest_buffer)
            print('222222')
            # Snapshot the current joint angles of the physical arm.
            left_joint_buffer.insert(0,left.joint_angles()['left_s0'])
            left_joint_buffer.insert(1,left.joint_angles()['left_s1'])
            left_joint_buffer.insert(2,left.joint_angles()['left_e0'])
            left_joint_buffer.insert(3,left.joint_angles()['left_e1'])
            left_joint_buffer.insert(4,left.joint_angles()['left_w0'])
            left_joint_buffer.insert(5,left.joint_angles()['left_w1'])
            left_joint_buffer.insert(6,left.joint_angles()['left_w2'])
            # NOTE(review): this tests joint_buffer but truncates
            # left_joint_buffer -- likely meant len(left_joint_buffer).
            if len(joint_buffer) > 7:
                left_joint_buffer = left_joint_buffer[:7]
            #print('333333')
            #print(left_joint_buffer)
            #joint_buffer = left_joint_buffer + right_joint_buffer
            # Per-joint delta, then a step count proportional to distance.
            left_amount_buffer = [x - y for x, y in zip(left_dest_buffer, left_joint_buffer)]
            left_norm = get_distance(left_amount_buffer)
            print(left_norm)
            left_step = int(get_step(left_norm))
            print(left_step)
            #print('444444')
            #print(left_amount_buffer)
            #left_amount_buffer = [x/100 for x in left_amount_buffer]
            if left_step != 0:
                left_amount_buffer = [x/left_step for x in left_amount_buffer]
            # Build left_step intermediate poses, each one increment closer.
            left_dest_matrix = [[0] * 7 for i in range(left_step)]
            #print('555555')
            #print(left_amount_buffer)
            for i in range(left_step):
                for j in range(7):
                    left_dest_matrix[i][j] = float((i+1) * left_amount_buffer[j]) + float(left_joint_buffer[j])
            print(left_dest_matrix)
            left_count = 0
        if limb == 'right':
            # Columns 12-18 hold the right arm's seven destination angles.
            right_row_dictionary['right_s0'] = float(wanting_row[12])
            right_row_dictionary['right_s1'] = float(wanting_row[13])
            right_row_dictionary['right_e0'] = float(wanting_row[14])
            right_row_dictionary['right_e1'] = float(wanting_row[15])
            right_row_dictionary['right_w0'] = float(wanting_row[16])
            right_row_dictionary['right_w1'] = float(wanting_row[17])
            right_row_dictionary['right_w2'] = float(wanting_row[18]) #fill in the dictionary for setting positions
            """for i in range(12,19):
                joint_buffer.insert(i-12, float(wanting_row[i]))
                if len(joint_buffer) > 7:
                    joint_buffer = joint_buffer[:7]"""
            right_dest_buffer.insert(0,right_row_dictionary['right_s0'])
            right_dest_buffer.insert(1,right_row_dictionary['right_s1'])
            right_dest_buffer.insert(2,right_row_dictionary['right_e0'])
            right_dest_buffer.insert(3,right_row_dictionary['right_e1'])
            right_dest_buffer.insert(4,right_row_dictionary['right_w0'])
            right_dest_buffer.insert(5,right_row_dictionary['right_w1'])
            right_dest_buffer.insert(6,right_row_dictionary['right_w2'])
            #print('111111')
            #print(right_dest_buffer)
            #print('222222')
            right_joint_buffer.insert(0,right.joint_angles()['right_s0'])
            right_joint_buffer.insert(1,right.joint_angles()['right_s1'])
            right_joint_buffer.insert(2,right.joint_angles()['right_e0'])
            right_joint_buffer.insert(3,right.joint_angles()['right_e1'])
            right_joint_buffer.insert(4,right.joint_angles()['right_w0'])
            right_joint_buffer.insert(5,right.joint_angles()['right_w1'])
            right_joint_buffer.insert(6,right.joint_angles()['right_w2'])
            # NOTE(review): same joint_buffer/right_joint_buffer mismatch as
            # in the left branch above.
            if len(joint_buffer) > 7:
                right_joint_buffer = right_joint_buffer[:7]
            #print('333333')
            #print(right_joint_buffer)
            #joint_buffer = right_joint_buffer + right_joint_buffer
            right_amount_buffer = [x - y for x, y in zip(right_dest_buffer, right_joint_buffer)]
            #print('444444')
            #print(right_amount_buffer)
            # NOTE(review): unlike the left branch (distance-scaled step
            # count), the right arm always uses a fixed 100 steps and starts
            # at i (not i+1) -- confirm the asymmetry is intentional.
            right_amount_buffer = [x/100 for x in right_amount_buffer]
            #print('555555')
            #print(right_amount_buffer)
            # NOTE(review): right_dest_matrix is indexed here without being
            # (re)initialized in this function -- confirm it is sized 100x7
            # elsewhere, otherwise this raises on first use.
            for i in range(100):
                for j in range(7):
                    right_dest_matrix[i][j] = float(i * right_amount_buffer[j]) + float(right_joint_buffer[j])
                    #print(right_dest_matrix[i])
            right_count = 0
    if testcase == 'failed':
        print('cannot reach that position')
def map_joystick(joystick):
global ksphere_size
global plane
global left_row_dictionary
global right_row_dictionary
global left_joints
global right_joints
global left_modes
global right_modes
global mode
global progress
global joint_buffer
global left_joint_buffer
global right_joint_buffer
global left_amount_buffer
global right_amount_buffer
global left_dest_buffer
global right_dest_buffer
global left_dest_matrix
global right_dest_matrix
global left_count
global right_count
global left_step
left = baxter_interface.Limb('left')
right = baxter_interface.Limb('right')
grip_left = baxter_interface.Gripper('left', CHECK_VERSION)
grip_right = baxter_interface.Gripper('right', CHECK_VERSION)
#abbreviations
jfor_right = lambda s1, s2: (joystick.stick_value(s1) < 0) and (joystick.stick_value(s2) > 0)
jfor_left = lambda s1, s2: (joystick.stick_value(s1) > 0) and (joystick.stick_value(s2) > 0)
jback_left = lambda s1, s2: (joystick.stick_value(s1) > 0) and (joystick.stick_value(s2) < 0)
jback_right = lambda s1, s2: (joystick.stick_value(s1) < 0) and (joystick.stick_value(s2) < 0)
jhigh = lambda s: joystick.stick_value(s) < 0
jlow = lambda s: joystick.stick_value(s) > 0
button_down = joystick.button_down
button_up = joystick.button_up #condition check functions
context = zmq.Context()
print("Connecting to hello world server")
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5555")
def print_help(bindings_list):
print("Press Ctrl-C to quit.")
for bindings in bindings_list:
for (test, _cmd, doc) in bindings:
if callable(doc):
doc = doc()
print("%s: %s" % (str(test[1][0]), doc)) #instruction printout
"""bindings_list includes all the condition received from the xbox controller"""
bindings_list = []
bindings = (
((button_down, ['btnUp']),
(set_ksphere_size, ['3']), "set k_sphere size to 1"),
((button_down, ['btnRight']),
(set_ksphere_size, ['2']), "set k_sphere size to 2"),
((button_down, ['btnDown']),
(set_ksphere_size, ['1']), "set k_sphere size to 3"),
((button_down, ['rightTrigger']),
(set_j, [right_row_dictionary, 'right', 'centre']), "right arm neatral position"),
((button_down, ['leftTrigger']),
(set_j, [left_row_dictionary, 'left', 'centre']), "left arm neatral position"),
((button_down, ['dPadUp']),
(set_plane, ['h']), "choose high plane"),
((button_down, ['dPadRight']),
(set_plane, ['m']), "choose middle plane"),
((button_down, ['dPadLeft']),
(set_plane, ['m']), "choose middle plane"),
((button_down, ['dPadDown']),
(set_plane, ['d']), "choose low plane"),
((button_down, ['leftBumper']),
(rotate, [left_joints]), lambda: "left: cycle joint" + left_joints[0]),
((button_down, ['rightBumper']),
(rotate, [right_joints]), lambda: "right: cycle joint" + right_joints[0]),
((button_down, ['leftStickClick']),
(rotate_mode, [left_modes, 'left']), lambda: "left: mode change to " + left_modes[0]),
((button_down, ['rightStickClick']),
(rotate_mode, [right_modes, 'right']), lambda: "right: mode change to " + right_modes[0]),
((button_down, ['function1']),
(print_help, [bindings_list]), "help"),
((button_down, ['function2']),
(print_help, [bindings_list]), "help"),
((jlow, ['leftStickHorz']),
(set_j, [left_row_dictionary, 'left', 'left']), lambda: "left arm going left " + left_joints[0]),
((jhigh, ['leftStickHorz']),
(set_j, [left_row_dictionary, 'left', 'right']), lambda: "left arm going right " + left_joints[0]),
((jlow, ['rightStickHorz']),
(set_j, [right_row_dictionary, 'right', 'left']), lambda: "right arm going left " + right_joints[0]),
((jhigh, ['rightStickHorz']),
(set_j, [right_row_dictionary, 'right', 'right']), lambda: "right arm going right " + right_joints[0]),
((jhigh, ['leftStickVert']),
(set_j, [left_row_dictionary, 'left', 'back']), lambda: "left arm going back " + left_joints[0]),
((jlow, ['leftStickVert']),
(set_j, [left_row_dictionary, 'left', 'fore']), lambda: "left arm going forward " + left_joints[0]),
((jhigh, ['rightStickVert']),
(set_j, [right_row_dictionary, 'right', 'back']), lambda: "right arm going back " + right_joints[0]),
((jlow, ['rightStickVert']),
(set_j, [right_row_dictionary, 'right', 'fore']), lambda: "right arm going forward " + right_joints[0]),
((jfor_right, ['leftStickHorz', 'leftStickVert']),
(set_j, [left_row_dictionary, 'left', 'foreright']), lambda: "left arm forward right " + left_joints[0]),
((jfor_left, ['leftStickHorz', 'leftStickVert']),
(set_j, [left_row_dictionary, 'left', 'foreleft']), lambda: "left arm forward left " + left_joints[0]),
((jback_right, ['leftStickHorz', 'leftStickVert']),
(set_j, [left_row_dictionary, 'left', 'backright']), lambda: "left arm back right " + left_joints[0]),
((jback_left, ['leftStickHorz', 'leftStickVert']),
(set_j, [left_row_dictionary, 'left', 'backleft']), lambda: "left arm back left " + left_joints[0]),
((jfor_right, ['rightStickHorz', 'rightStickVert']),
(set_j, [right_row_dictionary, 'right', 'foreright']), lambda: "right arm forward right " + right_joints[0]),
((jfor_left, ['rightStickHorz', 'rightStickVert']),
(set_j, [right_row_dictionary, 'right', 'foreleft']), lambda: "right arm forward left " + right_joints[0]),
((jback_right, ['rightStickHorz', 'rightStickVert']),
(set_j, | |
RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifySubAppIdInfoRequest(AbstractModel):
    """ModifySubAppIdInfo request structure."""

    def __init__(self):
        """
        :param SubAppId: Subapplication ID.
        :type SubAppId: int
        :param Name: Subapplication name, up to 40 characters.
        :type Name: str
        :param Description: Subapplication description, up to 300 characters.
        :type Description: str
        """
        self.SubAppId = None
        self.Name = None
        self.Description = None

    def _deserialize(self, params):
        """Populate this model from the decoded request dict *params*."""
        self.SubAppId = params.get("SubAppId")
        self.Name = params.get("Name")
        self.Description = params.get("Description")
        # Warn about fields this model does not define (typo or API/SDK
        # version mismatch).  Also fixes the misspelled "fileds" in the
        # original warning text.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class ModifySubAppIdInfoResponse(AbstractModel):
    """ModifySubAppIdInfo response structure."""

    def __init__(self):
        # Unique request ID, returned with every response; provide it when
        # reporting an issue with this request.
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from the decoded response dict *params*."""
        self.RequestId = params.get("RequestId")
class ModifySubAppIdStatusRequest(AbstractModel):
    """ModifySubAppIdStatus request structure."""

    def __init__(self):
        """
        :param SubAppId: Subapplication ID.
        :type SubAppId: int
        :param Status: Subapplication status. Valid values:
        <li>On: enabled.</li>
        <li>Off: disabled.</li>
        <li>Destroyed: terminated.</li>
        A subapplication currently in Destroying state cannot be enabled
        until destruction completes.
        :type Status: str
        """
        self.SubAppId = None
        self.Status = None

    def _deserialize(self, params):
        """Populate this model from the decoded request dict *params*."""
        self.SubAppId = params.get("SubAppId")
        self.Status = params.get("Status")
        # Warn about fields this model does not define (typo or API/SDK
        # version mismatch); fixes the misspelled "fileds" warning text.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class ModifySubAppIdStatusResponse(AbstractModel):
    """ModifySubAppIdStatus response structure."""

    def __init__(self):
        # Unique request ID, returned with every response; provide it when
        # reporting an issue with this request.
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from the decoded response dict *params*."""
        self.RequestId = params.get("RequestId")
class ModifySuperPlayerConfigRequest(AbstractModel):
    """ModifySuperPlayerConfig request structure."""

    def __init__(self):
        """
        :param Name: Player configuration name.
        :type Name: str
        :param DrmSwitch: Switch for playing DRM-protected adaptive bitrate streams:
        <li>ON: only play DRM-protected adaptive bitrate output;</li>
        <li>OFF: play unencrypted adaptive bitrate output.</li>
        :type DrmSwitch: str
        :param AdaptiveDynamicStreamingDefinition: ID of the allowed unencrypted adaptive bitrate template.
        :type AdaptiveDynamicStreamingDefinition: int
        :param DrmStreamingsInfo: Content of the allowed DRM adaptive bitrate templates.
        :type DrmStreamingsInfo: :class:`tencentcloud.vod.v20180717.models.DrmStreamingsInfoForUpdate`
        :param ImageSpriteDefinition: ID of the allowed image sprite template.
        :type ImageSpriteDefinition: int
        :param ResolutionNames: Display names shown by the player for sub-streams of different resolutions.
        :type ResolutionNames: list of ResolutionNameInfo
        :param Domain: Domain name used for playback. "Default" means using the domain of the default distribution configuration.
        :type Domain: str
        :param Scheme: Scheme used for playback. Valid values:
        <li>Default: use the scheme of the default distribution configuration;</li>
        <li>HTTP;</li>
        <li>HTTPS.</li>
        :type Scheme: str
        :param Comment: Template description, up to 256 characters.
        :type Comment: str
        :param SubAppId: VOD subapplication ID; fill in only when accessing resources of a subapplication.
        :type SubAppId: int
        """
        self.Name = None
        self.DrmSwitch = None
        self.AdaptiveDynamicStreamingDefinition = None
        self.DrmStreamingsInfo = None
        self.ImageSpriteDefinition = None
        self.ResolutionNames = None
        self.Domain = None
        self.Scheme = None
        self.Comment = None
        self.SubAppId = None

    def _deserialize(self, params):
        """Populate this model (and nested models) from the dict *params*."""
        self.Name = params.get("Name")
        self.DrmSwitch = params.get("DrmSwitch")
        self.AdaptiveDynamicStreamingDefinition = params.get("AdaptiveDynamicStreamingDefinition")
        if params.get("DrmStreamingsInfo") is not None:
            self.DrmStreamingsInfo = DrmStreamingsInfoForUpdate()
            self.DrmStreamingsInfo._deserialize(params.get("DrmStreamingsInfo"))
        self.ImageSpriteDefinition = params.get("ImageSpriteDefinition")
        if params.get("ResolutionNames") is not None:
            self.ResolutionNames = []
            for item in params.get("ResolutionNames"):
                obj = ResolutionNameInfo()
                obj._deserialize(item)
                self.ResolutionNames.append(obj)
        self.Domain = params.get("Domain")
        self.Scheme = params.get("Scheme")
        self.Comment = params.get("Comment")
        self.SubAppId = params.get("SubAppId")
        # Warn about fields this model does not define (typo or API/SDK
        # version mismatch); fixes the misspelled "fileds" warning text.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class ModifySuperPlayerConfigResponse(AbstractModel):
    """ModifySuperPlayerConfig response structure."""

    def __init__(self):
        # Unique request ID, returned with every response; provide it when
        # reporting an issue with this request.
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from the decoded response dict *params*."""
        self.RequestId = params.get("RequestId")
class ModifyTranscodeTemplateRequest(AbstractModel):
    """ModifyTranscodeTemplate request structure."""

    def __init__(self):
        """
        :param Definition: Unique ID of the transcoding template.
        :type Definition: int
        :param Container: Container format. Valid values: mp4, flv, hls, mp3, flac, ogg, m4a (the last four are audio-only).
        :type Container: str
        :param Name: Transcoding template name, up to 64 characters.
        :type Name: str
        :param Comment: Template description, up to 256 characters.
        :type Comment: str
        :param RemoveVideo: Whether to remove video data. 0: keep; 1: remove.
        :type RemoveVideo: int
        :param RemoveAudio: Whether to remove audio data. 0: keep; 1: remove.
        :type RemoveAudio: int
        :param VideoTemplate: Video stream configuration.
        :type VideoTemplate: :class:`tencentcloud.vod.v20180717.models.VideoTemplateInfoForUpdate`
        :param AudioTemplate: Audio stream configuration.
        :type AudioTemplate: :class:`tencentcloud.vod.v20180717.models.AudioTemplateInfoForUpdate`
        :param TEHDConfig: TESHD (Top Speed codec) transcoding parameters.
        :type TEHDConfig: :class:`tencentcloud.vod.v20180717.models.TEHDConfigForUpdate`
        :param SubAppId: VOD subapplication ID; fill in only when accessing resources of a subapplication.
        :type SubAppId: int
        """
        self.Definition = None
        self.Container = None
        self.Name = None
        self.Comment = None
        self.RemoveVideo = None
        self.RemoveAudio = None
        self.VideoTemplate = None
        self.AudioTemplate = None
        self.TEHDConfig = None
        self.SubAppId = None

    def _deserialize(self, params):
        """Populate this model (and nested models) from the dict *params*."""
        self.Definition = params.get("Definition")
        self.Container = params.get("Container")
        self.Name = params.get("Name")
        self.Comment = params.get("Comment")
        self.RemoveVideo = params.get("RemoveVideo")
        self.RemoveAudio = params.get("RemoveAudio")
        if params.get("VideoTemplate") is not None:
            self.VideoTemplate = VideoTemplateInfoForUpdate()
            self.VideoTemplate._deserialize(params.get("VideoTemplate"))
        if params.get("AudioTemplate") is not None:
            self.AudioTemplate = AudioTemplateInfoForUpdate()
            self.AudioTemplate._deserialize(params.get("AudioTemplate"))
        if params.get("TEHDConfig") is not None:
            self.TEHDConfig = TEHDConfigForUpdate()
            self.TEHDConfig._deserialize(params.get("TEHDConfig"))
        self.SubAppId = params.get("SubAppId")
        # Warn about fields this model does not define (typo or API/SDK
        # version mismatch); fixes the misspelled "fileds" warning text.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class ModifyTranscodeTemplateResponse(AbstractModel):
    """ModifyTranscodeTemplate response structure."""

    def __init__(self):
        # Unique request ID, returned with every response; provide it when
        # reporting an issue with this request.
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from the decoded response dict *params*."""
        self.RequestId = params.get("RequestId")
class ModifyWatermarkTemplateRequest(AbstractModel):
    """ModifyWatermarkTemplate request structure."""

    def __init__(self):
        """
        :param Definition: Unique ID of the watermark template.
        :type Definition: int
        :param Name: Watermark template name, up to 64 characters.
        :type Name: str
        :param Comment: Template description, up to 256 characters.
        :type Comment: str
        :param CoordinateOrigin: Origin position. Valid values:
        <li>TopLeft: origin at the top-left corner of the video, watermark origin at its top-left corner;</li>
        <li>TopRight: top-right corners;</li>
        <li>BottomLeft: bottom-left corners;</li>
        <li>BottomRight: bottom-right corners.</li>
        :type CoordinateOrigin: str
        :param XPos: Horizontal offset of the watermark origin from the video origin.
        Supports % (percentage of video width, e.g. 10%) and px (pixels, e.g. 100px).
        :type XPos: str
        :param YPos: Vertical offset of the watermark origin from the video origin.
        Supports % (percentage of video height) and px (pixels).
        :type YPos: str
        :param ImageTemplate: Image watermark template; only effective for image watermark templates.
        :type ImageTemplate: :class:`tencentcloud.vod.v20180717.models.ImageWatermarkInputForUpdate`
        :param TextTemplate: Text watermark template; only effective for text watermark templates.
        :type TextTemplate: :class:`tencentcloud.vod.v20180717.models.TextWatermarkTemplateInputForUpdate`
        :param SvgTemplate: SVG watermark template; only effective for SVG watermark templates.
        :type SvgTemplate: :class:`tencentcloud.vod.v20180717.models.SvgWatermarkInputForUpdate`
        :param SubAppId: VOD subapplication ID; fill in only when accessing resources of a subapplication.
        :type SubAppId: int
        """
        self.Definition = None
        self.Name = None
        self.Comment = None
        self.CoordinateOrigin = None
        self.XPos = None
        self.YPos = None
        self.ImageTemplate = None
        self.TextTemplate = None
        self.SvgTemplate = None
        self.SubAppId = None

    def _deserialize(self, params):
        """Populate this model (and nested models) from the dict *params*."""
        self.Definition = params.get("Definition")
        self.Name = params.get("Name")
        self.Comment = params.get("Comment")
        self.CoordinateOrigin = params.get("CoordinateOrigin")
        self.XPos = params.get("XPos")
        self.YPos = params.get("YPos")
        if params.get("ImageTemplate") is not None:
            self.ImageTemplate = ImageWatermarkInputForUpdate()
            self.ImageTemplate._deserialize(params.get("ImageTemplate"))
        if params.get("TextTemplate") is not None:
            self.TextTemplate = TextWatermarkTemplateInputForUpdate()
            self.TextTemplate._deserialize(params.get("TextTemplate"))
        if params.get("SvgTemplate") is not None:
            self.SvgTemplate = SvgWatermarkInputForUpdate()
            self.SvgTemplate._deserialize(params.get("SvgTemplate"))
        self.SubAppId = params.get("SubAppId")
        # Warn about fields this model does not define (typo or API/SDK
        # version mismatch); fixes the misspelled "fileds" warning text.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class ModifyWatermarkTemplateResponse(AbstractModel):
    """ModifyWatermarkTemplate response structure."""

    def __init__(self):
        # Image watermark URL; only set when ImageTemplate.ImageContent was
        # non-empty in the request.
        self.ImageUrl = None
        # Unique request ID, returned with every response; provide it when
        # reporting an issue with this request.
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from the decoded response dict *params*."""
        self.ImageUrl = params.get("ImageUrl")
        self.RequestId = params.get("RequestId")
class ModifyWordSampleRequest(AbstractModel):
    """ModifyWordSample request structure."""

    def __init__(self):
        """
        :param Keyword: Keyword, up to 128 characters.
        :type Keyword: str
        :param Usages: <b>Keyword usage scenarios. Valid values:</b>
        1. Recognition.Ocr: content recognition through OCR;
        2. Recognition.Asr: content recognition through ASR;
        3. Review.Ocr: inappropriate-content detection through OCR;
        4. Review.Asr: inappropriate-content detection through ASR;
        <b>Combined shorthand values:</b>
        5. Recognition: equivalent to 1+2;
        6. Review: equivalent to 3+4;
        7. All: equivalent to 1+2+3+4.
        :type Usages: list of str
        :param TagOperationInfo: Tag operation information.
        :type TagOperationInfo: :class:`tencentcloud.vod.v20180717.models.AiSampleTagOperation`
        :param SubAppId: VOD subapplication ID; fill in only when accessing resources of a subapplication.
        :type SubAppId: int
        """
        self.Keyword = None
        self.Usages = None
        self.TagOperationInfo = None
        self.SubAppId = None

    def _deserialize(self, params):
        """Populate this model (and nested models) from the dict *params*."""
        self.Keyword = params.get("Keyword")
        self.Usages = params.get("Usages")
        if params.get("TagOperationInfo") is not None:
            self.TagOperationInfo = AiSampleTagOperation()
            self.TagOperationInfo._deserialize(params.get("TagOperationInfo"))
        self.SubAppId = params.get("SubAppId")
        # Warn about fields this model does not define (typo or API/SDK
        # version mismatch); fixes the misspelled "fileds" warning text.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class ModifyWordSampleResponse(AbstractModel):
    """ModifyWordSample response structure."""

    def __init__(self):
        # Unique request ID, returned with every response; provide it when
        # reporting an issue with this request.
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from the decoded response dict *params*."""
        self.RequestId = params.get("RequestId")
class MosaicInput(AbstractModel):
    """Mosaic (blur) parameters for a video-processing task."""

    def __init__(self):
        """
        :param CoordinateOrigin: Origin position. Currently only:
        <li>TopLeft: origin at the top-left corner of the video, mosaic origin at its top-left corner.</li>
        Default: TopLeft.
        :type CoordinateOrigin: str
        :param XPos: Horizontal offset of the mosaic origin from the video origin.
        Supports % (percentage of video width, e.g. 10%) and px (pixels, e.g. 100px).
        Default: 0px.
        :type XPos: str
        :param YPos: Vertical offset of the mosaic origin from the video origin.
        Supports % (percentage of video height) and px (pixels).
        Default: 0px.
        :type YPos: str
        :param Width: Mosaic width. Supports % (percentage of video width) and px (pixels).
        Default: 10%.
        :type Width: str
        :param Height: Mosaic height. Supports % (percentage of video height) and px (pixels).
        Default: 10%.
        :type Height: str
        :param StartTimeOffset: Start time offset of the mosaic in seconds.
        <li>Empty or 0: the mosaic appears from the start of the video;</li>
        <li>n > 0: the mosaic appears at second n;</li>
        <li>n < 0: the mosaic appears n seconds before the end.</li>
        :type StartTimeOffset: float
        :param EndTimeOffset: End time offset of the mosaic in seconds.
        <li>Empty or 0: the mosaic lasts until the end of the video;</li>
        <li>n > 0: the mosaic disappears at second n;</li>
        <li>n < 0: the mosaic disappears n seconds before the end.</li>
        :type EndTimeOffset: float
        """
        self.CoordinateOrigin = None
        self.XPos = None
        self.YPos = None
        self.Width = None
        self.Height = None
        self.StartTimeOffset = None
        self.EndTimeOffset = None

    def _deserialize(self, params):
        """Populate this model from the decoded dict *params*."""
        self.CoordinateOrigin = params.get("CoordinateOrigin")
        self.XPos = params.get("XPos")
        self.YPos = params.get("YPos")
        self.Width = params.get("Width")
        self.Height = params.get("Height")
        self.StartTimeOffset = params.get("StartTimeOffset")
        self.EndTimeOffset = params.get("EndTimeOffset")
        # Warn about fields this model does not define (typo or API/SDK
        # version mismatch); fixes the misspelled "fileds" warning text.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class ObjectConfigureInfo(AbstractModel):
    """Control parameters for the object-recognition task."""

    def __init__(self):
        """
        :param Switch: Object recognition task switch. Valid values:
        <li>ON: enable intelligent object recognition;</li>
        <li>OFF: disable intelligent object recognition.</li>
        :type Switch: str
        :param ObjectLibrary: Object library selection. Valid values:
        <li>Default: use the default object library;</li>
        <li>UserDefine: use the user-defined object library;</li>
        <li>All: use both libraries.</li>
        Default: All.
        :type ObjectLibrary: str
        """
        self.Switch = None
        self.ObjectLibrary = None

    def _deserialize(self, params):
        """Populate this model from the decoded dict *params*."""
        self.Switch = params.get("Switch")
        self.ObjectLibrary = params.get("ObjectLibrary")
        # Warn about fields this model does not define (typo or API/SDK
        # version mismatch); fixes the misspelled "fileds" warning text.
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class ObjectConfigureInfoForUpdate(AbstractModel):
"""物体识别任务控制参数
"""
def __init__(self):
"""
:param Switch: 物体识别任务开关,可选值:
<li>ON:开启智能物体识别任务;</li>
<li>OFF:关闭智能物体识别任务。</li>
:type Switch: str
| |
def __hash__(self):
"""
Calculate a hash considering eventually associated objects.
"""
if self._hash is not None:
return self._hash # Return cached hash.
else:
if self.obj is None: # Anonymous symbol.
myhash = id(self)
else: # Hash of associated object.
myhash = hash(self.obj)
self._hash = myhash
return myhash
def __eq__(self, other):
"""
Test if other element equals to this symbol.
"""
if self is other:
return True
if not isinstance(other, self.__class__):
return NotImplemented
if self.obj is None or other.obj is None:
return False
else:
return self.obj == other.obj
    def __lt__(self, other):
        """Ordering used for canonical sorting of expressions.

        Delegates to Expression.__lt__ first; among symbols, named symbols
        sort before anonymous ones, two named symbols compare by their
        associated objects, and two anonymous symbols compare by hash.
        """
        cmp = Expression.__lt__(self, other)
        if cmp is not NotImplemented:
            return cmp
        if isinstance(other, Symbol):
            if self.obj is None:
                if other.obj is None:
                    return hash(self) < hash(other) # 2 anonymous symbols.
                else:
                    return False # Anonymous-Symbol < Named-Symbol.
            else:
                if other.obj is None:
                    return True # Named-Symbol < Anonymous-Symbol.
                else:
                    return self.obj.__lt__(other.obj) # 2 named symbols.
        return NotImplemented
def __str__(self):
if self.obj is None:
return "S<%s>" % str(hash(self))
else:
return str(self.obj)
def __repr__(self):
if self.obj is not None:
obj = repr(self.obj)
else:
obj = hash(self)
return "%s(%s)" % (self.__class__.__name__, obj)
class Function(Expression):
    """
    Boolean function.
    A boolean function takes n boolean expressions as arguments (n is called
    the order of the function) and maps them to one of the base elements.
    Typical examples for implemented functions are AND and OR.
    """
    # Specifies how many arguments a function takes. the first number gives a
    # lower limit, the second an upper limit.
    order = (2, float("inf"))
    # Specifies an infix notation of an operator for printing.
    operator = None
    def __new__(cls, *args, eval=True):
        """Create a function term; with eval=True return its simplified form.

        NOTE: the keyword deliberately shadows the builtin ``eval`` — it is
        part of this library's public API and cannot be renamed here.
        """
        length = len(args)
        order = cls.order
        # Build an unevaluated copy first, then simplify it; the recursive
        # call uses eval=False so argument-count validation below still runs.
        if eval:
            return cls(*args, eval=False).eval()
        if order[0] > length:
            raise TypeError("Too few arguments. Got %s, but need at least %s."\
                    % (length, order[0]))
        if order[1] < length:
            raise TypeError("Too many arguments. Got %s, but need at most %s."\
                    % (length, order[1]))
        return object.__new__(cls)
    def __init__(self, *args, eval=True):
        """Coerce all arguments to Expression instances and store them."""
        # If a function in the __new__ method is evaluated the __init__ method
        # will be called twice. First with the simplified then with original
        # arguments. The following "if" prevents that the simplified ones are
        # overwritten.
        if self._args:
            return
        _args = [None]*len(args)
        # Make sure all arguments are boolean expressions.
        for i, arg in enumerate(args):
            if isinstance(arg, Expression):
                _args[i] = arg
            elif isinstance(arg, str):
                _args[i] = parse(arg)
            # NOTE(review): ``in (0, False)`` also matches 0.0 because of
            # numeric equality — presumably intended for ints/bools only.
            elif arg in (0, False):
                _args[i] = FALSE
            elif arg in (1, True):
                _args[i] = TRUE
            else:
                raise TypeError("Bad argument: %s" % arg)
        self._args = tuple(_args)
    def __str__(self):
        """Render with the class operator (prefix/infix) or call syntax."""
        args = self.args
        if self.operator is None:
            # No operator defined: fall back to ClassName(arg, ...) form.
            return "%s(%s)" % (self.__class__.__name__,
                    ", ".join(str(arg) for arg in args))
        elif len(args) == 1:
            if self.isliteral:
                return self.operator + str(args[0])
            else:
                return "%s(%s)" % (self.operator, str(args[0]))
        else:
            # Parenthesize compound arguments; literals and unary (order
            # (1, 1)) terms bind tightly enough to stand alone.
            args = (str(arg) if arg.isliteral or arg.order == (1, 1) else "(%s)" % arg for arg in args)
            return self.operator.join(args)
    def __repr__(self):
        """Unambiguous form: ClassName(repr-of-arg, ...)."""
        return "%s(%s)" % (self.__class__.__name__,
                ", ".join(repr(arg) for arg in self.args))
class NOT(Function):
    """
    Boolean NOT operation.
    The NOT operation takes exactly one argument. If this argument is a Symbol
    the resulting expression is also called a literal.
    The operator "~" can be used as abbrevation for NOT, e.g. instead of
    NOT(x) one can write ~x (where x is some boolean expression). Also for
    printing "~" is used for better readability.
    """
    order = (1, 1)
    operator = "~"
    @property
    def isliteral(self):
        """
        Return True if object is a literal otherwise False.
        A NOT term is a literal exactly when its single argument is a Symbol.
        """
        if isinstance(self.args[0], Symbol):
            return True
        else:
            return False
    def literalize(self):
        """
        Return an expression where NOTs are only occuring as literals.
        Non-literal NOTs are pushed inward via De Morgan's laws, then the
        result is literalized recursively.
        """
        if self.isliteral:
            return self
        expr = self.demorgan()
        if isinstance(expr, self.__class__):
            return expr
        return expr.literalize()
    def eval(self, **evalkwargs):
        """
        Return a simplified term in canonical form.
        This means double negations are canceled out and all contained boolean
        objects are in their canonical form.
        """
        if self.iscanonical:
            return self
        term = self.literalize()
        # literalize() may have produced a non-NOT term; delegate to it.
        if not isinstance(term, self.__class__):
            return term.eval()
        # NOT of a base element (TRUE/FALSE) collapses to its dual.
        elif term.args[0] in self.algebra.domain:
            return term.args[0].dual
        else:
            # eval=False avoids re-simplification recursion in __new__.
            expr = self.__class__(term.args[0].eval(**evalkwargs),
                    eval=False)
            expr._iscanonical = True
            return expr
    def cancel(self):
        """
        Cancel itself and following NOTs as far as possible.
        Returns the simplified expression.
        """
        term = self
        # Strip NOT pairs two at a time: ~~x -> x.
        while True:
            arg = term.args[0]
            if not isinstance(arg, self.__class__):
                return term
            term = arg.args[0]
            if not isinstance(term, self.__class__):
                return term
    def demorgan(self):
        """
        Return a term where the NOT function is moved inward.
        This is achieved by canceling double NOTs and using de Morgan laws.
        """
        term = self.cancel()
        if term.isliteral or\
                not isinstance(term.args[0], self.algebra.operations):
            return term
        op = term.args[0]
        # ~(A*B) -> ~A + ~B and ~(A+B) -> ~A * ~B, canceling doubled NOTs
        # on each pushed-in argument.
        return op.dual(*tuple(self.__class__(arg, eval=False).cancel()\
                for arg in op.args), eval=False)
    def __lt__(self, other):
        """A negation sorts directly after the term it negates."""
        if self.args[0] == other:
            return False
        return self.args[0].__lt__(other)
class DualBase(Function):
"""
Base class for AND and OR function.
This class uses the duality principle to combine similar methods of AND
and OR. Both operations take 2 or more arguments and can be created using
"+" for OR and "*" for AND.
"""
# Specifies the identity element for the specific operation. (TRUE for
# AND and FALSE for OR).
_identity = None
    @property
    def identity(self):
        """
        Return the identity element for this function.
        This will be TRUE for the AND operation and FALSE for the OR operation.
        """
        return BaseElement(self._identity)
    @property
    def annihilator(self):
        """
        Return the annihilator element for this function.
        This will be FALSE for the AND operation and TRUE for the OR operation.
        """
        return BaseElement(not self._identity)
    @property
    def dual(self):
        """
        Return the dual class of this function.

        This means OR.dual returns AND and AND.dual returns OR.
        This is just a convenient property shortcut for getdual().
        """
        return self.getdual()
@classmethod
def getdual(cls):
"""
Return the dual class of this function.
This means OR.getdual() returns AND and AND.getdual() returns OR.
"""
ops = cls.algebra.operations
if issubclass(cls, ops.OR):
return ops.AND
elif issubclass(cls, ops.AND):
return ops.OR
else:
raise AttributeError("Class must be in algebra.operations.")
def __contains__(self, expr):
"""
Tests if expr is a subterm.
"""
if expr in self.args:
return True
if isinstance(expr, self.__class__):
if all(arg in self.args for arg in expr.args):
return True
return False
def eval(self, **evalkwargs):
"""
Return a simplified expression in canonical form.
For simplification of AND and OR following rules are used
recursively bottom up:
- Idempotence
- Commutivity (output is always sorted)
- Associativity (output doesn't contain same operations nested)
- Annihilation
- Identity
- Complementation
- Absorption
Other boolean objects are also in their canonical form.
"""
# TODO: Refactor DualBase.eval into different "sub-evals".
# If self is already canonical do nothing.
if self.iscanonical:
return self
ops = self.algebra.operations
# Otherwise bring arguments into canonical form.
args = tuple(arg.eval() for arg in self.args)
# Create new instance of own class with canonical args. "eval" has to
# be set False - otherwise infinite recursion!
# TODO: Only create new class if some args changed.
term = self.__class__(*args, eval=False)
# Associativity:
# (A * B) * C = A * (B * C) = A * B * C
# (A + B) + C = A + (B + C) = A + B + C
term = term.flatten()
# Annihilation: A * 0 = 0, A + 1 = 1
if self.annihilator in term.args:
return self.annihilator
# Idempotence: A * A = A, A + A = A
args = []
for arg in term.args:
if arg not in args:
args.append(arg)
if len(args) == 1:
return args[0]
# Identity: A * 1 = A, A + 0 = A
if self.identity in args:
args.remove(self.identity)
if len(args) == 1:
return args[0]
# Complementation: A * ~A = 0, A + ~A = 1
for arg in args:
if ops.NOT(arg) in args:
return self.annihilator
# Elemination: (A * B) + (A * ~B) = A, (A + B) * (A + ~B) = A
i = 0
while i < len(args)-1:
j = i + 1
ai = args[i]
if not isinstance(ai, self.dual):
i += | |
#
# Modified by <NAME>
# Contact: <EMAIL>
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
import torchvision.transforms.functional as F
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
import dqrf.utils.ch_transform as CH_T
from detectron2.structures import Boxes, Instances, BoxMode
from dqrf.utils.utils import ImageMeta, FileSystemPILReader
from fvcore.transforms.transform import Transform
from typing import Dict
# from . import detection_utils as utils
# from . import transforms as T
from fvcore.common.file_io import PathManager
from PIL import Image
__all__ = ["DqrfDatasetMapper"]
# class ChAugInput(T.AugInput):
# def __init__(self, image: np.ndarray, target: Dict):
# self.image = image
# self.targets = target
#
# def transform(self, tfm: Transform) -> None:
# self.image = tfm.apply_image(self.image, self.targets)
# self.targets = tfm.apply_box(self.image, self.targets)
def build_transform_gen(cfg, is_train):
    """
    Create a list of :class:`TransformGen` from the config.

    In training mode a random horizontal flip is prepended and the resize
    parameters come from the TRAIN config keys; in test mode only a
    "choice"-style resize built from the TEST keys is used.

    Returns:
        list[TransformGen]
    """
    if is_train:
        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
    else:
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        sample_style = "choice"
    if sample_style == "range":
        # A "range" style samples the short edge between exactly two bounds.
        assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
    logger = logging.getLogger("detectron2.data.dataset_mapper")
    tfm_gens = [T.RandomFlip()] if is_train else []
    tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
    if is_train:
        logger.info("TransformGens used in training: " + str(tfm_gens))
    return tfm_gens
# Modified from DETR (https://github.com/facebookresearch/detr)
class DqrfDatasetMapper:
    """
    A callable which takes a datasets dict in Detectron2 Dataset format,
    and map it into a format used by DETR.
    The callable currently does the following:
    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """
    def __init__(self, cfg, is_train=True):
        # Optional crop augmentation (DETR recipe): resize to a smaller
        # short edge first so the subsequent random crop keeps context.
        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = [
                T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
                T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
            ]
        else:
            self.crop_gen = None
        self.tfm_gens = build_transform_gen(cfg, is_train)
        logging.getLogger("detectron2.data.dataset_mapper").info(
            "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
        )
        # Image channel format passed to detectron2's reader (e.g. "BGR").
        self.img_format = cfg.INPUT.FORMAT
        self.is_train = is_train
    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. Contains height and width already
        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)  # doesn't / 255
        utils.check_image_size(dataset_dict, image)
        if self.crop_gen is None:
            image, transforms = T.apply_transform_gens(self.tfm_gens, image)
        else:
            # With ~0.5 probability skip cropping entirely (DETR recipe).
            if np.random.rand() > 0.5:  # horizontal flip + resize
                image, transforms = T.apply_transform_gens(self.tfm_gens, image)
            else:  # horizontal flip + cropping
                # Insert the crop transforms just before the final resize gen.
                image, transforms = T.apply_transform_gens(
                    self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
                )
        image_shape = image.shape[:2]  # h, w
        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        if not self.is_train:
            # not used during eval, since evaluator will load GTs
            dataset_dict.pop("annotations", None)
            return dataset_dict
        if "annotations" in dataset_dict:
            # USER: Modify this if you want to keep them for some reason.
            for anno in dataset_dict["annotations"]:
                anno.pop("segmentation", None)
                anno.pop("keypoints", None)
            # USER: Implement additional transformations if you have other types of data
            annos = [
                # this clips box to image size, hence can't be used for crowd human
                utils.transform_instance_annotations(obj, transforms, image_shape)
                for obj in dataset_dict.pop("annotations")  # pop returns
                if obj.get("iscrowd", 0) == 0
            ]
            # output box is XYXY_ABS
            instances = utils.annotations_to_instances(annos, image_shape)
            dataset_dict["instances"] = utils.filter_empty_instances(instances)
        return dataset_dict
def make_transforms(cfg, is_train):
    """
    Build the CrowdHuman-style (image, target) transform pipeline.

    Args:
        cfg: detectron2-style config node; INPUT.* keys are read.
        is_train (bool): build the augmented training pipeline when True,
            otherwise the deterministic test pipeline.

    Returns:
        CH_T.Compose: a transform applied to (image, target) pairs.
    """
    normalize = CH_T.Compose([
        CH_T.ToTensor(),  # this func will /255
        CH_T.Normalize()
    ])
    if is_train:
        scales = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        crop_size = cfg.INPUT.CROP.SIZE
        # scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
        return CH_T.Compose([
            CH_T.RandomHorizontalFlip(),
            # Either a plain multi-scale resize, or resize -> crop -> resize.
            CH_T.RandomSelect(
                CH_T.RandomResize(scales, max_size=max_size),
                CH_T.Compose([
                    CH_T.RandomResize([400, 500, 600]),
                    CH_T.RandomCropCH(crop_size[0], crop_size[1]),
                    CH_T.RandomResize(scales, max_size=max_size),
                ])
            ),
            normalize
        ])
    # Test-time: a single deterministic resize. The original trailing
    # `raise ValueError(...)` was unreachable (the two branches covered
    # every value of is_train) and has been removed as dead code.
    scales = cfg.INPUT.MIN_SIZE_TEST
    max_size = cfg.INPUT.MAX_SIZE_TEST
    return CH_T.Compose([
        CH_T.RandomResize([scales], max_size=max_size),
        normalize
    ])
class CH_DqrfDatasetMapper:
    """
    A callable which takes a datasets dict in Detectron2 Dataset format,
    and map it into a format used by DETR.
    The callable currently does the following:
    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """
    def __init__(self, cfg, is_train=True):
        # CrowdHuman-style transforms operate on (image, target) pairs.
        self._transform = make_transforms(cfg, is_train)
        self.image_read = FileSystemPILReader()
        logging.getLogger("detectron2.data.dataset_mapper").info(
            "Full TransformGens used in training: {}".format(str(self._transform))
        )
        self.img_format = cfg.INPUT.FORMAT
        self.is_train = is_train
    @staticmethod
    def _fake_zero_data(*size):
        # Placeholder tensor used when an annotation category is empty so
        # downstream tensor ops always receive a well-formed (1, 4) box.
        return torch.zeros(size)
    @staticmethod
    def get_image_size(img):
        """ return image size in (h, w) format
        """
        # PIL reports (w, h); swap to detectron2's (h, w) convention.
        w, h = img.size
        return h, w
    def __call__(self, dataset_dict):
        """
        DO NOT CLAMP FULLBOX
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # avoid modify our original list[dict]
        dataset_dict = ImageMeta.decode(dataset_dict)
        filename = dataset_dict["file_name"]
        gt_bboxes = []     # full boxes (intentionally NOT clamped, see docstring)
        gt_bboxes_v = []   # visible boxes (clamped to the image below)
        ig_bboxes = []     # ignore-region boxes
        classes = []
        areas = []
        # bbox_mode = []
        for instance in dataset_dict['annotations']:  # data[-1] is instances
            if not instance['is_ignored']:  # instance[-1] is `is_ignored`
                assert instance['category_id'] > 0, '{} has invalid label {}'.format(
                    filename, instance['category_id'])
                gt_bboxes.append(instance['bbox'])
                gt_bboxes_v.append(instance['vbbox'])
                classes.append(instance['category_id'])
                areas.append(instance['area'])
            else:
                ig_bboxes.append(instance['bbox'])
                # bbox_mode.append(instance['bbox_mode'])
        # Substitute a single all-zero box for any empty category so the
        # tensor conversions below never see an empty list.
        if len(ig_bboxes) == 0:
            ig_bboxes = self._fake_zero_data(1, 4)
        if len(gt_bboxes) == 0:
            gt_bboxes = self._fake_zero_data(1, 4)
        if len(gt_bboxes_v) == 0:
            gt_bboxes_v = self._fake_zero_data(1, 4)
        image = self.image_read(filename)
        image_shape = self.get_image_size(image)  # h, w
        dataset_dict["height"] = image_shape[0]
        dataset_dict["width"] = image_shape[1]
        boxes = torch.as_tensor(gt_bboxes, dtype=torch.float32).reshape(-1, 4)
        vboxes = torch.as_tensor(gt_bboxes_v, dtype=torch.float32).reshape(-1, 4)
        # Only the visible boxes are clamped to the image; full boxes may
        # extend beyond the image border by design.
        vboxes[:, 0::2].clamp_(min=0, max=image_shape[1])
        vboxes[:, 1::2].clamp_(min=0, max=image_shape[0])
        ig_bboxes = torch.as_tensor(ig_bboxes, dtype=torch.float32)
        classes = torch.as_tensor(classes, dtype=torch.int64)
        areas = torch.as_tensor(areas, dtype=torch.float32)
        # bbox_mode = torch.as_tensor(bbox_mode, dtype=torch.int64)
        # Drop degenerate boxes (non-positive width/height in either the
        # full or visible box). Assumes XYXY layout — TODO confirm.
        keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) & (vboxes[:, 3] > vboxes[:, 1]) & (
                vboxes[:, 2] > vboxes[:, 0])
        boxes = boxes[keep]
        vboxes = vboxes[keep]
        classes = classes[keep]
        areas = areas[keep]
        target = {"boxes": boxes, "vboxes": vboxes,
                  "iboxes": ig_bboxes, "labels": classes,
                  'area': areas}
        image, target = self._transform(image, target)
        dataset_dict["image"] = image
        if not self.is_train:
            # not used during eval, since evaluator will load GTs
            dataset_dict.pop("annotations", None)
            return dataset_dict
        if "annotations" in dataset_dict:
            dataset_dict.pop("annotations", None)
        dataset_dict["instances"] = target
        return dataset_dict
class CH_DqrfDatasetTestMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by the model.
This is the default callable to be used to map your dataset dict into training data.
You may need to follow it to implement your own one for customized logic,
such as a different way to read or transform images.
See :doc:`/tutorials/data_loading` for details.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies cropping/geometric transforms to the image and annotations
3. Prepare data and annotations to Tensor and :class:`Instances`
"""
    def __init__(self, cfg, is_train=True):
        # Optional single random crop used only during training.
        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
            logging.getLogger(__name__).info("CropGen used in training: " + str(self.crop_gen))
        else:
            self.crop_gen = None
        # NOTE(review): `utils` is detectron2.data.detection_utils here —
        # confirm it actually exposes build_transform_gen; the module-level
        # build_transform_gen defined above may have been intended instead.
        self.tfm_gens = utils.build_transform_gen(cfg, is_train)
        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        self.allow_oob = cfg.MODEL.ALLOW_BOX_OUT_OF_BOUNDARY
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None
        if self.load_proposals:
            # Precomputed-proposal settings differ between train and test.
            self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (
                cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                if is_train
                else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
            )
        self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
# USER: Write your own image loading if it's not from a file
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
if "annotations" not in dataset_dict:
image, transforms = T.apply_transform_gens(
([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image
)
else:
# Crop around an instance if there are instances in the image.
# USER: Remove if you don't use cropping
if self.crop_gen:
crop_tfm = utils.gen_crop_transform_with_instance(
self.crop_gen.get_crop_size(image.shape[:2]),
image.shape[:2],
np.random.choice(dataset_dict["annotations"]),
)
image = crop_tfm.apply_image(image)
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
if self.crop_gen:
transforms = crop_tfm + transforms
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient | |
search_output['aur'].items():
res.new.append(self.aur_mapper.map_api_data(apidata, None, self.categories))
res.update_total()
return res
    def _fill_aur_pkgs(self, aur_pkgs: dict, output: List[ArchPackage], disk_loader: DiskCacheLoader, internet_available: bool,
                       arch_config: dict, rebuild_check: Optional[Thread], rebuild_ignored: Optional[Thread], rebuild_output: Optional[Dict[str, Set[str]]]):
        """
        Map the installed AUR packages in 'aur_pkgs' into ArchPackage
        instances and append them to 'output' (runs on a worker thread).

        With internet access, the AUR API supplies fresh metadata and update
        state; otherwise packages are built only from local pacman data.
        The rebuild-detector threads (if provided) are joined here before
        their results are consumed.
        """
        if internet_available:
            try:
                pkgsinfo = self.aur_client.get_info(aur_pkgs.keys())
            except requests.exceptions.ConnectionError:
                # Best effort: fall back silently to local-only data.
                self.logger.warning('Could not retrieve installed AUR packages API data. It seems the internet connection is off.')
                self.logger.info("Reading only local AUR packages data")
                return
            if pkgsinfo:
                editable_pkgbuilds = self._read_editable_pkgbuilds() if arch_config['edit_aur_pkgbuild'] is not False else None
                ignore_rebuild_check = None
                if rebuild_ignored and rebuild_output is not None:
                    # Wait for the 'ignored by rebuild-detector' listing.
                    rebuild_ignored.join()
                    ignore_rebuild_check = rebuild_output['ignored']
                to_rebuild = None
                if rebuild_check and rebuild_output is not None:
                    self.logger.info("Waiting for rebuild-detector")
                    rebuild_check.join()
                    to_rebuild = rebuild_output['to_rebuild']
                for pkgdata in pkgsinfo:
                    pkg = self.aur_mapper.map_api_data(pkgdata, aur_pkgs, self.categories)
                    pkg.pkgbuild_editable = pkg.name in editable_pkgbuilds if editable_pkgbuilds is not None else None
                    if pkg.installed:
                        if disk_loader:
                            disk_loader.fill(pkg, sync=True)
                        pkg.update = self._check_aur_package_update(pkg=pkg,
                                                                    installed_data=aur_pkgs.get(pkg.name, {}),
                                                                    api_data=pkgdata)
                        pkg.aur_update = pkg.update  # used in 'set_rebuild_check'
                        if ignore_rebuild_check is not None:
                            pkg.allow_rebuild = pkg.name not in ignore_rebuild_check
                        if to_rebuild and not pkg.update and pkg.name in to_rebuild:
                            pkg.require_rebuild = True
                    pkg.update_state()
                    pkg.status = PackageStatus.READY
                    output.append(pkg)
        else:
            # Offline path: build packages purely from local pacman data.
            editable_pkgbuilds = self._read_editable_pkgbuilds() if arch_config['edit_aur_pkgbuild'] is not False else None
            for name, data in aur_pkgs.items():
                pkg = ArchPackage(name=name, version=data.get('version'),
                                  latest_version=data.get('version'), description=data.get('description'),
                                  installed=True, repository='aur', i18n=self.i18n)
                pkg.categories = self.categories.get(pkg.name)
                pkg.pkgbuild_editable = pkg.name in editable_pkgbuilds if editable_pkgbuilds is not None else None
                if disk_loader:
                    disk_loader.fill(pkg)
                pkg.status = PackageStatus.READY
                output.append(pkg)
def _check_aur_package_update(self, pkg: ArchPackage, installed_data: dict, api_data: dict) -> bool:
if pkg.last_modified is None: # if last_modified is not available, then the install_date will be used instead
install_date = installed_data.get('install_date')
if install_date:
try:
pkg.install_date = datetime_as_milis(parse_date(install_date))
except ValueError:
self.logger.error("Could not parse 'install_date' ({}) from AUR package '{}'".format(install_date, pkg.name))
else:
self.logger.error("AUR package '{}' install_date was not retrieved".format(pkg.name))
return self.aur_mapper.check_update(pkg=pkg, last_modified=api_data['LastModified'])
def _fill_repo_updates(self, updates: dict):
updates.update(pacman.list_repository_updates())
    def _fill_repo_pkgs(self, repo_pkgs: dict, pkgs: list, aur_index: Optional[Set[str]], disk_loader: DiskCacheLoader):
        """
        Map the installed repository packages in 'repo_pkgs' into
        ArchPackage instances and append them to 'pkgs' (worker thread).

        Available updates are fetched on a helper thread in parallel with
        the repository mapping, then joined before use.
        """
        updates = {}
        # Fetch available updates concurrently with map_repositories.
        thread_updates = Thread(target=self._fill_repo_updates, args=(updates,), daemon=True)
        thread_updates.start()
        repo_map = pacman.map_repositories(repo_pkgs)
        thread_updates.join()
        self.logger.info("Repository updates found" if updates else "No repository updates found")
        for name, data in repo_pkgs.items():
            pkgversion = data.get('version')
            pkgrepo = repo_map.get(name)
            pkg = ArchPackage(name=name,
                              version=pkgversion,
                              latest_version=pkgversion,
                              description=data.get('description'),
                              maintainer=pkgrepo,
                              i18n=self.i18n,
                              installed=True,
                              repository=pkgrepo,
                              categories=self.categories.get(name, []))
            if updates:
                update_version = updates.get(pkg.name)
                if update_version:
                    pkg.latest_version = update_version
                    pkg.update = True
            if disk_loader:
                disk_loader.fill(pkg, sync=True)
                # Cached data may claim 'aur' for a repo package: clear it.
                if pkg.repository == 'aur':
                    pkg.repository = None
                # Tag packages that were removed from the AUR index.
                if aur_index and pkg.name not in aur_index:
                    removed_cat = self.i18n['arch.category.remove_from_aur']
                    if removed_cat not in pkg.categories:
                        pkg.categories.append(removed_cat)
            pkgs.append(pkg)
def _wait_for_disk_cache(self):
if self.disk_cache_updater and self.disk_cache_updater.is_alive():
self.logger.info("Waiting for disk cache to be ready")
self.disk_cache_updater.join()
self.logger.info("Disk cache ready")
def __fill_packages_to_rebuild(self, output: Dict[str, Set[str]], ignore_binaries: bool):
if rebuild_detector.is_installed():
self.logger.info("rebuild-detector: checking")
to_rebuild = rebuild_detector.list_required_rebuild()
if to_rebuild and ignore_binaries:
to_rebuild = {p for p in to_rebuild if not RE_PKG_ENDS_WITH_BIN.match(p)}
output['to_rebuild'].update(to_rebuild)
self.logger.info("rebuild-detector: {} packages require rebuild".format(len(to_rebuild)))
def __fill_ignored_by_rebuild_detector(self, output: Dict[str, Set[str]]):
output['ignored'].update(rebuild_detector.list_ignored())
    def read_installed(self, disk_loader: Optional[DiskCacheLoader], limit: int = -1, only_apps: bool = False, pkg_types: Set[Type[SoftwarePackage]] = None, internet_available: bool = None, names: Iterable[str] = None, wait_disk_cache: bool = True) -> SearchResult:
        """
        Read all installed packages (repository and/or AUR, per config).

        Unsigned packages are classified as AUR only when present in the
        AUR name index; otherwise they are treated as repository packages.
        Repo and AUR mapping run on parallel worker threads that append
        into the shared 'pkgs' list.

        Returns:
            SearchResult with the mapped installed packages.
        """
        self.aur_client.clean_caches()
        arch_config = self.configman.get_config()
        aur_supported, repos_supported = aur.is_supported(arch_config), arch_config['repositories']
        if not aur_supported and not repos_supported:
            return SearchResult.empty()
        # Optionally start rebuild-detector scans in the background; they
        # are joined later inside _fill_aur_pkgs.
        rebuild_output, rebuild_check, rebuild_ignored = None, None, None
        if aur_supported and arch_config['aur_rebuild_detector']:
            rebuild_output = {'to_rebuild': set(), 'ignored': set()}
            rebuild_check = Thread(target=self.__fill_packages_to_rebuild,
                                   args=(rebuild_output, arch_config['aur_rebuild_detector_no_bin']),
                                   daemon=True)
            rebuild_check.start()
            rebuild_ignored = Thread(target=self.__fill_ignored_by_rebuild_detector, args=(rebuild_output, ), daemon=True)
            rebuild_ignored.start()
        installed = pacman.map_installed(names=names)
        aur_pkgs, repo_pkgs, aur_index = None, None, None
        if repos_supported:
            repo_pkgs = installed['signed']
        if installed['not_signed']:
            if aur_supported:
                if self.index_aur:
                    # Wait for the background AUR index download.
                    self.index_aur.join()
                aur_index = self.aur_client.read_index()
                # Iterate over a copy: entries may be deleted below.
                for pkg in {*installed['not_signed']}:
                    if pkg not in aur_index:
                        # Unsigned but not in the AUR: treat as a repo package.
                        if repos_supported:
                            repo_pkgs[pkg] = installed['not_signed'][pkg]
                        if aur_supported and installed['not_signed']:
                            del installed['not_signed'][pkg]
                aur_pkgs = installed['not_signed']
            elif repos_supported:
                repo_pkgs.update(installed['not_signed'])
        pkgs = []
        if repo_pkgs or aur_pkgs:
            if wait_disk_cache:
                self._wait_for_disk_cache()
            # Map AUR and repo packages concurrently; both append to 'pkgs'.
            map_threads = []
            if aur_pkgs:
                t = Thread(target=self._fill_aur_pkgs, args=(aur_pkgs, pkgs, disk_loader, internet_available, arch_config, rebuild_check, rebuild_ignored, rebuild_output), daemon=True)
                t.start()
                map_threads.append(t)
            if repo_pkgs:
                t = Thread(target=self._fill_repo_pkgs, args=(repo_pkgs, pkgs, aur_index, disk_loader), daemon=True)
                t.start()
                map_threads.append(t)
            for t in map_threads:
                t.join()
        if pkgs:
            # Flag packages whose updates the user chose to ignore.
            ignored = self._list_ignored_updates()
            if ignored:
                for p in pkgs:
                    if p.name in ignored:
                        p.update_ignored = True
        return SearchResult(pkgs, None, len(pkgs))
    def _downgrade_aur_pkg(self, context: TransactionContext):
        """
        Downgrade an AUR package by rebuilding it from an older commit of
        its PKGBUILD git repository.

        Strategy: when the installed commit is known, check out the commit
        immediately before it. Otherwise walk the git history comparing the
        .SRCINFO version against the installed version, and build from the
        first commit older than the matching one.

        Returns:
            bool: True when the rebuild succeeded.
        """
        if context.commit:
            self.logger.info("Package '{}' current commit {}".format(context.name, context.commit))
        else:
            self.logger.warning("Package '{}' has no commit associated with it. Downgrading will only compare versions.".format(context.name))
        context.build_dir = '{}/build_{}'.format(get_build_dir(context.config), int(time.time()))
        try:
            if not os.path.exists(context.build_dir):
                build_dir = context.handler.handle(SystemProcess(new_subprocess(['mkdir', '-p', context.build_dir])))
                if build_dir:
                    context.handler.watcher.change_progress(10)
                    base_name = context.get_base_name()
                    context.watcher.change_substatus(self.i18n['arch.clone'].format(bold(context.name)))
                    cloned, _ = context.handler.handle_simple(git.clone_as_process(url=URL_GIT.format(base_name), cwd=context.build_dir))
                    context.watcher.change_progress(30)
                    if cloned:
                        context.watcher.change_substatus(self.i18n['arch.downgrade.reading_commits'])
                        clone_path = '{}/{}'.format(context.build_dir, base_name)
                        context.project_dir = clone_path
                        srcinfo_path = '{}/.SRCINFO'.format(clone_path)
                        logs = git.log_shas_and_timestamps(clone_path)
                        context.watcher.change_progress(40)
                        # A single commit means there is nothing to go back to.
                        if not logs or len(logs) == 1:
                            context.watcher.show_message(title=self.i18n['arch.downgrade.error'],
                                                         body=self.i18n['arch.downgrade.impossible'].format(context.name),
                                                         type_=MessageType.ERROR)
                            return False
                        if context.commit:
                            # Known commit: target the one right after it in
                            # the log (i.e. its parent, since logs are
                            # newest-first).
                            target_commit, target_commit_timestamp = None, None
                            for idx, log in enumerate(logs):
                                if context.commit == log[0] and idx + 1 < len(logs):
                                    target_commit = logs[idx + 1][0]
                                    target_commit_timestamp = logs[idx + 1][1]
                                    break
                            if not target_commit:
                                # Fall through to version comparison below.
                                self.logger.warning("Could not find '{}' target commit to revert to".format(context.name))
                            else:
                                context.watcher.change_substatus(self.i18n['arch.downgrade.version_found'])
                                checkout_proc = new_subprocess(['git', 'checkout', target_commit], cwd=clone_path)
                                if not context.handler.handle(SystemProcess(checkout_proc, check_error_output=False)):
                                    context.watcher.print("Could not rollback to current version's commit")
                                    return False
                                context.watcher.change_substatus(self.i18n['arch.downgrade.install_older'])
                                context.last_modified = target_commit_timestamp
                                context.commit = target_commit
                                return self._build(context)
                        # trying to downgrade by version comparison
                        commit_found, commit_date = None, None
                        srcfields = {'pkgver', 'pkgrel', 'epoch'}
                        for idx in range(1, len(logs)):
                            commit, date = logs[idx][0], logs[idx][1]
                            # NOTE: .SRCINFO is read BEFORE resetting to
                            # 'commit', so the version read corresponds to the
                            # previous iteration's checkout (HEAD on the first
                            # pass). 'commit_found' therefore ends up one
                            # commit older than the matching version.
                            with open(srcinfo_path) as f:
                                pkgsrc = aur.map_srcinfo(string=f.read(), pkgname=context.name, fields=srcfields)
                            reset_proc = new_subprocess(['git', 'reset', '--hard', commit], cwd=clone_path)
                            if not context.handler.handle(SystemProcess(reset_proc, check_error_output=False)):
                                context.handler.watcher.print('Could not downgrade anymore. Aborting...')
                                return False
                            epoch, version, release = pkgsrc.get('epoch'), pkgsrc.get('pkgver'), pkgsrc.get('pkgrel')
                            if epoch:
                                current_version = '{}:{}-{}'.format(epoch, version, release)
                            else:
                                current_version = '{}-{}'.format(version, release)
                            if commit_found:
                                # Match found on the previous pass: pin the
                                # clone to that commit and build from it.
                                context.watcher.change_substatus(self.i18n['arch.downgrade.version_found'])
                                checkout_proc = new_subprocess(['git', 'checkout', commit_found], cwd=clone_path)
                                if not context.handler.handle(SystemProcess(checkout_proc, check_error_output=False)):
                                    context.watcher.print("Could not rollback to current version's commit")
                                    return False
                                reset_proc = new_subprocess(['git', 'reset', '--hard', commit_found], cwd=clone_path)
                                if not context.handler.handle(SystemProcess(reset_proc, check_error_output=False)):
                                    context.watcher.print("Could not downgrade to previous commit of '{}'. Aborting...".format(commit_found))
                                    return False
                                break
                            elif current_version == context.get_version():  # current version found:
                                commit_found, commit_date = commit, date
                        context.watcher.change_substatus(self.i18n['arch.downgrade.install_older'])
                        context.last_modified = commit_date
                        context.commit = commit_found
                        return self._build(context)
        finally:
            # Always clean the build dir when configured to do so.
            if os.path.exists(context.build_dir) and context.config['aur_remove_build_dir']:
                context.handler.handle(SystemProcess(subproc=new_subprocess(['rm', '-rf', context.build_dir])))
        return False
    def _downgrade_repo_pkg(self, context: TransactionContext):
        """
        Downgrade a repository package by reinstalling the newest older
        version found in the local pacman cache (/var/cache/pacman/pkg).

        Returns:
            bool: True when an older cached version was installed.
        """
        context.watcher.change_substatus(self.i18n['arch.downgrade.searching_stored'])
        if not os.path.isdir('/var/cache/pacman/pkg'):
            context.watcher.show_message(title=self.i18n['arch.downgrade.error'],
                                         body=self.i18n['arch.downgrade.repo_pkg.no_versions'],
                                         type_=MessageType.ERROR)
            return False
        available_files = glob.glob("/var/cache/pacman/pkg/{}-*.pkg.tar.*".format(context.name))
        if not available_files:
            context.watcher.show_message(title=self.i18n['arch.downgrade.error'],
                                         body=self.i18n['arch.downgrade.repo_pkg.no_versions'],
                                         type_=MessageType.ERROR)
            return False
        # Extract the version component from each cached file name.
        reg = re.compile(r'{}-([\w.\-]+)-(x86_64|any|i686).pkg'.format(context.name))
        versions, version_files = [], {}
        for file_path in available_files:
            found = reg.findall(os.path.basename(file_path))
            if found:
                ver = found[0][0]
                # NOTE(review): this is a plain lexicographic string
                # comparison, not pacman's vercmp ordering — it can misorder
                # versions like '9.x' vs '10.x'. Confirm intent.
                if ver not in versions and ver < context.get_version():
                    versions.append(ver)
                    version_files[ver] = file_path
        context.watcher.change_progress(40)
        if not versions:
            context.watcher.show_message(title=self.i18n['arch.downgrade.error'],
                                         body=self.i18n['arch.downgrade.repo_pkg.no_versions'],
                                         type_=MessageType.ERROR)
            return False
        # Pick the highest (again lexicographic) of the older versions.
        versions.sort(reverse=True)
        context.watcher.change_progress(50)
        context.install_files = version_files[versions[0]]  # TODO verify
        if not self._handle_missing_deps(context=context):
            return False
        context.watcher.change_substatus(self.i18n['arch.downgrade.install_older'])
        context.watcher.change_progress(60)
        return self._install(context)
def downgrade(self, pkg: ArchPackage, root_password: str, watcher: ProcessWatcher) -> bool:
self.aur_client.clean_caches()
if not self._check_action_allowed(pkg, watcher):
return False
handler = ProcessHandler(watcher)
if self._is_database_locked(handler, root_password):
return False
arch_config = self.configman.get_config()
aur_supported = pkg.repository == 'aur' or aur.is_supported(arch_config)
context = TransactionContext(name=pkg.name, base=pkg.get_base_name(), skip_opt_deps=True,
change_progress=True, dependency=False, repository=pkg.repository, pkg=pkg,
arch_config=arch_config, watcher=watcher, handler=handler, root_password=<PASSWORD>,
installed=set(), removed={},
aur_supported=aur_supported,
commit=pkg.commit)
self._sync_databases(arch_config=context.config, aur_supported=aur_supported,
root_password=<PASSWORD>, handler=handler)
watcher.change_progress(5)
if pkg.repository == 'aur':
return self._downgrade_aur_pkg(context)
else:
return self._downgrade_repo_pkg(context)
def clean_cache_for(self, pkg: ArchPackage):
if os.path.exists(pkg.get_disk_cache_path()):
shutil.rmtree(pkg.get_disk_cache_path())
def _check_action_allowed(self, pkg: ArchPackage, watcher: ProcessWatcher) -> bool:
if user.is_root() and pkg.repository == 'aur':
watcher.show_message(title=self.i18n['arch.install.aur.root_error.title'],
body=self.i18n['arch.install.aur.root_error.body'],
type_=MessageType.ERROR)
return False
return True
def _is_database_locked(self, handler: ProcessHandler, root_password: str) -> bool:
if os.path.exists('/var/lib/pacman/db.lck'):
handler.watcher.print('pacman database is locked')
msg = '<p>{}</p><p>{}</p><br/>'.format(self.i18n['arch.action.db_locked.body.l1'],
self.i18n['arch.action.db_locked.body.l2'])
if handler.watcher.request_confirmation(title=self.i18n['arch.action.db_locked.title'].capitalize(),
body=msg,
confirmation_label=self.i18n['arch.action.db_locked.confirmation'].capitalize(),
deny_label=self.i18n['cancel'].capitalize()):
try:
if not handler.handle_simple(SimpleProcess(['rm', '-rf', '/var/lib/pacman/db.lck'], root_password=root_password)):
handler.watcher.show_message(title=self.i18n['error'].capitalize(),
body=self.i18n['arch.action.db_locked.error'],
type_=MessageType.ERROR)
return True
except:
self.logger.error("An error occurred while removing the pacman database lock")
traceback.print_exc()
handler.watcher.show_message(title=self.i18n['error'].capitalize(),
body=self.i18n['arch.action.db_locked.error'],
type_=MessageType.ERROR)
return True
else:
handler.watcher.print('Action cancelled by the user. Aborting...')
return True
return False
def _map_conflicting_file(self, output: str) -> List[MultipleSelectComponent]:
error_idx = None
lines = output.split('\n')
for idx, l in enumerate(lines):
if l and l.strip().lower().startswith('error: failed to commit transaction (conflicting files)'):
error_idx = idx
break
files = []
if error_idx and error_idx + 1 < len(lines):
for idx in range(error_idx + 1, len(lines)):
line = lines[idx].strip()
if line and self.re_file_conflict.match(line):
files.append(InputOption(label=line, value=idx, read_only=True))
return [MultipleSelectComponent(options=files, default_options={*files}, label='')]
def _map_dependencies_breakage(self, output: str) -> List[ViewComponent]:
errors = RE_DEPENDENCY_BREAKAGE.findall(output)
if errors:
opts = []
for idx, err in enumerate(errors):
opts.append(InputOption(label=self.i18n['arch.upgrade.error.dep_breakage.item'].format(*err), value=idx, read_only=True))
return [MultipleSelectComponent(label='',
options=opts,
default_options={*opts})]
else:
return [TextComponent(output)]
def list_related(self, pkgs: Iterable[str], all_pkgs: Iterable[str], data: Dict[str, dict], related: Set[str], provided_map: Dict[str, Set[str]]) | |
from .state import State
from .object import Object, Void
from .listdeforg import ListDefOrigin
from .value import *
from ..error import StoryError
from random import Random
from .container import Container
from .callstack import StackType
from .path import Path
from .tag import Tag
from ..util import Event
from .divert import Divert
from io import StringIO
from .cmd import Cmd, CmdType
from .varassign import VarAssign
from .varref import VarRef
from .prim import Primitive
from .choice import Choice
from .choicepoint import ChoicePoint
class Story(Object):
# Constructor
def __init__(self, root_container, lists):
super().__init__()
self.__state = None
self.__list_defs = None if lists is None else ListDefOrigin(lists)
self.__prev_cont_set = None
self.__tmp_eval_container = None
self.__validated_exts = False
self.__externals = {}
self.__observers = {}
self.__main = root_container
self.allow_ext_func_fallbacks = False
self.reset()
# Properties
@property
def choices(self):
ch = []
for c in self.state.current_choices:
if not c.choicepoint.is_invisible_defaults:
c.idx = len(ch)
ch.append(c)
return ch
    @property
    def gtags(self):
        """Global tags: the tags declared at the story root (empty path)."""
        return self.__tags_at("")
    @property
    def state(self):
        """The mutable runtime state of this story."""
        return self.__state
    @property
    def list_defs(self):
        """The list-definitions origin, or None when no lists are declared."""
        return self.__list_defs
@property
def main(self):
if self.__tmp_eval_container: return self.__tmp_eval_container
return self.__main
# Methods
def continue_(self, max_=False):
if not self.__validated_exts:
print("validating externals")
self.validate_exts()
if max_:
s = StringIO()
while self.state.can_continue:
s.write(self.__continue())
return s.getvalue()
print("continuing...")
return self.__continue()
def choose(self, idx):
ch = self.choices
self.__assert(0 <= idx < len(ch), "Choice out of range")
c = ch[idx]
self.state.callstack.current_thread = c.threadatgen
self.goto(c.choicepoint.choice_target.path)
    def goto(self, path, *args):
        """Divert the story to path (a Path object or a string path).

        NOTE(review): args are forwarded to the evaluation stack only when
        path is given as a string -- confirm this asymmetry is intentional.
        """
        if isinstance(path, str):
            self.state.pass_args_to_eval_stack(*args)
            path = Path(path)
        self.state.set_chosen_path(path)
        # Count visits for any containers newly entered by the divert.
        self.__visit_changed()
    def tagsat(self, path):
        """Tags declared at the start of the knot/stitch at path."""
        return self.__tags_at(path)
    def reset(self):
        """Discard all runtime state and restart the story from the top."""
        self.__state = State(self)
        # Forward variable changes to the observers registered via watch().
        self.__state.lexenv.variableChanged += self.__var_changed
        self.__reset_globals()
def watch(self, var, f):
if self.__observers is None: self.__observers = {}
if isinstance(var, list):
for v in var:
self.watch(v, f)
else:
if var not in self.__observers:
self.__observers[var] = Event()
self.__observers[var] += f
    def unwatch(self, f, var=None):
        """Unregister observer f, from var only or from every variable.

        NOTE(review): `observer -= f` relies on Event.__isub__ mutating the
        Event in place -- confirm against util.Event.
        """
        if self.__observers is None: return
        if var:
            if var in self.__observers:
                self.__observers[var] -= f
        else:
            for observer in self.__observers.values():
                observer -= f
    def bindfun(self, fname, f, *argtypes):
        """Bind callable f as the EXTERNAL function fname.

        argtypes is currently unused -- presumably reserved for argument
        type checking; verify before relying on it.
        """
        self.__assert(f is not None, "Can't bind to None")
        self.__assert(fname not in self.__externals,
                      "Function {0} has already been bound.".format(fname))
        self.__externals[fname] = f
def unbindfun(self, fname):
self.__assert(fname in self.__externals,
"Function {0} has not been found".format(fname))
del self.__externals[fname]
    def content_at(self, path):
        """Resolve path against the content root and return the object."""
        return self.main.content_at_path(path)
def validate_exts(self):
missing = set()
self.__validate(self.__main, missing)
if len(missing) == 0: self.__validated_exts = True
else:
msg = "ERROR: Missing function binding for external{0}: '{1}' {2}".format(
"s" if len(missing) > 1 else "", ", ".join(missing),
", and now fallback ink function found." if
self.allow_ext_func_fallbacks else " (ink fallbacks disabled)")
self.__error(msg)
def call_ext(self, fname, nargs):
f = None
fallback_cont = None
ext = self.__externals.get(fname)
if ext is None:
if self.allow_ext_func_fallbacks:
fallback_cont = self.content_at(Path(fname))
self.__assert(
isinstance(fallback_cont, Container),
"Trying to call EXTERNAL function '{0}'"
"which has not been bound,"
"and fallback ink function could not be found."
.format(fname))
self.state.callstack.push(StackType.FUNCTION)
self.divert_target_obj = fallback_cont
return
else:
self.__assert(False, "Trying to call EXTERNAL function '{0}'"
"which has not been bound,"
"and fallback ink functions are disabled."
.format(fname))
args = []
for i in range(nargs):
args.append(self.state.pop_eval_stack().value)
args = reversed(args)
fres = f(*args)
ret = None
if fres:
ret = Value.create(fres)
self.__assert(
ret is not None,
"Could not create ink value from returned object of type {0}".
format(type(fres)))
else:
ret = Void()
self.state.push_eval_stack(ret)
def eval_expr(self, cont):
start_csh = len(self.state.callstack.callstack)
self.state.callstach.push(StackType.TUNNEL)
self.__tmp_eval_container = cont
self.state.go_to_start()
eval_sh = len(self.state.eval_stack)
self.continue_()
self.__tmp_eval_container = None
if len(self.state.callstack.callstack) > start_csh:
self.state.callstack.pop()
end_sh = len(self.state.eval_stack)
if end_sh > eval_sh:
return self.state.pop_eval_stack()
else:
return None
    def eval_fun(self, fname, *args):
        """Call the ink function fname with args and return only its
        result, discarding any text output it produced."""
        return self.eval_fun_with_output(fname, *args)["result"]
def eval_fun_with_output(self, fname, *args):
if fname is None: raise TypeError("Function is None")
elif fname.isspace():
raise TypeError("Function is empty or white space")
cont = None
try:
cont = self.content_at(Path(fname))
if not isinstance(cont, Container): cont = None
except StoryError as e:
if "not found" in e.message:
raise NameError(
"Function {0} doesn't exist".format(fname)) from e
else:
raise e
self.state.start_ext_function_eval(cont, *args)
out = StringIO()
while self.state.can_continue:
out.write(self.continue_())
res = self.state.complete_ext_function_eval()
return {"output": out.getvalue(), "result": res}
def has_fun(self, fname):
try:
return isinstance(self.content_at(Path(fname)), Container)
except StoryError:
return False
# Private methods
    def __snapshot(self):
        """Copy of the current runtime state, used by __continue to rewind
        to the last newline."""
        return self.state.copy()
    def __restore_snapshot(self, state):
        """Replace the runtime state with a previously taken snapshot."""
        self.__state = state
def __continue(self):
if not self.state.can_continue:
raise StoryError("Can't continue")
self.state.reset_output()
self.state.did_safe_exit = False
self.state.lexenv.batchObserving = True
try:
state_at_last_nl = None
while True: # Begin DO
print("stepping...")
self.__step()
if not self.state.can_continue:
self.__try_follow_default_choice()
if not self.state.in_str_eval:
if state_at_last_nl:
curtxt = self.state.text
prvtxtlen = len(state_at_last_nl.text)
prvtagcnt = len(state_at_last_nl.tags)
if curtxt != state_at_last_nl.text or prvtagcnt != len(
self.state.tags):
if len(curtxt) >= prvtxtlen and curtxt[prvtxtlen -
1] == '\n':
self.__restore_snapshot(state_at_last_nl)
break
else:
state_at_last_nl = None
if self.state.output_ends_in_nl:
if self.state.can_continue:
if state_at_last_nl is None:
state_at_last_nl = self.__snapshot()
else:
state_at_last_nl = None
if not self.state.can_continue: break # End DO
if state_at_last_nl: self.__restore_snapshot(state_at_last_nl)
if not self.state.can_continue:
self.__assert(
not self.state.callstack.can_pop_thread,
"Thread available to pop."
"Threads should always be flat at the end of evaluation.")
if (len(self.state.generated_choices) == 0
and not self.state.did_safe_exit
and self.__tmp_eval_container is None):
self.__assert(
not self.state.callstack.can_pop_t(StackType.TUNNEL),
"Unexpectedly reached end of content."
"Do you need a `->->` to return from a tunnel?")
self.__assert(
not self.state.callstack.can_pop_t(StackType.FUNCTION),
"Unexpectedly reached end of content."
"Do you need a `~ return`?")
self.__assert(not self.state.callstack.can_pop,
"Ran out of content."
"Do you need a `-> DONE` or `-> END`?")
self.__error("Unexpectedly reached end of content."
"Reason unknown."
"That's clearly a compiler bug.")
except StoryError as e:
self.__add_error(e.message, e.useln)
finally:
self.state.did_safe_exit = False
self.state.lexenv.batchObserving = False
return self.state.text
    def __step(self):
        """Advance the content pointer by one piece of content.

        Descends into containers, executes flow-control content, records
        generated choices, and pushes remaining plain content to the
        output (or evaluation) stack before moving the pointer onward.
        """
        shouldout = True
        cur = self.state.current_content
        if cur is None: return
        curcont = cur if isinstance(cur, Container) else None
        # Descend to the first leaf, counting visits on the way down.
        while curcont:
            self.__visit(curcont, True)
            if len(curcont.content) == 0: break
            cur = curcont.content[0]
            self.state.callstack.current_element.content_idx = 0
            self.state.callstack.current_element.container = curcont
            curcont = cur if isinstance(cur, Container) else None
        curcont = self.state.callstack.current_element.container
        # Flow content (diverts, commands, assignments, ...) executes
        # rather than being emitted.
        is_flow = self.__perform_flow(cur)
        if self.state.current_content is None: return
        if is_flow: shouldout = False
        if isinstance(cur, ChoicePoint):
            ch = self.__process_choice(cur)
            if ch: self.state.generated_choices.append(ch)
            cur = None
            shouldout = False
        if isinstance(cur, Container):
            shouldout = False
        if shouldout:
            # Late-bind variable pointers to their owning context.
            if isinstance(cur, VarPtrValue) and cur.ctx_idx == -1:
                ctx = self.state.callstack.context(cur.value)
                cur = VarPtrValue(cur.value, ctx)
            if self.state.in_expr_eval:
                self.state.push_eval_stack(cur)
            else:
                self.state.push_to_output(cur)
        self.__next()
        # Thread starts fork only after the pointer has advanced.
        if isinstance(cur, Cmd) and cur.cmd_type == CmdType.START_THREAD:
            self.state.callstack.push_thread()
def __visit(self, cont, at_start):
if not cont.count_at_start or at_start:
if cont.count_visits:
self.__inc_visit_count(cont)
if cont.count_turns:
self.__record_turn_idx_visit(cont)
    def __visit_changed(self):
        """After a divert, count visits for each newly entered container.

        Walks the ancestors of the new content and visits those that were
        not also ancestors of the previous content.
        """
        prv = self.state.prev_ct_obj
        new = self.state.current_content
        if not new: return
        # Reuse a scratch set across calls to avoid reallocation.
        if self.__prev_cont_set is None: self.__prev_cont_set = set()
        self.__prev_cont_set.clear()
        # Collect all container ancestors of the previous position.
        if prv:
            prv_anc = prv if isinstance(prv, Container) else prv.parent
            while isinstance(prv_anc, Container):
                self.__prev_cont_set.add(prv_anc)
                prv_anc = prv_anc.parent
        # Visit ancestors of the new position that were not shared.
        child = new
        anc = child.parent
        while isinstance(anc, Container) and anc not in self.__prev_cont_set:
            at_start = len(anc.content) > 0 and child == anc.content[0]
            self.__visit(anc, at_start)
            child = anc
            anc = anc.parent
    def __process_choice(self, cp):
        """Build a Choice from choice point cp, or return None when the
        choice should not be shown (failed condition or used once-only).

        Pops the pieces pushed during evaluation in reverse order:
        condition first, then choice-only text, then start text -- this
        pop order must match the push order and cannot be rearranged.
        """
        chshow = True
        if cp.has_condition:
            condval = self.state.pop_eval_stack()
            if not self.__is_true(condval):
                chshow = False
        start_txt = ""
        chonly_txt = ""
        if cp.has_choice_only_content:
            chonly_str = self.state.pop_eval_stack()
            chonly_txt = chonly_str.value
        if cp.has_start_content:
            start_str = self.state.pop_eval_stack()
            start_txt = start_str.value
        if cp.once_only:
            visits = self.__visit_count(cp.choice_target)
            if visits > 0:
                chshow = False
        ch = Choice(cp)
        # Remember the generating thread so choose() can restore it.
        ch.threadatgen = self.state.callstack.current_thread.copy()
        if not chshow: return None
        ch.text = start_txt + chonly_txt
        return ch
def __is_true(self, o):
if isinstance(o, Value):
if isinstance(o, DivertTargetValue):
self.__error(
"Shouldn't use a divert target as a conditional value."
"Did you intend a function call `likeThis()` or a read-count check `likeThis`? (no arrows)"
)
return False
return bool(o)
return False
def __perform_flow(self, o):
if o is None: return False
if isinstance(o, Divert):
return self.__perform_flow_divert(o)
elif isinstance(o, Cmd):
return self.__perform_flow_cmd(o)
elif isinstance(o, VarAssign):
return self.__perform_flow_varass(o)
elif isinstance(o, VarRef):
return self.__perform_flow_varref(o)
elif isinstance(o, Primitive):
return self.__perform_flow_prim(o)
return False
def __validate(self, o, missing):
if isinstance(o, Container):
for c in o.content:
if not isinstance(c, Container) or not c.has_valid_name:
self.__validate(c, missing)
for c in o.named_content.values():
self.__validate(c, missing)
return
if isinstance(o, Divert) and o.is_external:
n = o.target_path_str
if n not in self.__externals:
if self.allow_ext_func_fallbacks:
if name not in self.main.named_content: missing.add(n)
else: missing.add(n)
def __tags_at(self, path):
path = Path(path)
cont = self.content_at(path)
while True:
if isinstance(cont.content[0], Container): cont = cont.content[0]
else: break
tags = None
for c in cont.content:
if isinstance(c, Tag):
tags = tags or []
tags.append(c.txt)
else:
break
return tags
def __next(self):
self.state.prev_ct_obj = self.state.current_content
if self.state.divert_target_obj:
self.state.current_content = self.state.divert_target_obj
self.divert_target_obj = None
self.__visit_changed()
if self.state.current_content: return
if not self.__inc_ct_ptr():
didpop = False
if (self.state.callstack.can_pop_t(StackType.FUNCTION)):
self.state.callstack.pop(StackType.FUNCTION)
if self.state.in_expr_eval:
| |
"""Functionality for generating feature vectors from relational data"""
# Copyright (c) 2018 <NAME>. This is free software released
# under the MIT License. See `LICENSE.txt` for details.
# Features are functions that convert a set of input fields to a value.
# They have input and output types. They have a conversion type. For
# basic cases, the function can be determined from the input, output,
# and conversion types.
#
# A feature can apply to multiple fields, and a field can be relevant to
# multiple features. While it would be possible to apply each feature
# to each example, features are likely to be sparse in their
# applicability, so one could look them up by a key related to their
# applicability. For example, such a key could be (table_name,
# field_name).
from enum import Enum
import io
from barnapy import files
from barnapy import logging
import esal
from . import general
from . import records
class RandomVariableType(Enum): # TODO fix this at some point: need proper RandomVariable object with a domain of values and how those values can be interpreted, their algebraic structure (order, interval, field)
    """Types of random variables"""
    none = 0 # None or unknown, the null type
    binary = 1
    categorical = 2
    ordinal = 3
    count = 4
    interval = 5
    # differentiate finite and infinite discrete? different distributions apply
    continuous = 6
    def is_continuous(self):
        """Whether this variable type has a continuous domain."""
        return self is RandomVariableType.continuous
    def is_discrete(self):
        """Whether this variable type has a (known) discrete domain."""
        return self not in (RandomVariableType.none,
                            RandomVariableType.continuous)
class SetEncoding(Enum):
    """Encodings of sets"""
    none = 0 # None or unknown, the null encoding
    values = 1 # Each value in the set is itself
    indices = 2 # Values are encoded by their 1-based index in the list of sorted values
    indicators = 3 # Values are encoded as binary indicators
def str_w_empty_none(obj):
    """Return str(obj), or the empty string when obj is None."""
    return '' if obj is None else str(obj)
def make_identifier(*objs):
    """Join objs into a '-'-separated identifier.

    Each object is stringified ('' for None), stripped, and has its
    internal spaces replaced with underscores.
    """
    parts = []
    for obj in objs:
        text = '' if obj is None else str(obj)
        parts.append(text.strip().replace(' ', '_'))
    return '-'.join(parts)
class Feature: # TODO rework to separate out random variable aspects from association to relation
    """A random variable derived from a field (and optionally a specific
    value) of a relation, paired with the function that computes its
    value from an example."""
    # Features do not have IDs because they need to be numbered as a
    # contiguous set of 1-based indices. Maintaining this is
    # incompatible with allowing users to set their own IDs.
    # Default mapping from Python data types to random variable types.
    _data_to_rv_types = {
        bool: RandomVariableType.binary,
        float: RandomVariableType.continuous,
        int: RandomVariableType.continuous,
        str: RandomVariableType.categorical,
        object: RandomVariableType.categorical,
    }
    # Mapping from type names (as stored in feature files) to types.
    _names_to_data_types = {
        'bool': bool,
        'int': int,
        'float': float,
        'object': object,
        'str': str,
    }
    # Field order used by as_record / as_strings / save / load.
    field_names = (
        'name', 'table_name', 'field_name', 'value', 'data_type', 'rv_type')
    def __init__( # TODO validate arguments
            self,
            name=None,
            table_name=None,
            field_name=None,
            value=None,
            data_type=None,
            rv_type=None,
            function=None,
    ):
        """Create a feature.

        name -- explicit name; generated from the other attributes when
            omitted
        table_name, field_name -- relation and field this feature reads
        value -- specific field value (for indicator / count features)
        data_type -- Python type (or its name) of the feature's values
        rv_type -- RandomVariableType (or its name)
        function -- custom extraction function; when omitted, one is
            chosen from rv_type and value
        """
        self._name = name
        self._table_name = table_name
        self._field_name = field_name
        self._value = value
        # Unknown type names fall back to `object`.
        self._data_type = (
            data_type
            if isinstance(data_type, type)
            else self._names_to_data_types.get(data_type, object))
        self._rv_type = None
        if isinstance(rv_type, RandomVariableType):
            self._rv_type = rv_type
        elif isinstance(rv_type, str):
            self._rv_type = RandomVariableType[rv_type]
        # Generated fallback name, e.g. "<table>-<field>-<value>".
        self._gen_name = make_identifier(
            *(name for name in (table_name, field_name, value)
              if name is not None))
        self._function = function
        if function is None:
            # Pick an extraction function from the rv type and value:
            # binary+value -> existence test; count+value -> occurrence
            # count; otherwise plain field lookup.
            if (self._rv_type == RandomVariableType.binary
                    and value is not None):
                self._function = make_value_exists(
                    table_name, field_name, value)
            elif (self._rv_type == RandomVariableType.count
                    and value is not None):
                self._function = make_count_values(
                    table_name, field_name, value)
            else:
                self._function = make_get_value(table_name, field_name)
        if not callable(self._function):
            raise ValueError(
                'Bad feature function: {!r}'.format(self._function))
        self._key = None  # lazily computed lookup key
    @property
    def key(self):
        """Lookup key: (table, field, value) or (table, field).

        NOTE(review): this tests truthiness, so a falsy value (0, '')
        collapses to the 2-tuple key -- confirm this is intended.
        """
        if self._key is None:
            self._key = (
                (self._table_name, self._field_name, self._value)
                if self._value
                else (self._table_name, self._field_name))
        return self._key
    @property
    def name(self):
        """Explicit name, or the generated one when not set."""
        return (self._name
                if self._name is not None
                else self._gen_name)
    @property
    def table_name(self):
        return self._table_name
    @property
    def field_name(self):
        return self._field_name
    @property
    def value(self):
        return self._value
    @property
    def data_type(self):
        """Python type of the feature's values (`object` by default)."""
        return (self._data_type
                if self._data_type is not None
                else object)
    @property
    def data_type_name(self):
        return self.data_type.__name__
    @property
    def rv_type(self):
        """Random variable type; inferred from the data type when unset,
        defaulting to categorical."""
        if self._rv_type is not None:
            return self._rv_type
        elif (self._data_type is not None
              and self._data_type in self._data_to_rv_types):
            return self._data_to_rv_types[self._data_type]
        return RandomVariableType.categorical
    def apply(self, thing):
        """Compute this feature's value for `thing`, cast to data_type."""
        value = self._function(thing)
        if value is not None and self.data_type not in (None, object):
            value = self.data_type(value)
        return value
    def as_record(self):
        """Tuple of field values in field_names order."""
        return tuple(getattr(self, name) for name in self.field_names)
    def as_strings(self):
        """Field values as strings (None rendered as '')."""
        return map(str_w_empty_none, (
            self.name,
            self.table_name,
            self.field_name,
            self.value,
            self.data_type_name,
            self.rv_type.name,
        ))
    def __repr__(self):
        key_value_strs = ('{}={!r}'.format(name, getattr(self, name))
                          for name in self.field_names)
        return '{}({})'.format(
            general.fq_typename(self), ', '.join(key_value_strs))
# Name of the pseudo-table that holds per-example special attributes
# (example id and label); see detect() and generate_feature_vectors().
_spcl_attrs_tbl_nm = '_special_attrs'
# Loading, saving, detecting features
def load(table): # TODO? add support for reading from file?
    """Construct Feature objects from the rows of a feature table,
    ordered by id."""
    ordered = table.order_by('id').project(*Feature.field_names)
    return [Feature(*row) for row in ordered]
def save(features, file):
    """Write features to a '|'-separated text file with a header row and
    1-based feature ids."""
    out_file = files.new(file)
    with out_file.open('wt') as csv_file:
        # Header row
        print('id', *Feature.field_names, sep='|', file=csv_file)
        # One row per feature, ids numbered from 1
        for feat_id, feat in enumerate(features, start=1):
            print(feat_id, *feat.as_strings(), sep='|', file=csv_file)
def detect(
        fact_tables,
        event_tables,
        positive_label,
        fact_key_field=0,
        event_type_field=1,
        numeric_features=False,
        features_are_counts=True,
):
    """Detect features from fact and event tables.

    Builds the two special features (example id and label), one feature
    per non-key fact column, and one feature per distinct event type.
    Categorical fact features are re-encoded numerically on request.

    fact_tables -- tables of per-example facts
    event_tables -- tables of events
    positive_label -- label value treated as the positive class (required)
    fact_key_field -- key column of the fact tables
    event_type_field -- column holding the event type
    numeric_features -- encode all features as numbers
    features_are_counts -- event features count occurrences rather than
        indicate existence

    Returns the list of Feature objects (fact features first).
    """
    rv_type = (RandomVariableType.count
               if features_are_counts
               else RandomVariableType.binary)
    assert positive_label is not None
    fact_features = [
        Feature(
            table_name=_spcl_attrs_tbl_nm,
            field_name='id',
            data_type=int,
            rv_type=RandomVariableType.continuous,
        ),
        Feature(
            table_name=_spcl_attrs_tbl_nm,
            field_name='label',
            value=positive_label,
            data_type=(int if numeric_features else bool),
            rv_type=RandomVariableType.binary,
        ),
    ]
    event_features = []
    names2tables = {}
    for table in fact_tables:
        fact_features.extend(make_fact_features(table, fact_key_field))
        names2tables[table.name] = table
    for table in event_tables:
        event_features.extend(make_event_features(
            table, event_type_field, rv_type, numeric_features))
        names2tables[table.name] = table
    # Encode categorical features as numeric if requested
    if numeric_features:
        fact_features = encode_categorical_features(
            fact_features, names2tables,
            numeric_features=numeric_features)
    return fact_features + event_features
def make_fact_features(table, key_field=0):
    """Make one get-value Feature per non-key column of table."""
    key_idx = table.header.index_of(key_field)
    return [
        Feature(
            table_name=table.name,
            field_name=field.name,
            data_type=field.pytype,
        )
        # Skip the key column; every other column becomes a feature.
        for idx, field in enumerate(table.header.fields())
        if idx != key_idx
    ]
def make_event_features(
        table,
        event_type_field=0,
        rv_type=RandomVariableType.binary,
        numeric_features=False,
):
    """Make one Feature per distinct (non-None) event type in table."""
    ev_field_name = table.header[event_type_field].name
    distinct_types = {
        tup[0] for tup in table.project(event_type_field) if tup}
    distinct_types.discard(None)
    dtype = (int
             if rv_type == RandomVariableType.count or numeric_features
             else bool)
    return [
        Feature(
            name=make_identifier(table.name, ev_type),
            table_name=table.name,
            field_name=ev_field_name,
            value=ev_type,
            rv_type=rv_type,
            data_type=dtype,
        )
        for ev_type in sorted(distinct_types)
    ]
def encode_categorical_features(
        features,
        names2tables,
        rv_type=RandomVariableType.binary,
        numeric_features=False,
):
    """Replace each categorical feature with per-value indicator features.

    Non-categorical features are passed through unchanged.  The distinct
    values come from the feature's table in names2tables; None is never
    encoded.
    """
    dtype = (int
             if rv_type == RandomVariableType.count or numeric_features
             else bool)
    new_feats = []
    for feature in features:
        # Copy non-categorical features to output
        if feature.rv_type != RandomVariableType.categorical:
            new_feats.append(feature)
            continue
        # One indicator feature per distinct value of the field
        table = names2tables[feature.table_name]
        distinct = {
            tup[0] for tup in table.project(feature.field_name) if tup}
        distinct.discard(None) # Do not encode None
        for value in sorted(distinct):
            new_feats.append(Feature(
                table_name=feature.table_name,
                field_name=feature.field_name,
                value=value,
                rv_type=rv_type, # TODO why is this not always binary?
                data_type=dtype,
            ))
    return new_feats
# Feature functions
def make_get_value(table_name, field_name):
    """Build a function returning the fact (table_name, field_name) of an
    event sequence.  Raises ValueError for non-string names."""
    if not isinstance(table_name, str):
        raise ValueError('Bad table name: {!r}'.format(table_name))
    if not isinstance(field_name, str):
        raise ValueError('Bad field name: {!r}'.format(field_name))
    fact_key = (table_name, field_name)
    def get_value(event_sequence):
        return event_sequence.fact(fact_key)
    return get_value
def make_count_values(table_name, field_name, value):
    """Build a function counting events of type (table_name, field_name,
    value) in an event sequence.  Raises ValueError for non-string names."""
    if not isinstance(table_name, str):
        raise ValueError('Bad table name: {!r}'.format(table_name))
    if not isinstance(field_name, str):
        raise ValueError('Bad field name: {!r}'.format(field_name))
    event_type = (table_name, field_name, value)
    def count_values(event_sequence):
        return event_sequence.n_events_of_type(event_type)
    return count_values
def make_value_exists(table_name, field_name, value):
    """Build a predicate testing whether value occurs as the fact
    (table_name, field_name) or as an event of that type.  Raises
    ValueError for non-string names."""
    if not isinstance(table_name, str):
        raise ValueError('Bad table name: {!r}'.format(table_name))
    if not isinstance(field_name, str):
        raise ValueError('Bad field name: {!r}'.format(field_name))
    fact_key = (table_name, field_name)
    event_type = (table_name, field_name, value)
    def value_exists(event_sequence):
        return (event_sequence.fact(fact_key) == value
                or event_sequence.has_type(event_type))
    return value_exists
# Feature vectors
def lookup_feature(features_key2idx, *keys):
    """Return the index mapped to the first key found in
    features_key2idx, or None when no key is present."""
    for candidate in keys:
        idx = features_key2idx.get(candidate)
        if idx is not None:
            return idx
    return None
def apply_feature(feature_vector, feature_id, feature, event_sequence):
    """Apply feature to event_sequence and store any nonzero result in
    feature_vector under feature_id; warn when the value is None."""
    feat_val = feature.apply(event_sequence)
    # Warn about bad feature values
    if feat_val is None:
        logger = logging.getLogger(__name__)
        strio = io.StringIO()
        event_sequence.pprint(margin=2, file=strio)
        # NOTE(review): brace-style lazy formatting -- `logging` here is
        # barnapy's wrapper, not the stdlib module; confirm it supports
        # '{}' placeholders.
        logger.warning('Value of feature {} is `None`: {!r}\n{}',
                       feature_id, feature, strio.getvalue())
    # Only record the value if it is nonzero
    elif feat_val:
        feature_vector[feature_id] = feat_val
def generate_feature_vectors(
id, facts, events, examples, features, features_key2idx):
# Create an event sequence to efficiently answer feature queries
event_sequence = esal.EventSequence(
(esal.Event(e[0], e[1], e[2]) for e in events), facts, id)
# Build a feature vector for each example definition
for example_def in examples:
# Limit the event records to the window specified in the example
# definition
id, ex_beg, ex_end, label = example_def
es = event_sequence.subsequence(ex_beg, ex_end)
# Set the special attributes as facts
es[_spcl_attrs_tbl_nm, 'id'] = id
es[_spcl_attrs_tbl_nm, 'label'] = label
# Create the feature vector. Be efficient by applying only the
# relevant feature functions.
feature_vector = {}
# Apply features to facts
for key, val in es.facts():
# Lookup the feature either by (table, field, value) or by
# (table, field). (`key` is (table, field).)
feat_idx = lookup_feature(
features_key2idx, (*key, val), key)
if feat_idx is not None:
apply_feature(feature_vector,
# External feature ID is 1-based index
feat_idx + 1, features[feat_idx], es)
# Apply features to events
for ev_type in es.types():
# | |
import os
import pandas as pd
import arff
import numpy as np
from functools import reduce
import sqlite3
import logging
from libs.planet_kaggle import to_multi_label_dict, get_file_count, enrich_with_feature_encoding, featurise_images, generate_validation_files
import tensorflow as tf
from keras.applications.resnet50 import ResNet50
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Each *_PATH below is a tuple of path components relative to the data
# mount point; loaders join them via reduce(os.path.join, PATH, root).
_FRAUD_PATH = 'fraud_detection', 'credit_card_fraud_kaggle', 'creditcard.csv'
_IOT_PATH = 'iot', 'sensor_stream_berkeley', 'sensor.arff'
_AIRLINE_PATH = 'airline', 'airline_14col.data'
_FOOTBALL_PATH = 'football', 'database.sqlite'
_BCI_PATH = 'bci', 'data.npz'
_HIGGS_PATH = 'higgs', 'HIGGS.csv'
# The Planet Kaggle data lives in a directory tree rather than one file.
_KAGGLE_ROOT = 'planet'
_PLANET_KAGGLE_LABEL_CSV = 'train_v2.csv'
_PLANET_KAGGLE_TRAIN_DIR = 'train-jpg'
_PLANET_KAGGLE_VAL_DIR = 'validate-jpg'
def _get_datapath():
try:
datapath = os.environ['MOUNT_POINT']
except KeyError:
logger.info("MOUNT_POINT not found in environment. Defaulting to /fileshare")
datapath = '/fileshare'
return datapath
def load_fraud():
    """ Loads the credit card fraud data

    Credit card transactions made in September 2013 by European
    cardholders: 284,807 transactions over two days, of which 492 (0.172%)
    are frauds, so the classes are highly unbalanced.  Features V1..V28
    are PCA components; only 'Time' (seconds since the first transaction)
    and 'Amount' are untransformed.  'Class' is 1 for fraud, 0 otherwise.
    Given the imbalance, accuracy is best measured with the area under
    the precision-recall curve (AUPRC).

    Collected and analysed by Worldline and the Machine Learning Group of
    ULB (http://mlg.ulb.ac.be); see http://mlg.ulb.ac.be/BruFence and
    http://mlg.ulb.ac.be/ARTML for related projects.

    Please cite: <NAME>, <NAME>, <NAME> and <NAME>. Calibrating
    Probability with Undersampling for Unbalanced Classification. In
    Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015

    Returns
    -------
    pandas DataFrame
    """
    csv_path = reduce(os.path.join, _FRAUD_PATH, _get_datapath())
    return pd.read_csv(csv_path)
def load_iot():
    """ Loads iot data

    Sensor stream of temperature, humidity, light, and voltage readings
    from 54 sensors in the Intel Berkeley Research Lab, recorded over two
    months (one reading per 1-3 minutes).  The sensor ID is used as the
    class label, so the task is to identify the originating sensor from
    the readings and recording time.  The underlying concepts drift over
    time (e.g. lighting during working hours, meeting-room temperature).

    Returns
    -------
    pandas DataFrame
    """
    # BUG FIX: the file was opened with a bare open() whose handle was
    # never closed; use a context manager.
    with open(reduce(os.path.join, _IOT_PATH, _get_datapath())) as arff_file:
        dataset = arff.load(arff_file)
    columns = [attribute[0] for attribute in dataset['attributes']]
    return pd.DataFrame(dataset['data'], columns=columns)
def load_airline():
    """ Loads airline data

    Flight arrival and departure details for all commercial flights
    within the USA from October 1987 to April 2008: around 116 million
    records (~5.76 GB).  The 13 attribute columns are Year (1987-2008),
    Month (1-12), Day of Month (1-31), Day of Week (1:Monday - 7:Sunday),
    CRS Departure Time (hhmm), CRS Arrival Time (hhmm), Unique Carrier,
    Flight Number, Actual Elapsed Time (min), Origin, Destination,
    Distance (miles), and Diverted (1=yes, 0=no).  The target attribute
    is Arrival Delay, in (possibly negative) minutes.

    Link to the source: http://kt.ijs.si/elena_ikonomovska/data.html

    Returns
    -------
    pandas DataFrame
    """
    cols = ['Year', 'Month', 'DayofMonth', 'DayofWeek', 'CRSDepTime', 'CRSArrTime', 'UniqueCarrier', 'FlightNum', 'ActualElapsedTime', 'Origin', 'Dest', 'Distance', 'Diverted', 'ArrDelay']
    data_path = reduce(os.path.join, _AIRLINE_PATH, _get_datapath())
    return pd.read_csv(data_path, names=cols)
def load_football():
    """ Loads football data

    Dataset of football stats: +25,000 matches and +10,000 players from
    11 European countries and their lead championships, seasons 2008 to
    2016, with player attributes sourced from EA Sports' FIFA video game
    series (including weekly updates), team line-ups with squad formation
    (X, Y coordinates), betting odds from up to 10 providers, and
    detailed match events for +10,000 matches.
    The meaning of the columns can be found here: http://www.football-data.co.uk/notes.txt

    Number of attributes in each table (size of the dataframe):
    countries (11, 2), matches (25979, 115), leagues (11, 3),
    teams (299, 5), players (183978, 42)

    Link to the source: https://www.kaggle.com/hugomathien/soccer

    Returns
    -------
    list of pandas DataFrame
    """
    database_path = reduce(os.path.join, _FOOTBALL_PATH, _get_datapath())
    # BUG FIX: sqlite3's context manager only wraps a transaction; it
    # does not close the connection, so close it explicitly.  Also use
    # read_sql_query consistently for all five tables.
    con = sqlite3.connect(database_path)
    try:
        countries = pd.read_sql_query("SELECT * from Country", con)
        matches = pd.read_sql_query("SELECT * from Match", con)
        leagues = pd.read_sql_query("SELECT * from League", con)
        teams = pd.read_sql_query("SELECT * from Team", con)
        players = pd.read_sql_query("SELECT * FROM Player_Attributes;", con)
    finally:
        con.close()
    return countries, matches, leagues, teams, players
def load_bci():
    """ Loads BCI data

    Measurements from 64 EEG sensors on the scalp of a single
    participant, recorded to determine from brain activity when the
    participant is paying attention.

    Returns
    -------
    A tuple containing four numpy arrays
        train features, train labels, test features, test labels
    """
    archive = np.load(reduce(os.path.join, _BCI_PATH, _get_datapath()))
    return (archive['train_X'], archive['train_y'],
            archive['test_X'], archive['test_y'])
def load_higgs():
    """ Loads HIGGS data

    Atomic particle measurements (11 million observations, produced with
    Monte Carlo simulations) for classifying signal processes producing
    Higgs bosons against background processes.  The first column is the
    class label (1 signal, 0 background); the next 21 columns are
    kinematic properties measured by the detectors and the last 7 are
    high-level features derived by physicists to help discriminate the
    two classes.

    Link to the source: https://archive.ics.uci.edu/ml/datasets/HIGGS

    Returns
    -------
    pandas DataFrame
    """
    cols = ['boson','lepton_pT','lepton_eta','lepton_phi','missing_energy_magnitude','missing_energy_phi','jet_1_pt','jet_1_eta','jet_1_phi','jet_1_b-tag','jet_2_pt','jet_2_eta','jet_2_phi','jet_2_b-tag','jet_3_pt','jet_3_eta','jet_3_phi','jet_3_b-tag','jet_4_pt','jet_4_eta','jet_4_phi','jet_4_b-tag','m_jj','m_jjj','m_lv','m_jlv','m_bb','m_wbb','m_wwbb']
    data_path = reduce(os.path.join, _HIGGS_PATH, _get_datapath())
    return pd.read_csv(data_path, names=cols)
def load_planet_kaggle():
""" Loads Planet Kaggle data
Dataset of satellite images of the Amazon. The objective of this dataset is to label satellite image chips
with atmospheric conditions and various classes of land cover/land use. Resulting algorithms will help the
global community better understand where, how, and why deforestation happens all over the world. The images
use the GeoTiff format and each contain four bands of data: red, green, blue, and near infrared.
To treat the images we used transfer learning with the CNN ResNet50. The images are featurized with this
deep neural network. Once the features are generated we can use a boosted tree to classify them.
Link to the source: https://www.kaggle.com/c/planet-understanding-the-amazon-from-space/data
Returns
-------
A tuple containing four numpy arrays
train_features
y_train
validation_features
y_val
"""
csv_path = reduce(os.path.join, (_KAGGLE_ROOT, _PLANET_KAGGLE_LABEL_CSV), _get_datapath())
train_path = reduce(os.path.join, (_KAGGLE_ROOT, _PLANET_KAGGLE_TRAIN_DIR), _get_datapath())
val_path = reduce(os.path.join, (_KAGGLE_ROOT, _PLANET_KAGGLE_VAL_DIR), _get_datapath())
assert os.path.isfile(csv_path)
assert os.path.exists(train_path)
if not os.path.exists(val_path): os.mkdir(val_path)
if not os.listdir(val_path):
logger.info('Validation folder is empty, moving files...')
generate_validation_files(train_path, val_path)
logger.info('Reading in labels')
labels_df = pd.read_csv(csv_path).pipe(enrich_with_feature_encoding)
| |
# <gh_stars>0
import random
from random import shuffle
import numpy as np
from datetime import datetime
import time
import queue
import threading
import logging
from PIL import Image
import itertools
import re
import os
import glob
import shutil
import sys
import copy
import h5py
from netCDF4 import Dataset
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.parallel.data_parallel import data_parallel
import torch.utils.checkpoint as cp
from collections import OrderedDict
from torch import Tensor
from typing import Any, List, Tuple
# Pin this process to the first GPU before torch initializes CUDA.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# Target region and output variable for this inference run.
target_city = 'R3'
target_out_var_index = 3
# Training step whose checkpoint is restored below.
global_step_start = 248000
initial_checkpoint = 'model' + ('/%09d_model.pth' % (global_step_start))
# Results are written to e.g. "R3_3".
out_dir = target_city + '_' + str(target_out_var_index)
input_data_folder_path = '../../0_data_heldout/' + target_city
# Temporal layout: 96 frames per day, 4 input frames, 32 predicted frames,
# 36 frames in a full sequence.
num_frame_per_day = 96
num_frame_before = 4
num_frame_out = 32
num_frame_sequence = 36
# Spatial size of every frame.
height=256
width =256
# Channel bookkeeping: 9 continuous channels, 16 discrete source channels
# expanded to one-hot slots (their total must equal num_channel_2 -- see
# the assert on discrete_data_info_list below).
num_channel_1 = 9
num_channel_2_src = 16
num_channel_2 = 107 + num_channel_2_src
# continuous data + its validity mask + one-hot discrete data
num_channel = (num_channel_1*2 + num_channel_2)
num_channel_out= 4
# The network stacks frames along the channel axis.
NUM_INPUT_CHANNEL = num_channel * num_frame_before
NUM_OUTPUT_CHANNEL = num_channel_out * num_frame_out
SEED = 0
# GroupNorm group count used by every conv block below.
num_groups = 8
EPS = 1e-12
np.set_printoptions(precision=6)
class Deconv3x3Block(nn.Sequential):
    """Upsampling block: ConvTranspose2d(3x3, stride 2) -> ELU -> GroupNorm.

    Child-module names ('deconv', 'elu', 'norm') are fixed so that saved
    state_dict keys keep matching.
    """

    def __init__(self,
                 in_size: int,
                 h_size: int, ) -> None:
        super().__init__()
        stages = (
            ('deconv', nn.ConvTranspose2d(in_size, h_size, kernel_size=3,
                                          stride=2, padding=1, bias=True)),
            ('elu', nn.ELU(inplace=True)),
            ('norm', nn.GroupNorm(num_groups=num_groups, num_channels=h_size)),
        )
        for tag, module in stages:
            self.add_module(tag, module)
class Conv1x1Block(nn.Sequential):
    """Pointwise (1x1) convolution with no activation or normalization.

    The single child is named 'conv' to keep state_dict keys stable.
    """

    def __init__(self,
                 in_size: int,
                 h_size: int, ) -> None:
        super().__init__()
        projection = nn.Conv2d(in_size, h_size, kernel_size=1,
                               stride=1, padding=0, bias=True)
        self.add_module('conv', projection)
class Conv3x3Block(nn.Sequential):
    """3x3 same-padding convolution followed by ELU and GroupNorm.

    Child names ('conv', 'elu', 'norm') are fixed for state_dict
    compatibility.
    """

    def __init__(self,
                 in_size: int,
                 h_size: int, ) -> None:
        super().__init__()
        for tag, module in (
                ('conv', nn.Conv2d(in_size, h_size, kernel_size=3,
                                   stride=1, padding=1, bias=True)),
                ('elu', nn.ELU(inplace=True)),
                ('norm', nn.GroupNorm(num_groups=num_groups,
                                      num_channels=h_size))):
            self.add_module(tag, module)
class AvgBlock(nn.Sequential):
    """Thin Sequential wrapper around a single AvgPool2d layer."""

    def __init__(self,
                 kernel_size: int,
                 stride: int,
                 padding: int) -> None:
        super().__init__()
        pooling = nn.AvgPool2d(kernel_size=kernel_size, stride=stride,
                               padding=padding)
        self.add_module('pool', pooling)
class MaxBlock(nn.Sequential):
    """Thin Sequential wrapper around a single MaxPool2d layer."""

    def __init__(self,
                 kernel_size: int,
                 stride: int,
                 padding: int) -> None:
        super().__init__()
        pooling = nn.MaxPool2d(kernel_size=kernel_size, stride=stride,
                               padding=padding)
        self.add_module('pool', pooling)
class DownBlock(nn.Module):
    """Encoder stage: optional 2x average-pooling followed by two densely
    connected 3x3 conv blocks and a 1x1 projection to ``out_size`` channels.

    Attribute names (do_pool, pool, conv_1, conv_3, conv_2) are preserved so
    existing checkpoints keep loading.
    """

    def __init__(self,
                 in_size: int,
                 h_size: int,
                 out_size: int,
                 do_pool: int = True):
        super(DownBlock, self).__init__()
        self.do_pool = do_pool
        self.pool = AvgBlock(kernel_size=2, stride=2, padding=0) if do_pool else None
        # Dense connectivity: each conv sees the concatenation of all earlier
        # feature maps, so input widths accumulate by h_size per stage.
        self.conv_1 = Conv3x3Block(in_size=in_size, h_size=h_size)
        self.conv_3 = Conv3x3Block(in_size=in_size + h_size, h_size=h_size)
        self.conv_2 = Conv1x1Block(in_size=in_size + 2 * h_size, h_size=out_size)

    def forward(self, x):
        if self.do_pool:
            x = self.pool(x)
        feats = [x]
        feats.append(self.conv_1(feats[-1]))
        mid = self.conv_3(torch.cat(feats, 1))
        feats.append(mid)
        return self.conv_2(torch.cat(feats, 1))

    def cuda(self, ):
        # Kept for compatibility; nn.Module.cuda() already moves submodules.
        super(DownBlock, self).cuda()
        self.conv_1.cuda()
        self.conv_3.cuda()
        self.conv_2.cuda()
        return self
class UpBlock(nn.Module):
    """Decoder stage: deconv-upsample x1, resize it to the skip tensor x2's
    spatial size, concatenate along channels, and fuse with a 3x3 conv block.

    Attribute names (deconv, out_conv) are preserved for checkpoint loading.
    """

    def __init__(self,
                 in_size: int,
                 in_size_2: int,
                 h_size: int,
                 out_size: int,
                 ):
        super(UpBlock, self).__init__()
        self.deconv = Deconv3x3Block(in_size=in_size, h_size=h_size)
        self.out_conv = Conv3x3Block(in_size=h_size + in_size_2, h_size=out_size)

    def forward(self, x1, x2):
        upsampled = self.deconv(x1)
        # The transposed conv only roughly doubles the size; bilinear
        # interpolation snaps it to the skip connection's exact H x W.
        upsampled = F.interpolate(upsampled, size=x2.size()[2:4],
                                  scale_factor=None, mode='bilinear',
                                  align_corners=False,
                                  recompute_scale_factor=None)
        fused = torch.cat([x2, upsampled], dim=1)
        return self.out_conv(fused)

    def cuda(self, ):
        # Kept for compatibility; nn.Module.cuda() already moves submodules.
        super(UpBlock, self).cuda()
        self.deconv.cuda()
        self.out_conv.cuda()
        return self
class NetA(nn.Module):
    """U-Net-style encoder/decoder over channel-stacked input frames.

    block0 is a full-resolution stem; block1-block6 are pooling encoder
    stages; block20 is the bottleneck; block15..block10 upsample back with
    skip connections to the matching encoder outputs.  The head emits
    NUM_OUTPUT_CHANNEL maps that are split into 4 per-variable tensors.
    """
    def __init__(self,):
        super(NetA, self).__init__()
        # Encoder (block0 keeps resolution; each later stage pools by 2).
        self.block0 = DownBlock(in_size=NUM_INPUT_CHANNEL, h_size=128, out_size=128, do_pool=False)
        self.block1 = DownBlock(in_size=128, h_size=128, out_size=128,)
        self.block2 = DownBlock(in_size=128, h_size=128, out_size=128, )
        self.block3 = DownBlock(in_size=128, h_size=128, out_size=128, )
        self.block4 = DownBlock(in_size=128, h_size=128, out_size=128, )
        self.block5 = DownBlock(in_size=128, h_size=128, out_size=128, )
        self.block6 = DownBlock(in_size=128, h_size=128, out_size=128,)
        # Bottleneck conv on the deepest feature map.
        self.block20 = Conv3x3Block(in_size=128, h_size=128)
        # Decoder; each UpBlock consumes the matching encoder output as skip.
        self.block15 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block14 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block13 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block12 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,)
        self.block11 = UpBlock(in_size=128, in_size_2=128 , h_size=128, out_size=128,)
        self.block10 = UpBlock(in_size=128, in_size_2=128 , h_size=128, out_size=128,)
        # Output head: plain 3x3 conv, no activation or normalization.
        self.out_conv = nn.Sequential(
            nn.Conv2d(128*1, NUM_OUTPUT_CHANNEL, kernel_size=3, stride=1, padding=1, bias=True)
        )
        # Weight init: Kaiming for convs/deconvs, gamma=1 / beta=0 for norms.
        if 1:
            for name, m in self.named_modules():
                if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                    nn.init.kaiming_normal_(m.weight)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.GroupNorm):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.Linear):
                    nn.init.constant_(m.bias, 0)
    def forward(self, x):
        batch_size = len(x)
        # Encoder path; every stage's output is kept for the skips.
        x0 = self.block0(x)
        x1 = self.block1(x0)
        x2 = self.block2(x1)
        x3 = self.block3(x2)
        x4 = self.block4(x3)
        x5 = self.block5(x4)
        x6 = self.block6(x5)
        x = self.block20(x6)
        # Decoder path with skip connections, deepest first.
        x = self.block15(x, x5)
        x = self.block14(x, x4)
        x = self.block13(x, x3)
        x = self.block12(x, x2)
        x = self.block11(x, x1)
        x = self.block10(x, x0)
        x = self.out_conv(x)
        # Split the channel axis into (variable, time); return one tensor per
        # output variable, each shaped (batch, 1, num_frame_out, height, width).
        x = torch.reshape(x, (batch_size, num_channel_out, 1, num_frame_out, height, width))
        return x[:,0,:,:,:,:], x[:,1,:,:,:,:], x[:,2,:,:,:,:], x[:,3,:,:,:,:]
    def cuda(self, ):
        # Redundant with nn.Module.cuda(), which already moves all registered
        # submodules; kept for backward compatibility.
        super(NetA, self).cuda()
        self.block0.cuda()
        self.block1.cuda()
        self.block2.cuda()
        self.block3.cuda()
        self.block4.cuda()
        self.block5.cuda()
        self.block6.cuda()
        self.block20.cuda()
        self.block15.cuda()
        self.block14.cuda()
        self.block13.cuda()
        self.block12.cuda()
        self.block11.cuda()
        self.block10.cuda()
        self.out_conv.cuda()
        return self
# Per-channel (min, max, mean) statistics for the 9 continuous input
# channels, loaded from a tab-separated info file.
continuous_data_info_list = np.zeros((num_channel_1, 3), np.float32)
if 1:
    continuous_data_info_filepath = os.path.join('../0_data', 'continuous_data_info_all.txt')
    c=0
    with open(continuous_data_info_filepath) as info_file:
        content = info_file.readlines()
        for line in content:
            cols = line.strip().split('\t')
            d_min = int( cols[0])
            d_max = int( cols[1])
            d_avg = float(cols[2])
            continuous_data_info_list[c,:] = (d_min,d_max,d_avg)
            c += 1
    assert c == num_channel_1
# Broadcastable (1, num_channel_1, 1, 1) min/max arrays used for
# normalization in get_data.
continuous_data_info_list_min = continuous_data_info_list[np.newaxis,:, 0, np.newaxis,np.newaxis,]
continuous_data_info_list_max = continuous_data_info_list[np.newaxis,:, 1, np.newaxis,np.newaxis,]
# Fixed (min, max) ranges for three continuous output variables.
continuous_output_info_list = np.zeros((3, 2), np.float32)
continuous_output_info_list[0,:] = (130, 350)
continuous_output_info_list[1,:] = (0, 50)
continuous_output_info_list[2,:] = (0, 100)
continuous_output_info_list = continuous_output_info_list[np.newaxis, :, :, np.newaxis,np.newaxis,]
# Number of one-hot slots (flag count + 1) for each discrete source channel.
discrete_data_info_list = np.zeros((num_channel_2_src, ), np.uint8)
if 1:
    discrete_data_info_filepath = os.path.join('../0_data_heldout/', 'discrete_data_info.txt')
    c=0
    with open(discrete_data_info_filepath) as info_file:
        content = info_file.readlines()
        for line in content:
            cols = line.strip().split('\t')
            num_flag = int(cols[0])
            discrete_data_info_list[c] = (num_flag+1)
            c += 1
    assert c == num_channel_2_src
# The one-hot slots of all discrete channels must exactly fill num_channel_2.
assert np.sum(discrete_data_info_list) == num_channel_2
# cum_num_flag_list[c] = [start, end) offsets of channel c's one-hot slice.
cum_num_flag_list = np.zeros((num_channel_2_src, 2), np.uint8)
cc = 0
for c in range(num_channel_2_src):
    cum_num_flag_list[c,0] = cc
    cc+=discrete_data_info_list[c]
    cum_num_flag_list[c,1] = cc
# Offsets are stored as uint8, so the total must fit in one byte.
assert cc < 256
if __name__ == '__main__':
COMMON_STRING ='@%s: \n' % os.path.basename(__file__)
COMMON_STRING += '\tset random seed\n'
COMMON_STRING += '\t\tSEED = %d\n'%SEED
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
COMMON_STRING += '\tset cuda environment\n'
COMMON_STRING += '\t\ttorch.__version__ = %s\n'%torch.__version__
COMMON_STRING += '\t\ttorch.version.cuda = %s\n'%torch.version.cuda
COMMON_STRING += '\t\ttorch.backends.cudnn.version() = %s\n'%torch.backends.cudnn.version()
try:
COMMON_STRING += '\t\tos[\'CUDA_VISIBLE_DEVICES\'] = %s\n'%os.environ['CUDA_VISIBLE_DEVICES']
NUM_CUDA_DEVICES = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
except Exception:
COMMON_STRING += '\t\tos[\'CUDA_VISIBLE_DEVICES\'] = None\n'
NUM_CUDA_DEVICES = 1
COMMON_STRING += '\t\ttorch.cuda.device_count() = %d\n'%torch.cuda.device_count()
print(COMMON_STRING)
try:
if not os.path.exists(out_dir):
os.makedirs(out_dir)
except Exception:
print('out_dir not made')
exit(-1)
net = NetA().cuda()
asii_logit_m = -torch.logit(torch.from_numpy(np.array(0.003,np.float32)).float().cuda())
assert initial_checkpoint is not None
if 1:
print('Loading ', initial_checkpoint)
state_dict_0 = torch.load(initial_checkpoint, map_location=lambda storage, loc: storage)
net.load_state_dict(state_dict_0, strict=True)
net.eval()
index_list2 = np.arange(num_frame_before * height * width)
    def get_data(input_data_1, input_data_2):
        """Build one flattened network input array from raw frames.

        input_data_1 : continuous channels, indexed as
            (num_frame_before, num_channel_1, H, W); the value 65535 marks
            missing pixels (inferred from the masking below -- confirm).
        input_data_2 : discrete channels, indexed as
            (num_frame_before, num_channel_2_src, H, W).
        Returns a float32 array of shape (num_channel * num_frame_before,
        height, width): normalized data, validity mask, then one-hot flags.
        """
        # Validity mask: 1 where continuous data is present, 0 where missing.
        input_data_out_3 = np.ones((num_frame_before, num_channel_1, height, width), np.float32)
        # Min-max normalize the continuous channels using the global stats.
        input_data_out_1 = \
            (input_data_1
             - continuous_data_info_list_min)\
            / (continuous_data_info_list_max
               - continuous_data_info_list_min)
        # 65535 is the missing-data sentinel: zero both data and mask there.
        input_data_out_1[input_data_1==65535] = 0
        input_data_out_3[input_data_1==65535] = 0
        # (frame, channel, H, W) -> (channel, frame, H, W)
        input_data_out_1 = np.moveaxis(input_data_out_1, 1, 0)
        input_data_out_3 = np.moveaxis(input_data_out_3, 1, 0)
        # Shift flag values by one in uint16 so that 255 (presumably the
        # missing flag -- confirm) wraps to slot 0 instead of overflowing.
        input_data_2 = input_data_2.astype(np.uint16)
        input_data_2 += 1
        input_data_2[input_data_2==256] = 0
        input_data_2 = input_data_2.astype(np.uint8)
        # One-hot encode each discrete channel into its cum_num_flag_list slice.
        one_hot_list = np.zeros((num_frame_before * height * width, num_channel_2), np.uint8)
        for c in range(num_channel_2_src):
            one_hot_list[index_list2, cum_num_flag_list[c,0] + input_data_2[:,c,:,:].reshape(-1)] = 1
        input_data_out_2 = np.moveaxis(one_hot_list, -1, 0).reshape(num_channel_2, num_frame_before, height, width)
        # Stack continuous data, mask, and one-hot flags along channels,
        # then fold the frame axis into the channel axis.
        input_data_out = np.concatenate([input_data_out_1, input_data_out_3, input_data_out_2, ], axis=0)
        input_data_out = input_data_out.reshape(-1, height, width)
        return input_data_out
asii_frame_file_name_prefix = 'S_NWC_ASII-TF_MSG4_Europe-VISIR_'
asii_frame_file_name_prefix_len = len(asii_frame_file_name_prefix)
input_folder_path = input_data_folder_path + '/' + 'test'
num_day_done = 0
for day_folder_name in os.listdir(input_folder_path):
day_folder_path = os.path.join(input_folder_path, day_folder_name)
if os.path.isdir(day_folder_path) == False:
continue
day = int(day_folder_name)
frame_file_name_list = []
for frame_file_name in os.listdir(os.path.join(day_folder_path, 'ASII')):
if frame_file_name.split('.')[-1] != 'nc':
continue
assert frame_file_name[asii_frame_file_name_prefix_len-1] == '_'
assert frame_file_name[asii_frame_file_name_prefix_len+8] == 'T'
frame_file_name_list.append(frame_file_name)
assert len(frame_file_name_list) == num_frame_before
frame_file_name_list = sorted(frame_file_name_list)
min_before = 0
ymd_list = []
for frame_file_name in frame_file_name_list:
ymd = int(frame_file_name[asii_frame_file_name_prefix_len : (asii_frame_file_name_prefix_len+8)])
hour = int(frame_file_name[asii_frame_file_name_prefix_len+9 : (asii_frame_file_name_prefix_len+11)])
minute = int(frame_file_name[asii_frame_file_name_prefix_len+11 : (asii_frame_file_name_prefix_len+13)])
ymd_list.append((ymd, hour, minute))
min_now = (ymd-20190000)*24*60 + hour*60 + minute
assert min_before < min_now
min_before = min_now
file_list=[]
for (ymd, hour, minute) in ymd_list:
file_list.append(\
(os.path.join(input_folder_path, str(day), 'CTTH', 'S_NWC_CTTH_MSG4_Europe-VISIR_' + str(ymd) + ('T%02d%02d%02d' % (hour, minute, 0)) + 'Z.nc'),
os.path.join(input_folder_path, str(day), 'CRR', 'S_NWC_CRR_MSG4_Europe-VISIR_' + str(ymd) + ('T%02d%02d%02d' % (hour, minute, 0)) + 'Z.nc'),
os.path.join(input_folder_path, str(day), 'ASII', 'S_NWC_ASII-TF_MSG4_Europe-VISIR_' + str(ymd) + ('T%02d%02d%02d' % (hour, minute, 0)) + 'Z.nc'),
os.path.join(input_folder_path, str(day), 'CMA', 'S_NWC_CMA_MSG4_Europe-VISIR_' + str(ymd) + ('T%02d%02d%02d' % (hour, minute, 0)) + 'Z.nc'),
os.path.join(input_folder_path, str(day), 'CT', 'S_NWC_CT_MSG4_Europe-VISIR_' + str(ymd) + ('T%02d%02d%02d' % (hour, minute, 0)) + 'Z.nc'),
))
file_list_2=[]
for (ymd, hour, minute) in ymd_list:
file_list_2.append(\
(os.path.join(input_folder_path, str(day), 'CTTH', 'S_NWC_CTTH_MSG2_Europe-VISIR_' + str(ymd) + ('T%02d%02d%02d' % (hour, minute, 0)) + 'Z.nc'),
os.path.join(input_folder_path, str(day), 'CRR', 'S_NWC_CRR_MSG2_Europe-VISIR_' + str(ymd) + ('T%02d%02d%02d' % (hour, minute, 0)) | |
"true"
else:
#we don't have meta, we delete the resource
resource_node = self.get_resource(res_query)
if isinstance(resource_node, dict) and ''.join(resource_node['path']) in self.resources['children']:
# user wants to delete the entire TB
if ''.join(resource_node['path']) in self.resources['children']:
self.resources['children'].pop(resource_node['path'][0])
issaved = self.save_tb(props)
if not issaved:
msg = "We could not save this TB: {}.".format(res_query)
logDebug(msg)
return "*ERROR* " + msg
#user wants to delete a component of the TB
else:
reserved_node = self.get_reserved_resource(res_query, props)
if not reserved_node:
logError('Cannot access reserved resource, path or ID `{}` !'.format(res_query))
return False
#get the direct parent of the resource
base_path = '/'.join(reserved_node['path'][1:-1])
extract_r = self.get_path(base_path, reserved_node)
try:
extract_r['children'].pop(reserved_node['path'][-1])
except:
logError('User {}: Can not find child. Maybe it just was renamed.'.format(user_info[0]))
return False
extract_r['path'] = reserved_node['path'][:-1]
return "true"
@cherrypy.expose
def rename_tb(self, query, new_name, props={}):
"""
Rename a resource.
"""
logDebug('Rename TB `{}`, new name `{}`, props {}.'.format(query, new_name, props))
user_info = self.user_info(props)
if ':' in query:
meta = query.split(':')[1]
query = query.split(':')[0]
else:
meta = ''
_is_res_reserved = self.is_resource_reserved(query, props)
if _is_res_reserved and _is_res_reserved != user_info[0]:
msg = 'User {}: The resource is reserved for {} !'.format(user_info[0], _is_res_reserved)
logWarning(msg)
return '*ERROR* ' + msg
_is_res_locked = self.is_resource_locked(query)
if _is_res_locked and _is_res_locked != user_info[0]:
msg = 'User {}: The resource is locked for {} !'.format(user_info[0], _is_res_locked)
logWarning(msg)
return '*ERROR* ' + msg
if '/' in new_name or ':' in new_name:
logWarning('New resource name cannot contain `/` or `:`!')
return False
# If no resources...
if not self.resources.get('children'):
msg = 'There are no resources defined !'
logError(msg)
return '*ERROR* ' + msg
# Correct node path
result = self.get_reserved_resource(query, props)
if not result:
logWarning('Cannot access reserved TB `{}` !')
return False
if result['path'][-1] == new_name:
logWarning('Nothing to rename for TB `{}` !'.format(new_name))
return True
with self.ren_lock:
# If must rename a Meta info
if meta:
try:
# Modify meta to the parent
if len(result['path']) == 1:
child = result
# Modify to a component
else:
base_path = '/'.join(result['path'][1:])
child = self.get_path(base_path, result)
child['meta'][new_name] = child['meta'].pop(meta)
except:
msg = 'Rename meta `{}` error, for TB `{}`!'.format(meta, result['path'][-1])
logWarning(msg)
return 'false'
return self.save_reserved_tb(query, props)
# If must rename a normal node
else:
# The parent is directly from the root and we want to rename its immediate children
if len(result['path']) == 2:
result['children'][new_name] = result['children'].pop(result['path'][-1])
# The component we want to rename is deep in the tree
elif len(result['path']) > 2:
base_path = '/'.join(result['path'][1:-1])
parent = self.get_path(base_path, result)
if not isinstance(parent, dict):
msg = msg = 'Rename error for TB `{}`, invalid parent \
on {}!'.format(result['path'][-1], result['id'])
logWarning(msg)
return 'false'
parent['children'][new_name] = parent['children'].pop(result['path'][-1])
else:
result['path'] = [new_name]
# Only have to change the current path and the path of the children
result['path'] = [result['path'][0]]
# Recursive update paths
self.change_path(result, result['path'])
return True
    @cherrypy.expose
    def create_component_tb(self, name, parent=None, props={}):
        """
        Create a component for an existing TB.
        Return new component's id, or an "*ERROR* ..." string on failure.
        """
        user_info = self.user_info(props)
        props = self.valid_props(props)
        # A component needs a real TB parent; '/' (or id '1') is the root.
        if parent == '/' or parent == '1':
            msg = "The parent value is not an existing TB. Maybe you want to add a new TB. Parent: {}".format(parent)
            logError(msg)
            return "*ERROR* " + msg
        # The parent must not be reserved or locked by another user.
        _is_res_reserved = self.is_resource_reserved(parent, props)
        if _is_res_reserved and _is_res_reserved != user_info[0]:
            msg = 'User {}: The resource is reserved for {} !'.format(user_info[0], _is_res_reserved)
            logError(msg)
            return '*ERROR* ' + msg
        _is_res_locked = self.is_resource_locked(parent)
        if _is_res_locked and _is_res_locked != user_info[0]:
            msg = 'User {}: The resource is locked for {} !'.format(user_info[0], _is_res_locked)
            logError(msg)
            return '*ERROR* ' + msg
        with self.acc_lock:
            #the resource should be reserved previously
            parent_p = self.get_reserved_resource(parent, props)
            if not parent_p:
                msg = "User {}: Could not find this TB: '{}'".format(user_info[0], parent)
                logDebug(msg)
                return "*ERROR* " + msg
            #the resources is deep in the tree, we have to get its direct parent
            if len(parent_p['path']) >= 2:
                full_path = parent_p['path']
                base_path = '/'.join(parent_p['path'][1:])
                parent_p = self.get_path(base_path, parent_p)
                parent_p['path'] = full_path
            # '/' is the path separator, so it may not appear in the name.
            if '/' in name:
                logDebug('Stripping slash characters from `{}`...'.format(name))
                name = name.replace('/', '')
            if name in parent_p['children']:
                msg = "A component with this name '{}' already exists for this TB: '{}'".format(name, parent)
                logDebug(msg)
                return "*ERROR* " + msg
            # the resource doesn't exist - create it
            res_id = self.generate_index()
            parent_p['children'][name] = {'id': res_id, 'meta': props, 'children': {}, \
                'path': parent_p['path'] + [name]}
            return res_id
    @cherrypy.expose
    def create_new_tb(self, name, parent=None, props={}):
        """
        Create new test bed.
        Return the id of the new created tb, or an "*ERROR* ..." string.
        """
        user_info = self.user_info(props)
        resources = self.resources
        # New TBs can only hang off the root ('/' or id '1').
        if parent != '/' and parent != '1':
            msg = "The parent value is not root. Maybe you want to add a component\
            to an existing SUT. Parent: {}".format(parent)
            logError(msg)
            return "*ERROR* " + msg
        props = self.valid_props(props)
        with self.acc_lock:
            # root can not be reserved so we just take it
            parent_p = self.get_resource('/', resources)
            if not parent_p or isinstance(parent_p, str):
                logFull("User: {} no result for query `{}`" .format(user_info[0], parent))
                return None
            # '/' is the path separator, so it may not appear in the name.
            if '/' in name:
                logDebug('Stripping slash characters from `{}`...'.format(name))
                name = name.replace('/', '')
            if name in self.resources['children']:
                msg = "User {}: A TB with name `{}` already exists!".format(user_info[0], name)
                logDebug(msg)
                return "*ERROR* " + msg
            # the resource doesn't exist - create it
            res_id = self.generate_index()
            parent_p['children'][name] = {'id': res_id, 'meta': props, 'children': {}, 'path': [name]}
            # Persist the new TB immediately.
            issaved = self.save_tb(props)
            if not issaved:
                msg = "User {}: Could not save TB `{}`".format(user_info[0], name)
                logDebug(msg)
                return "*ERROR* " + msg
            return res_id
    @cherrypy.expose
    def update_meta_tb(self, name, parent=None, props={}):
        """
        Modify a resource, using a name, a parent Path or ID and some properties.

        Merges `props` (minus the internal '__user' key) into the target
        node's meta dict.  Returns "true" on success, False/"*ERROR* ..."
        on failure.
        """
        logDebug('parent = {} -- props = {} -- name = {}'.format(parent, props, name))
        user_info = self.user_info(props)
        resources = self.resources
        props = self.valid_props(props)
        if not props or not self.valid_props(props):
            msg = "Wrong format for props = {}".format(props)
            logDebug(msg)
            return "*ERROR* " + msg
        if parent == '/' or parent == "1":
            #we can not reserve the root so we just take the TB we need
            if name[0] != '/':
                name = '/' + name
            verify_reserved = name
        else:
            #take the TB that has the component we need
            verify_reserved = parent
        # The target TB must not be reserved or locked by another user.
        _is_res_reserved = self.is_resource_reserved(verify_reserved, props)
        if _is_res_reserved and _is_res_reserved != user_info[0]:
            msg = 'User {}: The resource is reserved for {} !'.format(user_info[0], _is_res_reserved)
            logError(msg)
            return '*ERROR* ' + msg
        _is_res_locked = self.is_resource_locked(verify_reserved)
        if _is_res_locked and _is_res_locked != user_info[0]:
            msg = 'User {}: Reserve resource: The resource is locked for {} !'.format(user_info[0], _is_res_locked)
            logError(msg)
            return '*ERROR* ' + msg
        with self.acc_lock:
            # Drop the internal '__user' marker before merging into meta.
            l_props = dict(props)
            if '__user' in l_props:
                del l_props['__user']
            # If this is the root resource, update the properties
            if name == '/' and parent == '/':
                resources['meta'].update(l_props)
                # Write changes for Device or SUT
                issaved = self.save_tb(props)
                if not issaved:
                    msg = "User {}: We didnt save this entry = {} having props = {}".format(user_info[0], name, props)
                    logDebug(msg)
                    return "*ERROR* " + msg
                return "true"
            parent_p = self.get_reserved_resource(verify_reserved, props)
            if not parent_p:
                logError('User {}: Cannot access reserved resource `{}` !'.format(user_info[0], verify_reserved))
                return False
            #the resources is deep in the tree, we have to get its direct parent
            if len(parent_p['path']) >= 2:
                full_path = parent_p['path']
                base_path = '/'.join(parent_p['path'][1:])
                parent_p = self.get_path(base_path, parent_p)
                parent_p['path'] = full_path
            if '/' in name:
                logDebug('User {}: Stripping slash characters from `{}`...'.format(user_info[0], name))
                name = name.replace('/', '')
            # the resource exists
            if name in parent_p['children']:
                child_p = parent_p['children'][name]
            elif name in parent_p['path']:
                child_p = parent_p
            else:
                return "User {}: *ERROR* the resource {} can not be found!".format(user_info[0], name)
            # We have to update the props
            child_p['meta'].update(l_props)
        return "true"
@cherrypy.expose
def set_tb(self, name, parent=None, props={}):
"""
Higher level wrapper over functions Create new TB, create component and update meta.
"""
pdata = self.get_resource(parent)
user_info = self.user_info(props)
if not isinstance(pdata, dict):
logWarning('User `{}`: No such parent `{}`!'.format(user_info[0], parent))
return False
if (parent == '/' or parent == '1') and name not in pdata['children']:
return self.create_new_tb(name, parent, props)
# If exists, update meta
if name in pdata['children']:
return self.update_meta_tb(name, parent, props)
# This | |
t_seq = torch.stack(tmp, 1)
t_seq = t_seq.view(self.num_seq, self.seq_len,
C, H, W).transpose(1, 2)
#print (vpath, vpath.split('/'))
try:
#print ('try', vpath, vpath.split('/'))
vname = vpath.split('/')[-2]
#print (vname)
vid = self.encode_action(int(vname))
except:
#print ('except', vpath)
vname = vpath.split('/')[-3]
#print (vname)
vid = self.encode_action(int(vname))
label = torch.LongTensor([vid])
# OLD: return sequence only
# return t_seq
# NEW: return all useful information in a dictionary
result = {'t_seq': t_seq, 'idx_block': idx_block, 'vpath': vpath}
return result
    def __len__(self):
        # Each video row contributes num_sample sampled sequences.
        return len(self.video_info) * self.num_sample
    def encode_action(self, action_name):
        '''give action name, return category'''
        # Plain dict lookup; raises KeyError for unknown names.
        return self.action_dict_encode[action_name]
    def decode_action(self, action_code):
        '''give action code, return action name'''
        # Inverse of encode_action; raises KeyError for unknown codes.
        return self.action_dict_decode[action_code]
class block_toy_imagenet(data.Dataset):
    '''
    Block-toy-imagenet video dataset.

    mode: train or test split
    seq_len: number of frames in a video block
    num_seq: number of video block
    downsample: temporal downsample rate of frames
    num_sample: number of 'sequence of video blocks' sampled from one video
    drive: where the data is located
    num: which block toy tier to use (int or str; converted with str() when
        building the split path)
    '''

    def __init__(self,
                 mode='train',
                 num=1,
                 transform=None,
                 seq_len=1,
                 num_seq=2,
                 downsample=1,
                 drive='ssd',
                 num_sample=5):
        print('-- WARNING! -- using obsolete dataset class, see utils/dataset_epic.py and dataset_other.py instead')
        self.mode = mode
        self.num = num
        self.transform = transform
        self.seq_len = seq_len
        self.num_seq = num_seq
        self.downsample = downsample
        self.drive = drive
        self.num_sample = num_sample  # number of sequences sampled from one video
        # splits
        # BUG FIX: `num` defaults to the int 1, and concatenating an int to a
        # str raised TypeError; convert explicitly so both int and str work.
        if mode == 'train':
            split = '/proj/vondrick/lovish/data/block_toy/block_toy_imagenet_' + \
                str(num) + '/train_split_%s.csv' % self.drive
            video_info = pd.read_csv(split, header=None)
        elif (mode == 'val') or (mode == 'test'):
            # use test for val, temporary
            split = '/proj/vondrick/lovish/data/block_toy/block_toy_imagenet_' + \
                str(num) + '/test_split_%s.csv' % self.drive
            video_info = pd.read_csv(split, header=None)
        else:
            raise ValueError('wrong mode')
        # Action list: maps 0-based class ids <-> class names.
        self.action_dict_encode = {}
        self.action_dict_decode = {}
        action_file = os.path.join(
            '/proj/vondrick/lovish/data/block_toy', 'classInd.txt')
        action_df = pd.read_csv(action_file, sep=' ', header=None)
        for _, row in action_df.iterrows():
            act_id, act_name = row
            act_id = int(act_id) - 1  # let id start from 0
            self.action_dict_decode[act_id] = act_name
            self.action_dict_encode[act_name] = act_id
        # Filter out videos too short to supply num_seq blocks of seq_len
        # frames at the given downsample rate.
        drop_idx = []
        for idx, row in video_info.iterrows():
            vpath, vlen = row
            if vlen - self.num_seq * self.seq_len * self.downsample <= 0:
                drop_idx.append(idx)
        self.video_info = video_info.drop(drop_idx, axis=0)
        # Shuffle the validation rows (sample(frac=1.0) keeps all of them).
        if mode == 'val':
            self.video_info = self.video_info.sample(frac=1.0)

    def idx_sampler(self, vlen, vpath):
        '''Sample frame indices for one sequence from a video of length vlen.

        Returns [seq_idx_block, vpath] where seq_idx_block has shape
        (num_seq, seq_len), or [None] when the video is too short.
        '''
        if vlen - self.num_seq * self.seq_len * self.downsample <= 0:
            return [None]
        n = 1
        # Random start, then evenly spaced block starts, then per-block frames.
        start_idx = np.random.choice(
            range(vlen - self.num_seq * self.seq_len * self.downsample), n)
        seq_idx = np.expand_dims(
            np.arange(self.num_seq), -1) * self.downsample * self.seq_len + start_idx
        seq_idx_block = seq_idx + \
            np.expand_dims(np.arange(self.seq_len), 0) * self.downsample
        return [seq_idx_block, vpath]

    def __getitem__(self, index):
        # num_sample sequences are drawn per video, so consecutive indices
        # map to the same video row.
        vpath, vlen = self.video_info.iloc[index // self.num_sample]
        items = self.idx_sampler(vlen, vpath)
        # NOTE(review): idx_sampler returns [None] (a list), so this check
        # never fires; short videos are already dropped in __init__, making
        # the branch unreachable in practice -- kept as-is.
        if items is None:
            print(vpath)
        idx_block, vpath = items
        assert idx_block.shape == (self.num_seq, self.seq_len)
        idx_block = idx_block.reshape(self.num_seq * self.seq_len)
        seq = [pil_loader(os.path.join(vpath, 'image_%05d.jpg' % (i + 1)))
               for i in idx_block]
        t_seq = self.transform(seq)  # apply same transform to every frame
        num_crop = None
        try:
            (C, H, W) = t_seq[0].size()
            t_seq = torch.stack(t_seq, 0)
        except:
            # Five-crop style transforms return a list of 5 crops per frame.
            (C, H, W) = t_seq[0][0].size()
            tmp = [torch.stack(i, 0) for i in t_seq]
            assert len(tmp) == 5
            num_crop = 5
            t_seq = torch.stack(tmp, 1)
        t_seq = t_seq.view(self.num_seq, self.seq_len,
                           C, H, W).transpose(1, 2)
        # The class id is encoded in a directory name; its depth differs
        # between layouts, hence the two attempts.
        try:
            vname = vpath.split('/')[-2]
            vid = self.encode_action(int(vname))
        except:
            vname = vpath.split('/')[-3]
            vid = self.encode_action(int(vname))
        label = torch.LongTensor([vid])
        # Return all useful information in a dictionary.
        result = {'t_seq': t_seq, 'idx_block': idx_block, 'vpath': vpath}
        return result

    def __len__(self):
        return len(self.video_info) * self.num_sample

    def encode_action(self, action_name):
        '''give action name, return category'''
        return self.action_dict_encode[action_name]

    def decode_action(self, action_code):
        '''give action code, return action name'''
        return self.action_dict_decode[action_code]
class Kinetics400_full_3d(data.Dataset):
    def __init__(self,
                 mode='train',
                 transform=None,
                 seq_len=10,
                 num_seq=5,
                 downsample=3,
                 epsilon=5,
                 unit_test=False,
                 big=False,
                 return_label=False,
                 drive='hdd'):
        """Kinetics400 clip dataset: num_seq blocks of seq_len frames each.

        big selects the 256x256 split csv over the smaller one; drive picks
        which split csv variant to read.
        """
        print('-- WARNING! -- using obsolete dataset class, see utils/dataset_epic.py and dataset_other.py instead')
        self.mode = mode
        self.transform = transform
        self.drive = drive
        # number of frames in one pack
        self.seq_len = seq_len
        # number of sequences of frames
        self.num_seq = num_seq
        # choose the nth frame
        self.downsample = downsample
        # no use in the code
        self.epsilon = epsilon
        # when set, restricts the dataset to a fixed 32-row sample (below)
        self.unit_test = unit_test
        # return video label along with frames
        self.return_label = return_label
        # which dataset to use
        if big:
            print('Using Kinetics400 full data (256x256)')
        else:
            print('Using Kinetics400 full data (150x150)')
        # get action list
        self.action_dict_encode = {}
        self.action_dict_decode = {}
        # where to get the action_file
        action_file = os.path.join(
            '/proj/vondrick/lovish/data/kinetics-partial', 'classInd.txt')
        # read as a python dataframes
        action_df = pd.read_csv(action_file, sep=' ', header=None)
        # action name to id's and vice versa
        for _, row in action_df.iterrows():
            act_id, act_name = row
            # ids already start from 0.
            act_id = int(act_id) - 1  # let id start from 0
            self.action_dict_decode[act_id] = act_name
            self.action_dict_encode[act_name] = act_id
        # splits
        # change the directories
        if big:
            if mode == 'train':
                split = '/proj/vondrick/lovish/data/kinetics400_256/train_split_%s.csv' % self.drive
                video_info = pd.read_csv(split, header=None)
            elif (mode == 'val') or (mode == 'test'):
                split = '/proj/vondrick/lovish/data/kinetics400_256/val_split_%s.csv' % self.drive
                video_info = pd.read_csv(split, header=None)
            else:
                raise ValueError('wrong mode')
        else:  # small
            if mode == 'train':
                split = '/proj/vondrick/lovish/data/kinetics-partial/train_split_%s.csv' % self.drive
                video_info = pd.read_csv(split, header=None)
            elif (mode == 'val') or (mode == 'test'):
                split = '/proj/vondrick/lovish/data/kinetics-partial/val_split_%s.csv' % self.drive
                video_info = pd.read_csv(split, header=None)
            else:
                raise ValueError('wrong mode')
        # drop videos which are smaller than what we can afford
        drop_idx = []
        print('filter out too short videos ...')
        for idx, row in tqdm(video_info.iterrows(), total=len(video_info)):
            vpath, vlen = row
            # too short to supply num_seq * seq_len frames at this downsample
            if vlen - self.num_seq * self.seq_len * self.downsample <= 0:
                drop_idx.append(idx)
        self.video_info = video_info.drop(drop_idx, axis=0)
        # validation uses a fixed 30% subsample (seeded for reproducibility)
        if mode == 'val':
            self.video_info = self.video_info.sample(
                frac=0.3, random_state=666)
        if self.unit_test:
            self.video_info = self.video_info.sample(32, random_state=666)
        # shuffle not necessary because use RandomSampler
    def idx_sampler(self, vlen, vpath):
        '''sample index from a video'''
        # if video too short return None
        # NOTE(review): actually returns [None], a one-element list; the
        # caller's `if items is None` check therefore never fires.
        if vlen - self.num_seq * self.seq_len * self.downsample <= 0:
            return [None]
        n = 1
        # choose a start frame
        start_idx = np.random.choice(
            range(vlen - self.num_seq * self.seq_len * self.downsample), n)
        # get a sequence of frames to start with
        seq_idx = np.expand_dims(
            np.arange(self.num_seq), -1) * self.downsample * self.seq_len + start_idx
        # indices of consecutive frames; shape (num_seq, seq_len)
        seq_idx_block = seq_idx + \
            np.expand_dims(np.arange(self.seq_len), 0) * self.downsample
        return [seq_idx_block, vpath]
def __getitem__(self, index):
# get the path and number of frames in the video
vpath, vlen = self.video_info.iloc[index]
# sample the starting frame randomly
items = self.idx_sampler(vlen, vpath)
if items is None:
print(vpath)
# takes vpath, returns vpath
idx_block, vpath = items
assert idx_block.shape == (self.num_seq, self.seq_len)
idx_block = idx_block.reshape(self.num_seq * self.seq_len)
# load all the frames needed to complete the sequence
seq = [pil_loader(os.path.join(vpath, 'image_%05d.jpg' % (i + 1)))
for i in idx_block]
t_seq = self.transform(seq) # apply same transform
# each part is a RGB image
(C, H, W) = t_seq[0].size()
t_seq = torch.stack(t_seq, 0)
t_seq = t_seq.view(self.num_seq, self.seq_len, C, H, W).transpose(1, 2)
if self.return_label:
try:
vname = vpath.split('/')[-3]
vid = self.encode_action(vname)
except:
vname = vpath.split('/')[-2]
vid = self.encode_action(vname)
label = torch.LongTensor([vid])
# OLD: return sequence only
# return t_seq, label
# NEW: return all useful information in a dictionary
result = {'t_seq': t_seq, 'label': label,
'idx_block': idx_block, 'vpath': vpath}
return result
# OLD: return sequence only
# return t_seq
# NEW: return all useful information in a dictionary
result = {'t_seq': t_seq, 'idx_block': idx_block, 'vpath': vpath}
return result
def __len__(self):
return len(self.video_info)
def encode_action(self, action_name):
| |
#! /bin/python
# jessehogandeliamariahogan
#vim: set ts=4 sw=4 et
import copy
import os
import curses
import time
import sys
# Lowercase aliases for the boolean singletons, used as return values
# throughout this module.
false=False
true=True
class char:
    """One board cell: its grid coordinates and the letter it holds."""
    def __init__(self, x, y, char):
        self.x = x
        self.y = y
        self.letter = char
    def str(self):
        """Render as "x,y letter"."""
        return "{},{} {}".format(self.x, self.y, self.letter)
class board:
    """5x5 grid of char cells (None until a letter is set)."""
    def __init__(self):
        # 5x5 matrix holding char objects; indexed chars[y][x]
        # (the dead `if false:` prototype loader was removed)
        self.chars = [None] * 5
        for i in range(5):
            self.chars[i] = [None] * 5
    def isvalid(self):
        """True when every cell holds a lowercase letter; uppercase 'Q'
        (shorthand for 'qu') is the single allowed exception."""
        for x in range(5):
            for y in range(5):
                c = self.getchar(x, y)
                if c is None or (c.letter.isupper() and c.letter != 'Q'):
                    return False
        return True
    def getchar(self, x, y):
        return self.chars[y][x]
    def setchar(self, x, y, c):
        self.chars[y][x] = char(x, y, c)
    def str(self):
        """Render the grid row by row; empty cells print as spaces.

        BUG FIX: the original iterated `self.words`, an attribute boards do
        not have (copy-paste from words.str), so it always raised
        AttributeError.
        """
        r = ''
        for y in range(5):
            for x in range(5):
                c = self.chars[y][x]
                r += c.letter if c is not None else ' '
            r += "\n"
        return r
class word:
    """An ordered path of board cells spelling one candidate word."""
    def __init__(self):
        self.word = []
    def str(self):
        """Spell the word; a 'Q' cell expands to 'qu'."""
        parts = []
        for c in self.word:
            parts.append('qu' if c.letter == 'Q' else c.letter)
        return "".join(parts)
    def append(self, c):
        self.word.append(c)
    def contains(self, char):
        """Identity test: is this exact cell object already on the path?"""
        return any(c is char for c in self.word)
    def pop(self):
        self.word.pop()
    def len(self):
        return len(self.word)
    def graph(self, board):
        """ASCII sketch of the board with this word's cells uppercased.

        NOTE(review): iterates x in the outer loop, so the output is
        transposed relative to finger.str -- preserved as-is.
        """
        r = ""
        for x in range(5):
            for y in range(5):
                cell = board.getchar(x, y)
                on_path = False
                for c0 in self.word:
                    if cell.x == c0.x and cell.y == c0.y:
                        r += cell.letter.upper()
                        on_path = True
                        break
                if not on_path:
                    r += cell.letter.lower()
            r += "\n"
        return r
class words:
    """Collection of found words; fires the self.onappend observer hook
    (assigned by the owner) on every append."""
    def __init__(self):
        self.words = []
    def clear(self):
        self.words = []
    def append(self, word):
        self.words.append(word)
        self.raiseonappend(word)
    def raiseonappend(self, word):
        # observer hook; self.onappend must be assigned before appending
        self.onappend(word)
    def uniqappend(self, word):
        """Append only if no word with the same spelling is present."""
        if not self.contains(word):
            self.append(word)
    def contains(self, word):
        """Spelling-based (not identity) membership test."""
        for w in self.words:
            if word.str() == w.str():
                return True
        return False
    def str(self):
        """One word per line."""
        r = ''
        for w in self.words:
            r += "%s\n" % w.str()
        return r
    def graph(self, board):
        """Concatenated board sketches, one per word."""
        r = ''
        for w in self.words:
            r += "%s\n\n\n\n" % w.graph(board)
        return r
    def sort(self):
        """Order the words longest-first.

        PERF FIX: replaces a hand-rolled O(n^2) selection sort with the
        stable built-in sort; the resulting order (ties included) is
        identical because both perform a stable ascending sort by length
        followed by a reverse.
        """
        ordered = sorted(self.words, key=lambda w: w.len())
        ordered.reverse()
        self.words = ordered
class finger:
    # Mutable cursor that walks the board cell-by-cell, accumulating the
    # current candidate path in self.word.
    def __init__(self, board):
        self.b=board
        self.word=word()
        self.reset()
    def raiseonboardupd(self):
        # observer hook; the owner assigns self.onboardupd before use
        self.onboardupd(self)
    def reset(self):
        self.startchar=None
    def nextstart(self):
        # Advance the search origin left-to-right, top-to-bottom and begin
        # a fresh one-cell word there; returns false after the last cell.
        if self.startchar == None:
            self.startchar = self.b.getchar(0,0)
        else:
            x=self.startchar.x
            y=self.startchar.y
            if x < 4:
                x += 1
            elif y < 4:
                x = 0
                y += 1
            else:
                return false # we would be at the end
            self.startchar = self.b.getchar(x,y)
        self.x=self.startchar.x
        self.y=self.startchar.y
        #print "starting at (%s,%s)" % (self.x, self.y)
        self.word=word()
        self.word.append(self.b.getchar(self.x, self.y))
        return true
    def mv(self, direction):
        # Try to step one cell in direction 'u','d','l','r' or a diagonal
        # 'ul','ur','dl','dr'. Returns false (position restored) when the
        # move leaves the board or revisits a cell already in the word.
        xincr=0
        yincr=0
        d0=direction[0]
        if len(direction) == 2:
            if direction[1] == 'l':
                xincr=-1
            else:
                xincr=1 # assume 'r'
        if d0 == 'u':
            yincr=-1
        elif d0 == 'd':
            yincr=1
        elif d0 == 'l':
            xincr=-1
        elif d0 == 'r':
            xincr=1
        prevchar = self.b.getchar(self.x, self.y)
        self.x = self.x + xincr
        self.y = self.y + yincr
        if self.x < 0 or self.y < 0 or self.x > 4 or self.y > 4:
            # walked off the board: restore and fail
            self.x=prevchar.x
            self.y=prevchar.y
            return false
        char = self.b.getchar(self.x, self.y)
        if self.word.contains(char):
            # a cell may be used only once per word
            self.x=prevchar.x
            self.y=prevchar.y
            return False
        self.word.append(char)
        return true
    def curchar(self):
        return self.b.getchar(self.x, self.y)
    def revert(self):
        # Undo the last mv(): drop the newest cell and step back to the
        # previous one (or to no position at all when the word empties).
        self.word.word.pop()
        if len(self.word.word) > 0:
            c = self.word.word[-1]
            self.x = c.x
            self.y = c.y
        else:
            self.x = None
            self.y = None
    def strword(self):
        # Spell the current path; 'Q' expands to 'qu'.
        r=""
        for i in range(self.word.len()):
            l=self.word.word[i].letter
            if l == 'Q':
                l='qu'
            r += l
        return r
    def str(self):
        # Board rendering with the cells of the current word uppercased.
        r=""
        for y in range(5):
            for x in range(5):
                char = self.b.getchar(x,y)
                letter = char.letter
                for c in self.word.word:
                    if c is char:
                        letter = letter.upper()
                r += letter + ' '
            r += "\n"
        return r
class boogler:
    # Ties the solver together: exhaustive path search from every start
    # cell, collecting dictionary words (longer than 3 letters) in
    # self.words.
    def __init__(self, dict, board):
        self.words=words()
        self.dict = dict
        self.b = board
        self.f = finger(self.b)
        self.depth = 0
    def find(self):
        # Fresh search: one DFS from every board position.
        self.words.clear()
        self.f.reset()
        while self.f.nextstart():
            self.find_()
    def find_(self):
        # One DFS step; prefix-prunes via dict.startswith.
        # NOTE(review): the recursive call is nested inside the
        # `len(strword) > 3` branch, so paths only keep extending once the
        # spelled word already exceeds 3 characters -- this looks
        # suspicious (short prefixes can never grow); verify intent.
        #print "depth: %s" % self.depth
        self.depth +=1
        if self.dict.startswith(self.f.strword()):
            for d in ('d', 'u', 'l', 'r', 'dl', 'dr', 'ul', 'ur'):
                if self.f.mv(d):
                    #self.f.raiseonboardupd()
                    #print self.f.str()
                    strword = self.f.strword()
                    if len(strword) > 3:
                        #print "--reverting--"
                        #print self.f.str()
                        if self.dict.find(strword):
                            self.words.uniqappend(copy.deepcopy(self.f.word))
                        self.find_()
                    self.f.revert()
        self.depth -=1
    def str(self):
        return self.words.str()
    def graph(self):
        return self.words.graph(self.b)
class dict:
    """Word list loaded from a file; exact and prefix lookup.

    NOTE(review): the class name shadows the builtin `dict`; kept for
    compatibility with existing callers.
    """
    def __init__(self, file):
        self.d = {}   # set-like exact-match index
        self.l = []   # sorted list for binary prefix search
        # `with` replaces the manual try/finally close
        with open(file) as f:
            for w in f:
                # keep only lowercase entries (proper nouns are not
                # legal words in the game)
                if w[0].islower():
                    self.d[w.rstrip()] = ''
                    self.l.append(w.rstrip())
        # PERF FIX: the original called self.l.sort() inside the loop
        # (O(n^2 log n)); sorting once after loading yields the same list.
        self.l.sort()
    def find(self, k):
        """Exact-match membership test."""
        return (k in self.d)
    def len(self):
        return len(self.d)
    def startswith(self, str):
        """Binary search: does any word start with this prefix?

        BUG FIX: the original fell off the end of the loop returning None
        when nothing matched; return False explicitly (same truthiness).
        """
        hi = len(self.l)
        lo = 0
        while lo < hi:
            mid = (lo + hi) // 2
            word = self.l[mid]
            if word.startswith(str):
                return True
            elif str < word:
                hi = mid
            else:
                lo = mid + 1
        return False
class cscr:
    # Top-level curses application: owns the screen, the board widget and
    # the word-list widget, and wires their callbacks together.
    def cboard_onenter(self, obj):
        # Enter pressed on the board: run the solver, sort the results
        # longest-first and refresh the word-list widget.
        if self.board.isvalid():
            self.msgstat("finding")
            self.boogler.find()
            self.msgstat("sorting")
            self.boogler.words.sort()
            self.msgstat("displaying")
            self.wrdlst.refresh(None)
            self.msgstat(None)
    def wrdlst_onchange(self, c):
        # Selection changed in the word list: highlight the word's path.
        self.cboard.graph(c.word)
    def __init__(self):
        # Put the terminal into raw curses mode; destroy() undoes this.
        self.stdscr = curses.initscr()
        curses.start_color()
        curses.noecho()
        curses.cbreak()
        self.stdscr.keypad(1)
    def run(self):
        self.msgstat("loading dictionary")
        # NOTE(review): '/usr/share/dict' is normally a directory; the word
        # list is usually '/usr/share/dict/words' -- confirm this path.
        d = dict('/usr/share/dict')
        self.msgstat()
        self.board = board()
        self.boogler = boogler(d, self.board)
        self.cwidgets=cwidgets()
        # widget: cboard
        cb=cboard(self, self.boogler)
        cb.top = 3
        cb.left = 3
        self.cwidgets.append(cb)
        self.cboard = cb
        cb.onenter = self.cboard_onenter
        # widget: wrdlst
        # NOTE(review): `wrdlst` is not defined in this file chunk;
        # presumably a scrolling list widget defined elsewhere.
        h=self.stdscr.getmaxyx()[0]-3
        w=self.stdscr.getmaxyx()[1]-20
        wl = wrdlst(self, self.boogler, 3, 15, w, h )
        wl.onchange = self.wrdlst_onchange
        self.boogler.words.onappend=wl.onappend
        self.cwidgets.append(wl)
        self.wrdlst=wl
        # enter the focus/event loop (blocks)
        self.cwidgets.show()
    def msgstat(self, msg=None):
        # Status message in the top-left corner; None clears it.
        self.stdscr.addstr(0,0, ' ' * 40)
        if msg != None:
            self.stdscr.addstr(0,0, msg, curses.A_REVERSE)
        self.stdscr.refresh()
    def destroy(self):
        # Restore the terminal to its normal state.
        self.stdscr.keypad(0)
        curses.echo()
        curses.nocbreak()
        curses.endwin()
class cwidgets:
    """Ordered widget collection with tab-order based focus cycling."""
    def __init__(self):
        self.widgets = []
        self.TAB = '\t'
        self._curwidget = None
    def firstwidget(self):
        """Widget with tab order 0 (None when absent)."""
        for w in self.widgets:
            if w.tabord == 0:
                return w
    def lastwidget(self):
        """Widget with the highest tab order.

        BUG FIX: the original returned `maxwidget.tabord` (an int), but
        prev() uses the return value as a widget; return the widget itself.
        """
        maxwidget = self.firstwidget()
        for w in self.widgets:
            if w.tabord > maxwidget.tabord:
                maxwidget = w
        return maxwidget
    def curwidget(self, v=None):
        """Get (and lazily initialize) or set the focused widget."""
        if v is not None:
            self._curwidget = v
        if self._curwidget is None:
            self._curwidget = self.firstwidget()
        return self._curwidget
    def show(self):
        """Event loop: focus the current widget; Tab cycles forward."""
        while 1:
            w = self.curwidget()
            r = w.setfocus()
            if r == ord("\t"):
                w = self.curwidget(self.next())
            # fixme: back-tab is never actually delivered as ""
            if r == "":
                w = self.curwidget(self.prev())
    def prev(self):
        """Widget before the focused one, wrapping to the last."""
        curtab = self.curwidget().tabord
        for w in self.widgets:
            if curtab - 1 == w.tabord:
                return w
        return self.lastwidget()
    def next(self):
        """Widget after the focused one, wrapping to the first."""
        curtab = self.curwidget().tabord
        for w in self.widgets:
            if curtab + 1 == w.tabord:
                return w
        return self.firstwidget()
    def append(self, w):
        # auto-assign the next free tab order when none was given
        if w.tabord is None:
            w.tabord = self.maxtab() + 1
        self.widgets.append(w)
    def maxtab(self):
        """Highest tab order in use, or -1 when empty."""
        highest = -1
        for w in self.widgets:
            if highest < w.tabord:
                highest = w.tabord
        return highest
class cwidget:
    """Base class for curses widgets: keeps screen handles and tab order."""
    def __init__(self, cscr):
        self.tabord = None          # assigned by cwidgets.append
        self.cscr = cscr            # owning application object
        self.stdscr = cscr.stdscr   # shared curses window
class cboard(cwidget):
def __init__(self, cscr, boogler):
self.x=0
self.y=0
self.cmdmode=false
self.top=self.left=0
self.board=boogler.b
self.boogler=boogler
cwidget.__init__(self, cscr)
boogler.f.onboardupd=self.onboardupd
def clean(self):
self.x=self.y=0
done=false
while not done:
c = self.board.getchar(self.x,self.y)
self.stdscr.addstr(self.offy(), self.offx(), c.letter)
self.mvnext()
done = (self.y==0 and self.x==0)
def cx(self):
return self.left + self.x
def cy(self):
return self.top + self.y
def setfocus(self):
while 1:
c = self.stdscr.getch(self.cy(), self.cx())
if not self.cmdmode:
if c == curses.KEY_LEFT:
self.mv('l')
elif c == curses.KEY_RIGHT:
self.mv('r')
elif c == curses.KEY_DOWN:
self.mv('d')
elif c == curses.KEY_UP:
self.mv('u')
elif c == 263: # BS
self.backspace()
elif c == 27: # ESC
self.cmdmode=true
elif c == ord("\n"):
self.onenter(self)
elif c == ord("\t"):
return c
elif c in (range(97, 123) + [81]): # [a-zQ]
self.stdscr.addstr(self.cy(), self.cx(), chr(c))
self.board.setchar(self.x, self.y, chr(c))
if self.board.isvalid():
self.cscr.msgstat()
else:
self.cscr.msgstat('board invalid')
self.boogler.f.reset()
self.mvnext()
else:
if c in (curses.KEY_LEFT, ord('h')):
self.mv('l')
elif c in (curses.KEY_RIGHT, ord('l')):
self.mv('r')
elif c in (curses.KEY_DOWN, ord('j')):
self.mv('d')
elif c in (curses.KEY_UP, ord('k')):
self.mv('u')
elif c == ord('a'):
self.mvnext()
self.cmdmode=false
elif c == ord('i'):
self.cmdmode=false
def mvnext(self):
if self.x < 4:
self.mv('r')
else:
self.enter()
def enter(self):
if self.y < 4:
self.x=0
self.y+=1
elif self.x == 4:
self.x=self.y=0
def backspace(self):
if self.x>0:
self.x -= 1
elif self.y>0:
self.x=4
self.y -=1
def mv(self, d):
xincr=yincr=0
if d == 'u':
yincr=-1
elif d == 'd':
yincr=1
elif d == 'l':
xincr=-1
elif d == 'r':
xincr=1
self.x = self.x + xincr
self.y = self.y + yincr
if self.x < 0 or self.y < 0 or self.x > 4 or self.y > 4:
self.x = self.x - xincr
self.y = self.y - yincr
def graph(self, w):
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(6, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
for y in range(5):
for x in range(5):
sx=x+3
sy=y+3
c = self.board.getchar(x,y)
inword=false
for c0 in | |
Approximation([group_1, group_other])
**Summing Up**
When you have created all the groups they need to pass all the groups to :class:`Approximation`.
It does not accept any other parameter rather than `groups`
.. code:: python
>>> approx = Approximation(my_groups)
See Also
--------
:class:`Approximation`
References
----------
- <NAME>., & <NAME>. (2014).
`Auto-Encoding Variational Bayes. stat, 1050, 1. <https://arxiv.org/abs/1312.6114>`_
"""
# needs to be defined in init
shared_params = None
symbolic_initial = None
replacements = None
input = None
# defined by approximation
supports_batched = True
has_logq = True
# some important defaults
initial_dist_name = 'normal'
initial_dist_map = 0.
# for handy access using class methods
__param_spec__ = dict()
short_name = ''
alias_names = frozenset()
__param_registry = dict()
__name_registry = dict()
@classmethod
def register(cls, sbcls):
assert frozenset(sbcls.__param_spec__) not in cls.__param_registry, 'Duplicate __param_spec__'
cls.__param_registry[frozenset(sbcls.__param_spec__)] = sbcls
assert sbcls.short_name not in cls.__name_registry, 'Duplicate short_name'
cls.__name_registry[sbcls.short_name] = sbcls
for alias in sbcls.alias_names:
assert alias not in cls.__name_registry, 'Duplicate alias_name'
cls.__name_registry[alias] = sbcls
return sbcls
@classmethod
def group_for_params(cls, params):
if pm.variational.flows.seems_like_flow_params(params):
return pm.variational.approximations.NormalizingFlowGroup
if frozenset(params) not in cls.__param_registry:
raise KeyError('No such group for the following params: {!r}, '
'only the following are supported\n\n{}'
.format(params, cls.__param_registry))
return cls.__param_registry[frozenset(params)]
@classmethod
def group_for_short_name(cls, name):
if pm.variational.flows.seems_like_formula(name):
return pm.variational.approximations.NormalizingFlowGroup
if name.lower() not in cls.__name_registry:
raise KeyError('No such group: {!r}, '
'only the following are supported\n\n{}'
.format(name, cls.__name_registry))
return cls.__name_registry[name.lower()]
def __new__(cls, group=None, vfam=None, params=None, *args, **kwargs):
if cls is Group:
if vfam is not None and params is not None:
raise TypeError('Cannot call Group with both `vfam` and `params` provided')
elif vfam is not None:
return super().__new__(cls.group_for_short_name(vfam))
elif params is not None:
return super().__new__(cls.group_for_params(params))
else:
raise TypeError('Need to call Group with either `vfam` or `params` provided')
else:
return super().__new__(cls)
def __init__(self, group,
vfam=None,
params=None,
random_seed=None,
model=None,
local=False,
rowwise=False,
options=None,
**kwargs):
if local and not self.supports_batched:
raise LocalGroupError('%s does not support local groups' % self.__class__)
if local and rowwise:
raise LocalGroupError('%s does not support local grouping in rowwise mode')
if isinstance(vfam, str):
vfam = vfam.lower()
if options is None:
options = dict()
self.options = options
self._vfam = vfam
self._local = local
self._batched = rowwise
self._rng = tt_rng(random_seed)
model = modelcontext(model)
self.model = model
self.group = group
self.user_params = params
self._user_params = None
# save this stuff to use in __init_group__ later
self._kwargs = kwargs
if self.group is not None:
# init can be delayed
self.__init_group__(self.group)
@classmethod
def get_param_spec_for(cls, **kwargs):
res = dict()
for name, fshape in cls.__param_spec__.items():
res[name] = tuple(eval(s, kwargs) for s in fshape)
return res
    def _check_user_params(self, **kwargs):
        R"""*Dev* - checks user params, allocates them if they are correct, returns True.
        If they are not present, returns False
        Parameters
        ----------
        kwargs : special kwargs needed sometimes
        Returns
        -------
        bool indicating whether to allocate new shared params
        """
        user_params = self.user_params
        if user_params is None:
            # nothing supplied: caller should allocate shared params
            return False
        if not isinstance(user_params, dict):
            raise TypeError('params should be a dict')
        # the supplied keys must match the spec exactly (no extras/missing)
        givens = set(user_params.keys())
        needed = set(self.__param_spec__)
        if givens != needed:
            raise ParametrizationError(
                'Passed parameters do not have a needed set of keys, '
                'they should be equal, got {givens}, needed {needed}'.format(
                    givens=givens, needed=needed))
        self._user_params = dict()
        spec = self.get_param_spec_for(d=self.ddim, **kwargs.pop('spec_kw', {}))
        for name, param in self.user_params.items():
            shape = spec[name]
            if self.local:
                # symbolic minibatch axis for AEVB local groups
                shape = (-1, ) + shape
            elif self.batched:
                # concrete leading batch axis for rowwise groups
                shape = (self.bdim, ) + shape
            self._user_params[name] = tt.as_tensor(param).reshape(shape)
        return True
def _initial_type(self, name):
R"""*Dev* - initial type with given name. The correct type depends on `self.batched`
Parameters
----------
name : str
name for tensor
Returns
-------
tensor
"""
if self.batched:
return tt.tensor3(name)
else:
return tt.matrix(name)
def _input_type(self, name):
R"""*Dev* - input type with given name. The correct type depends on `self.batched`
Parameters
----------
name : str
name for tensor
Returns
-------
tensor
"""
if self.batched:
return tt.matrix(name)
else:
return tt.vector(name)
    @change_flags(compute_test_value='off')
    def __init_group__(self, group):
        # Build the per-group ordering, symbolic tensors and replacement
        # map; may run later than __init__ when the group was delayed.
        if not group:
            raise GroupError('Got empty group')
        if self.group is None:
            # delayed init
            self.group = group
        if self.batched and len(group) > 1:
            if self.local: # better error message
                raise LocalGroupError('Local groups with more than 1 variable are not supported')
            else:
                raise BatchedGroupError('Batched groups with more than 1 variable are not supported')
        self.symbolic_initial = self._initial_type(
            self.__class__.__name__ + '_symbolic_initial_tensor'
        )
        self.input = self._input_type(
            self.__class__.__name__ + '_symbolic_input'
        )
        # some setup here is not covered by the standard __init__,
        # so it is done manually
        self.ordering = ArrayOrdering([])
        self.replacements = dict()
        self.group = [get_transformed(var) for var in self.group]
        for var in self.group:
            if isinstance(var.distribution, pm.Discrete):
                raise ParametrizationError('Discrete variables are not supported by VI: {}'
                                           .format(var))
            # slice [begin:end) of the flattened input belongs to this var
            begin = self.ddim
            if self.batched:
                if var.ndim < 1:
                    if self.local:
                        raise LocalGroupError('Local variable should not be scalar')
                    else:
                        raise BatchedGroupError('Batched variable should not be scalar')
                # leading axis is the batch dim and is excluded from size
                self.ordering.size += (np.prod(var.dshape[1:])).astype(int)
                if self.local:
                    shape = (-1, ) + var.dshape[1:]
                else:
                    shape = var.dshape
            else:
                self.ordering.size += var.dsize
                shape = var.dshape
            end = self.ordering.size
            vmap = VarMap(var.name, slice(begin, end), shape, var.dtype)
            self.ordering.vmap.append(vmap)
            self.ordering.by_name[vmap.var] = vmap
            # symbolic replacement: slice of the flat input, reshaped back
            vr = self.input[..., vmap.slc].reshape(shape).astype(vmap.dtyp)
            vr.name = vmap.var + '_vi_replacement'
            self.replacements[var] = vr
        self.bij = DictToArrayBijection(self.ordering, {})
    def _finalize_init(self):
        """*Dev* - clean up after init
        """
        # kwargs were only needed to parametrize __init_group__; drop them
        # so the instance does not keep stale references alive
        del self._kwargs
local = property(lambda self: self._local)
batched = property(lambda self: self._local or self._batched)
@property
def params_dict(self):
# prefixed are correctly reshaped
if self._user_params is not None:
return self._user_params
else:
return self.shared_params
@property
def params(self):
# raw user params possibly not reshaped
if self.user_params is not None:
return collect_shared_to_list(self.user_params)
else:
return collect_shared_to_list(self.shared_params)
def _new_initial_shape(self, size, dim, more_replacements=None):
"""*Dev* - correctly proceeds sampling with variable batch size
Parameters
----------
size : scalar
sample size
dim : scalar
latent fixed dim
more_replacements : dict
replacements for latent batch shape
Returns
-------
shape vector
"""
if self.batched:
bdim = tt.as_tensor(self.bdim)
bdim = theano.clone(bdim, more_replacements)
return tt.stack([size, bdim, dim])
else:
return tt.stack([size, dim])
@node_property
def bdim(self):
if not self.local:
if self.batched:
return self.ordering.vmap[0].shp[0]
else:
return 1
else:
return next(iter(self.params_dict.values())).shape[0]
    @node_property
    def ndim(self):
        # total latent dimensionality: per-row size times batch rows
        return self.ordering.size * self.bdim
    @property
    def ddim(self):
        # per-row (batch-independent) latent dimensionality
        return self.ordering.size
    def _new_initial(self, size, deterministic, more_replacements=None):
        """*Dev* - allocates new initial random generator
        Parameters
        ----------
        size : scalar
            sample size
        deterministic : bool or scalar
            whether to sample in deterministic manner
        more_replacements : dict
            more replacements passed to shape
        Notes
        -----
        Suppose you have a AEVB setup that:
        - input `X` is purely symbolic, and `X.shape[0]` is needed to `initial` second dim
        - to perform inference, `X` is replaced with data tensor, however, since `X.shape[0]` in `initial`
          remains symbolic and can't be replaced, you get `MissingInputError`
        - as a solution, here we perform a manual replacement for the second dim in `initial`.
        Returns
        -------
        tensor
        """
        if size is None:
            size = 1
        if not isinstance(deterministic, tt.Variable):
            deterministic = np.int8(deterministic)
        dim, dist_name, dist_map = (
            self.ddim,
            self.initial_dist_name,
            self.initial_dist_map
        )
        dtype = self.symbolic_initial.dtype
        dim = tt.as_tensor(dim)
        size = tt.as_tensor(size)
        shape = self._new_initial_shape(size, dim, more_replacements)
        # apply optimizations if possible
        # (a concrete flag lets us skip building the switch node entirely)
        if not isinstance(deterministic, tt.Variable):
            if deterministic:
                # deterministic draw: constant tensor at the dist's mode/map
                return tt.ones(shape, dtype) * dist_map
            else:
                return getattr(self._rng, dist_name)(shape)
        else:
            # symbolic flag: defer the choice to graph evaluation time
            sample = getattr(self._rng, dist_name)(shape)
            initial = tt.switch(
                deterministic,
                tt.ones(shape, dtype) * dist_map,
                sample
            )
            return initial
    @node_property
    def symbolic_random(self):
        """*Dev* - abstract node that takes `self.symbolic_initial` and creates
        approximate posterior that is parametrized with `self.params_dict`.
        Implementation should take in account `self.batched`. If `self.batched` is `True`, then
        `self.symbolic_initial` is 3d tensor, else 2d
        Returns
        -------
        tensor
        """
        # abstract: concrete approximations implement the reparametrization
        raise NotImplementedError
@node_property
def symbolic_random2d(self):
"""*Dev* - `self.symbolic_random` flattened to matrix"""
if self.batched:
return self.symbolic_random.flatten(2)
else:
return self.symbolic_random
    @change_flags(compute_test_value='off')
    def set_size_and_deterministic(self, node, s, d, more_replacements=None):
        """*Dev* - after node is sampled via :func:`symbolic_sample_over_posterior` or
        :func:`symbolic_single_sample` new random generator can be allocated and applied to node
        Parameters
        ----------
        node : :class:`Variable`
            Theano node with symbolically applied VI replacements
        s : scalar
            desired number of samples
        d : bool or int
            whether sampling is done deterministically
        more_replacements : dict
            more replacements to apply
        Returns
        -------
        :class:`Variable` with applied replacements, ready to use
        """
        # substitute the symbolic flat input with a concrete-size sampler
        flat2rand = self.make_size_and_deterministic_replacements(s, d, more_replacements)
        node_out = theano.clone(node, flat2rand)
        try_to_set_test_value(node, node_out, s)
        return node_out
def to_flat_input(self, node):
"""*Dev* - replace | |
import StringIO, sys
from xml.dom import Node # MUST be first
from xml.dom import implementation, DOMException
from xml.dom import HIERARCHY_REQUEST_ERR, NOT_FOUND_ERR
from xml.dom import INDEX_SIZE_ERR, INVALID_CHARACTER_ERR, SYNTAX_ERR
from xml.dom.ext.reader.Sax2 import FromXml
from xml.dom.ext import PrettyPrint
# Internal test function: traverse a DOM tree, then verify that all
# the parent pointers are correct. Do NOT take this function as an
# example of using the Python DOM interface; it knows about the hidden
# details of the DOM implementation in order to check them.
def _check_dom_tree(t):
    "Verify that all the parent pointers in a DOM tree are correct"
    parent = {}   # child node -> the parent observed during traversal
    nodes = []    # every node reached below the root
    queue = [t]   # pending nodes, processed breadth-first
    # Breadth-first walk recording each child's actual parent.
    while queue:
        node = queue.pop(0)
        for c in node.childNodes:
            parent[c] = node
            nodes.append(c)
            queue.append(c)
    # Each traversed node must report exactly the parent we recorded;
    # nodes reporting no parent must never have been reached as children.
    count = 0
    for n in nodes:
        p = n.parentNode
        if p is None:
            assert n not in parent
        else:
            assert p == parent[n]
            count = count + 1
# Small well-formed sample document exercised by the round-trip test below.
test_text = """<?xml version="1.0"?>
<doc>
<title>This is a test</title>
<h1>Don't panic</h1>
<p>Maybe it will work.</p>
<h2>We can handle it</h2>
<h3>Yes we can</h3>
<h3>Or maybe not</h3>
End of test.
</doc>
"""
# Smoke test 1: parse the sample document, verify parent pointers,
# and pretty-print the result.
doc = FromXml(test_text)
_check_dom_tree(doc)
print 'Simple document'
PrettyPrint(doc, sys.stdout)
print
# Example from the docstring at the top of xml.dom.core.py
# (builds a tiny HTML document programmatically)
doc = implementation.createDocument(None,None,None)
html = doc.createElement('html')
html.setAttribute('attr', 'value')
head = doc.createElement('head')
title = doc.createElement('title')
text = doc.createTextNode("Title goes here")
title.appendChild(text)
head.appendChild(title)
html.appendChild(head)
doc.appendChild (html)
_check_dom_tree(doc)
print '\nOutput of docstring example'
PrettyPrint(doc, sys.stdout)
print
# Detailed test suite for the DOM
from xml.dom import Document
print '\nRunning detailed test suite'
def check(cond, explanation, expected=0):
    # Evaluate `cond` -- a Python expression string, evaluated against this
    # module's globals -- and print a failure report when it is false.
    # `expected` marks known failures, which are reported as XFAIL instead.
    truth = eval(cond)
    if not truth:
        if expected:
            print "XFAIL:",
        else:
            print ' *** Failed:',
        print explanation, '\n\t', cond
doc = implementation.createDocument(None,None,None)
check('isinstance(doc, Document.Document)', 'createDocument returns a Document')
check('doc.parentNode == None', 'Documents have no parent')
# Check that documents can only have one child
n1 = doc.createElement('n1') ; n2 = doc.createElement('n2')
pi = doc.createProcessingInstruction("Processing", "Instruction")
doc.appendChild(pi)
doc.appendChild(n1)
try:
doc.appendChild(n1) # n1 should be removed, and then added again
except DOMException:
print "XFAIL: 4DOM does not support multiple insertion of same node"
try:
doc.appendChild(n2)
except DOMException,e:
assert e.code==HIERARCHY_REQUEST_ERR
else:
print " *** Failed: Document.insertBefore didn't raise HierarchyRequestException"
doc.replaceChild(n2, n1) # Should work
try: doc.replaceChild(n1, pi)
except DOMException,e:
assert e.code==HIERARCHY_REQUEST_ERR
else:
print " *** Failed: Document.replaceChild didn't raise HierarchyRequestException"
doc.replaceChild(n2, pi) # Should also work
check('pi.parentNode == None',
'Document.replaceChild: PI should have no parent')
try:
doc.removeChild(n2)
except DOMException:
print "XFAIL"
check('n2.parentNode == None',
'Document.removeChild: n2 should have no parent')
# Check adding and deletion with DocumentFragments
fragment = doc.createDocumentFragment() ; fragment.appendChild( n1 )
doc.appendChild( fragment )
check('fragment.parentNode == None',
'Doc.appendChild: fragment has no parent')
check('n1.parentNode.nodeType == Node.DOCUMENT_NODE',
'Doc.appendChild: n1 now has document as parent')
fragment = doc.createDocumentFragment() ; fragment.appendChild( n1 )
n2 = doc.createElement('n2') ; fragment.appendChild( n2 )
try: doc.appendChild( fragment )
except DOMException,e:
assert e.code == HIERARCHY_REQUEST_ERR
else:
print " *** Failed: Document.fragment.appendChild didn't raise HierarchyRequestException"
fragment = doc.createDocumentFragment() ; fragment.appendChild( n1 )
n2 = doc.createElement('n2') ; fragment.appendChild( n2 )
doc.appendChild( pi )
try: doc.replaceChild(fragment, pi)
except DOMException:
assert e.code==HIERARCHY_REQUEST_ERR
else:
print " *** Failed: Document.fragment.replaceChild didn't raise HierarchyRequestException"
#FIXME - fragment.removeChild(n2)
fragment.appendChild(pi)
doc.appendChild( fragment)
check('n1.parentNode == doc',
"Document.fragment.replaceChild parent node is correct")
_check_dom_tree(doc)
# Check adding and deleting children for ordinary nodes
n1 = doc.createElement('n1') ; n2 = doc.createElement('n2')
check( 'n1.parentNode == None', 'newly created Element has no parent')
e1 = doc.createTextNode('e1') ; e2 = doc.createTextNode('e2')
e3 = doc.createTextNode('e3')
n1.appendChild( e1 ) ; n1.appendChild( e2 ) ; n2.appendChild(e3)
# Test .insertBefore with refChild set to a node
# (this also re-parents e1 from n1 into n2)
n2.insertBefore(e1, e3)
check('len(n1.childNodes) == 1', "insertBefore: node1 has 1 child")
check('len(n2.childNodes) == 2', "insertBefore: node2 has 2 children")
check('n1.firstChild.data=="e2"', "insertBefore: node1's child is e2")
check('n2.firstChild.data=="e1"', "insertBefore: node2's first child is e1")
check('n2.lastChild.data=="e3"', "insertBefore: node2's last child is e3")
check('e1.parentNode.tagName == "n2"', "insertBefore: e1's parent is n2")
check('e2.parentNode.tagName == "n1"', "insertBefore: e2's parent is n1")
# NOTE(review): message says "n3" but the assertion checks n2 -- message typo.
check('e3.parentNode.tagName == "n2"', "insertBefore: e3's parent is n3")
try: n2.insertBefore(e1, e2)
except DOMException,e:
    assert e.code==NOT_FOUND_ERR
else:
    print " *** Failed: insertBefore didn't raise NotFoundException"
# Test .insertBefore with refChild==None
# (appends e1 to the end of n2's children)
n2.insertBefore(e1, None)
check('len(n2.childNodes) == 2', "None insertBefore: node1 has 2 children")
check('n2.firstChild.data=="e3"', "None insertBefore: node2's first child is e3")
check('n2.lastChild.data=="e1"', "None insertBefore: node2's last child is e1")
# Test replaceChild
ret = n1.replaceChild(e1, e2)
check('e2.parentNode == None', "replaceChild: e2 has no parent")
check('len(n1.childNodes) == 1', "replaceChild: node1 has 1 child")
check('n1.firstChild.data=="e1"', "replaceChild: node1's only child is e1")
check('ret.data == "e2"', "replaceChild: returned value node1's only child is e1")
try: n1.replaceChild(e2, e2)
except DOMException,e:
    assert e.code==NOT_FOUND_ERR
else:
    # NOTE(review): message copied from insertBefore; this tests replaceChild.
    print " *** Failed: insertBefore didn't raise NotFoundException"
# Test removeChild
ret = n1.removeChild( e1 )
check('e1.parentNode == None', "removeChild: e1 has no parent")
check('ret.data == "e1"', "removeChild: e1 is the returned value")
try: n1.removeChild(e2)
except DOMException,e:
    assert e.code==NOT_FOUND_ERR
else:
    print " *** Failed: removeChild didn't raise NotFoundException"
# XXX two more cases for adding stuff: normal, Document, DocumentFragment
# Test the functions in the CharacterData interface
# (each mutator is probed with negative and out-of-range offsets, which
# must raise INDEX_SIZE_ERR, followed by one valid call)
text = doc.createTextNode('Hello world')
#FIXME - check('text[0:5].value == "Hello"', 'text: slicing a node')
try: text.substringData(-5, 5)
except DOMException,e:
    assert e.code==INDEX_SIZE_ERR
else:
    print " *** Failed: substringData didn't raise IndexSizeException (negative)"
try: text.substringData(200, 5)
except DOMException,e:
    assert e.code==INDEX_SIZE_ERR
else:
    print " *** Failed: substringData didn't raise IndexSizeException (larger)"
try: text.substringData(5, -5)
except DOMException,e:
    assert e.code==INDEX_SIZE_ERR
else:
    print " *** Failed: substringData didn't raise IndexSizeException (negcount)"
text.appendData('!')
check('text.data == "Hello world!"', 'text: appendData')
try: text.insertData(-5, 'string')
except DOMException,e:
    assert e.code==INDEX_SIZE_ERR
else:
    print " *** Failed: insertData didn't raise IndexSizeException (negative)"
try: text.insertData(200, 'string')
except DOMException,e:
    assert e.code==INDEX_SIZE_ERR
else:
    print " *** Failed: insertData didn't raise IndexSizeException (larger)"
text.insertData(5, ',')
check('text.data == "Hello, world!"', 'text: insertData of ","')
try: text.deleteData(-5, 5)
except DOMException,e:
    assert e.code==INDEX_SIZE_ERR
else:
    print " *** Failed: deleteData didn't raise IndexSizeException (negative)"
try: text.deleteData(200, 5)
except DOMException,e:
    assert e.code==INDEX_SIZE_ERR
else:
    print " *** Failed: deleteData didn't raise IndexSizeException (larger)"
text.deleteData(0, 5)
check('text.data == ", world!"', 'text: deleteData of first 5 chars')
try: text.replaceData(-5, 5, 'Top of the')
except DOMException,e:
    assert e.code==INDEX_SIZE_ERR
else:
    print " *** Failed: replaceData didn't raise IndexSizeException (negative)"
try: text.replaceData(200, 5, 'Top of the')
except DOMException,e:
    assert e.code==INDEX_SIZE_ERR
else:
    print " *** Failed: replaceData didn't raise IndexSizeException (larger)"
text.replaceData(0, 1, 'Top of the')
# NOTE(review): message reuses the deleteData wording; this checks replaceData.
check('text.data == "Top of the world!"', 'text: deleteData of first 5 chars')
# Test the Element class
# (attribute get/set/remove, by name and by node)
e = doc.createElement('elem')
attr = doc.createAttribute('attr2')
attr.value = "v2"
#check('e.toxml() == "<elem />"', 'Element: empty element')
check('e.tagName == "elem"', 'Element: tag name')
check('len(e.attributes) == 0', 'Element: empty get_attributes')
check('e.getAttribute("dummy") == ""', 'Element: empty getAttribute')
check('e.getAttributeNode("dummy") == None', 'Element: empty getAttributeNode')
# Passing an Attr node where a string value is expected must fail.
try: e.setAttribute('dummy', attr)
except DOMException,x:
    assert x.code == SYNTAX_ERR
    # Spec says invalid character for name not value
    # assert x.code==INVALID_CHARACTER_ERR
else:
    print " *** Failed: setAttribute didn't raise InvalidCharacterException"
e.setAttribute('dummy', 'value')
#check('e.toxml() == "<elem dummy=\'value\' />"', 'Element with 1 attribute')
check('e.getAttribute("dummy") == "value"', 'Element: getAttribute w/ value')
check('e.getAttributeNode("dummy").value == "value"', 'Element: getAttributeNode w/ value')
a2 = e.getAttributeNode( 'dummy' )
check('a2.parentNode == None', 'Attribute: should have no parent')
check('a2.value == "value"', 'Attribute: value is correct')
e.removeAttribute('dummy')
check('len(e.attributes) == 0', 'Element: attribute removed')
e.setAttributeNode(attr)
check('e.attributes[0].value == "v2"', 'Element: attribute node added')
# Setting a second node with the same name replaces the first and
# returns the replaced node.
a2 = doc.createAttribute('attr2')
a2.value = 'v3'
ret = e.setAttributeNode(a2)
check('e.attributes[0].value == "v3"', 'Element: attribute node replaced')
check('ret.value == "v2"', 'Element: deleted attribute node returned')
e.removeAttributeNode(a2)
check('len(e.attributes) == 0', 'Element: attribute node removed')
# Check handling of namespace prefixes
#FIXME (start)
#e.setAttribute('xmlns', 'http://defaulturi')
#e.setAttribute('xmlns:html', 'http://htmluri')
#check('e.ns_prefix[""] == "http://defaulturi"',
# 'Default namespace with setAttribute')
#check('e.ns_prefix["html"] == "http://htmluri"',
# 'Prefixed namespace with setAttribute')
#e.removeAttribute('xmlns:html')
#check('not e.ns_prefix.has_key("html")',
# 'Prefixed namespace with removeAttribute')
#e.removeAttribute('xmlns')
#check('len(e.ns_prefix) == 0', 'Default namespace with removeAttribute')
#default = doc.createAttribute('xmlns') ; default.value = "http://defaulturi"
#html = doc.createAttribute('xmlns:html') ; html.value = "http://htmluri"
#e.setAttributeNode(default) ; e.setAttributeNode(html)
#check('e.ns_prefix[""] == "http://defaulturi"',
# 'Default namespace with setAttributeNode')
#check('e.ns_prefix["html"] == "http://htmluri"',
# 'Prefixed namespace with setAttributeNode')
#e.removeAttributeNode(html)
#check('not e.ns_prefix.has_key("html")',
# 'Prefixed namespace with removeAttribute')
#e.removeAttributeNode(default)
#FIXME (end)
#
# Check getElementsByTagName
#
check('len(e.getElementsByTagName("elem")) == 0',
"getElementsByTagName doesn't return element")
check('len(e.getElementsByTagName("*")) == 0',
"getElementsByTagName doesn't return element")
# Check CharacterData interfaces using Text nodes
t1 = doc.createTextNode('first') ; e.appendChild( t1 )
t2 = doc.createTextNode('second') ; e.appendChild( t2 )
t3 = doc.createTextNode('third') ; e.appendChild( t3 )
#check('e.toxml() == "<elem>firstsecondthird</elem>"',
# "Element: content of three Text nodes as children")
check('len(e.childNodes) == 3', 'Element: three Text nodes as children')
e.normalize()
check('e.firstChild.data == "firstsecondthird"',
"Element: normalized Text nodes")
check('len(e.childNodes) == 1', 'Element: should be one normalized Text node')
check('t2.parentNode == None', 'Element: normalized t2 should have no parent')
check('t3.parentNode == None', 'Element: normalized t3 should have no parent')
# Text node
t1.splitText(5)
check('e.firstChild.data == "first"',
"Element: newly split Text nodes")
check('len(e.childNodes) == 2', 'Text: should be two split Text nodes')
check('e.lastChild.data == "secondthird"',
"Element: newly split Text nodes")
# Check comparisons; e1 and e2 are different proxies for the same underlying
# node
n1 = doc.createElement('n1') ; n2 = doc.createElement('n2')
n1.appendChild(n2)
e1 = n1 ; e2 = n2.parentNode
check('e1 is e2', 'Two proxies are different according to "is" operator')
check('e1 == e2', 'Two proxies | |
# Source repository: EVS-ATMOS/cmdv-rrm-anl
# This module handles all of the time lookups for soundings and radar
# All of the file_name_str entries will have to be adjusted to fit to
# your radar dataset's naming convention
import glob
import numpy as np
import math
import matplotlib
matplotlib.use('agg')
import pyart
import time
from copy import deepcopy
# Archive and output locations for the Berrima/CPOL radar processing.
data_path_berr = ('/lcrc/group/earthscience/radar/stage/radar_disk_two' +
                  '/berr_rapic/')
out_data_path = '/lcrc/group/earthscience/rjackson/multidop_grids/'
cpol_grid_data_path = '/lcrc/group/earthscience/rjackson/data/radar/grids/'
data_path_sounding = '/lcrc/group/earthscience/rjackson/soundings/'
# NOTE(review): the next two concatenations produce a doubled slash
# ("stage//radar_disk_two", "radar_disk_two//cpol_rapic"); harmless on
# POSIX filesystems, but confirm it is intentional.
berr_data_file_path = ('/lcrc/group/earthscience/radar/stage/' +
                       '/radar_disk_two/berr_rapic/')
data_path_cpol = ('/lcrc/group/earthscience/radar/stage/radar_disk_two/' +
                  '/cpol_rapic/')
data_path_cpol_cfradial = '/lcrc/group/earthscience/rjackson/cpol'
data_path_berr_cfradial = '/lcrc/group/earthscience/rjackson/berr'
out_file_path = '/lcrc/group/earthscience/rjackson/quicklook_plots/'
# Get a Radar object given a time period in the CPOL dataset
def get_radar_from_berr(time):
    """Load the Berrima radar volume whose file name matches *time*.

    Parameters
    ----------
    time : datetime.datetime
        Scan time used to build the dealiased UF file name
        ("BerrimaVolYYYYMMDD_HHMMSS_deal.uf").

    Returns
    -------
    pyart.core.Radar
        Radar object read from the matching file; pyart raises if the
        file does not exist.
    """
    # One strftime call replaces the previous field-by-field "%0Nd"
    # assembly of year/month/day/hour/minute/second.
    file_name_str = (data_path_berr +
                     time.strftime('BerrimaVol%Y%m%d_%H%M%S_deal.uf'))
    radar = pyart.io.read(file_name_str)
    return radar
""" get_grid_times_cpol
start_year = Start year of animation
start_month = Start month of animation
start_day = Start day of animation
start_hour = Start hour of animation
end_year = End year of animation
end_month = End month of animation
end_day = End day of animation
end_minute = End minute of animation
minute_interval = Interval in minutes between scans (default is 5)
This procedure acquires an array of Grid classes between
start_time and end_time """
def get_grid_times_cpol(start_year, start_month, start_day,
                        start_hour, start_minute, end_year,
                        end_month, end_day, end_hour,
                        end_minute, minute_interval=5):
    """Return the sorted CPOL grid-file times inside the given window.

    Scans cpol_grid_data_path day by day for files named
    cpol_YYYYMMDD*.nc, parses the YYYYMMDDHHMM timestamp embedded in each
    file name, and keeps only times inside [start, end] that are at least
    *minute_interval* minutes after the previously accepted time.

    Parameters mirror datetime components; *minute_interval* is minutes
    (default 5).

    Returns
    -------
    list of datetime.datetime
        Accepted grid times, sorted ascending (empty if no files found).
    """
    from datetime import timedelta, datetime
    start_time = datetime(start_year, start_month, start_day,
                          start_hour, start_minute)
    end_time = datetime(end_year, end_month, end_day,
                        end_hour, end_minute)
    deltatime = end_time - start_time
    # Bug fix: datetime.timedelta has no attribute "minute", so the old
    # test "deltatime.minute > 0" raised AttributeError whenever the
    # interval was a whole number of days. ".seconds" already covers any
    # partial-day remainder.
    if deltatime.seconds > 0:
        no_days = deltatime.days + 1
    else:
        no_days = deltatime.days
    if start_day != end_day:
        no_days = no_days + 1
    days = range(0, no_days)
    print('We are about to load grid files for ' + str(no_days) + ' days')
    # Find the list of files for each day
    cur_time = start_time
    file_list = []
    time_list = []
    date_list_final = []
    for i in days:
        year_str = "%04d" % cur_time.year
        day_str = "%02d" % cur_time.day
        month_str = "%02d" % cur_time.month
        dir_str = year_str + '/' + month_str + '/' + day_str + '/'
        format_str = (cpol_grid_data_path + dir_str + 'cpol_' + year_str +
                      month_str + day_str + '*.nc')
        print('Looking for files with format ' + format_str)
        data_list = glob.glob(format_str)
        if len(data_list) > 0:
            # Remember which days produced data (kept for parity with the
            # radar-time variants, even though it is not returned here).
            date_list_final.append(datetime(cur_time.year, cur_time.month,
                                            cur_time.day, 0, 0, 1))
            file_list.extend(data_list)
        cur_time = cur_time + timedelta(days=1)
    # Parse the YYYYMMDDHHMM timestamp embedded in each file name.
    for file_name in file_list:
        date_str = file_name[-15:-3]
        cur_time = datetime(int(date_str[0:4]), int(date_str[4:6]),
                            int(date_str[6:8]), int(date_str[8:10]),
                            int(date_str[10:12]), 0)
        time_list.append(cur_time)
    # Keep times inside the window that are at least minute_interval apart.
    time_list.sort()
    time_list_final = []
    past_time = []
    for times in time_list:
        cur_time = times
        if past_time == []:
            past_time = cur_time
        if (cur_time - past_time >= timedelta(minutes=minute_interval) and
                cur_time >= start_time and cur_time <= end_time):
            time_list_final.append(cur_time)
            past_time = cur_time
    return time_list_final
""" get_radar_times_cpol
start_year = Start year of animation
start_month = Start month of animation
start_day = Start day of animation
start_hour = Start hour of animation
end_year = End year of animation
end_month = End month of animation
end_day = End day of animation
end_minute = End minute of animation
minute_interval = Interval in minutes between scans (default is 5)
This procedure acquires an array of Radar
classes between start_time and end_time. """
def get_radar_times_cpol(start_year, start_month, start_day,
                         start_hour, start_minute, end_year,
                         end_month, end_day, end_hour,
                         end_minute, minute_interval=1):
    """Return (scan times, days-with-data) for CPOL radar files in a window.

    Searches data_path_cpol_cfradial day by day; the file layout differs
    before/after 2007 (Gunn_pt ppi files vs cfradial UNKNOWN_SUR volumes).
    The scan time is parsed out of each file name.

    Returns
    -------
    (list of datetime.datetime, list of datetime.datetime)
        Sorted scan times inside [start, end], and one datetime
        (at 00:00:01) for each day on which at least one file was found.
    """
    from datetime import timedelta, datetime
    start_time = datetime(start_year, start_month, start_day,
                          start_hour, start_minute,)
    end_time = datetime(end_year, end_month, end_day,
                        end_hour, end_minute,)
    deltatime = end_time - start_time
    # Bug fix: timedelta has no "minute" attribute; the old check
    # "deltatime.minute > 0" raised AttributeError for whole-day intervals.
    if deltatime.seconds > 0:
        no_days = deltatime.days + 1
    else:
        no_days = deltatime.days
    if start_day != end_day:
        no_days = no_days + 1
    days = range(0, no_days)
    print(('We are about to load grid files for ' +
           str(no_days) + ' days'))
    # Find the list of files for each day
    cur_time = start_time
    file_list = []
    time_list = []
    date_list_final = []
    for i in days:
        year_str = "%04d" % cur_time.year
        day_str = "%02d" % cur_time.day
        month_str = "%02d" % cur_time.month
        # Adjust to your dataset
        if cur_time.year > 2007:
            format_str = (data_path_cpol_cfradial + '/' + year_str + '/' +
                          year_str + month_str + day_str + '/cfrad.' +
                          year_str + month_str + day_str + '*UNKNOWN_SUR.nc')
        else:
            format_str = (data_path_cpol_cfradial + '/' + year_str + '/' +
                          year_str + month_str + day_str + '/Gunn_pt*' +
                          year_str + month_str + day_str + '*ppi.nc')
        print('Looking for files with format ' + format_str)
        data_list = glob.glob(format_str)
        if len(data_list) > 0:
            date_list_final.append(datetime(cur_time.year, cur_time.month,
                                            cur_time.day, 0, 0, 1))
            file_list.extend(data_list)
        cur_time = cur_time + timedelta(days=1)
    # Parse the scan time out of each file name.
    for file_name in file_list:
        if not file_name[-6:] == 'ppi.nc':
            # Lazy import: the third-party "parse" package is only needed
            # when cfradial-style names actually occur (post-2007 layout).
            from parse import parse
            new_format_str = (data_path_cpol_cfradial +
                              '/{:d}/{:d}/' +
                              'cfrad.{:d}_{:d}.{:d}_to_{:d}_{:d}' +
                              '.{:d}_Gunn_Pt_v{:d}_UNKNOWN_SUR.nc')
            print(file_name)
            parameters = parse(new_format_str, file_name)
            # parameters[2] is YYYYMMDD and parameters[3] is HHMMSS;
            # np.floor splits them into their components (as floats,
            # converted back to int below).
            year_str = np.floor(parameters[2]/10000)
            month_str = np.floor((parameters[2] - year_str*10000)/100)
            day_str = np.floor(parameters[2] - year_str*10000 - month_str*100)
            hour_str = np.floor(parameters[3]/10000)
            minute_str = np.floor((parameters[3] - hour_str*10000)/100)
            second_str = np.floor(parameters[3] - hour_str*10000 -
                                  minute_str*100)
        else:
            date_str = file_name[-20:-6]
            year_str = date_str[0:4]
            month_str = date_str[4:6]
            day_str = date_str[6:8]
            hour_str = date_str[8:10]
            minute_str = date_str[10:12]
            second_str = date_str[12:14]
        print(year_str)
        cur_time = datetime(int(year_str), int(month_str), int(day_str),
                            int(hour_str), int(minute_str), int(second_str))
        time_list.append(cur_time)
    # Keep only scan times inside the requested window.
    time_list.sort()
    time_list_final = []
    past_time = []
    for times in time_list:
        cur_time = times
        if past_time == []:
            past_time = cur_time
        if cur_time >= start_time and cur_time <= end_time:
            time_list_final.append(cur_time)
            past_time = cur_time
    return time_list_final, date_list_final
""" get_radar_times_cpol
start_year = Start year of animation
start_month = Start month of animation
start_day = Start day of animation
start_hour = Start hour of animation
end_year = End year of animation
end_month = End month of animation
end_day = End day of animation
end_minute = End minute of animation
minute_interval = Interval in minutes between scans (default is 5)
This procedure acquires an array of Radar classes
between start_time and end_time """
def get_radar_times_berr(start_year, start_month, start_day,
start_hour, start_minute, end_year,
end_month, end_day, end_hour,
end_minute, minute_interval=5):
from datetime import timedelta, datetime
from parse import parse
start_time = datetime(start_year, start_month, start_day,
start_hour, start_minute,)
end_time = datetime(end_year, end_month, end_day, end_hour, end_minute,)
deltatime = end_time - start_time
if(deltatime.seconds > 0 or deltatime.minute > 0):
no_days = deltatime.days + 1
else:
no_days = deltatime.days
if(start_day != end_day):
no_days = no_days + 1
days = range(0, no_days)
print('We are about to load grid files for ' + str(no_days) + ' days')
# Find the list of files for each day
cur_time = start_time
file_list = []
time_list = []
date_list_final = []
for i in days:
year_str = "%04d" % cur_time.year
day_str = "%02d" % cur_time.day
month_str = "%02d" % cur_time.month
# Adjust to your dataset
format_str = (data_path_berr_cfradial +
'/' + year_str + '/' + year_str +
month_str + day_str + '/cfrad.' +
year_str + month_str + day_str + '*.nc')
print('Looking for files with format ' + format_str)
data_list = glob.glob(format_str)
if(len(data_list) > 0):
day = datetime(cur_time.year, cur_time.month, cur_time.day,
0, 0, 1)
date_list_final.append(day)
for j in range(0, len(data_list)):
file_list.append(data_list[j])
cur_time = cur_time + timedelta(days=1)
# Parse all of the dates and time in the interval
# and add them to the time list
past_time = []
for file_name in file_list:
if(file_name[-13:] == 'el0.50_SUR.nc'):
new_format_str = (data_path_berr_cfradial +
'/{:d}/{:d}/' +
'cfrad.{:d}_{:d}.{:d}_to_{:d}_{:d}.' +
'{:d}_Berr_v{:d}_s{:d}_el0.50_SUR.nc')
else:
new_format_str = (data_path_berr_cfradial +
'/{:d}/{:d}/' +
'cfrad.{:d}_{:d}.{:d}_to_{:d}_{:d}.' +
'{:d}_Berrima_v{:d}_UNKNOWN_SUR.nc')
parameters = parse(new_format_str, file_name)
year_str = np.floor(parameters[2]/10000)
month_str = np.floor((parameters[2] - year_str*10000)/100)
day_str = np.floor(parameters[2] - year_str*10000 - month_str*100)
hour_str = np.floor(parameters[3]/10000)
minute_str = np.floor((parameters[3] - hour_str*10000)/100)
second_str = np.floor(parameters[3] | |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class ParticipantBasic(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
    def __init__(self):
        """
        ParticipantBasic - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> swagger type string, used by the generated
        # (de)serialization machinery.
        self.swagger_types = {
            'id': 'str',
            'start_time': 'datetime',
            'end_time': 'datetime',
            'connected_time': 'datetime',
            'name': 'str',
            'user_uri': 'str',
            'user_id': 'str',
            'external_contact_id': 'str',
            'external_organization_id': 'str',
            'queue_id': 'str',
            'group_id': 'str',
            'team_id': 'str',
            'queue_name': 'str',
            'purpose': 'str',
            'participant_type': 'str',
            'consult_participant_id': 'str',
            'address': 'str',
            'ani': 'str',
            'ani_name': 'str',
            'dnis': 'str',
            'locale': 'str',
            'wrapup_required': 'bool',
            'wrapup_prompt': 'str',
            'wrapup_timeout_ms': 'int',
            'wrapup_skipped': 'bool',
            'wrapup': 'Wrapup',
            'conversation_routing_data': 'ConversationRoutingData',
            'alerting_timeout_ms': 'int',
            'monitored_participant_id': 'str',
            'coached_participant_id': 'str',
            'attributes': 'dict(str, str)',
            'calls': 'list[CallBasic]',
            'callbacks': 'list[CallbackBasic]',
            'chats': 'list[ConversationChat]',
            'cobrowsesessions': 'list[Cobrowsesession]',
            'emails': 'list[Email]',
            'messages': 'list[Message]',
            'screenshares': 'list[Screenshare]',
            'social_expressions': 'list[SocialExpression]',
            'videos': 'list[Video]',
            'evaluations': 'list[Evaluation]',
            'screen_recording_state': 'str',
            'flagged_reason': 'str',
            'start_acw_time': 'datetime',
            'end_acw_time': 'datetime'
        }

        # Python attribute name -> JSON property name in the API payload.
        self.attribute_map = {
            'id': 'id',
            'start_time': 'startTime',
            'end_time': 'endTime',
            'connected_time': 'connectedTime',
            'name': 'name',
            'user_uri': 'userUri',
            'user_id': 'userId',
            'external_contact_id': 'externalContactId',
            'external_organization_id': 'externalOrganizationId',
            'queue_id': 'queueId',
            'group_id': 'groupId',
            'team_id': 'teamId',
            'queue_name': 'queueName',
            'purpose': 'purpose',
            'participant_type': 'participantType',
            'consult_participant_id': 'consultParticipantId',
            'address': 'address',
            'ani': 'ani',
            'ani_name': 'aniName',
            'dnis': 'dnis',
            'locale': 'locale',
            'wrapup_required': 'wrapupRequired',
            'wrapup_prompt': 'wrapupPrompt',
            'wrapup_timeout_ms': 'wrapupTimeoutMs',
            'wrapup_skipped': 'wrapupSkipped',
            'wrapup': 'wrapup',
            'conversation_routing_data': 'conversationRoutingData',
            'alerting_timeout_ms': 'alertingTimeoutMs',
            'monitored_participant_id': 'monitoredParticipantId',
            'coached_participant_id': 'coachedParticipantId',
            'attributes': 'attributes',
            'calls': 'calls',
            'callbacks': 'callbacks',
            'chats': 'chats',
            'cobrowsesessions': 'cobrowsesessions',
            'emails': 'emails',
            'messages': 'messages',
            'screenshares': 'screenshares',
            'social_expressions': 'socialExpressions',
            'videos': 'videos',
            'evaluations': 'evaluations',
            'screen_recording_state': 'screenRecordingState',
            'flagged_reason': 'flaggedReason',
            'start_acw_time': 'startAcwTime',
            'end_acw_time': 'endAcwTime'
        }

        # Backing fields for the generated properties below; all start
        # unset and are filled by the setters or by deserialization.
        self._id = None
        self._start_time = None
        self._end_time = None
        self._connected_time = None
        self._name = None
        self._user_uri = None
        self._user_id = None
        self._external_contact_id = None
        self._external_organization_id = None
        self._queue_id = None
        self._group_id = None
        self._team_id = None
        self._queue_name = None
        self._purpose = None
        self._participant_type = None
        self._consult_participant_id = None
        self._address = None
        self._ani = None
        self._ani_name = None
        self._dnis = None
        self._locale = None
        self._wrapup_required = None
        self._wrapup_prompt = None
        self._wrapup_timeout_ms = None
        self._wrapup_skipped = None
        self._wrapup = None
        self._conversation_routing_data = None
        self._alerting_timeout_ms = None
        self._monitored_participant_id = None
        self._coached_participant_id = None
        self._attributes = None
        self._calls = None
        self._callbacks = None
        self._chats = None
        self._cobrowsesessions = None
        self._emails = None
        self._messages = None
        self._screenshares = None
        self._social_expressions = None
        self._videos = None
        self._evaluations = None
        self._screen_recording_state = None
        self._flagged_reason = None
        self._start_acw_time = None
        self._end_acw_time = None
    # NOTE: auto-generated accessor pairs (swagger codegen); each property
    # is a plain read/write of its backing "_" attribute.
    @property
    def id(self) -> str:
        """
        Gets the id of this ParticipantBasic.
        A globally unique identifier for this conversation.

        :return: The id of this ParticipantBasic.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id: str):
        """
        Sets the id of this ParticipantBasic.
        A globally unique identifier for this conversation.

        :param id: The id of this ParticipantBasic.
        :type: str
        """
        self._id = id

    @property
    def start_time(self) -> "datetime":
        """
        Gets the start_time of this ParticipantBasic.
        The timestamp when this participant joined the conversation in the provider clock. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z

        :return: The start_time of this ParticipantBasic.
        :rtype: datetime
        """
        return self._start_time

    @start_time.setter
    def start_time(self, start_time: "datetime"):
        """
        Sets the start_time of this ParticipantBasic.
        The timestamp when this participant joined the conversation in the provider clock. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z

        :param start_time: The start_time of this ParticipantBasic.
        :type: datetime
        """
        self._start_time = start_time

    @property
    def end_time(self) -> "datetime":
        """
        Gets the end_time of this ParticipantBasic.
        The timestamp when this participant disconnected from the conversation in the provider clock. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z

        :return: The end_time of this ParticipantBasic.
        :rtype: datetime
        """
        return self._end_time

    @end_time.setter
    def end_time(self, end_time: "datetime"):
        """
        Sets the end_time of this ParticipantBasic.
        The timestamp when this participant disconnected from the conversation in the provider clock. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z

        :param end_time: The end_time of this ParticipantBasic.
        :type: datetime
        """
        self._end_time = end_time

    @property
    def connected_time(self) -> "datetime":
        """
        Gets the connected_time of this ParticipantBasic.
        The timestamp when this participant was connected to the conversation in the provider clock. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z

        :return: The connected_time of this ParticipantBasic.
        :rtype: datetime
        """
        return self._connected_time

    @connected_time.setter
    def connected_time(self, connected_time: "datetime"):
        """
        Sets the connected_time of this ParticipantBasic.
        The timestamp when this participant was connected to the conversation in the provider clock. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z

        :param connected_time: The connected_time of this ParticipantBasic.
        :type: datetime
        """
        self._connected_time = connected_time
    # Auto-generated accessors: participant display name and user linkage.
    @property
    def name(self) -> str:
        """
        Gets the name of this ParticipantBasic.
        A human readable name identifying the participant.

        :return: The name of this ParticipantBasic.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name: str):
        """
        Sets the name of this ParticipantBasic.
        A human readable name identifying the participant.

        :param name: The name of this ParticipantBasic.
        :type: str
        """
        self._name = name

    @property
    def user_uri(self) -> str:
        """
        Gets the user_uri of this ParticipantBasic.
        If this participant represents a user, then this will be an URI that can be used to fetch the user.

        :return: The user_uri of this ParticipantBasic.
        :rtype: str
        """
        return self._user_uri

    @user_uri.setter
    def user_uri(self, user_uri: str):
        """
        Sets the user_uri of this ParticipantBasic.
        If this participant represents a user, then this will be an URI that can be used to fetch the user.

        :param user_uri: The user_uri of this ParticipantBasic.
        :type: str
        """
        self._user_uri = user_uri

    @property
    def user_id(self) -> str:
        """
        Gets the user_id of this ParticipantBasic.
        If this participant represents a user, then this will be the globally unique identifier for the user.

        :return: The user_id of this ParticipantBasic.
        :rtype: str
        """
        return self._user_id

    @user_id.setter
    def user_id(self, user_id: str):
        """
        Sets the user_id of this ParticipantBasic.
        If this participant represents a user, then this will be the globally unique identifier for the user.

        :param user_id: The user_id of this ParticipantBasic.
        :type: str
        """
        self._user_id = user_id
    # Auto-generated accessors: external-contact/org linkage and queue id.
    @property
    def external_contact_id(self) -> str:
        """
        Gets the external_contact_id of this ParticipantBasic.
        If this participant represents an external contact, then this will be the globally unique identifier for the external contact.

        :return: The external_contact_id of this ParticipantBasic.
        :rtype: str
        """
        return self._external_contact_id

    @external_contact_id.setter
    def external_contact_id(self, external_contact_id: str):
        """
        Sets the external_contact_id of this ParticipantBasic.
        If this participant represents an external contact, then this will be the globally unique identifier for the external contact.

        :param external_contact_id: The external_contact_id of this ParticipantBasic.
        :type: str
        """
        self._external_contact_id = external_contact_id

    @property
    def external_organization_id(self) -> str:
        """
        Gets the external_organization_id of this ParticipantBasic.
        If this participant represents an external org, then this will be the globally unique identifier for the external org.

        :return: The external_organization_id of this ParticipantBasic.
        :rtype: str
        """
        return self._external_organization_id

    @external_organization_id.setter
    def external_organization_id(self, external_organization_id: str):
        """
        Sets the external_organization_id of this ParticipantBasic.
        If this participant represents an external org, then this will be the globally unique identifier for the external org.

        :param external_organization_id: The external_organization_id of this ParticipantBasic.
        :type: str
        """
        self._external_organization_id = external_organization_id

    @property
    def queue_id(self) -> str:
        """
        Gets the queue_id of this ParticipantBasic.
        If present, the queue id that the communication channel came in on.

        :return: The queue_id of this ParticipantBasic.
        :rtype: str
        """
        return self._queue_id
@queue_id.setter
def queue_id(self, queue_id):
"""
Sets the queue_id of this ParticipantBasic.
If present, the queue id that the communication channel came in on.
:param queue_id: The queue_id of | |
self.get_grid_data(it, v_n_x)
z_arr = self.get_int_data(it, v_n_z)
if mod == 'xy slice':
return np.array(x_arr[:, 0, 0]), np.array(y_arr[0, :, 0]), np.array(z_arr[:, :, 0]),
elif mod == 'integ_over_z':
return np.array(x_arr[:, 0, 0]),np.array(y_arr[0, :, 0]), self.ingeg_over_z(it, z_arr)
elif mod == 'integ_over_z fill_phi':
y_arr, z_arr = self.fill_pho0_and_phi2pi(np.array(y_arr[0, :, 0]),
self.ingeg_over_z(it, z_arr))
print(x_arr[:, 0, 0].shape, y_arr.shape, z_arr.shape)
return np.array(x_arr[:, 0, 0]), y_arr, z_arr
elif mod == 'integ_over_z fill_phi *r':
r2d_arr = np.array(x_arr[:, :, 0])
phi_arr = np.array(y_arr[0, :, 0])
z2d_arr = self.ingeg_over_z(it, z_arr)
rz2d = r2d_arr * z2d_arr
phi_arr, rz2d = self.fill_pho0_and_phi2pi(phi_arr, rz2d)
return np.array(x_arr[:, 0, 0]), phi_arr, rz2d
elif mod == 'integ_over_z fill_phi *r log':
r2d_arr = np.array(x_arr[:, :, 0])
phi_arr = np.array(y_arr[0, :, 0])
z2d_arr = self.ingeg_over_z(it, z_arr)
rz2d = r2d_arr * z2d_arr
phi_arr, rz2d = self.fill_pho0_and_phi2pi(phi_arr, rz2d)
return np.array(x_arr[:, 0, 0]), phi_arr, np.log10(rz2d)
elif mod == 'integ_over_z fill_phi -ave(r)':
r2d_arr = np.array(x_arr[:, :, 0])
phi_arr = np.array(y_arr[0, :, 0])
z2d_arr = self.ingeg_over_z(it, z_arr)
for i in range(len(x_arr[:, 0, 0])):
z2d_arr[i, :] = z2d_arr[i, :] - (np.sum(z2d_arr[i, :]) / len(z2d_arr[i, :]))
phi_arr, rz2d = self.fill_pho0_and_phi2pi(phi_arr, z2d_arr)
return np.array(x_arr[:, 0, 0]), phi_arr, rz2d
else:
raise NameError("Unknown 'mod' parameter:{} ".format(mod))
# old class not used in the pipeline
class COMPUTE_STORE_DESITYMODES(LOAD_INT_DATA):
    """Compute and cache azimuthal density-mode decompositions C_m from
    interpolated cylindrical-grid data.

    NOTE(review): the original comment marks this as an old class not used
    in the pipeline; the (misspelled) class name is preserved so any
    existing callers keep working.
    """

    def __init__(self, sim, grid_object):
        LOAD_INT_DATA.__init__(self, sim, grid_object)
        # Settings: variable to decompose, grid arrays that supply the
        # r/phi/z coordinates and spacings, normalisation, and output file.
        # (Paths and self.set_rootdir come from the surrounding project.)
        self.gen_set = {
            'v_n': 'density',
            'v_n_r': 'r_cyl',
            'v_n_dr': 'dr_cyl',
            'v_n_phi': 'phi_cyl',
            'v_n_dphi': 'dphi_cyl',
            'v_n_dz': 'dz_cyl',
            'iterations': 'all',
            'do_norm': True,
            'm_to_norm': 0,
            'outfname': 'density_modes_int_lapse15.h5',
            'outdir': Paths.ppr_sims + sim + '/' + self.set_rootdir,
            'lapse_mask': 0.15
        }
        #
        self.list_modes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.list_dm_v_ns = ["int_phi", "int_phi_r"]
        # Result cache indexed as [iteration][mode][v_n]; empty arrays mark
        # not-yet-computed entries.
        self.data_dm_matrix = [[[np.zeros(0,)
                                 for k in range(len(self.list_dm_v_ns))]
                                for z in range(len(self.list_modes))]
                               for k in range(len(self.list_iterations))]

    def check_dm_v_n(self, v_n):
        """Raise NameError if *v_n* is not a density-mode variable name."""
        if v_n not in self.list_dm_v_ns:
            raise NameError("v_n: {} not in the list of Density Modes v_ns\n{}"
                            .format(v_n, self.list_dm_v_ns))

    def check_mode(self, mode):
        """Raise NameError if *mode* is not one of the supported m values."""
        if int(mode) not in self.list_modes:
            raise NameError("mode:{} not in the list of modes\n{}"
                            .format(mode, self.list_modes))

    def i_mode(self, mode):
        """Return the cache index of azimuthal mode *mode*."""
        self.check_mode(mode)
        return int(self.list_modes.index(mode))

    def i_dm_v_n(self, v_n):
        """Return the cache index of density-mode variable *v_n*."""
        self.check_dm_v_n(v_n)
        return int(self.list_dm_v_ns.index(v_n))

    # ---

    def compute_density_mode_old(self, it, mode):
        """Deprecated name kept for backward compatibility.

        The original body was a byte-for-byte duplicate of
        compute_density_mode, so it now simply delegates.
        """
        self.compute_density_mode(it, mode)

    def compute_density_mode(self, it, mode):
        """Compute C_m and C_m(r) for iteration *it* and mode *mode* and
        store them in the cache.

        Applies the optional lapse mask (zeroing low-lapse regions) and, if
        configured, normalises by the m_to_norm (usually m=0) decomposition.
        """
        # getting grid
        r_cyl = self.get_grid_data(it, self.gen_set["v_n_r"])
        dr_cyl = self.get_grid_data(it, self.gen_set["v_n_dr"])
        phi_cyl = self.get_grid_data(it, self.gen_set["v_n_phi"])
        dphi_cyl = self.get_grid_data(it, self.gen_set["v_n_dphi"])
        dz_cyl = self.get_grid_data(it, self.gen_set["v_n_dz"])
        # getting data
        density = self.get_int_data(it, self.gen_set["v_n"])
        if self.gen_set["lapse_mask"] is not None:
            # Exclude regions with lapse below the threshold (e.g. inside
            # the apparent horizon) from the decomposition.
            lapse = self.get_int_data(it, "lapse")
            density[lapse < float(self.gen_set["lapse_mask"])] = 0
        m_int_phi, m_int_phi_r = \
            PHYSICS.get_dens_decomp_3d(density, r_cyl, phi_cyl, dphi_cyl, dr_cyl, dz_cyl, m=mode)
        if self.gen_set["do_norm"]:
            m_int_phi_norm, m_int_phi_r_norm = \
                PHYSICS.get_dens_decomp_3d(density, r_cyl, phi_cyl, dphi_cyl, dr_cyl, dz_cyl, m=int(self.gen_set["m_to_norm"]))
            m_int_phi /= m_int_phi_norm
            m_int_phi_r /= m_int_phi_r_norm
        self.data_dm_matrix[self.i_it(it)][self.i_mode(mode)][self.i_dm_v_n("int_phi")] = \
            m_int_phi
        self.data_dm_matrix[self.i_it(it)][self.i_mode(mode)][self.i_dm_v_n("int_phi_r")] = \
            np.array([m_int_phi_r])

    # ---

    def is_computed(self, it, mode, v_n):
        """Compute-on-demand: fill the cache entry if it is still empty."""
        if len(self.data_dm_matrix[self.i_it(it)][self.i_mode(mode)][self.i_dm_v_n(v_n)]) == 0:
            self.compute_density_mode(it, mode)

    def get_density_mode(self, it, mode, v_n):
        """Return the cached density-mode array, computing it if needed."""
        self.check_it(it)
        self.check_mode(mode)
        self.check_dm_v_n(v_n)
        self.is_computed(it, mode, v_n)
        return self.data_dm_matrix[self.i_it(it)][self.i_mode(mode)][self.i_dm_v_n(v_n)]
# old class (used tp load files) --> actually is used for plotting
class LOAD_DENSITY_MODES:
    """
    Load precomputed density-mode decompositions C_m (and C_m(r)) from a
    'density_modes.h5' file for plotting.

    Data arrays ("int_phi", "int_phi_r") are cached per (mode, variable)
    pair; grid arrays (times, iterations, xc, yc, rs, ...) are cached per
    variable.  Everything is loaded lazily on first access.
    """

    def __init__(self, sim):
        self.sim = sim
        self.set_rootdir = __rootoutdir__
        self.gen_set = {
            'maximum_modes': 50,
            'fname' : Paths.ppr_sims + sim + '/' + self.set_rootdir + "density_modes.h5",
            'int_phi': 'int_phi', # 1D array ( C_m )
            'int_phi_r': 'int_phi_r', # 2D array (1D for every iteration ( C_m(r) )
            'xcs': 'xc', # 1D array
            'ycs': 'yc', # 1D array
            'rs': 'rs', # 2D array (1D for every iteration)
            'times': 'times',
            'iterations':'iterations'
        }
        self.n_of_modes_max = 50
        self.list_data_v_ns = ["int_phi", "int_phi_r"]
        self.list_grid_v_ns = ["r_cyl", "times", "iterations", "xc", "yc", "rs"]
        # cache: [mode index][data v_n index] -> ndarray (empty until loaded)
        self.data_dm_matrix = [[np.zeros(0,)
                                for k in range(len(self.list_data_v_ns))]
                               for z in range(self.n_of_modes_max)]
        # cache: [grid v_n index] -> ndarray (empty until loaded)
        self.grid_matrix = [np.zeros(0,)
                            for k in range(len(self.list_grid_v_ns))]
        # modes actually present in the file; filled by load_density_modes()
        self.list_modes = []  # range(50)

    def check_data_v_n(self, v_n):
        """Raise NameError if v_n is not a known data variable name."""
        if not v_n in self.list_data_v_ns:
            raise NameError("v_n: {} not in data list:\n{}"
                            .format(v_n, self.list_data_v_ns))

    def check_grid_v_n(self, v_n):
        """Raise NameError if v_n is not a known grid variable name."""
        if not v_n in self.list_grid_v_ns:
            raise NameError("v_n: {} not in grid list:\n{}"
                            .format(v_n, self.list_grid_v_ns))

    def i_v_n(self, v_n):
        """Index of v_n in its own (data or grid) variable list."""
        if v_n in self.list_data_v_ns:
            return int(self.list_data_v_ns.index(v_n))
        else:
            return int(self.list_grid_v_ns.index(v_n))

    def check_mode(self, mode):
        """Raise ValueError if the modes were not loaded or mode is unknown."""
        if len(self.list_modes) == 0:
            raise ValueError("list of modes was not loaded before data extraction")
        if not mode in self.list_modes:
            raise ValueError("mode: {} available modes: {}"
                             .format(mode, self.list_modes))

    def i_mode(self, mode):
        """Index of mode within the loaded list of modes."""
        if len(self.list_modes) == 0:
            raise ValueError("list of modes was not loaded before data extraction")
        return int(self.list_modes.index(mode))

    # ---
    def load_density_modes(self):
        """Read all modes and grid arrays from the density_modes.h5 file."""
        if not os.path.isfile(self.gen_set['fname']):
            raise IOError("{} not found".format(self.gen_set['fname']))
        dfile = h5py.File(self.gen_set['fname'], "r")
        list_modes = []
        # setting list of density modes in the file (groups named "m=<int>")
        for v_n in dfile:
            if str(v_n).__contains__("m="):
                mode = int(v_n.split("m=")[-1])
                list_modes.append(mode)
        self.list_modes = list_modes
        if len(self.list_modes) > self.n_of_modes_max - 1:
            raise ValueError("too many modes {} \n (>{}) in the file:{}"
                             .format(self.list_modes, self.n_of_modes_max, self.gen_set['fname']))
        # extracting data
        for v_n in dfile:
            if str(v_n).__contains__("m="):
                mode = int(v_n.split("m=")[-1])
                group = dfile[v_n]
                for v_n_ in group:
                    if str(v_n_) in self.list_data_v_ns:
                        self.data_dm_matrix[self.i_mode(mode)][self.i_v_n(v_n_)] = np.array(group[v_n_])
                    else:
                        raise NameError("{} group has a v_n: {} that is not in the data list:\n{}"
                                        .format(v_n, v_n_, self.list_data_v_ns))
            # extracting grid data, for overall
            else:
                if v_n in self.list_grid_v_ns:
                    self.grid_matrix[self.i_v_n(v_n)] = np.array(dfile[v_n])
                else:
                    # BUG FIX: the NameError here was constructed but never
                    # raised, so unknown grid datasets were silently ignored
                    # (the data branch above does raise).
                    raise NameError("dfile v_n: {} not in list of grid v_ns\n{}"
                                    .format(v_n, self.list_grid_v_ns))
        dfile.close()
        print("   modes: {}".format(self.list_modes))

    # ---
    def is_loaded(self, mode, v_n):
        """(Re)load the file if modes or the requested array are missing."""
        if len(self.list_modes) == 0:
            self.load_density_modes()
        elif len(self.data_dm_matrix[self.i_mode(mode)][self.i_v_n(v_n)]) == 0:
            self.load_density_modes()

    def get_grid(self, v_n):
        """Return a grid array (times, iterations, xc, yc, rs, ...)."""
        if len(self.list_modes) == 0:
            self.load_density_modes()
        self.check_grid_v_n(v_n)
        self.is_loaded(self.list_modes[0], self.list_grid_v_ns[0])
        return self.grid_matrix[self.i_v_n(v_n)]

    def get_data(self, mode, v_n):
        """Return a data array ("int_phi" or "int_phi_r") for a given mode."""
        self.check_data_v_n(v_n)
        if len(self.list_modes) == 0:
            self.load_density_modes()
        self.is_loaded(mode, v_n)
        return self.data_dm_matrix[self.i_mode(mode)][self.i_v_n(v_n)]

    #
    def get_grid_for_it(self, it, v_n):
        """Return the grid array slice corresponding to iteration `it`."""
        iterations = list(self.get_grid("iterations"))
        data = self.get_grid(v_n)
        return data[iterations.index(it)]

    def get_data_for_it(self, it, mode, v_n):
        """Return the data array slice corresponding to iteration `it`."""
        iteration = list(self.get_grid("iterations"))
        data = self.get_data(mode, v_n)
        return data[iteration.index(it)]
""" ================================================================================================================ """
def select_number(list, ful_list, dtype=int):
    """Validate a user selection of numbers against a reference list.

    Parameters
    ----------
    list : sequence
        Requested numbers; an empty selection means "use everything".
        (NOTE: the parameter name shadows the builtin `list`; kept for
        backward compatibility with existing callers.)
    ful_list : sequence
        Full set of allowed numbers.
    dtype : type
        Numeric type for the returned array (default int).

    Returns
    -------
    np.ndarray
        The validated selection, or the whole reference list if the
        selection is empty.

    Raises
    ------
    ValueError
        If a requested number is not present in the reference list.
    """
    # BUG FIX: this used `if not any(list)`, which also treated a selection
    # like [0] as empty (0 is falsy) and silently returned the full list.
    # An explicit length check lets 0 be selected.
    if len(list) == 0:
        return np.array(ful_list, dtype=dtype)
    array = np.array(list, dtype=dtype)
    ref_array = np.array(ful_list, dtype=dtype)
    for element in array:
        if not element in ref_array:
            raise ValueError("number element: {} is not in the ref_array:{}"
                             .format(element, ref_array))
    return array
def select_string(list_str, ful_list, for_all="all"):
    """Validate a user selection of strings against a reference list.

    An empty (or all-falsy) selection, or the single wildcard entry
    (`for_all`, default "all"), means "use the full reference list".
    Raises ValueError if a requested string is unknown.
    """
    wildcard = (len(list_str) == 1 and for_all in list_str)
    if not any(list_str) or wildcard:
        return ful_list
    # Reject the first entry that is not part of the reference list.
    unknown = [element for element in list_str if element not in ful_list]
    if unknown:
        raise ValueError("string element: {} is not in the ref_array:{}"
                         .format(unknown[0], ful_list))
    return list_str
""" ===========================================| FOR PRINTING |===================================================== """
def print_colored_string(parts, colors, comma=False):
    """Print a sequence of parts, each in the requested terminal color.

    parts : list
        Items to print; an item may itself be a list, in which case each of
        its elements is printed in the same color.
    colors : list of str
        One entry per part; each must be one of "", "blue", "red",
        "yellow", "green".  "" means plain (uncolored) output.
    comma : bool
        If True, finish with a comma-suffixed empty print instead of a
        plain newline print.

    NOTE(review): the `print(x),` form is the Python 2 idiom for
    suppressing the trailing newline; under Python 3 the trailing comma is
    a no-op and each part gets its own line -- confirm which interpreter
    this module targets.  Colored output is delegated to the project-level
    `Printcolor` helper.
    """
    # Each part must have a matching, known color.
    assert len(parts) ==len(colors)
    for color in colors:
        assert color in ["", "blue", "red", "yellow", "green"]

    for part, color in zip(parts, colors):
        if color == "":
            # Plain output, no color wrapper.
            if isinstance(part, list):
                for _part in part: print(_part),
            else: print(part),
        elif color == "blue":
            if isinstance(part, list):
                for _part in part:
                    Printcolor.blue(_part, comma=True)
            else:
                Printcolor.blue(part, comma=True)
        elif color == "green":
            if isinstance(part, list):
                for _part in part:
                    Printcolor.green(_part, comma=True)
            else:
                Printcolor.green(part, comma=True)
        elif color == "red":
            if isinstance(part, list):
                for _part in part:
                    Printcolor.red(_part, comma=True)
            else:
                Printcolor.red(part, comma=True)
        elif color == "yellow":
            if isinstance(part, list):
                for _part in part:
                    Printcolor.yellow(_part, comma=True)
            else:
                Printcolor.yellow(part, comma=True)
        else:
            # Unreachable given the assertions above; kept as a safety net.
            raise NameError("wrong color: {}".format(color))
    # Terminate the line (or keep the cursor on it when comma=True).
    if comma:
        print(''),
    else:
        print('')
def get_xmin_xmax_ymin_ymax_zmin_zmax(rl):
if rl == 6:
xmin, xmax = -14, 14
ymin, ymax = -14, 14
zmin, zmax = 0, 14
elif rl == 5:
xmin, xmax = -28, 28
ymin, ymax = -28, 28
zmin, zmax = 0, 28
elif rl == 4:
xmin, xmax = -48, 48
ymin, ymax = -48, +48
zmin, zmax = 0, 48
elif rl == 3:
xmin, xmax = -88, 88
ymin, ymax = -88, 88
zmin, zmax = 0, 88
elif rl == 2:
xmin, xmax = -178, 178
ymin, ymax = -178, +178
zmin, zmax = 0, 178
elif rl == 1:
xmin, xmax = -354, 354
ymin, ymax = -354, +354
zmin, zmax = 0, 354
elif rl == 0:
xmin, xmax = -1044, 1044
ymin, ymax = -1044, 1044
| |
volreg_kws is None else volreg_kws
if not isinstance(template, six.string_types):
if template is None:
# Correct motion: first pass
utils.run(f"3dTcat -prefix {temp_dir}/template.pass1.nii -overwrite {files[0][0]}'[{files[0][1]}]'")
pc(pc.run(correct_motion, f"{temp_dir}/template.pass1.nii", files[k][0], f"{prefix}.pass1.nii", **volreg_kws) for k, prefix in enumerate(temp_prefixs))
# Consider only template_candidate_runs
n_all_runs = len(temp_prefixs)
if template_candidate_runs is None:
template_candidate_runs = list(range(n_all_runs))
# Find best template
Xs = [np.loadtxt(f"{prefix}.pass1.param.1D") for k, prefix in enumerate(temp_prefixs) if k in template_candidate_runs]
XX = np.vstack(Xs)
idx = np.argmin([np.sqrt(np.sum(np.linalg.norm(XX-x, axis=1)**2)) for x in XX])
L = [X.shape[0] for X in Xs]
D = idx - np.cumsum(L)
run_idx = np.nonzero(D<0)[0][0]
TR_idx = L[run_idx] + D[run_idx]
# Get run_idx within all runs from run_idx within "candidate" runs
all_runs = np.zeros(n_all_runs)
sel_runs = all_runs[template_candidate_runs]
sel_runs[run_idx] = 1
all_runs[template_candidate_runs] = sel_runs
run_idx = np.nonzero(all_runs)[0][0]
else:
run_idx, TR_idx = template
base_file = files[run_idx][0]
n_TRs = afni.get_dims(base_file)[3]
template = f"{base_file}'[{max(0, TR_idx-dt)}..{min(n_TRs, TR_idx+dt)}]'"
# Correct motion: second pass (based on best templated)
utils.run(f"3dTstat -median -prefix {temp_dir}/template.pass2.nii -overwrite {template}")
pc(pc.run(correct_motion, f"{temp_dir}/template.pass2.nii", files[k][0], f"{prefix}.pass2.nii", **volreg_kws) for k, prefix in enumerate(temp_prefixs))
# Generate final outputs in one resample step
if final_resample:
if blip_results is not None:
pc((pc.run(apply_transforms, [f"{prefix}.pass2.aff12.1D", blip_result['warp_file']], \
f"{temp_dir}/template.pass2.nii", in_file, output['out_file'], res=final_res) \
for k, (prefix, blip_result, in_file, output) in enumerate(zip(temp_prefixs, blip_results, in_files, outputs))), pool_size=4)
elif best_reverse is not None:
pc((pc.run(apply_transforms, [f"{prefix}.pass2.aff12.1D", f"{prefix}.blip.for2mid.warp.nii"], \
f"{temp_dir}/template.pass2.nii", in_file, output['out_file'], res=final_res) \
for k, (prefix, in_file, output) in enumerate(zip(temp_prefixs, in_files, outputs))), pool_size=4)
else: # volreg only
pc((pc.run(apply_transforms, [f"{prefix}.pass2.aff12.1D"], \
f"{temp_dir}/template.pass2.nii", in_file, output['out_file'], res=final_res) \
for k, (prefix, in_file, output) in enumerate(zip(temp_prefixs, in_files, outputs))), pool_size=4)
else:
pc(pc.run(f"3dcopy {prefix}.pass2.nii {output['out_file']} -overwrite") for prefix, output in zip(temp_prefixs, outputs))
# Copy other results
if best_reverse is not None and blip_results is None:
pc(pc.run(f"3dcopy {prefix}.blip.for2mid.warp.nii {output['warp_file']} -overwrite") for prefix, output in zip(temp_prefixs, outputs))
pc(pc.run(f"3dcopy {prefix}.blip.nii {output['blip_file']} -overwrite") for prefix, output in zip(temp_prefixs, outputs))
pc(pc.run(shutil.copy, f"{prefix}.pass2.aff12.1D", output['xform_file']) for prefix, output in zip(temp_prefixs, outputs))
pc(pc.run(shutil.copy, f"{prefix}.pass2.param.1D", output['param_file']) for prefix, output in zip(temp_prefixs, outputs))
shutil.rmtree(temp_dir)
all_finished(outputs)
return outputs
def retrieve_mp2rage_labels(dicom_dirs, dicom_ext='.IMA'):
    '''
    Retrieve mp2rage subvolume labels like UNI, ND, etc.

    Parameters
    ----------
    dicom_dirs : list or str
        A list of dicom file folders, e.g., ['T101', 'T102', ...], or
        a glob pattern like 'raw_fmri/T1??'

    Returns
    -------
    label2dicom_dir : OrderedDict
        label -> (index, dicom_dir)
    '''
    if isinstance(dicom_dirs, six.string_types):
        # Expand the glob pattern, keep directories only, in sorted order.
        dicom_dirs = [d for d in sorted(glob.glob(dicom_dirs)) if path.isdir(d)]
    mapping = OrderedDict()
    for idx, folder in enumerate(dicom_dirs):
        # Peek at the first dicom file of the folder to read its header.
        sample = glob.glob(f"{folder}/*{dicom_ext}")[0]
        hdr = dicom.parse_dicom_header(sample)
        # The label is the SeriesDescription with the leading
        # "<ProtocolName>_" presumably stripped (the +1 skips a separator).
        lbl = hdr['SeriesDescription'][len(hdr['ProtocolName'])+1:]
        mapping[lbl] = (idx, folder)
    return mapping
def assign_mp2rage_labels(T1s, dicom_dirs, dicom_ext='.IMA'):
    '''Tag each T1 dataset with its mp2rage label (e.g., UNI_Images, INV2_ND).

    The label is stored in the dataset header via afni.set_attribute() and
    is later read back by create_mp2rage_SNR_mask().
    '''
    if isinstance(T1s, six.string_types):
        T1s = sorted(glob.glob(T1s))
    label_map = retrieve_mp2rage_labels(dicom_dirs, dicom_ext=dicom_ext)
    labels = list(label_map.keys())
    # One label per T1 volume, relying on matching (sorted) order.
    assert(len(T1s) == len(labels))
    for dset, lbl in zip(T1s, labels):
        afni.set_attribute(dset, 'mp2rage_label', lbl)
def create_mp2rage_SNR_mask(T1s, out_file):
    '''
    Create a brain mask for mp2rage data by thresholding a smoothed INV2_ND
    volume, choosing the threshold that best rejects the noisy background of
    the UNI image.

    Need to call prep.assign_mp2rage_labels() first.

    Parameters
    ----------
    T1s : list or str
        T1 volumes (or a glob pattern) previously tagged with a
        'mp2rage_label' header attribute.
    out_file : str
        Output mask file; a "<prefix>_thres" smoothed-intensity volume is
        written next to it.

    Returns
    -------
    outputs : dict
        'out_file' (final mask), 'thres_file' (smoothed INV2_ND),
        'ths'/'y' (candidate thresholds and their scores), 'th' (chosen
        threshold).
    '''
    temp_dir = utils.temp_folder()
    out_dir, prefix, ext = afni.split_out_file(out_file, split_path=True, trailing_slash=True)
    outputs = {
        'out_file': f"{out_dir}{prefix}{ext}",
        'thres_file': f"{out_dir}{prefix}_thres{ext}",
        'ths': None,
        'y': None,
        'th': None,
    }
    # Retrieve mp2rage labels like INV2_ND, UNI_Images, etc.
    if isinstance(T1s, six.string_types):
        T1s = sorted(glob.glob(T1s))
    label2T1 = OrderedDict((afni.get_attribute(T1, 'mp2rage_label'), (k, T1)) for k, T1 in enumerate(T1s))
    # Smooth INV2_ND to estimate the intensity profile
    utils.run(f"3dmerge -1blur_fwhm 6 -doall -prefix {outputs['thres_file']} -overwrite {label2T1['INV2_ND'][1]}")
    INV2 = io.read_vol(outputs['thres_file']).ravel()
    UNI = io.read_vol(label2T1['UNI_Images'][1]).ravel()
    # Determine best threshold
    # Exploit the fact that: 1) Noisy UNI is like Gaussian 2) Gaussian has large entropy/std
    ths = []
    y = []
    # Candidate thresholds: the percentiles of the smoothed INV2 intensity.
    intensities = np.percentile(INV2, np.arange(100))
    for k, th in enumerate(intensities[1:], start=1):
        # Skip near-duplicate percentile levels (< 5 intensity units apart).
        if th - intensities[k-1] < 5:
            continue
        ths.append(th)
        # y.append(stats.entropy(np.unique(UNI[INV2>th], return_counts=True)[1]))
        # Score: std of the UNI voxels that survive the threshold; the best
        # threshold maximizes it (keeps the Gaussian-like noisy voxels out).
        y.append(np.std(UNI[INV2>th]))
    th = ths[np.argmax(y)]
    outputs['ths'] = ths
    outputs['y'] = y
    outputs['th'] = th
    # Create mask
    utils.run(f"3dcalc -a {outputs['thres_file']} -expr 'step(a-{th})' -prefix {outputs['out_file']} -overwrite")
    # Modified mask (restricting to region with high correlation)
    utils.run(f"3dLocalBistat -nbhd 'SPHERE(3)' -stat pearson -mask {outputs['out_file']} \
        -prefix {temp_dir}/corr.nii -overwrite {label2T1['INV2_ND'][1]} {label2T1['UNI_Images'][1]}")
    utils.run(f"3dcalc -a {temp_dir}/corr.nii -expr 'step(a-0.3)' -prefix {temp_dir}/good.nii -overwrite")
    utils.run(f"3dmerge -1blur_fwhm 10 -doall -prefix {temp_dir}/good_smoothed.nii -overwrite {temp_dir}/good.nii")
    utils.run(f"3dcalc -a {temp_dir}/good_smoothed.nii -expr 'step(a-0.3)' -prefix {temp_dir}/good_mask.nii -overwrite")
    utils.run(f"3dmask_tool -input {temp_dir}/good_mask.nii -prefix {temp_dir}/good_mask.nii -overwrite -dilate_input 5")
    # Intersect the threshold mask with the (dilated) high-correlation mask.
    utils.run(f"3dcalc -a {outputs['out_file']} -b {temp_dir}/good_mask.nii -expr 'step(a)*step(b)' \
        -prefix {outputs['out_file']} -overwrite")
    shutil.rmtree(temp_dir)
    all_finished(outputs)
    return outputs
def prep_mp2rage(dicom_dirs, out_file='T1.nii', unwarp=False, dicom_ext='.IMA'):
    '''Convert dicom files and remove the noise pattern outside the brain.

    dicom_dirs : list or str
        A list of dicom file folders, e.g., ['T101', 'T102', ...], or
        a glob pattern like 'raw_fmri/T1??'
    out_file : str
        Output T1 file; skull-stripped ("_ns") and, with unwarp=True,
        distortion-corrected ("_corr") variants are written alongside.
    unwarp : bool
        If True, also estimate and apply a gradient-nonlinearity-style
        correction by nonlinearly warping INV2_ND onto INV2 with 3dQwarp.

    Returns a dict of output file paths (see `outputs` below).
    If all outputs already exist, the existing results are reused.
    '''
    pc = utils.PooledCaller()
    prefix, ext = afni.split_out_file(out_file)
    outputs = {
        'out_file': f"{prefix}{ext}",
        'ns_file': f"{prefix}_ns{ext}",
    }
    if unwarp:
        outputs.update({
            'corr_file': f"{prefix}_corr{ext}",
            'corr_ns_file': f"{prefix}_corr_ns{ext}",
            'warp_file': f"{prefix}_corr.warp{ext}",
        })
    if not all_finished(outputs):
        temp_dir = utils.temp_folder()
        # Retrieve dicom information (for labels like UNI, ND, etc.)
        label2dicom_dir = retrieve_mp2rage_labels(dicom_dirs, dicom_ext=dicom_ext)
        # Convert dicom files
        for label in ['UNI_Images', 'INV2_ND', 'INV2']:
            pc.run(io.convert_dicom, label2dicom_dir[label][1], f"{temp_dir}/{label}.nii", dicom_ext=dicom_ext)
        pc.wait()
        # Generate skull strip mask
        utils.run(f"3dcalc -a {temp_dir}/UNI_Images.nii -b {temp_dir}/INV2_ND.nii \
            -expr 'a*b' -float -prefix {temp_dir}/INV2xUNI.nii -overwrite") # INV2*UNI is the recommended method by {Fujimoto2014}, but too aggressive with 3dSkullStrip
        for label in ['INV2_ND', 'INV2xUNI']:
            pc.run(f"3dSkullStrip -orig_vol -prefix {temp_dir}/{label}_ns.nii -overwrite -input {temp_dir}/{label}.nii")
        pc.wait()
        utils.run(f"3dcalc -a {temp_dir}/INV2_ND_ns.nii -b {temp_dir}/INV2xUNI_ns.nii \
            -expr 'max(step(a),step(b))' -prefix {temp_dir}/mask.nii -overwrite") # Take the union of the two masks as the final brain mask
        utils.run(f"3dmask_tool -dilate_input -1 1 -prefix {temp_dir}/mask.nii -overwrite -input {temp_dir}/mask.nii") # Remove "spikes" on the surface of the mask
        # Generate merged T1
        utils.run(f"3dcalc -a {temp_dir}/UNI_Images.nii -m {temp_dir}/mask.nii \
            -expr 'a*m' -prefix {outputs['ns_file']} -overwrite")
        # UNI inside the brain mask, scaled INV2_ND outside of it.
        utils.run(f"3dcalc -a {temp_dir}/UNI_Images.nii -b {temp_dir}/INV2_ND.nii -m {temp_dir}/mask.nii \
            -expr 'a*m+2*b*(1-m)' -prefix {outputs['out_file']} -overwrite") # Generate "merged" file
        if unwarp:
            # Estimate distortion correction transform
            correction = 'DIS2D'
            min_patch = calculate_min_patch(f"{temp_dir}/INV2.nii")
            utils.run(f"3dQwarp -blur 1 1 -minpatch {min_patch} \
                -base {temp_dir}/INV2.nii \
                -source {temp_dir}/INV2_ND.nii \
                -prefix {temp_dir}/{correction}.nii -overwrite")
            # Apply transform
            for fi, fo in zip(['out_file', 'ns_file'], ['corr_file', 'corr_ns_file']):
                pc.run(apply_transforms, f"{temp_dir}/{correction}_WARP.nii", f"{temp_dir}/INV2.nii", outputs[fi], outputs[fo])
            pc.wait()
            # Keep the estimated warp field as an output.
            os.rename(f"{temp_dir}/{correction}_WARP.nii", outputs['warp_file'])
        shutil.rmtree(temp_dir)
    else:
        print('>> Reuse existing results.')
    all_finished(outputs)
    return outputs
def prep_mp2rages(data_dir, sessions=None, subdir_pattern='T1??', unwarp=True, **kwargs):
    '''Run prep_mp2rage() for several sessions in parallel.

    data_dir : str
        Root folder containing one subfolder per session.
    sessions : list of str, optional
        Session subfolder names; inferred via dicom_report.inspect_mp2rage()
        when omitted.
    Extra keyword arguments (including `out_file`) are forwarded to
    prep_mp2rage().

    Returns an OrderedDict: session -> prep_mp2rage() outputs.
    '''
    if sessions is None:
        sessions = dicom_report.inspect_mp2rage(data_dir, subdir_pattern=subdir_pattern).session
    # BUG FIX: `kwargs.pop('out_file')` used to live inside the loop, so a
    # custom out_file was consumed on the first session and every later
    # session silently fell back to the default 'T1.nii'.  Pop it once.
    out_file = kwargs.pop('out_file', 'T1.nii')
    pc = utils.PooledCaller(pool_size=4)
    for session_dir in [f'{data_dir}/{session}' for session in sessions]:
        pc.run(prep_mp2rage, f'{session_dir}/{subdir_pattern}', out_file=f'{session_dir}/{out_file}', unwarp=unwarp, **kwargs)
    outputs = pc.wait()
    return OrderedDict([(session, output) for session, output in zip(sessions, outputs)])
def average_anat(T1s, out_file, template_idx=0, T1s_ns=None, weight=None):
    '''Align several T1 volumes to one template run and average them.

    T1s : list of str
        Input T1 volumes.
    out_file : str
        Output average; per-run intermediates are kept in a folder named
        after its prefix.
    template_idx : int
        Index of the run used as alignment template.
    T1s_ns : list of str | 'default' | None
        Skull-stripped counterparts.  None: strip here; 'default': assume
        files named like the inputs with a '_ns' suffix.
    weight : None
        Weighted averaging is not implemented yet.

    Returns a dict with per-run 'out_file'/'ns_file' lists and the 'lpa'
    alignment cost per run.
    '''
    prefix, ext = afni.split_out_file(out_file)
    # Intermediates live in a directory named after the output prefix.
    os.makedirs(prefix, exist_ok=True)
    N = len(T1s)
    pc = utils.PooledCaller()
    outputs = {
        'out_file': [f"{prefix}/T1{k+1:02d}.nii" for k in range(N)],
        'ns_file': [f"{prefix}/T1{k+1:02d}_ns.nii" for k in range(N)],
        'cost': None,
    }
    if T1s_ns is None:
        # No skull-stripped versions provided: create them here.
        T1s_ns = [f"{prefix}/T1{k+1:02d}_ns.nii" for k in range(N)]
        pc(pc.run(skullstrip, T1, T1_ns) for T1, T1_ns in zip(T1s, T1s_ns))
    elif T1s_ns == 'default':
        # Assume pre-existing '<input>_ns' files next to the inputs.
        T1s_ns = [afni.insert_suffix(T1, '_ns') for T1 in T1s]
    # Align each skull-stripped run to the template run (template: copy).
    for k in range(N):
        if k == template_idx:
            pc.run(copy_dset, T1s_ns[k], outputs['ns_file'][k])
        else:
            pc.run(align_anat, T1s_ns[template_idx], T1s_ns[k], outputs['ns_file'][k], strip=False)
    align_outputs = pc.wait(pool_size=4)
    # 'lpa' cost per run; NaN where there is no alignment (the template).
    outputs['cost'] = [(o['cost']['lpa'] if o is not None else np.nan) for o in align_outputs]
    # Apply the estimated transforms to the original (unstripped) volumes.
    for k in range(N):
        if k == template_idx:
            pc.run(copy_dset, T1s[template_idx], outputs['out_file'][k])
        else:
            pc.run(apply_transforms, align_outputs[k]['xform_file'], T1s[template_idx], T1s[k], outputs['out_file'][k])
    pc.wait()
    if weight is None:
        # Plain (unweighted) voxelwise mean across all aligned runs.
        pc.run1(f"3dMean -prefix {prefix}{ext} -overwrite {' '.join(outputs['out_file'])}")
    else:
        raise NotImplementedError()
    all_finished(outputs)
    return outputs
def fs_recon(T1s, out_dir, T2=None, FLAIR=None, NIFTI=True, hires=True, fs_ver=None, V1=True, HCP_atlas=True, n_jobs=None):
'''
Parameters
----------
T1s : list of str | 'brainmask_edit' | 'wm_edit'
fs_ver : {'v6', 'v6.hcp', 'skip'}
'''
start_time = time.time()
edits = ['brainmask_edit', 'wm_edit']
if isinstance(T1s, six.string_types) and T1s not in edits:
T1s = [T1s]
out_dir = path.realpath(out_dir)
subjects_dir, subj = path.split(out_dir) # Environment variable may need full path
temp_dir = utils.temp_folder()
outputs = {
'subj_dir': out_dir,
'suma_dir': f"{out_dir}/SUMA",
}
if n_jobs is None:
n_jobs = DEFAULT_JOBS
# Setup FreeSurfer SUBJECTS_DIR
if not path.exists(subjects_dir):
os.makedirs(subjects_dir)
os.environ['SUBJECTS_DIR'] = subjects_dir
if not path.exists(f'{subjects_dir}/V1_average'):
os.symlink(f"{os.environ['FREESURFER_HOME']}/subjects/V1_average", f"{subjects_dir}/V1_average")
# Run recon-all
if fs_ver is None:
fs_ver = 'v6' # {'v7', 'v6', 'v6.hcp', 'skip'}
if T1s == 'brainmask_edit': # Manual edit brainmask.mgz
if fs_ver == 'v6':
hires_cmd = f"-hires" if hires else ''
utils.run(f"recon-all -s {subj} \
-autorecon-pial {hires_cmd} \
-parallel -openmp {n_jobs}",
error_pattern='', goal_pattern='recon-all .+ finished without error')
elif fs_ver == 'skip':
pass
elif T1s == 'wm_edit': # Manual edit control points and wm.mgz (in addition to brainmask.mgz)
if fs_ver == 'v6':
hires_cmd = f"-hires" if hires else ''
utils.run(f"recon-all -s {subj} \
-autorecon2-cp -autorecon2-wm -autorecon-pial {hires_cmd} \
-parallel -openmp | |
<gh_stars>1-10
"""
This file provides necessary code to allow boot up of a virtual machine with
the correct program running. This code may provide slightly different environment
when compared to real hardware process, since e.g. external files can be mmap-ed
into VM's memory for writing.
"""
import importlib
import mmap
from functools import partial
from ctypes import sizeof
from six import PY2
from .interfaces import IMachineWorker
from .errors import InvalidResourceError
from .util import align, BinaryFile
from .mm import u8_t, u16_t, u32_t, UINT32_FMT, PAGE_SIZE, area_to_pages, PAGE_MASK, ExternalMemoryPage
from .mm.binary import SectionFlags, File
from .snapshot import SnapshotNode
from .hdt import HDT, HDTEntry_Argument, HDTEntry_Device
from .debugging import Point # noqa
#: By default, Hardware Description Table starts at this address after boot.
DEFAULT_HDT_ADDRESS = 0x00000100
#: By default, CPU starts executing instructions at this address after boot.
DEFAULT_BOOTLOADER_ADDRESS = 0x00020000
class MMapMemoryPage(ExternalMemoryPage):
    """
    Memory page backed by an external file that is accessible via ``mmap()``
    call. It's a part of one of :py:class:`ducky.boot.MMapArea` instances, and
    if such area was opened as `shared`, every change in the content of its
    pages will reflect onto the content of an external file, and vice versa,
    every change of external file will be reflected in content of this page
    (if this page lies in affected area).

    :param MMapArea area: area this page belongs to.
    """

    def __init__(self, area, *args, **kwargs):
        super(MMapMemoryPage, self).__init__(*args, **kwargs)

        self.area = area

        # Bind the byte accessors matching the running interpreter: Python 2
        # mmap indexing yields 1-char strings, Python 3 yields ints.
        suffix = '_py2' if PY2 else '_py3'
        self.get = getattr(self, '_get' + suffix)
        self.put = getattr(self, '_put' + suffix)

    def get(self, offset):
        """
        Read one byte from page.

        Abstract placeholder; ``__init__`` rebinds it with the accessor
        tailored for the Python version used.

        :param int offset: offset of the requested byte.
        :rtype: int
        """
        raise NotImplementedError()

    def put(self, offset, b):
        """
        Write one byte to page.

        Abstract placeholder; ``__init__`` rebinds it with the accessor
        tailored for the Python version used.

        :param int offset: offset of the modified byte.
        :param int b: new value of the modified byte.
        """
        raise NotImplementedError()

    def _get_py2(self, offset):
        """Python 2 accessor: buffer yields 1-char strings, convert via ord()."""
        return ord(self.data[self.offset + offset])

    def _put_py2(self, offset, b):
        """Python 2 accessor: store the byte as a 1-char string via chr()."""
        self.data[self.offset + offset] = chr(b)

    def _get_py3(self, offset):
        """Python 3 accessor: buffer indexing already yields ints."""
        return self.data[self.offset + offset]

    def _put_py3(self, offset, b):
        """Python 3 accessor: ints are stored directly."""
        self.data[self.offset + offset] = b
class MMapAreaState(SnapshotNode):
    # Snapshot node describing one mmap-ed area: its memory address, size,
    # backing-file path, and offset within that file.
    def __init__(self):
        super(MMapAreaState, self).__init__('address', 'size', 'path', 'offset')
class MMapArea(object):
    """
    Objects of this class represent one mmaped memory area each, to track this
    information for later use.

    :param ptr: ``mmap object``, as returned by :py:meth:`mmap.mmap` function.
    :param u32_t address: address of the first byte of an area in the memory.
    :param u32_t size: length of the area, in bytes.
    :param file_path: path to a source file.
    :param u32_t offset: offset of the first byte in the source file.
    :param int pages_start: first page of the area.
    :param int pages_cnt: number of pages in the area.
    :param mm.binary.SectionFlags flags: flags applied to this area.
    """

    def __init__(self, ptr, address, size, file_path, offset, pages_start, pages_cnt, flags):
        super(MMapArea, self).__init__()

        self.ptr = ptr
        self.address = address
        self.size = size
        self.file_path = file_path
        self.offset = offset
        self.pages_start = pages_start
        self.pages_cnt = pages_cnt
        self.flags = flags

    def __repr__(self):
        return '<MMapArea: address=%s, size=%s, filepath=%s, pages-start=%s, pages-cnt=%i, flags=%s>' % (UINT32_FMT(self.address), self.size, self.file_path, self.pages_start, self.pages_cnt, self.flags.to_string())

    # Snapshot support is intentionally a no-op: mmap areas are not
    # persisted into VM snapshots (they are re-created on boot).
    def save_state(self, parent):
        pass

    def load_state(self, state):
        pass
class ROMLoader(IMachineWorker):
"""
This class provides methods for loading all necessary pieces into VM's
memory. These methods are called in VM's `boot` phase.
"""
    def __init__(self, machine):
        # Machine this loader boots; its config drives what gets loaded.
        self.machine = machine
        self.config = machine.config

        # Refcounted registry of files opened for mmap-ing.
        self.opened_mmap_files = {}  # path: (cnt, file)
        # Active mmap-ed areas, keyed by their base memory address.
        self.mmap_areas = {}

        self.logger = self.machine.LOGGER
        self.DEBUG = self.machine.DEBUG
def _get_mmap_fileno(self, file_path):
if file_path not in self.opened_mmap_files:
self.opened_mmap_files[file_path] = [0, open(file_path, 'r+b')]
desc = self.opened_mmap_files[file_path]
desc[0] += 1
return desc[1].fileno()
def _put_mmap_fileno(self, file_path):
desc = self.opened_mmap_files[file_path]
desc[0] -= 1
if desc[0] > 0:
return
desc[1].close()
del self.opened_mmap_files[file_path]
def mmap_area(self, file_path, address, size, offset = 0, flags = None, shared = False):
"""
Assign set of memory pages to mirror external file, mapped into memory.
:param string file_path: path of external file, whose content new area
should reflect.
:param u24 address: address where new area should start.
:param u24 size: length of area, in bytes.
:param int offset: starting point of the area in mmaped file.
:param ducky.mm.binary.SectionFlags flags: specifies required flags for mmaped
pages.
:param bool shared: if ``True``, content of external file is mmaped as
shared, i.e. all changes are visible to all processes, not only to the
current ducky virtual machine.
:returns: newly created mmap area.
:rtype: ducky.mm.MMapArea
:raises ducky.errors.InvalidResourceError: when ``size`` is not multiply of
:py:data:`ducky.mm.PAGE_SIZE`, or when ``address`` is not multiply of
:py:data:`ducky.mm.PAGE_SIZE`, or when any of pages in the affected area
is already allocated.
"""
self.DEBUG('%s.mmap_area: file=%s, offset=%s, size=%s, address=%s, flags=%s, shared=%s', self.__class__.__name__, file_path, offset, size, UINT32_FMT(address), flags.to_string(), shared)
if size % PAGE_SIZE != 0:
raise InvalidResourceError('Memory size must be multiple of PAGE_SIZE')
if address % PAGE_SIZE != 0:
raise InvalidResourceError('MMap area address must be multiple of PAGE_SIZE')
mc = self.machine.memory
pages_start, pages_cnt = area_to_pages(address, size)
for i in range(pages_start, pages_start + pages_cnt):
if i in mc.pages:
raise InvalidResourceError('MMap request overlaps with existing pages: page=%s, area=%s' % (mc.pages[i], mc.pages[i].area))
mmap_flags = mmap.MAP_SHARED if shared else mmap.MAP_PRIVATE
# Always mmap as writable - VM will force read-only access using
# page flags. But since it is possible to change page flags
# in run-time, and request write access to areas originaly
# loaded as read-only, such write access would fail because
# the underlying mmap area was mmaped as read-only only, and this
# limitation is not possible to overcome.
mmap_prot = mmap.PROT_READ | mmap.PROT_WRITE
ptr = mmap.mmap(
self._get_mmap_fileno(file_path),
size,
flags = mmap_flags,
prot = mmap_prot,
offset = offset)
area = MMapArea(ptr, address, size, file_path, ptr, pages_start, pages_cnt, flags)
for i in range(pages_start, pages_start + pages_cnt):
mc.register_page(MMapMemoryPage(area, mc, i, ptr, offset = (i - pages_start) * PAGE_SIZE))
self.mmap_areas[area.address] = area
return area
    def unmmap_area(self, mmap_area):
        # Tear down a previously mmap-ed area: unregister its memory pages,
        # forget the area, close the mmap object, and drop our refcount on
        # the backing file (which closes it when no other area uses it).
        mc = self.machine.memory

        for pg in mc.get_pages(pages_start = mmap_area.pages_start, pages_cnt = mmap_area.pages_cnt):
            mc.unregister_page(pg)

        del self.mmap_areas[mmap_area.address]

        mmap_area.ptr.close()

        self._put_mmap_fileno(mmap_area.file_path)
def setup_hdt(self):
"""
Initialize memory area containing :ref:`HDT`.
If VM config file specifies ``HDT`` image file, it is loaded, otherwise HDT
is constructed for the actual configuration, and then it's copied into memory.
:param u32_t machine.hdt-address: Base address of ``HDT`` in memory. If not
set, :py:const:`ducky.boot.DEFAULT_HDT_ADDRESS` is used.
:param str machine.hdt-image: ``HDT`` image to load. If not set, ``HDT`` is
constructed for the actual VM's configuration.
"""
self.DEBUG('%s.setup_hdt', self.__class__.__name__)
hdt_address = self.config.getint('machine', 'hdt-address', DEFAULT_HDT_ADDRESS)
if hdt_address & ~PAGE_MASK:
raise InvalidResourceError('HDT address must be page-aligned: address=%s' % UINT32_FMT(hdt_address))
self.DEBUG('HDT address=%s', UINT32_FMT(hdt_address))
def __alloc_pages(size):
pages = self.machine.memory.alloc_pages(base = hdt_address, count = align(PAGE_SIZE, size) // PAGE_SIZE)
self.machine.DEBUG('%s.setup_hdt: address=%s, size=%s (%s pages)', self.__class__.__name__, UINT32_FMT(hdt_address), size, len(pages))
hdt_image = self.config.get('machine', 'hdt-image', None)
if hdt_image is None:
self.DEBUG('HDT image not specified, creating one')
hdt = HDT(self.machine.LOGGER, config = self.config)
hdt.create()
__alloc_pages(len(hdt))
def __write_field(writer_fn, size, address, field_value):
writer_fn(address, field_value)
return address + size
def __write_array(max_length, address, field_value):
for i in range(0, max_length):
self.machine.memory.write_u8(address + i, field_value[i])
return address + max_length
def __write_struct(address, struct):
self.DEBUG('__write_struct: address=%s, struct=%s (%s)', UINT32_FMT(address), struct, sizeof(struct))
for n, t in struct._fields_:
address = writers[sizeof(t)](address, getattr(struct, n))
return address
writers = {
1: partial(__write_field, self.machine.memory.write_u8, 1),
2: partial(__write_field, self.machine.memory.write_u16, 2),
4: partial(__write_field, self.machine.memory.write_u32, 4),
HDTEntry_Argument.MAX_NAME_LENGTH: partial(__write_array, HDTEntry_Argument.MAX_NAME_LENGTH),
HDTEntry_Device.MAX_NAME_LENGTH: partial(__write_array, HDTEntry_Device.MAX_NAME_LENGTH),
HDTEntry_Device.MAX_IDENT_LENGTH: partial(__write_array, HDTEntry_Device.MAX_IDENT_LENGTH)
}
address = __write_struct(hdt_address, hdt.header)
for entry in hdt.entries:
address = __write_struct(address, entry)
else:
self.DEBUG('Loading HDT image %s', hdt_image)
with BinaryFile.open(self.logger, hdt_image, 'r') as f_in:
img = f_in.read()
__alloc_pages(len(img))
for address, b in zip(range(hdt_address, hdt_address + len(img)), | |
# Repository: iandorsey00/geodata
county_names = [
'Autauga County, Alabama',
'Baldwin County, Alabama',
'Barbour County, Alabama',
'Bibb County, Alabama',
'Blount County, Alabama',
'Bullock County, Alabama',
'Butler County, Alabama',
'Calhoun County, Alabama',
'Chambers County, Alabama',
'Cherokee County, Alabama',
'Chilton County, Alabama',
'Choctaw County, Alabama',
'Clarke County, Alabama',
'Clay County, Alabama',
'Cleburne County, Alabama',
'Coffee County, Alabama',
'Colbert County, Alabama',
'Conecuh County, Alabama',
'Coosa County, Alabama',
'Covington County, Alabama',
'Crenshaw County, Alabama',
'Cullman County, Alabama',
'Dale County, Alabama',
'Dallas County, Alabama',
'DeKalb County, Alabama',
'Elmore County, Alabama',
'Escambia County, Alabama',
'Etowah County, Alabama',
'Fayette County, Alabama',
'Franklin County, Alabama',
'Geneva County, Alabama',
'Greene County, Alabama',
'Hale County, Alabama',
'Henry County, Alabama',
'Houston County, Alabama',
'Jackson County, Alabama',
'Jefferson County, Alabama',
'Lamar County, Alabama',
'Lauderdale County, Alabama',
'Lawrence County, Alabama',
'Lee County, Alabama',
'Limestone County, Alabama',
'Lowndes County, Alabama',
'Macon County, Alabama',
'Madison County, Alabama',
'Marengo County, Alabama',
'Marion County, Alabama',
'Marshall County, Alabama',
'Mobile County, Alabama',
'Monroe County, Alabama',
'Montgomery County, Alabama',
'Morgan County, Alabama',
'Perry County, Alabama',
'Pickens County, Alabama',
'Pike County, Alabama',
'Randolph County, Alabama',
'Russell County, Alabama',
'St. Clair County, Alabama',
'Shelby County, Alabama',
'Sumter County, Alabama',
'Talladega County, Alabama',
'Tallapoosa County, Alabama',
'Tuscaloosa County, Alabama',
'Walker County, Alabama',
'Washington County, Alabama',
'Wilcox County, Alabama',
'Winston County, Alabama',
'Aleutians East Borough, Alaska',
'Aleutians West Census Area, Alaska',
'Anchorage Municipality, Alaska',
'Bethel Census Area, Alaska',
'Bristol Bay Borough, Alaska',
'Denali Borough, Alaska',
'Dillingham Census Area, Alaska',
'Fairbanks North Star Borough, Alaska',
'Haines Borough, Alaska',
'Hoonah-Angoon Census Area, Alaska',
'Juneau City and Borough, Alaska',
'Kenai Peninsula Borough, Alaska',
'Ketchikan Gateway Borough, Alaska',
'Kodiak Island Borough, Alaska',
'Kusilvak Census Area, Alaska',
'Lake and Peninsula Borough, Alaska',
'Matanuska-Susitna Borough, Alaska',
'Nome Census Area, Alaska',
'North Slope Borough, Alaska',
'Northwest Arctic Borough, Alaska',
'Petersburg Borough, Alaska',
'Prince of Wales-Hyder Census Area, Alaska',
'Sitka City and Borough, Alaska',
'Skagway Municipality, Alaska',
'Southeast Fairbanks Census Area, Alaska',
'Valdez-Cordova Census Area, Alaska',
'Wrangell City and Borough, Alaska',
'Yakutat City and Borough, Alaska',
'Yukon-Koyukuk Census Area, Alaska',
'Apache County, Arizona',
'Cochise County, Arizona',
'Coconino County, Arizona',
'Gila County, Arizona',
'Graham County, Arizona',
'Greenlee County, Arizona',
'La Paz County, Arizona',
'Maricopa County, Arizona',
'Mohave County, Arizona',
'Navajo County, Arizona',
'Pima County, Arizona',
'Pinal County, Arizona',
'Santa Cruz County, Arizona',
'Yavapai County, Arizona',
'Yuma County, Arizona',
'Arkansas County, Arkansas',
'Ashley County, Arkansas',
'Baxter County, Arkansas',
'Benton County, Arkansas',
'Boone County, Arkansas',
'Bradley County, Arkansas',
'Calhoun County, Arkansas',
'Carroll County, Arkansas',
'Chicot County, Arkansas',
'Clark County, Arkansas',
'Clay County, Arkansas',
'Cleburne County, Arkansas',
'Cleveland County, Arkansas',
'Columbia County, Arkansas',
'Conway County, Arkansas',
'Craighead County, Arkansas',
'Crawford County, Arkansas',
'Crittenden County, Arkansas',
'Cross County, Arkansas',
'Dallas County, Arkansas',
'Desha County, Arkansas',
'Drew County, Arkansas',
'Faulkner County, Arkansas',
'Franklin County, Arkansas',
'Fulton County, Arkansas',
'Garland County, Arkansas',
'Grant County, Arkansas',
'Greene County, Arkansas',
'Hempstead County, Arkansas',
'Hot Spring County, Arkansas',
'Howard County, Arkansas',
'Independence County, Arkansas',
'Izard County, Arkansas',
'Jackson County, Arkansas',
'Jefferson County, Arkansas',
'Johnson County, Arkansas',
'Lafayette County, Arkansas',
'Lawrence County, Arkansas',
'Lee County, Arkansas',
'Lincoln County, Arkansas',
'Little River County, Arkansas',
'Logan County, Arkansas',
'Lonoke County, Arkansas',
'Madison County, Arkansas',
'Marion County, Arkansas',
'Miller County, Arkansas',
'Mississippi County, Arkansas',
'Monroe County, Arkansas',
'Montgomery County, Arkansas',
'Nevada County, Arkansas',
'Newton County, Arkansas',
'Ouachita County, Arkansas',
'Perry County, Arkansas',
'Phillips County, Arkansas',
'Pike County, Arkansas',
'Poinsett County, Arkansas',
'Polk County, Arkansas',
'Pope County, Arkansas',
'Prairie County, Arkansas',
'Pulaski County, Arkansas',
'Randolph County, Arkansas',
'St. Francis County, Arkansas',
'Saline County, Arkansas',
'Scott County, Arkansas',
'Searcy County, Arkansas',
'Sebastian County, Arkansas',
'Sevier County, Arkansas',
'Sharp County, Arkansas',
'Stone County, Arkansas',
'Union County, Arkansas',
'Van Buren County, Arkansas',
'Washington County, Arkansas',
'White County, Arkansas',
'Woodruff County, Arkansas',
'Yell County, Arkansas',
'Alameda County, California',
'Alpine County, California',
'Amador County, California',
'Butte County, California',
'Calaveras County, California',
'Colusa County, California',
'Contra Costa County, California',
'Del Norte County, California',
'El Dorado County, California',
'Fresno County, California',
'Glenn County, California',
'Humboldt County, California',
'Imperial County, California',
'Inyo County, California',
'Kern County, California',
'Kings County, California',
'Lake County, California',
'Lassen County, California',
'Los Angeles County, California',
'Madera County, California',
'Marin County, California',
'Mariposa County, California',
'Mendocino County, California',
'Merced County, California',
'Modoc County, California',
'Mono County, California',
'Monterey County, California',
'Napa County, California',
'Nevada County, California',
'Orange County, California',
'Placer County, California',
'Plumas County, California',
'Riverside County, California',
'Sacramento County, California',
'San Benito County, California',
'San Bernardino County, California',
'San Diego County, California',
'San Francisco County, California',
'San Joaquin County, California',
'San Luis Obispo County, California',
'San Mateo County, California',
'Santa Barbara County, California',
'Santa Clara County, California',
'Santa Cruz County, California',
'Shasta County, California',
'Sierra County, California',
'Siskiyou County, California',
'Solano County, California',
'Sonoma County, California',
'Stanislaus County, California',
'Sutter County, California',
'Tehama County, California',
'Trinity County, California',
'Tulare County, California',
'Tuolumne County, California',
'Ventura County, California',
'Yolo County, California',
'Yuba County, California',
'Adams County, Colorado',
'Alamosa County, Colorado',
'Arapahoe County, Colorado',
'Archuleta County, Colorado',
'Baca County, Colorado',
'Bent County, Colorado',
'Boulder County, Colorado',
'Broomfield County, Colorado',
'Chaffee County, Colorado',
'Cheyenne County, Colorado',
'Clear Creek County, Colorado',
'Conejos County, Colorado',
'Costilla County, Colorado',
'Crowley County, Colorado',
'Custer County, Colorado',
'Delta County, Colorado',
'Denver County, Colorado',
'Dolores County, Colorado',
'Douglas County, Colorado',
'Eagle County, Colorado',
'Elbert County, Colorado',
'El Paso County, Colorado',
'Fremont County, Colorado',
'Garfield County, Colorado',
'Gilpin County, Colorado',
'Grand County, Colorado',
'Gunnison County, Colorado',
'Hinsdale County, Colorado',
'Huerfano County, Colorado',
'Jackson County, Colorado',
'Jefferson County, Colorado',
'Kiowa County, Colorado',
'Kit Carson County, Colorado',
'Lake County, Colorado',
'La Plata County, Colorado',
'Larimer County, Colorado',
'Las Animas County, Colorado',
'Lincoln County, Colorado',
'Logan County, Colorado',
'Mesa County, Colorado',
'Mineral County, Colorado',
'Moffat County, Colorado',
'Montezuma County, Colorado',
'Montrose County, Colorado',
'Morgan County, Colorado',
'Otero County, Colorado',
'Ouray County, Colorado',
'Park County, Colorado',
'Phillips County, Colorado',
'Pitkin County, Colorado',
'Prowers County, Colorado',
'Pueblo County, Colorado',
'Rio Blanco County, Colorado',
'Rio Grande County, Colorado',
'Routt County, Colorado',
'Saguache County, Colorado',
'San Juan County, Colorado',
'San Miguel County, Colorado',
'Sedgwick County, Colorado',
'Summit County, Colorado',
'Teller County, Colorado',
'Washington County, Colorado',
'Weld County, Colorado',
'Yuma County, Colorado',
'Fairfield County, Connecticut',
'Hartford County, Connecticut',
'Litchfield County, Connecticut',
'Middlesex County, Connecticut',
'New Haven County, Connecticut',
'New London County, Connecticut',
'Tolland County, Connecticut',
'Windham County, Connecticut',
'District of Columbia, District of Columbia',
'Kent County, Delaware',
'New Castle County, Delaware',
'Sussex County, Delaware',
'Alachua County, Florida',
'Baker County, Florida',
'Bay County, Florida',
'Bradford County, Florida',
'Brevard County, Florida',
'Broward County, Florida',
'Calhoun County, Florida',
'Charlotte County, Florida',
'Citrus County, Florida',
'Clay County, Florida',
'Collier County, Florida',
'Columbia County, Florida',
'DeSoto County, Florida',
'Dixie County, Florida',
'Duval County, Florida',
'Escambia County, Florida',
'Flagler County, Florida',
'Franklin County, Florida',
'Gadsden County, Florida',
'Gilchrist County, Florida',
'Glades County, Florida',
'Gulf County, Florida',
'Hamilton County, Florida',
'Hardee County, Florida',
'Hendry County, Florida',
'Hernando County, Florida',
'Highlands County, Florida',
'Hillsborough County, Florida',
'Holmes County, Florida',
'Indian River County, Florida',
'Jackson County, Florida',
'Jefferson County, Florida',
'Lafayette County, Florida',
'Lake County, Florida',
'Lee County, Florida',
'Leon County, Florida',
'Levy County, Florida',
'Liberty County, Florida',
'Madison County, Florida',
'Manatee County, Florida',
'Marion County, Florida',
'Martin County, Florida',
'Miami-Dade County, Florida',
'Monroe County, Florida',
'Nassau County, Florida',
'Okaloosa County, Florida',
'Okeechobee County, Florida',
'Orange County, Florida',
'Osceola County, Florida',
'Palm Beach County, Florida',
'Pasco County, Florida',
'Pinellas County, Florida',
'Polk County, Florida',
'Putnam County, Florida',
'St. Johns County, Florida',
'St. Lucie County, Florida',
'Santa Rosa County, Florida',
'Sarasota County, Florida',
'Seminole County, Florida',
'Sumter County, Florida',
'Suwannee County, Florida',
'Taylor County, Florida',
'Union County, Florida',
'Volusia County, Florida',
'Wakulla County, Florida',
'Walton County, Florida',
'Washington County, Florida',
'Appling County, Georgia',
'Atkinson County, Georgia',
'Bacon County, Georgia',
'Baker County, Georgia',
'Baldwin County, Georgia',
'Banks County, Georgia',
'Barrow County, Georgia',
'Bartow County, Georgia',
'Ben Hill County, Georgia',
'Berrien County, Georgia',
'Bibb County, Georgia',
'Bleckley County, Georgia',
'Brantley County, Georgia',
'Brooks County, | |
"GNX": {
"symbol": "GNX",
"name": "Genaro Network",
"type": "ERC20",
"address": "0x6EC8a24CaBdc339A06a172F8223ea557055aDAa5",
"ens_address": "",
"decimals": 9,
"website": "https://genaro.network",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/genaro-network",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/GenaroNetwork",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/GenaroNetwork",
"youtube": ""
}
},
"WIN": {
"symbol": "WIN",
"name": "WCOIN",
"type": "ERC20",
"address": "0x899338b84D25aC505a332aDCE7402d697D947494",
"ens_address": "",
"decimals": 8,
"website": "",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"SRN": {
"symbol": "SRN",
"address": "0x68d57c9a1C35f63E2c83eE8e49A64e9d70528D25",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "https://sirinlabs.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/@sirinlabs",
"chat": "",
"facebook": "https://www.facebook.com/SirinLabs/?fref=ts",
"forum": "https://bitcointalk.org/index.php?topic=2285838",
"github": "https://github.com/sirin-labs/crowdsale-smart-contract",
"gitter": "",
"instagram": "https://www.instagram.com/sirinlabs",
"linkedin": "https://www.linkedin.com/company-beta/10487115",
"reddit": "https://www.reddit.com/r/SirinLabs",
"slack": "",
"telegram": "https://t.me/sirinlabs",
"twitter": "https://twitter.com/SIRINLABS",
"youtube": ""
}
},
"$FFC": {
"symbol": "$FFC",
"address": "0x4E84E9e5fb0A972628Cf4568c403167EF1D40431",
"decimals": 18,
"name": "$Fluzcoin",
"ens_address": "",
"website": "https://fluzcoin.io/",
"logo": {
"src": "https://i.imgur.com/ar18ECx.png",
"width": "358",
"height": "373",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://fluzcoin.io/"
},
"social": {
"blog": "https://medium.com/@fluzcoin",
"chat": "",
"facebook": "https://www.facebook.com/fluzcoin/",
"forum": "https://bitcointalk.org/index.php?topic=3794410.0",
"github": "https://github.com/Fluzcoin",
"gitter": "",
"instagram": "https://www.instagram.com/fluzcoin.official/",
"linkedin": "https://www.linkedin.com/company/fluzcoin/",
"reddit": "https://www.reddit.com/r/fluzcoin/",
"slack": "",
"telegram": "https://t.me/Fluzcoin_Foundation",
"twitter": "https://twitter.com/fluzcoin",
"youtube": "https://www.youtube.com/channel/UCdK-HoZdmvmC-9bS5TeJT0g"
}
},
"CO2": {
"symbol": "CO2",
"address": "0xB4b1D2C217EC0776584CE08D3DD98F90EDedA44b",
"decimals": 18,
"name": "Climatecoin",
"ens_address": "",
"website": "https://climatecoin.io",
"logo": {
"src": "https://climatecoin.io/uploads/logosmall-1-42x42.png",
"width": "42",
"height": "42",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://climatecoin.io"
},
"social": {
"blog": "https://medium.com/@Climatecoin",
"chat": "https://t.me/joinchat/Fy8RMAvg7dTdD0ZhOu1a1w",
"facebook": "https://www.facebook.com/climatecoinofficial",
"forum": "https://bitcointalk.org/index.php?topic=2188692.0",
"github": "https://github.com/climatecoinio",
"gitter": "",
"instagram": "https://www.instagram.com/climatecoin",
"linkedin": "https://www.linkedin.com/company/11229823",
"reddit": "https://www.reddit.com/user/CLIMATECOIN",
"slack": "https://climatecoinofficial.slack.com",
"telegram": "https://t.me/climatecoinofficial",
"twitter": "https://twitter.com/infoclimatecoin",
"youtube": "https://www.youtube.com/channel/UCa5Q35bRxMZDBcEAEgfisKA"
}
},
"PMNT": {
"symbol": "PMNT",
"name": "Paymon",
"type": "ERC20",
"address": "0x81b4D08645DA11374a03749AB170836E4e539767",
"ens_address": "",
"decimals": 9,
"website": "https://paymon.org",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@Paymon_official",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/paymonplatform",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/Paymon_official",
"youtube": ""
}
},
"1SG": {
"symbol": "1SG",
"address": "0x0F72714B35a366285Df85886A2eE174601292A17",
"decimals": 18,
"name": "1SG",
"ens_address": "",
"website": "https://www.1.sg",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/@support_34903",
"twitter": "https://twitter.com/1SG_2018",
"telegram": "https://t.me/En_1SG",
"reddit": "https://www.reddit.com/r/1SG_/",
"github": "https://github.com/MarsBlockchain/1sg-contract",
"medium": "https://medium.com/@support_34903"
}
},
"ZXC": {
"symbol": "ZXC",
"name": "0xcert Protocol Token",
"type": "ERC20",
"address": "0x83e2BE8d114F9661221384B3a50d24B96a5653F5",
"ens_address": "",
"decimals": 18,
"website": "https://0xcert.org",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/0xcert",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/0xcert",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/0xcert",
"youtube": ""
}
},
"XRL": {
"symbol": "XRL",
"address": "0xB24754bE79281553dc1adC160ddF5Cd9b74361a4",
"decimals": 9,
"name": "XRL",
"ens_address": "",
"website": "https://rialto.ai",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"LOVE": {
"symbol": "LOVE",
"address": "0x5a276Aeb77bCfDAc8Ac6f31BBC7416AE1A85eEF2",
"decimals": 0,
"name": "Love",
"social": {
"twitter": "https://twitter.com/GNSPS",
"github": "https://github.com/GNSPS"
}
},
"PKG": {
"symbol": "PKG",
"name": "<NAME>",
"type": "ERC20",
"address": "0x02F2D4a04E6E01aCE88bD2Cd632875543b2eF577",
"ens_address": "",
"decimals": 18,
"website": "http://pkgtoken.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/pokemongopkg",
"youtube": ""
}
},
"YEE": {
"symbol": "YEE",
"name": "Yee Token",
"type": "ERC20",
"address": "0x922105fAd8153F516bCfB829f56DC097a0E1D705",
"ens_address": "",
"decimals": 18,
"website": "http://www.yeefoundation.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/YeeToken",
"youtube": ""
}
},
"FIH": {
"symbol": "FIH",
"name": "<NAME>",
"type": "ERC20",
"address": "0xdfC3e857c8cCEA7657E0ed98AB92e048e38deE0f",
"ens_address": "",
"decimals": 18,
"website": "https://www.fidelityhouse.io",
"logo": {
"src": "https://www.fidelityhouse.io/assets/logo_fidelityhouse-28x28.png",
"width": "28",
"height": "28",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/fidelityhouse",
"chat": "",
"facebook": "https://www.facebook.com/FidelityHouseInternational",
"forum": "",
"github": "https://github.com/FidelityHouseInternational",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/fidelityhouse-international",
"reddit": "https://www.reddit.com/user/FidelityHouse",
"slack": "",
"telegram": "https://t.me/FidelityHouseInternational",
"twitter": "https://twitter.com/Fidelity_House",
"youtube": ""
}
},
"ERO": {
"symbol": "ERO",
"name": "Eroscoin",
"type": "ERC20",
"address": "0x74CEDa77281b339142A36817Fa5F9E29412bAb85",
"ens_address": "",
"decimals": 8,
"website": "https://eroscoin.org",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://blog.eroscoin.org/",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/EROSCOIN",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/eroscoinnews",
"youtube": ""
}
},
"PIX": {
"symbol": "PIX",
"address": "0x8eFFd494eB698cc399AF6231fCcd39E08fd20B15",
"decimals": 0,
"name": "Lampix",
"ens_address": "",
"website": "https://www.lampix.co",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/Lampix.co",
"forum": "https://bitcointalk.org/index.php?topic=2044884.0",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/Lampix",
"slack": "https://lampix-invite.herokuapp.com",
"telegram": "",
"twitter": "https://twitter.com/lampix_co",
"youtube": ""
}
},
"LPT": {
"symbol": "LPT",
"address": "0x58b6A8A3302369DAEc383334672404Ee733aB239",
"decimals": 18,
"name": "Livepeer Token",
"ens_address": "",
"website": "https://livepeer.org/",
"logo": {
"src": "https://livepeer-dev.s3.amazonaws.com/logos/lpt_transparent_200.png",
"width": "200",
"height": "200",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://forum.livepeer.org"
},
"social": {
"blog": "https://medium.com/livepeer-blog",
"chat": "https://discord.gg/RR4kFAh",
"facebook": "",
"forum": "https://forum.livepeer.org",
"github": "https://github.com/livepeer",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/livepeer/",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/LivepeerOrg",
"youtube": ""
}
},
"GXVC": {
"symbol": "GXVC",
"address": "0x22F0AF8D78851b72EE799e05F54A77001586B18A",
"decimals": 10,
"name": "<NAME>",
"ens_address": "",
"website": "https://genevieveco.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/GxC17Genevieve/GXVC",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"BZ": {
"symbol": "BZ",
"name": "<NAME>",
"type": "ERC20",
"address": "0x4375E7aD8A01B8eC3Ed041399f62D9Cd120e0063",
"ens_address": "",
"decimals": 18,
"website": "https://www.bitz.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@Bit_z.com",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"CNB": {
"symbol": "CNB",
"address": "0xEBf2F9E8De960f64ec0fDCDa6Cb282423133347B",
"decimals": 8,
"name": "Canabio",
"ens_address": "",
"website": "https://canabio.net",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://canabio.net"
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/CanabioToken",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/CanabioProject",
"youtube": ""
}
},
"HAND": {
"symbol": "HAND",
"name": "ShowHand",
"type": "ERC20",
"address": "0x48C1B2f3eFA85fbafb2ab951bF4Ba860a08cdBB7",
"ens_address": "",
"decimals": 0,
"website": "https://www.showhand.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/showhandio",
"youtube": ""
}
},
"VUU": {
"symbol": "VUU",
"address": "0x4b96bf1feF93A216914fc843D81207A027ce52b3",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "https://www.vuulr.com/",
"logo": {
"src": "https://assets.vuulr.com/Vuulr-Logo_100x100T.png",
"width": "100",
"height": "100",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://support.vuulr.com/"
},
"social": {
"blog": "https://medium.com/vuulr",
"chat": "",
"facebook": "https://www.facebook.com/Vuulr",
"forum": "",
"github": "https://github.com/Vuulr",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/13425836",
"reddit": "https://www.reddit.com/r/Vuulr/",
"slack": "",
"telegram": "https://t.me/vuulr",
"twitter": "https://twitter.com/vuulr_official",
"youtube": "https://www.youtube.com/channel/UCyf7nAFBdMeIdcb3jSHhGbQ"
}
},
"KZN": {
"symbol": "KZN",
"address": "0x9541FD8B9b5FA97381783783CeBF2F5fA793C262",
"decimals": 8,
"name": "KaizenCoin",
"ens_address": | |
held object (pickup_target) on the place_target
return dict(action="PutByType", objectType=self._last_to_interact_object_pose["objectType"])
return dict(action="CloseByType", objectType=self._last_to_interact_object_pose["objectType"])
elif self._last_to_interact_object_pose["objectId"] == place_target["objectId"]:
# Put the held object (pickup_target) on the place_target
return dict(action="PutByType", objectType=self._last_to_interact_object_pose["objectType"])
else:
raise RuntimeError(" HOW??? ")
return dict(action=expert_nav_action)
def _generate_and_record_expert_action(self):
    """Compute the next greedy expert action and append its index (into
    ``self.task.action_names()``) to ``self.expert_action_list``; ``None``
    is appended when the proposed action is not available on this task."""
    steps_taken = self.task.num_steps_taken()
    num_recorded = len(self.expert_action_list)

    # Guard against being asked twice for the same step.
    if steps_taken == num_recorded + 1:
        get_logger().warning(
            f"Already generated the expert action at step {steps_taken}"
        )
        return

    assert steps_taken == num_recorded, f"{steps_taken} != {num_recorded}"

    action_dict = self._generate_expert_action_dict()
    action_str = stringcase.snakecase(action_dict["action"])

    available_actions = self.task.action_names()
    # Some actions are specialized per object type (e.g. "pickup_mug").
    if action_str not in available_actions and "objectType" in action_dict:
        obj_type = stringcase.snakecase(action_dict["objectType"])
        action_str = f"{action_str}_{obj_type}"

    try:
        self.expert_action_list.append(available_actions.index(action_str))
    except ValueError:
        get_logger().error(
            f"{action_str} is not a valid action for the given task."
        )
        self.expert_action_list.append(None)
class SubTaskExpert:
def __init__(
    self,
    task: "HomeServiceBaseTask",
    shortest_path_navigator: ShortestPathNavigatorTHOR,
):
    """Greedy subtask-level expert for a HomeService task.

    Must be constructed before the task has taken any step; construction
    immediately resets the navigator and records the expert action for
    step 0 via :meth:`update`.
    """
    self.task = task
    self.shortest_path_navigator = shortest_path_navigator

    # The expert has to observe the episode from its very first step.
    assert self.task.num_steps_taken() == 0

    # Per-step expert action indices (into task.action_names()).
    self.expert_action_list: List[int] = []
    # Scripted look-around used to identify the current room type.
    self.goto_action_list: List[str] = ["RotateRight"] * 4
    self.check_room_type_done: bool = False
    self.require_check_room_type: bool = True
    self._last_to_interact_object_pose: Optional[Dict[str, Any]] = None
    self.map_oracle = True

    self.shortest_path_navigator.on_reset()
    self.update(action_taken=None, action_success=None)
@property
def expert_action(self) -> int:
    """Index (into ``self.task.action_names()``) of the expert action for
    the current step.

    Only valid immediately after :meth:`update` has run for this step —
    the assertion below enforces that the action list is exactly one
    entry ahead of the number of steps taken.
    """
    assert self.task.num_steps_taken() == len(self.expert_action_list) - 1, (
        f"self.task.num_steps_taken(): {self.task.num_steps_taken()} is not equal to \
len(self.expert_action_list) - 1: {len(self.expert_action_list) - 1}"
    )
    return self.expert_action_list[-1]
@property
def goto_action(self) -> str:
    """Pop and return the next queued look-around/goto action, or
    ``None`` once the queue is exhausted.

    NOTE: reading this property mutates ``self.goto_action_list``.
    """
    return self.goto_action_list.pop() if self.goto_action_list else None
def update(
    self,
    action_taken: Optional[int],
    action_success: Optional[bool],
):
    """Ingest the result of the last executed action, repair the expert's
    plan where necessary, and record the expert action for the new step.

    :param action_taken: Index of the action the agent just executed, or
        ``None`` on the very first call (at task reset).
    :param action_success: Whether that action succeeded; required
        whenever ``action_taken`` is given.

    On failure: navigation failures mark the attempted graph edge as
    blocked; failed pickup/put/stance changes invalidate the agent pose
    used for the interaction and roll the subtask planner back to
    "Navigate".  On success: unexpected successes (actions that do not
    match the current subtask) roll the planner back to the appropriate
    subtask.  Always finishes by generating and recording the next
    expert action.

    NOTE(review): this body was recovered from indentation-mangled
    source; the nesting reflects the apparent control flow — confirm
    against the original repository.
    """
    if action_taken is not None:
        assert action_success is not None
        action_names = self.task.action_names()
        last_expert_action = self.expert_action_list[-1]
        agent_took_expert_action = action_taken == last_expert_action
        action_str = action_names[action_taken]
        # Classify the executed action from its snake_case name.
        was_nav_action = any(k in action_str for k in ['move', 'rotate', 'look'])
        was_goto_action = 'goto' in action_str
        if self.task.current_subtask[0] == "Done":
            # Plan already finished: no bookkeeping left, just record.
            return self._generate_and_record_expert_action()
        # if self.task.current_subtask[0] == "Pickup":
        #     if self.task.env.scene == self.task.env.current_task_spec.target_scene:
        #         with include_object_data(self.task.env.controller):
        #             md = self.task.env.last_event.metadata
        #             cur_subtask_target = next(
        #                 (o for o in md["objects"] if o["objectType"] == self.task.current_subtask[1]), None
        #             )
        #             print(f'cur_subtask_target in AFTER navigate success in update()')
        #             print(f'visible: {cur_subtask_target["visible"]} | distance: {cur_subtask_target["distance"]}')
        # elif self.task.current_subtask[0] == "Put":
        #     if self.task.env.scene == self.task.env.current_task_spec.target_scene:
        #         with include_object_data(self.task.env.controller):
        #             md = self.task.env.last_event.metadata
        #             cur_subtask_place = next(
        #                 (o for o in md["objects"] if o["objectType"] == self.task.current_subtask[2]), None
        #             )
        #             print(f'cur_subtask_place in AFTER navigate success in update()')
        #             print(f'visible: {cur_subtask_place["visible"]} | distance: {cur_subtask_place["distance"]}')
        if not action_success:
            if was_nav_action:
                # Remember the failed move so the shortest-path planner
                # routes around this graph edge from now on.
                self.shortest_path_navigator.update_graph_with_failed_action(
                    stringcase.pascalcase(action_str)
                )
            # elif (
            #     "pickup" in action_str
            #     or "open_by_type" in action_str
            #     or "close_by_type" in action_str
            #     or "put_by_type" in action_str
            # ) and action_taken == last_expert_action:
            #     assert self._last_to_interact_object_pose is not None
            #     self._invalidate_interactable_loc_for_pose(
            #         location=self.task.env.get_agent_location(),
            #         obj_pose=self._last_to_interact_object_pose,
            #     )
            #     self.task.rollback_subtask()
            elif was_goto_action:
                # Reset Fail?
                raise RuntimeError
            elif (
                action_str == "pickup"
            ) and action_taken == last_expert_action:
                if self.task.env.scene == self.task.env.current_task_spec.target_scene:
                    assert self._last_to_interact_object_pose is not None
                    # Pickup failed from this pose: stop treating the current
                    # agent location as interactable for the target object.
                    self._invalidate_interactable_loc_for_pose(
                        location=self.task.env.get_agent_location(),
                        obj_pose=self._last_to_interact_object_pose,
                    )
                    # After invalidate current agent location, re-navigate to the object
                    if self.task.current_subtask[0] != "Navigate":
                        while self.task.current_subtask[0] != "Navigate":
                            self.task.rollback_subtask()
            elif (
                action_str == "put"
            ) and action_taken == last_expert_action:
                if self.task.env.scene == self.task.env.current_task_spec.target_scene:
                    assert self._last_to_interact_object_pose is not None
                    self._invalidate_interactable_loc_for_pose(
                        location=self.task.env.get_agent_location(),
                        obj_pose=self._last_to_interact_object_pose,
                    )
                    # After invalidate current agent location, re-navigate to the object
                    if self.task.current_subtask[0] != "Navigate":
                        # print(" > rollback subtask to navigate")
                        while self.task.current_subtask[0] != "Navigate":
                            self.task.rollback_subtask()
                        # print(f'updated subtask {self.task.current_subtask}')
            elif (
                ("crouch" in action_str or "stand" in action_str)
                and action_taken == last_expert_action
            ):
                if self.task.env.scene == self.task.env.current_task_spec.target_scene:
                    assert self._last_to_interact_object_pose is not None
                    # The stance change failed, so invalidate the pose with the
                    # *opposite* standing state — the stance we tried to reach.
                    agent_loc = self.task.env.get_agent_location()
                    agent_loc["standing"] = not agent_loc["standing"]
                    self._invalidate_interactable_loc_for_pose(
                        location=agent_loc,
                        obj_pose=self._last_to_interact_object_pose,
                    )
        else:
            if not was_nav_action:
                # if (
                #     "pickup" in action_str
                #     or "open_by_type" in action_str
                #     or "close_by_type" in action_str
                #     or "put_by_type" in action_str
                # ):
                #     self._last_to_interact_object_pose = None
                if action_str == "pickup":
                    # if (
                    #     agent_took_expert_action or
                    #     self.task.planned_task[self.task._subtask_step - 1][0] == "Pickup"
                    # ):
                    if agent_took_expert_action:
                        # Sanity-check that the held object matches the one the
                        # expert intended to pick up.
                        held_object = self.task.env.held_object
                        if held_object is None:
                            raise RuntimeError(
                                f"Impossible..."
                            )
                        elif held_object["objectType"] != self._last_to_interact_object_pose["objectType"]:
                            raise RuntimeError(
                                f"Impossible......"
                            )
                        else:
                            self._last_to_interact_object_pose = None
                    elif self.task.current_subtask[0] != "Goto":
                        # unintended pickup action succeeded
                        while self.task.current_subtask[0] != "Goto":
                            self.task.rollback_subtask()
                elif action_str == "put":
                    # if (
                    #     agent_took_expert_action or
                    #     self.task.planned_task[self.task._subtask_step - 1][0] == "Put"
                    # ):
                    if agent_took_expert_action:
                        assert self.task.env.held_object is None
                        self._last_to_interact_object_pose = None
                    elif self.task.current_subtask[0] != "Goto":
                        while self.task.current_subtask[0] != "Goto":
                            self.task.rollback_subtask()
                elif was_goto_action:
                    if self.task.current_subtask[0] != "Goto":
                        # If current subtask is not GOTO, rollback subtasks to GOTO
                        while self.task.current_subtask[0] != "Goto":
                            self.task.rollback_subtask()
                    # A scene change invalidates the room-type check: redo the
                    # four-rotation look-around.
                    self.require_check_room_type = True
                    self.goto_action_list = ["RotateRight" for _ in range(4)]
                elif "crouch" in action_str or "stand" in action_str:
                    # took crouch/stand action in pickup/put subtask
                    # should navigate to target again
                    if self.task.current_subtask[0] in ("Pickup", "Put"):
                        while self.task.current_subtask[0] != "Navigate":
                            self.task.rollback_subtask()
            else:
                if self.task.current_subtask[0] in ("Pickup", "Put"):
                    # took nav action in pickup/put subtask
                    # should navigate to target again
                    while self.task.current_subtask[0] != "Navigate":
                        self.task.rollback_subtask()
    self._generate_and_record_expert_action()
    def _get_interactable_positions(
        self,
        obj: Dict[str, Any]
    ):
        """Return the positions from which ``obj`` can be interacted with.

        In the map-oracle setting the result comes from the environment's
        ``_interactable_positions_cache``; ``[]`` is returned when ``obj``
        is None.  The non-oracle branch is an unimplemented TODO and
        implicitly returns None.
        """
        if self.map_oracle:
            if obj is not None:
                return self.task.env._interactable_positions_cache.get(
                    scene_name=self.task.env.scene, obj=obj, controller=self.task.env.controller,
                    # max_distance=1.0
                )
            else:
                return []
        else:
            #TODO
            pass
def _expert_goto_action_to_scene_type(
self,
scene_type: str
) -> Optional[str]:
# if len(self.goto_action_list) == 0:
# if not self.task._1st_check:
# for _ in range(4):
# self.goto_action_list.append("RotateRight")
# self.task._1st_check = True
# else:
# if not self.task._took_goto_action:
# self.goto_action_list.append(f"Goto{scene_type}")
# elif not self.task._2nd_check:
# for _ in range(4):
# self.goto_action_list.append("RotateRight")
# self.task._2nd_check = True
# goto_action = self.goto_action
# if len(self.goto_action_list) == 0:
# if self.task._2nd_check:
# self.task._check_goto_done = True
# elif self.task._1st_check and (
# SCENE_TO_SCENE_TYPE[self.task.env.scene] == scene_type
# ):
# self.task._check_goto_done = True
if len(self.goto_action_list) > 0:
self.check_room_type_done = False
goto_action = self.goto_action
if len(self.goto_action_list) == 0:
self.check_room_type_done = True
self.require_check_room_type = False
else:
goto_action = f"Goto{scene_type}"
self.require_check_room_type = True
return goto_action
    def _expert_nav_action_to_obj(
        self,
        obj: Dict[str, Any]
    ) -> Optional[str]:
        """Next navigation action to bring the agent to an interactable pose for ``obj``.

        Returns:
            "Fail" when ``obj`` has no interactable positions;
            a shortest-path action when the agent's grid key is not a target key
            (or a random nav action when no graph path exists);
            "Crouch"/"Stand" when only the standing state differs from a
            matching pose; "Pass" when already at a matching pose;
            None when at a target key but no interactable pose matches exactly.
        """
        env: HomeServiceTHOREnvironment = self.task.env
        agent_loc = env.get_agent_location()
        shortest_path_navigator = self.shortest_path_navigator
        interactable_positions = self._get_interactable_positions(obj)
        # Discretise each interactable pose into a navigation-graph key.
        target_keys = [
            shortest_path_navigator.get_key(loc) for loc in interactable_positions
        ]
        if len(target_keys) == 0:
            # print(f'No target keys')
            return "Fail"
        source_state_key = shortest_path_navigator.get_key(env.get_agent_location())
        action = "Pass"
        if source_state_key not in target_keys:
            try:
                action = shortest_path_navigator.shortest_path_next_action_multi_target(
                    source_state_key=source_state_key, goal_state_keys=target_keys,
                )
            except nx.NetworkXNoPath as _:
                # No path on the navigation graph: fall back to a random
                # navigation action rather than failing outright.
                # print(f'No path exists from {source_state_key} to {target_keys}')
                rand_nav_action = random.choice(["MoveAhead", "RotateRight", "RotateLeft", "LookUp", "LookDown"])
                # print(f'take random nav action... {rand_nav_action}')
                # import pdb; pdb.set_trace()
                return rand_nav_action
        if action != "Pass":
            return action
        else:
            # Agent's key already matches a target key: compare exact poses.
            agent_x = agent_loc["x"]
            agent_z = agent_loc["z"]
            for gdl in interactable_positions:
                # Manhattan distance in the xz-plane, rounded to centimetres.
                d = round(abs(agent_x - gdl["x"]) + abs(agent_z - gdl["z"]), 2)
                if d <= 1e-2:
                    if _are_agent_locations_equal(agent_loc, gdl, ignore_standing=True):
                        # print("????")
                        # print(f'agent_loc: {agent_loc} | gdl: {gdl}')
                        if agent_loc["standing"] != gdl["standing"]:
                            # Pose matches except for the standing state.
                            return "Crouch" if agent_loc["standing"] else "Stand"
                        else:
                            return "Pass"
        return None
    def _expert_nav_action_to_position(self, position) -> Optional[str]:
        """Get the shortest path navigational action towards the certain position

        The first time a position is queried, a TargetCircle object is
        spawned and teleported onto it so that navigation can be delegated
        to ``_expert_nav_action_to_obj``; the circle's objectId is memoised
        in ``shortest_path_navigator._position_to_object_id``.

        position: np.ndarray of xyz, or dict with 'x'/'y'/'z' keys.
        NOTE(review): any other type leaves ``position_key`` unbound and
        raises NameError — confirm callers only pass these two types.
        """
        env: HomeServiceTHOREnvironment = self.task.env
        shortest_path_navigator = self.shortest_path_navigator
        if isinstance(position, np.ndarray):
            position_key = (round(position[0], 2), round(position[1], 2), round(position[2], 2))
            position = dict(
                x=position[0],
                y=position[1],
                z=position[2],
            )
        elif isinstance(position, dict):
            position_key = (round(position['x'], 2), round(position['y'], 2), round(position['z'], 2))
        if position_key not in shortest_path_navigator._position_to_object_id:
            # Spawn the TargetCircle and place it on the position
            event = env.controller.step("SpawnTargetCircle", anywhere=True)
            assert event.metadata["lastActionSuccess"]
            id_target_circle = event.metadata["actionReturn"]
            event = env.controller.step(
                "TeleportObject",
                objectId=id_target_circle,
                position=position,
                rotation=0,
                forceAction=True
            )
            assert event.metadata["lastActionSuccess"]
            # To change objectId for former target circle
            # (spawn a second circle, then immediately remove it again)
            event = env.controller.step("SpawnTargetCircle", anywhere=True)
            assert event.metadata["lastActionSuccess"]
            id_target_circle = event.metadata["actionReturn"]
            event = env.controller.step("RemoveFromScene", objectId=id_target_circle)
            assert event.metadata["lastActionSuccess"]
            def distance(p1, p2):
                # Euclidean distance between two xyz dicts, rounded to 2 dp.
                d = 0
                for c in ("x", "y", "z"):
                    d += (p1[c] - p2[c]) ** 2
                return round(math.sqrt(d), 2)
            # check
            # Locate the circle that actually landed on the requested position.
            target_circle_after_teleport = next(
                (
                    obj for obj in env.last_event.metadata['objects']
                    if obj['objectType'] == "TargetCircle" and distance(obj["position"], position) < 0.05
                ), None
            )
            assert target_circle_after_teleport is not None
            shortest_path_navigator._position_to_object_id[position_key] = target_circle_after_teleport['objectId']
        object_id = shortest_path_navigator._position_to_object_id[position_key]
        obj = next(
            obj for obj in env.last_event.metadata['objects']
            if obj['objectId'] == object_id
        )
        return self._expert_nav_action_to_obj(obj=obj)
def _invalidate_interactable_loc_for_pose(
self,
location: Dict[str, Any],
obj_pose: Dict[str, Any]
) -> bool:
"""Invalidate a given location in the `interactable_positions_cache` as
we tried to interact but couldn't."""
env: HomeServiceTHOREnvironment = self.task.env
if obj_pose is None:
return False
| |
check3 += 1
continue
elif 'DF-MP2 Energies' in line:
check4 = 1
elif '@DF-RHF Final Energy' in line:
count2 += 1
if chk == 0:
if count2 == 2:
count2 = 0
check5 = 1
elif chk == 1:
if not param.coarse_level == 2:
check5 = 1
else:
if count2 == 2:
check5 = 1
elif chk == 2:
if count2 == 1:
check5 = 1
if check2 == 2:
if count1+1 > len(f1.coord):
check2 = 0
count1 = 0
continue
else:
s = line.split()
s1 = [float(xx) for xx in s]
Grad.addc(f1.coord[count1].n,s1[:3])
count1 += 1
elif check3 == 2:
if count1+1 > len(f1.coord):
check3 = 0
count1 = 0
continue
else:
s = line.split()
s1 = [float(xx) for xx in s]
Grad.addf(f1.coord[count1].n,s1[:3])
count1 += 1
elif check4 == 1:
if 'Total Energy' in line:
s = line.split()
energymp2p = float(s[-2])
check4 = 0
elif check5 == 1:
if 'Total Energy' in line:
s = line.split()
energyscfp = float(s[-1])
check5 = 0
psirr.close()
if chk == 0:
return(energyscfp,energymp2p,Grad.c,Grad.f)
elif chk == 1 or chk == 2:
return(energyscfp,Grad.c)
def s_psi(file1):
    """Parse a Psi4 output file for the SCF gradient and final RHF energy.

    Gradient rows (three floats per line) are read once the
    'START SCF GRADIENT' marker has been seen twice, until a
    'STOP SCF GRADIENT' line appears; each row is accumulated with
    Grad.adds.  The SCF energy is the last field of the 'Total Energy'
    line following an '@DF-RHF Final Energy' marker.

    :param file1: path of the Psi4 output file to read
    :return: tuple (Grad.s, energyscfp)

    NOTE(review): assumes the START marker appears twice before the data
    block, and raises NameError if no energy line is present -- confirm
    against the templates that generate these outputs.
    """
    check1 = 0  # unused; kept for symmetry with sibling parsers
    check2 = 0  # counts 'START SCF GRADIENT' markers; rows parsed when == 2
    check3 = 0  # unused
    check4 = 0  # unused
    check5 = 0  # set after '@DF-RHF Final Energy'; awaiting 'Total Energy'
    count1 = 0  # number of gradient rows read
    count2 = 0  # unused
    Grad = Gradobj()
    Xcount = 0  # unused
    psirr = open(file1,'r')
    for line in psirr.readlines():
        if 'START SCF GRADIENT' in line:
            check2 += 1
            continue
        elif '@DF-RHF Final Energy' in line:
            check5 = 1
        if check2 == 2:
            if not 'STOP SCF GRADIENT' in line:
                count1 += 1
                s = line.split()
                s1 = [float(xx) for xx in s]
                Grad.adds(0,s1[:3])
            else:
                check2 = 0
        elif check5 == 1:
            if 'Total Energy' in line:
                s = line.split()
                energyscfp = float(s[-1])
                check5 = 0
    psirr.close()
    return(Grad.s,energyscfp)
def smp2_psi(file1):
    """Parse a Psi4 output file for the MP2 gradient and DF-MP2 total energy.

    Gradient rows (three floats per line) are read once the
    'START MP2 GRADIENT' marker has been seen twice, until a
    'STOP MP2 GRADIENT' line; each row becomes a Gmethod entry numbered
    from 1.  The energy is the second-to-last field of the 'Total Energy'
    line following a 'DF-MP2 Energies' header.

    :param file1: path of the Psi4 output file to read
    :return: tuple (Grad, energyscfp) -- list of Gmethod entries and the
        MP2 total energy (the variable name 'energyscfp' is historical)
    """
    check1 = 0  # unused; kept for symmetry with sibling parsers
    check2 = 0  # counts 'START MP2 GRADIENT' markers; rows parsed when == 2
    check3 = 0  # unused
    check4 = 0  # unused
    check5 = 0  # set after 'DF-MP2 Energies'; awaiting 'Total Energy'
    count1 = 0  # number of gradient rows read
    count2 = 0  # unused
    Grad = []
    Xcount = 0  # unused
    psirr = open(file1,'r')
    cout = 1  # 1-based index handed to each Gmethod entry
    for line in psirr.readlines():
        if 'START MP2 GRADIENT' in line:
            check2 += 1
            continue
        elif 'DF-MP2 Energies' in line:
            check5 = 1
        if check2 == 2:
            if not 'STOP MP2 GRADIENT' in line:
                count1 += 1
                s = line.split()
                s1 = [float(xx) for xx in s]
                Grad.append(Gmethod(cout,s1[:3]))
                cout += 1
            else:
                check2 = 0
        elif check5 == 1:
            if 'Total Energy' in line:
                s = line.split()
                energyscfp = float(s[-2])
                check5 = 0
    psirr.close()
    return(Grad,energyscfp)
# ---> Molpro
def nb_mb_mol(file1,file2,chck0):
    """Parse a combined Molpro output for fragment energies and gradients.

    Scans ``file1`` (an already-open file object; it is closed on return)
    for gradient tables bracketed by START/STOP marker lines and for
    energy lines, accumulating per-atom gradients on ``Grad``:
      - dense (or coarse, when chck0 == 2) fragment gradients -> Grad.addc
      - main fragment gradients -> Grad.addf
    Gradient rows follow an 'Atom dE/dx dE/dy dE/dz' header; blank lines
    are skipped and a table ends after len(file2) rows.

    :param file1: open handle on the Molpro output
    :param file2: coordinate list; file2[i].n labels atom i
    :param chck0: mode flag.  0 -> return SCF and MP2 energies plus both
        gradient sets; 1 or 2 -> return the SCF energy and the
        dense/coarse gradient.  Which '!RHF STATE 1.1 Energy' occurrence
        is kept depends on chck0 and the global param.coarse_level.
    :return: (energyscfp, energymp2p, Grad.c, Grad.f) when chck0 == 0,
        (energyscfp, Grad.c) when chck0 in (1, 2)
    """
    check1 = 0  # >0 while inside the dense/coarse fragment section
    check2 = 0  # >0 while inside the main fragment section
    check3 = 0  # set when the current line carries the MP2 total energy
    check4 = 0  # set when the current line carries the RHF energy to keep
    check5 = 0  # unused
    count1 = 0  # gradient rows consumed for the current table
    count2 = 0  # number of '!RHF STATE 1.1 Energy' lines seen so far
    count3 = 0  # nonzero once a gradient table header has been passed
    Grad = Gradobj()
    scfbas = 'START DENSE FRAGMENT'
    scfbas1 = 'STOP DENSE FRAGMENT'
    if chck0 == 2:
        scfbas = 'START COARSE FRAGMENT'
        scfbas1 = 'STOP COARSE FRAGMENT'
    for line in file1.readlines():
        if scfbas in line:
            check1 += 1; continue
        elif scfbas1 in line:
            check1 -= 1; continue
        elif 'START MAIN FRAGMENT' in line:
            check2 += 1; continue
        elif 'STOP MAIN FRAGMENT' in line:
            check2 -= 1; continue
        elif '!MP2 total energy' in line:
            check3 = 1
        elif '!RHF STATE 1.1 Energy' in line:
            count2 += 1
            if chck0 == 0:
                # counter reset each time -> every occurrence qualifies,
                # so the last one in the file wins
                if count2 == 1:
                    count2 = 0
                    check4 = 1
            elif chck0 == 1:
                if not param.coarse_level == 2:
                    check4 = 1
                else:
                    # coarse_level == 2: keep only the second occurrence
                    if count2 == 2:
                        check4 = 1
            elif chck0 == 2:
                # keep only the first occurrence
                if count2 == 1:
                    check4 = 1
        if check3 == 1:
            # the flag was set by this same line; energy is its last field
            s = line.split()
            energymp2p = float(s[-1])
            check3 = 0
        elif check4 == 1:
            s = line.split()
            energyscfp = float(s[-1])
            check4 = 0
        if check1 == 1:
            if 'Atom dE/dx dE/dy dE/dz' in line:
                count3 = 1
                continue
            elif not count3:
                # between the START marker and the table header
                continue
            if (count3 and line.isspace()):
                continue
            if count1+1 > len(file2):
                # table fully consumed; reset state for the next section
                check1 = 0
                count1 = 0
                count3 = 0
                continue
            else:
                s = line.split()
                s1 = [float(xx) for xx in s]
                Grad.addc(file2[count1].n,s1[1:])
                count1 += 1
        elif check2 == 1:
            if 'Atom dE/dx dE/dy dE/dz' in line:
                count3 = 1
                continue
            elif not count3:
                continue
            if (count3 and line.isspace()):
                continue
            if count1+1 > len(file2):
                # NOTE(review): count3 is not reset here, unlike the
                # dense/coarse branch above -- confirm this is intended
                check2 = 0
                count1 = 0
                continue
            else:
                s = line.split()
                s1 = [float(xx) for xx in s]
                Grad.addf(file2[count1].n,s1[1:])
                count1 += 1
    file1.close()
    if chck0 == 0:
        return(energyscfp,energymp2p,Grad.c,Grad.f)
    elif chck0 == 1 or chck0 == 2:
        return(energyscfp,Grad.c)
def b_mol(f1,chk):
    """Parse the Molpro output '<f1.name>.out' for energies and gradients.

    Same state machine as nb_mb_mol, but the output file is opened from
    ``f1.name`` and atom labels/counts come from ``f1.coord``:
      - dense (or coarse, when chk == 2) fragment gradients -> Grad.addc
      - main fragment gradients -> Grad.addf

    :param f1: object with .name (output file prefix) and .coord
        (list whose entries expose an atom label .n)
    :param chk: mode flag.  0 -> return SCF and MP2 energies plus both
        gradient sets; 1 or 2 -> return the SCF energy and the
        dense/coarse gradient.  Which '!RHF STATE 1.1 Energy' occurrence
        is kept depends on chk and the global param.coarse_level.
    :return: (energyscfp, energymp2p, Grad.c, Grad.f) when chk == 0,
        (energyscfp, Grad.c) when chk in (1, 2)
    """
    check1 = 0  # >0 while inside the dense/coarse fragment section
    check2 = 0  # >0 while inside the main fragment section
    check3 = 0  # set when the current line carries the MP2 total energy
    check4 = 0  # set when the current line carries the RHF energy to keep
    check5 = 0  # unused
    count1 = 0  # gradient rows consumed for the current table
    count2 = 0  # number of '!RHF STATE 1.1 Energy' lines seen so far
    count3 = 0  # nonzero once a gradient table header has been passed
    Grad = Gradobj()
    psirr = open(f1.name+'.out','r')
    scfbas = 'START DENSE FRAGMENT'
    scfbas1 = 'STOP DENSE FRAGMENT'
    if chk == 2:
        scfbas = 'START COARSE FRAGMENT'
        scfbas1 = 'STOP COARSE FRAGMENT'
    for line in psirr.readlines():
        if scfbas in line:
            check1 += 1; continue
        elif scfbas1 in line:
            check1 -= 1; continue
        elif 'START MAIN FRAGMENT' in line:
            check2 += 1; continue
        elif 'STOP MAIN FRAGMENT' in line:
            check2 -= 1; continue
        elif '!MP2 total energy' in line:
            check3 = 1
        elif '!RHF STATE 1.1 Energy' in line:
            count2 += 1
            if chk == 0:
                # counter reset each time -> last occurrence wins
                if count2 == 1:
                    count2 = 0
                    check4 = 1
            elif chk == 1:
                if not param.coarse_level == 2:
                    check4 = 1
                else:
                    # coarse_level == 2: keep only the second occurrence
                    if count2 == 2:
                        check4 = 1
            elif chk == 2:
                # keep only the first occurrence
                if count2 == 1:
                    check4 = 1
        if check3 == 1:
            # the flag was set by this same line; energy is its last field
            s = line.split()
            energymp2p = float(s[-1])
            check3 = 0
        elif check4 == 1:
            s = line.split()
            energyscfp = float(s[-1])
            check4 = 0
        if check1 == 1:
            if 'Atom dE/dx dE/dy dE/dz' in line:
                count3 = 1
                continue
            elif not count3:
                # between the START marker and the table header
                continue
            if (count3 and line.isspace()):
                continue
            if count1+1 > len(f1.coord):
                # table fully consumed; reset state for the next section
                check1 = 0
                count1 = 0
                count3 = 0
                continue
            else:
                s = line.split()
                s1 = [float(xx) for xx in s]
                Grad.addc(f1.coord[count1].n,s1[1:])
                count1 += 1
        elif check2 == 1:
            if 'Atom dE/dx dE/dy dE/dz' in line:
                count3 = 1
                continue
            elif not count3:
                continue
            if (count3 and line.isspace()):
                continue
            if count1+1 > len(f1.coord):
                # NOTE(review): count3 is not reset here, unlike the
                # dense/coarse branch above -- confirm this is intended
                check2 = 0
                count1 = 0
                continue
            else:
                s = line.split()
                s1 = [float(xx) for xx in s]
                Grad.addf(f1.coord[count1].n,s1[1:])
                count1 += 1
    psirr.close()
    if chk == 0:
        return(energyscfp,energymp2p,Grad.c,Grad.f)
    elif chk == 1 or chk == 2:
        return(energyscfp,Grad.c)
def s_mol(file1):
    """Parse a Molpro output file for the SCF gradient table and RHF energy.

    Gradient rows follow an 'Atom dE/dx dE/dy dE/dz' header; the first
    line after the header is skipped (separator) and a blank line ends the
    table.  Each row's trailing three fields are accumulated via
    Grad.adds.  The SCF energy is the last field of the
    '!RHF STATE 1.1 Energy' line.

    Fix: the output file is now closed on return (the original leaked the
    handle; sibling parsers such as s_psi call close()).

    :param file1: path to the Molpro output file
    :return: tuple (Grad.s, energyscfp)
    :raises NameError: if no '!RHF STATE 1.1 Energy' line is present
        (unchanged from the original behaviour)
    """
    in_gradient = 0    # 1 while scanning the gradient table
    rows_seen = 0      # lines seen since the header; the first is skipped
    Grad = Gradobj()
    with open(file1, 'r') as molout:
        for line in molout:
            if 'Atom dE/dx dE/dy dE/dz' in line:
                in_gradient = 1
                continue
            elif '!RHF STATE 1.1 Energy' in line:
                energyscfp = float(line.split()[-1])
            if in_gradient == 1:
                rows_seen += 1
                if rows_seen == 1:
                    # skip the separator line directly beneath the header
                    continue
                if line.isspace():
                    in_gradient = 0
                    continue
                vals = [float(xx) for xx in line.split()]
                # drop the leading atom index column
                Grad.adds(0, vals[1:])
    return (Grad.s, energyscfp)
def smp2_mol(file1):
    """Parse a Molpro output file for the MP2 gradient table and MP2 energy.

    Gradient rows follow an 'Atom dE/dx dE/dy dE/dz' header; the first
    line after the header is skipped (separator) and a blank line ends the
    table.  Each row becomes a Gmethod entry numbered from 1.  The energy
    is the last field of the '!MP2 total energy' line.

    Fix: the output file is now closed on return (the original leaked the
    handle; sibling parsers such as smp2_psi call close()).

    :param file1: path to the Molpro output file
    :return: tuple (Grad, energy) -- list of Gmethod entries and the MP2
        total energy
    :raises NameError: if no '!MP2 total energy' line is present
        (unchanged from the original behaviour)
    """
    in_gradient = 0    # 1 while scanning the gradient table
    rows_seen = 0      # lines seen since the header; the first is skipped
    Grad = []
    atom_index = 1     # 1-based index handed to each Gmethod entry
    with open(file1, 'r') as molout:
        for line in molout:
            if 'Atom dE/dx dE/dy dE/dz' in line:
                in_gradient = 1
                continue
            elif '!MP2 total energy' in line:
                energyscfp = float(line.split()[-1])
            if in_gradient == 1:
                rows_seen += 1
                if rows_seen == 1:
                    # skip the separator line directly beneath the header
                    continue
                if line.isspace():
                    in_gradient = 0
                    continue
                vals = [float(xx) for xx in line.split()]
                # drop the leading atom index column
                Grad.append(Gmethod(atom_index, vals[1:]))
                atom_index += 1
    return (Grad, energyscfp)
# ---> Gaussian output
def nb_mb_gau(file1,file2,chck0):
check1 = 0
check2 = 0
check3 = 0
check4 = 0
check5 = 0
count1 = 0
count2 = 0
count3 = 0
Grad = Gradobj()
scfbas = 'DENSE FRAGMENTATION'
if chck0 == 2:
scfbas = 'COARSE FRAGMENTATION'
c1 = 1; c2 = 1
for line in file1.readlines():
if scfbas in line:
check1 += c1*1
c1 = -1; continue
elif 'MAIN FRAGMENT' in line:
check2 += c2*1
c2 = -1; continue
elif 'EUMP2' in line:
check3 = 1
elif 'SCF Done: E(RHF)' in line:
count2 += 1
if chck0 == 0:
| |
# -*- coding: utf-8 -*-
"""
Contains functions for generating an SPH glass in a periodic box. To generate
a glass, see glassBox.
This package requires diskpy, ChaNGa, and pynbody.
Created on Wed Mar 16 17:37:10 2016
@author: ibackus
"""
import shutil
import os
import numpy as np
import pynbody
SimArray = pynbody.array.SimArray
import diskpy
from diskpy.ICgen.ICgen_utils import changa_command, changa_run
import itertools
# Constants
# Name of the package-local default ChaNGa .param file; resolved to an
# absolute path next to this module below.
defaultparam = 'glassdefaults.param'
directory = os.path.split(os.path.abspath(__file__))[0]
defaultparam = os.path.join(directory, defaultparam)
# Unit Boltzmann constant as a pynbody SimArray (unit string 'k').
kB = SimArray(1.0, 'k')
# Set up default params if required (ie the first time this package is run)
# by copying the shipped '.defaultparam' template next to this module.
if not os.path.exists(defaultparam):
    defaults = os.path.join(directory, '.defaultparam')
    shutil.copyfile(defaults, defaultparam)
    print 'Setting up default params...saved to ' + defaultparam
def _loadDefaults():
    """
    Load default .param file

    Returns the package default ChaNGa parameters as parsed by diskpy's
    'param' config parser (a dict-like mapping of param keys to values).
    """
    return diskpy.utils.configparser(defaultparam, 'param')
def filenames():
    """
    Return the default filenames as a pair
    (input snapshot name, output prefix), read from the package default
    .param file.
    """
    defaults = _loadDefaults()
    return defaults['achInFile'], defaults['achOutName']
def glassNormalDist(n, height, baseshape=[], changaPreset='default', verbose=False,
                    fulloutput=False, nSmooth=32, runTimeScale=1.,
                    dampingScale=1., extraPars={}):
    """
    Unimplemented stub.  Presumably intended to generate a glass whose
    positions are normally distributed along z (cf. normalSnap and
    randomNormal below).  Currently does nothing and returns None.
    """
    pass
def glassBox(n, shape=[1,1,1], changaPreset='default', verbose=False,
             fulloutput=False, usegrid=False, randomness=1., nSmooth=32,
             runTimeScale=1., dampingScale=1., extraPars={}):
    """
    Generates an sph glass in a box with periodic boundary conditions using
    ChaNGa. The procedure is:
    1. Generate random particle positions in a box
    2. Create a tipsy snapshot with only gas particles
    3. Time evolve in a periodic box with no gravity and lots of artificial
    viscosity.
    ND-SPH is supported for SPH in dimensions 1, 2, and 3 IF ChaNGa has been
    properly compiled with NDPSH. The number of dimensions to run in is
    from the length of shape.
    Parameters
    ----------
    n : int or list/array-like
        Number of particles (if int) or grid resolution [nx, ny, nz] along each
        axis (only if usegrid=True)
    shape : array-like
        Shape of the box (x, y, z). The box will be centered around the origin.
    changaPreset : str
        ChaNGa preset to use (see diskpy for info on the ChaNGa presets)
    verbose : bool
        Verbosity, true or false
    fulloutput : bool
        If True, all the snapshots for each time step during the time evolution
        will be output.
        This is useful especially with long boxes where waves can form.
    usegrid : bool
        Use a grid to seed intial positions. The particles will be randomly
        shifted around the grid locations
    randomness : float
        If usegrid=True, specifies by what fraction of the grid spacing
        particles will be randomly shifted
    nSmooth : int
        Number of neighbors to use for SPH
    runTimeScale : float
        Factor to increase ChaNGa run time by. If you think your glasses are
        not getting fully settled into a glass state, try increasing this
        number.
    dampingScale : float
        Factor to increase the damping force in ChaNGa.
    extraPars : dict
        param dict defining params to override the default ChaNGa runtime
        params defined here.
    Returns
    -------
    f : SimSnap (see pynbody)
        Snapshot containing the generated particle positions. The glass
        positions can be accessed using f['pos']. Density is also available for
        all the particles in f['rho']. The target density is 1
    Notes
    -----
    The snapshot will saved to glass.std
    If you find the box is not sufficiently glassy, you can time evolve it
    again by running reglassify() which will run glass.std again.
    """
    # Generate snapshot with random positions
    snap = boxSnap(n, shape, usegrid, randomness)
    param = makeBoxParam(snap, shape, fulloutput, runTimeScale, dampingScale)
    # Save snapshot and param
    paramname = param['achOutName'] + '.param'
    ICname = param['achInFile']
    param['nSmooth'] = nSmooth
    # Caller-supplied extraPars override anything set above
    param.update(extraPars)
    diskpy.utils.configsave(param, paramname)
    snap.write(fmt=pynbody.tipsy.TipsySnap, filename=ICname)
    # Run ChaNGa to make a glass
    f = runchanga(paramname, changaPreset, verbose, fulloutput)
    return f
def glassify(snapshot, shape, changaPreset='default', verbose=False, \
             fulloutput=False):
    """
    Glassifies a snapshot, saves the results to the default filename (see
    sphglass.filenames()) and returns the snapshot. snapshot can be a filename
    or a pynbody SimSnap
    """
    inFile, fPrefix = filenames()
    paramname = fPrefix + '.param'
    if not isinstance(snapshot, str):
        # In-memory snapshot: write it out under the default IC filename
        snapshot.write(filename=inFile, fmt=pynbody.tipsy.TipsySnap)
        snapshotName = inFile
    else:
        snapshotName = snapshot
        snapshot = pynbody.load(snapshotName)
    try:
        param = makeBoxParam(snapshot, shape, fulloutput)
        diskpy.utils.configsave(param, paramname, 'param')
        # reglassify() operates on the default input filename
        shutil.move(snapshotName, inFile)
        glass = reglassify(changaPreset, verbose, fulloutput)
    finally:
        # Restore the snapshot to its original location even on failure.
        # NOTE(review): when snapshot was passed in-memory, snapshotName ==
        # inFile, making both moves self-moves -- confirm this is intended.
        shutil.move(inFile, snapshotName)
    return glass
def reglassify(changaPreset='default', verbose=True, fulloutput=False):
    """
    Re-run the most recently created glass (in the current working
    directory) through ChaNGa to make it more glassy.

    Parameters
    ----------
    changaPreset : str
        ChaNGa preset to use (see diskpy for info on the ChaNGa presets)
    verbose : bool
        Verbosity, true or false
    fulloutput : bool
        If true, don't clean-up extra snapshot stuff

    Returns
    -------
    f : SimSnap (see pynbody)
        Snapshot containing the re-glassified particle positions; density
        is available in f['rho'].
    """
    # The default output prefix determines the .param file name
    outPrefix = filenames()[1]
    return runchanga(outPrefix + '.param', changaPreset, verbose, fulloutput)
def runchanga(paramname, changaPreset='default', verbose=True, fulloutput=False):
    """
    Time evolves a snapshot in ChaNGa and overwrites the ICs with the result.
    Also sets the velocity to zero.
    Parameters
    ----------
    paramname : str
        Path to the .param file to run ChaNGa on
    changaPreset : str
        ChaNGa preset to use (see diskpy for info on the ChaNGa presets)
    verbose : bool
        Verbosity, true or false
    fulloutput : bool
        If true, don't clean-up extra snapshot stuff
    Returns
    -------
    f : SimSnap
        Simulation snapshot which has been run
    """
    param = diskpy.utils.configparser(paramname,'param')
    command = changa_command(paramname, changaPreset)
    # Run ChaNGa quietly (verbose=False) so its stdout can be filtered here
    p = changa_run(command, verbose=False, force_wait=False)
    currentStep = 0
    for line in iter(p.stdout.readline, ''):
        if verbose:
            print line,
        elif line[0:5] == 'Step:':
            # Quiet mode: echo a line only when a new step number appears
            line = line.strip()
            i = int(float(line.split()[1]))
            if i > currentStep:
                currentStep = i
                print line, ' Total steps: ', param['nSteps']
    # move results and clean up
    # The final output is named <outprefix>.<nSteps zero-padded to 6 digits>
    fname = param['achOutName'] + '.{0:06}'.format(param['nSteps'])
    ICname = param['achInFile']
    if fulloutput:
        # Keep every intermediate snapshot; just copy the final one
        shutil.copyfile(fname, ICname)
    else:
        shutil.move(fname, ICname)
        os.system('rm -f ' + fname + '*')
        os.system('rm -f ' + param['achOutName'] + '.0*')
    # set velocity to zero
    f = pynbody.load(ICname, paramname=paramname)
    f['vel'] *= 0
    f.write()
    return f
def randomNormal(n, height, baseshape=()):
    """
    Generate random positions, normally distributed along z.

    The final axis is drawn from a zero-mean normal distribution with
    standard deviation ``height``; each remaining axis i is drawn
    uniformly from [0, baseshape[i]).

    Parameters
    ----------
    n : int
        Number of positions to generate
    height : float
        Standard deviation of the last-axis (z) distribution
    baseshape : sequence of float, optional
        Extents of the non-z axes; output dimensionality is
        len(baseshape) + 1

    Returns
    -------
    pos : numpy.ndarray
        Array of shape (n, len(baseshape) + 1)
    """
    # Fix: immutable default () replaces the previous mutable [] default
    # (never mutated here, but mutable defaults are a latent hazard).
    nDim = len(baseshape) + 1
    pos = np.zeros([n, nDim])
    # z-axis: standard normal scaled by the requested height
    pos[:, -1] = height * np.random.randn(n)
    # remaining axes: uniform within the base extents
    for i in range(nDim - 1):
        pos[:, i] = np.random.rand(n) * baseshape[i]
    return pos
def normalSnap(n, height, baseshape=[]):
    """
    Generate a snapshot with positions normally distributed along z
    (see randomNormal).

    Fix: the constructed snapshot is now returned -- previously the
    function built it and returned None (cf. boxSnap, which returns its
    snapshot).

    Parameters
    ----------
    n : int
        Number of gas particles
    height : float
        Standard deviation of the z distribution
    baseshape : sequence of float, optional
        Extents of the non-z axes

    Returns
    -------
    snap : SimSnap (see pynbody)
        Gas-only snapshot with positions, masses, velocities, temperatures,
        softening and densities initialised
    """
    snap = pynbody.new(gas=n)
    nDim = len(baseshape) + 1
    pos = randomNormal(n, height, baseshape)
    # Fill only the trailing nDim coordinate columns (pynbody positions are 3D)
    i0 = 3-nDim
    snap['pos'][:, i0:] = SimArray(pos,'au')
    # Total mass normalisation: integral of a unit-amplitude gaussian is
    # sqrt(2*pi)*height, scaled by the base area for nDim > 1
    # (cf. glassBox, which documents a target density of 1)
    volume = np.sqrt(2*np.pi) * height
    if nDim > 1:
        volume *= np.prod(baseshape)
    snap['mass'] = volume*SimArray(np.ones(n), 'Msol')/n
    snap['vel'] = SimArray(np.zeros([n,3]), 'km s**-1')
    snap['temp'] = SimArray(np.ones(n),'K')
    snap['eps'] = SimArray(np.ones(n))*height * 5
    snap['rho'] = SimArray(np.ones(n), 'Msol kpc**-3')
    # Previously missing: hand the constructed snapshot back to the caller
    return snap
def boxSnap(n, shape, usegrid=False, randomness=0.):
    """
    Initialize snap shot with n randomly placed gas particles inside a box.
    if usegrid=True, a grid is used to seed the particle positions (see grid())
    n can then be [nx, ny, nz, ...] to specify the resolution along each
    dimension. Otherwise, n is an int and a uniform spacing is attempted.

    Returns a gas-only SimSnap with positions, masses, velocities,
    temperatures, softening and densities initialised; total mass equals
    the box volume so the mean density is 1.
    """
    nDim = len(shape)
    if (nDim > 3) or (nDim < 1):
        raise ValueError, 'Only supported dimensions are 1, 2, 3. try'\
        'different shape'
    if usegrid:
        if hasattr(n, '__iter__'):
            # n already gives the per-axis resolution
            res = n
            n = np.product(res)
        else:
            # Choose a per-axis resolution proportional to the box extents
            # so that the total count approximates n
            alpha = (float(n)/np.product(shape))**(1.0/nDim)
            res = np.array([alpha * L for L in shape])
            res = np.round(res).astype(int)
            n = np.product(res)
    n = int(n)
    snap = pynbody.new(gas=n)
    if usegrid:
        pos = grid(res, shape, randomness)
    else:
        pos = randomBox(n, shape)
    # Fill only the trailing nDim coordinate columns (pynbody positions are 3D)
    i0 = 3-nDim
    snap['pos'][:, i0:] = SimArray(pos,'au')
    # Total mass equals the box volume -> mean density of 1
    volume = float(np.prod(shape))
    snap['mass'] = volume*SimArray(np.ones(n), 'Msol')/n
    snap['vel'] = SimArray(np.zeros([n,3]), 'km s**-1')
    snap['temp'] = SimArray(np.ones(n),'K')
    # Softening set to the largest box extent
    snap['eps'] = SimArray(np.ones(n))*max(shape)
    snap['rho'] = SimArray(np.ones(n), 'Msol kpc**-3')
    return snap
def getcs(snap, param):
"""
From a simulation snapshot and param file (or dict), return the average
sound speed. (in simulation units)
"""
# Get sound speed
units = diskpy.pychanga.units_from_param(param)
| |
inner_source ,
OOOoO000 . outer_source )
if ( I1IiiI1ii1i ) :
OoO = OOOoO000 . packet if ( OOO0ooo ) else None
lisp . lisp_glean_map_cache ( OOOoO000 . inner_source , OOOoO000 . outer_source ,
OOOoO000 . udp_sport , OoO )
if ( OOO0ooo ) : return
if 54 - 54: I11i / I1IiiI * oO0o + OoooooooOO - iII111i / OoooooooOO
I111IIiii1Ii , O0o = lisp . lisp_allow_gleaning ( OOOoO000 . inner_dest , None )
OOOoO000 . gleaned_dest = I111IIiii1Ii
if 13 - 13: oO0o . I1IiiI * oO0o + I1IiiI
if 59 - 59: I1IiiI + i11iIiiIii + i1IIi / I11i
if 44 - 44: I11i . OoOoOO00 * I1IiiI + OoooooooOO - iII111i - IiII
if 15 - 15: IiII / O0 . o0oOOo0O0Ooo . i11iIiiIii
OOOoOoO = lisp . lisp_map_cache_lookup ( OOOoO000 . inner_source , OOOoO000 . inner_dest )
if 59 - 59: I1Ii111 - o0oOOo0O0Ooo - ooOoO0o
if 48 - 48: i1IIi + I11i % OoOoOO00 / Oo0Ooo - o0oOOo0O0Ooo
if 67 - 67: oO0o % o0oOOo0O0Ooo . OoooooooOO + OOooOOo * I11i * OoOoOO00
if 36 - 36: O0 + Oo0Ooo
if 5 - 5: Oo0Ooo * OoOoOO00
if ( OOOoOoO and ( OOOoOoO . action == lisp . LISP_NATIVE_FORWARD_ACTION or
OOOoOoO . eid . address == 0 ) ) :
ii1I11iIiIII1 = lisp . lisp_db_for_lookups . lookup_cache ( OOOoO000 . inner_source , False )
if ( ii1I11iIiIII1 and ii1I11iIiIII1 . secondary_iid ) :
oOO0OOOOoooO = OOOoO000 . inner_dest
oOO0OOOOoooO . instance_id = ii1I11iIiIII1 . secondary_iid
if 22 - 22: I11i + iIii1I11I1II1
OOOoOoO = lisp . lisp_map_cache_lookup ( OOOoO000 . inner_source , oOO0OOOOoooO )
if ( OOOoOoO ) :
OOOoO000 . gleaned_dest = OOOoOoO . gleaned
else :
I111IIiii1Ii , O0o = lisp . lisp_allow_gleaning ( oOO0OOOOoooO , None )
OOOoO000 . gleaned_dest = I111IIiii1Ii
if 24 - 24: OoOoOO00 % i1IIi + iII111i . i11iIiiIii . I1ii11iIi11i
if 17 - 17: I1ii11iIi11i . II111iiii . ooOoO0o / I1ii11iIi11i
if 57 - 57: I11i
if 67 - 67: OoO0O00 . ooOoO0o
if 87 - 87: oO0o % Ii1I
if 83 - 83: II111iiii - I11i
if 35 - 35: i1IIi - iIii1I11I1II1 + i1IIi
if 86 - 86: iIii1I11I1II1 + OoOoOO00 . i11iIiiIii - Ii1I
if 51 - 51: OoOoOO00
if ( OOOoOoO == None and I111IIiii1Ii ) :
lisp . lprint ( "Suppress Map-Request for gleaned EID {}" . format ( lisp . green ( OOOoO000 . inner_dest . print_address ( ) , False ) ) )
if 14 - 14: IiII % oO0o % Oo0Ooo - i11iIiiIii
return
if 53 - 53: Ii1I % Oo0Ooo
if 59 - 59: OOooOOo % iIii1I11I1II1 . i1IIi + II111iiii * IiII
if ( OOOoOoO == None or OOOoOoO . action == lisp . LISP_SEND_MAP_REQUEST_ACTION ) :
if ( lisp . lisp_rate_limit_map_request ( OOOoO000 . inner_source ,
OOOoO000 . inner_dest ) ) : return
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
OOOoO000 . inner_source , OOOoO000 . inner_dest , None )
if 41 - 41: Ii1I % I1ii11iIi11i
if ( OOOoO000 . is_trace ( ) ) :
iIiIi11Ii = oO0oIIII
i1iIiIi1I = "map-cache miss"
lisp . lisp_trace_append ( OOOoO000 , reason = i1iIiIi1I , lisp_socket = iIiIi11Ii )
if 37 - 37: Ii1I % OoO0O00
return
if 79 - 79: I1ii11iIi11i + I1IiiI / I1IiiI
if 71 - 71: OOooOOo * OoO0O00 % OoooooooOO % OoO0O00 / I1IiiI
if 56 - 56: OoooooooOO % i11iIiiIii * iIii1I11I1II1 . OoO0O00 * O0
if 23 - 23: i11iIiiIii
if 39 - 39: o0oOOo0O0Ooo - I1ii11iIi11i % iII111i * OoO0O00 - OOooOOo / iII111i
if 29 - 29: I1ii11iIi11i
if ( OOOoOoO and OOOoOoO . is_active ( ) and OOOoOoO . has_ttl_elapsed ( ) and
OOOoOoO . gleaned == False ) :
lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( OOOoOoO . print_eid_tuple ( ) , False ) ) )
if 52 - 52: i11iIiiIii / i1IIi
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
OOOoO000 . inner_source , OOOoO000 . inner_dest , None )
if 1 - 1: ooOoO0o
if 78 - 78: I1ii11iIi11i + I11i - O0
if 10 - 10: I1Ii111 % I1IiiI
if 97 - 97: OoooooooOO - I1Ii111
if 58 - 58: iIii1I11I1II1 + O0
if 30 - 30: ooOoO0o % iII111i * OOooOOo - I1ii11iIi11i * Ii1I % ooOoO0o
OOOoOoO . stats . increment ( len ( OOOoO000 . packet ) )
if 46 - 46: i11iIiiIii - O0 . oO0o
if 100 - 100: I1IiiI / o0oOOo0O0Ooo * iII111i . O0 / OOooOOo
if 83 - 83: I1Ii111
if 48 - 48: II111iiii * OOooOOo * I1Ii111
i1iiiIii11 , OOoOOO000O0 , oOo0 , II1i11I1 , iiIiIiII , Oo0O00O0O = OOOoOoO . select_rloc ( OOOoO000 , None )
if 37 - 37: I11i / IiII + II111iiii
if 18 - 18: I1ii11iIi11i
if ( i1iiiIii11 == None and iiIiIiII == None ) :
if ( II1i11I1 == lisp . LISP_NATIVE_FORWARD_ACTION ) :
lisp . dprint ( "Natively forwarding" )
OOOoO000 . send_packet ( OOo , OOOoO000 . inner_dest )
if 23 - 23: II111iiii
if ( OOOoO000 . is_trace ( ) ) :
iIiIi11Ii = oO0oIIII
i1iIiIi1I = "not an EID"
lisp . lisp_trace_append ( OOOoO000 , reason = i1iIiIi1I , lisp_socket = iIiIi11Ii )
if 24 - 24: iIii1I11I1II1 + iIii1I11I1II1 * iII111i
OoOO0o ( OooOOOO , "RTR" )
return
if 18 - 18: iII111i * I11i - Ii1I
i1iIiIi1I = "No reachable RLOCs found"
lisp . dprint ( i1iIiIi1I )
if 31 - 31: Oo0Ooo - O0 % OoOoOO00 % oO0o
if ( OOOoO000 . is_trace ( ) ) :
iIiIi11Ii = oO0oIIII
lisp . lisp_trace_append ( OOOoO000 , reason = i1iIiIi1I , lisp_socket = iIiIi11Ii )
if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
return
if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
if ( i1iiiIii11 and i1iiiIii11 . is_null ( ) ) :
lisp . dprint ( "Drop action RLOC found" )
if 39 - 39: iIii1I11I1II1 - OoooooooOO
if ( OOOoO000 . is_trace ( ) ) :
iIiIi11Ii = oO0oIIII
i1iIiIi1I = "drop action"
lisp . lisp_trace_append ( OOOoO000 , reason = i1iIiIi1I , lisp_socket = iIiIi11Ii )
if 81 - 81: I1ii11iIi11i - O0 * OoooooooOO
return
if 23 - 23: II111iiii / oO0o
if 28 - 28: Oo0Ooo * ooOoO0o - OoO0O00
if 19 - 19: I11i
if 67 - 67: O0 % iIii1I11I1II1 / IiII . i11iIiiIii - Ii1I + O0
if 27 - 27: OOooOOo
OOOoO000 . outer_tos = OOOoO000 . inner_tos
OOOoO000 . outer_ttl = OOOoO000 . inner_ttl
if 89 - 89: II111iiii / oO0o
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
if ( i1iiiIii11 ) :
OOOoO000 . encap_port = OOoOOO000O0
if ( OOoOOO000O0 == 0 ) : OOOoO000 . encap_port = lisp . LISP_DATA_PORT
OOOoO000 . outer_dest . copy_address ( i1iiiIii11 )
IIiIiiiIIIIi1 = OOOoO000 . outer_dest . afi_to_version ( )
OOOoO000 . outer_version = IIiIiiiIIIIi1
if 39 - 39: OoO0O00 / Ii1I / I1Ii111
O00O0 = iiIIIIi1i1 if ( IIiIiiiIIIIi1 == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 19 - 19: oO0o - II111iiii
OOOoO000 . outer_source . copy_address ( O00O0 )
if 63 - 63: i11iIiiIii . o0oOOo0O0Ooo
if ( OOOoO000 . is_trace ( ) ) :
iIiIi11Ii = oO0oIIII
if ( lisp . lisp_trace_append ( OOOoO000 , rloc_entry = Oo0O00O0O ,
lisp_socket = iIiIi11Ii ) == False ) : return
if 19 - 19: II111iiii
if 72 | |
########################################################################################################################
# Module: inference/proposal.py
# Description: Proposal mechanisms to extend particles (series of positions/edges/distances) and re-weight
# in light of a newly received observation.
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
from functools import lru_cache
from typing import Tuple, Union
import numpy as np
from numba import njit
from networkx.classes import MultiDiGraph
from bmm.src.tools.edges import get_geometry, edge_interpolate, discretise_edge
from bmm.src.inference.model import MapMatchingModel
@lru_cache(maxsize=2 ** 8)
def get_out_edges(graph: MultiDiGraph,
                  node: int) -> np.ndarray:
    """
    Extract the outgoing edges of a single node (memoised).

    :param graph: encodes road network, simplified and projected to UTM
    :param node: cam_graph index to a single node
    :return: array with columns u, v, k where u == node
    """
    edge_triples = [[start, end, key]
                    for start, end, key in graph.out_edges(node, keys=True)]
    return np.atleast_2d(edge_triples)
@lru_cache(maxsize=2 ** 7)
def get_possible_routes_all_cached(graph: MultiDiGraph,
                                   u: int,
                                   v: int,
                                   k: int,
                                   d_max: float,
                                   num_inter_cut_off: int) -> list:
    """
    Memoised wrapper around get_possible_routes, seeded at the end
    (alpha = 1) of edge (u, v, k) and returning all routes of length
    <= d_max.  Scalar arguments are taken (rather than a route array)
    so the call is hashable for lru_cache.

    :param graph: encodes road network, simplified and projected to UTM
    :param u: edge start node
    :param v: edge end node
    :param k: edge key
    :param d_max: metres, maximum distance to travel
    :param num_inter_cut_off: maximum number of intersections to cross
    :return: list of route arrays (see get_possible_routes)

    NOTE(review): the seed row has 8 columns while get_possible_routes
    documents 9 (t, u, v, k, alpha, x, y, n_inter, d) -- confirm which
    layout is authoritative.
    """
    in_route = np.array([[0., u, v, k, 1., 0., 0., 0.]])
    return get_possible_routes(graph, in_route, d_max, all_routes=True, num_inter_cut_off=num_inter_cut_off)
def get_all_possible_routes_overshoot(graph: MultiDiGraph,
                                      in_edge: np.ndarray,
                                      d_max: float,
                                      num_inter_cut_off: int = np.inf) -> list:
    """
    All possible routes within d_max, reusing the cached search that
    starts at the end of the current edge and shifting each returned
    route's distance column by the length still to travel on that edge.

    :param graph: encodes road network, simplified and projected to UTM
    :param in_edge: route array whose last row is the current edge/position
    :param d_max: metres, maximum distance to travel
    :param num_inter_cut_off: maximum number of intersections to cross
    :return: list of route arrays (see get_possible_routes)
    """
    current = in_edge[-1]
    edge_geom = get_geometry(graph, current[1:4])
    # Distance remaining between the current position and the edge end
    remaining_on_edge = (1 - current[4]) * edge_geom.length
    if remaining_on_edge > d_max:
        # Cannot even reach the end of the edge: search directly
        return get_possible_routes(graph, in_edge, d_max, all_routes=True,
                                   num_inter_cut_off=num_inter_cut_off)
    cached_routes = get_possible_routes_all_cached(graph, *current[1:4],
                                                   d_max, num_inter_cut_off)
    shifted_routes = []
    for cached in cached_routes:
        shifted = cached.copy()
        # Account for the stretch of the current edge already to be crossed
        shifted[:, -1] += remaining_on_edge
        shifted_routes.append(shifted)
    return shifted_routes
def get_possible_routes(graph: MultiDiGraph,
                        in_route: np.ndarray,
                        dist: float,
                        all_routes: bool = False,
                        num_inter_cut_off: int = np.inf) -> list:
    """
    Given a route so far and maximum distance to travel, calculate and return all possible routes on cam_graph.
    :param graph: encodes road network, simplified and projected to UTM
    :param in_route: shape = (_, 9)
        columns: t, u, v, k, alpha, x, y, n_inter, d
        t: float, time
        u: int, edge start node
        v: int, edge end node
        k: int, edge key
        alpha: in [0,1], position along edge
        x: float, metres, cartesian x coordinate
        y: float, metres, cartesian y coordinate
        d: metres, distance travelled
    :param dist: metres, maximum possible distance to travel
    :param all_routes: if true return all routes possible <= d
        otherwise return only routes of length d
    :param num_inter_cut_off: maximum number of intersections to cross in the time interval
    :return: list of arrays
        each array with shape = (_, 9) as in_route
        each array describes a possible route

    NOTE(review): the final row of ``in_route`` is mutated in place (alpha
    and distance columns), and rows appended here carry 8 values -- confirm
    against the 9-column layout documented above.
    """
    # Extract final position from inputted route
    start_edge_and_position = in_route[-1]
    # Extract edge geometry
    start_edge_geom = get_geometry(graph, start_edge_and_position[1:4])
    start_edge_geom_length = start_edge_geom.length
    # Distance left on edge before intersection
    # Use NetworkX length rather than OSM length
    distance_left_on_edge = (1 - start_edge_and_position[4]) * start_edge_geom_length
    if distance_left_on_edge > dist:
        # Remain on edge
        # Propagate and return
        start_edge_and_position[4] += dist / start_edge_geom_length
        start_edge_and_position[-1] += dist
        return [in_route]
    # Reach intersection at end of edge
    # Propagate to intersection and recurse
    dist -= distance_left_on_edge
    start_edge_and_position[4] = 1.
    start_edge_and_position[-1] += distance_left_on_edge
    intersection_edges = get_out_edges(graph, start_edge_and_position[2]).copy()
    if intersection_edges.shape[1] == 0 or len(in_route) >= num_inter_cut_off:
        # Dead-end and one-way or exceeded max intersections
        if all_routes:
            return [in_route]
        else:
            return [None]
    if len(intersection_edges) == 1 and intersection_edges[0][1] == start_edge_and_position[1] \
            and intersection_edges[0][2] == start_edge_and_position[3]:
        # Dead-end and two-way -> Only option is u-turn
        if all_routes:
            return [in_route]
        # NOTE(review): when all_routes is False this branch falls through
        # and implicitly returns None (not [None]) -- confirm callers
        # tolerate this.
    else:
        new_routes = []
        for new_edge in intersection_edges:
            # If not u-turn or loop continue route search on new edge
            if (not (new_edge[1] == start_edge_and_position[1] and new_edge[2] == start_edge_and_position[3])) \
                    and not (new_edge == in_route[:, 1:4]).all(1).any():
                add_edge = np.array([[0, *new_edge, 0, 0, 0, start_edge_and_position[-1]]])
                new_route = np.append(in_route,
                                      add_edge,
                                      axis=0)
                new_routes += get_possible_routes(graph, new_route, dist, all_routes, num_inter_cut_off)
        if all_routes:
            return [in_route] + new_routes
        else:
            return new_routes
def extend_routes(graph, routes, add_distance, all_routes=True):
    """
    Extend each route in a collection by a further travel distance.

    :param graph: encodes road network, simplified and projected to UTM
    :param routes: list of arrays, each with columns
        t, u, v, k, alpha, x, y, n_inter, d
        (time; edge start node; edge end node; edge key; position along edge
        in [0,1]; cartesian x in metres; cartesian y in metres; number of
        options if intersection; distance travelled in metres)
    :param add_distance: float, metres, additional distance to travel
    :param all_routes: if True return all routes possible <= add_distance,
        otherwise only routes of exactly add_distance
    :return: list of numpy.ndarrays, each with shape (_, 9),
        each array describing a possible route
    """
    # Flatten the per-route extensions into a single list, preserving order
    return [extended
            for route in routes
            for extended in get_possible_routes(graph, route, add_distance, all_routes=all_routes)]
def process_proposal_output(particle: np.ndarray,
                            sampled_route: np.ndarray,
                            sampled_dis_route: np.ndarray,
                            time_interval: float,
                            full_smoothing: bool) -> np.ndarray:
    """
    Append a newly sampled route segment to a particle's trajectory.

    Note: ``sampled_route`` is modified in place before being appended.

    :param particle: route up to the previous observation
    :param sampled_route: route travelled since the previous observation
    :param sampled_dis_route: alpha, x, y, distance of the sampled position
    :param time_interval: time between the previous and the new observation
    :param full_smoothing: if True append to the whole particle history,
        otherwise keep only the particle's final row
    :return: particle with the sampled segment appended
    """
    segment = sampled_route
    # Zero out time and cartesian position on the segment's first row
    segment[0, 0] = 0
    segment[0, 5:7] = 0
    # Stamp the final row with the new observation time and sampled position
    segment[-1, 0] = particle[-1, 0] + time_interval
    segment[-1, 4:7] = sampled_dis_route[0:3]
    segment[-1, -1] = sampled_dis_route[-1]
    base = particle if full_smoothing else particle[-1:]
    return np.append(base, segment, axis=0)
def optimal_proposal(graph: MultiDiGraph,
particle: np.ndarray,
new_observation: Union[None, np.ndarray],
time_interval: float,
mm_model: MapMatchingModel,
full_smoothing: bool = True,
d_refine: float = 1.,
d_max: float = None,
d_max_fail_multiplier: float = 1.5,
d_max_threshold: tuple = (0.9, 0.1),
num_inter_cut_off: int = None,
only_norm_const: bool = False,
store_norm_quants: bool = False,
resample_fails: bool = True) -> Union[Tuple[Union[None, np.ndarray],
float,
Union[float, np.ndarray]], float]:
"""
Samples a single particle from the (distance discretised) optimal proposal.
:param graph: encodes road network, simplified and projected to UTM
:param particle: single element of MMParticles.particles
:param new_observation: cartesian coordinate in UTM
:param time_interval: time between last observation and newly received observation
:param mm_model: MapMatchingModel
:param full_smoothing: if True returns full trajectory
otherwise returns only x_t-1 to x_t
:param d_refine: metres, resolution of distance discretisation
:param d_max: optional override of d_max = mm_model.d_max(time_interval)
:param d_max_fail_multiplier: extension of d_max in case all probs are 0
:param d_max_threshold: tuple defining when to extend d_max
extend if total sample prob of distances > d_max * d_max_threshold[0] larger than d_max_threshold[1]
:param num_inter_cut_off: maximum number of intersections to cross in the time interval
:param only_norm_const: if true only return prior normalising constant (don't sample)
:param store_norm_quants: whether to additionally return quantities needed for gradient EM step
assuming deviation prior is used
:param resample_fails: whether to return None (and induce later resampling of whole trajectory)
if proposal fails to find route with positive probability
if False assume distance=0
:return: (particle, unnormalised weight, prior_norm) or (particle, unnormalised weight, dev_norm_quants)
"""
if particle is None:
return 0. if only_norm_const else (None, 0., 0.)
if isinstance(new_observation, list):
new_observation = np.array(new_observation)
if num_inter_cut_off is None:
num_inter_cut_off = max(int(time_interval / 1.5), 10)
if d_max is None:
d_max = mm_model.d_max(time_interval)
# Extract all possible routes from previous position
start_position = particle[-1:].copy()
start_position[0, -1] = 0
# possible_routes = get_possible_routes(cam_graph, start_position, d_max, all_routes=True,
# num_inter_cut_off=num_inter_cut_off)
possible_routes = get_all_possible_routes_overshoot(graph, start_position, d_max,
num_inter_cut_off=num_inter_cut_off)
# Get all possible positions on each route
discretised_routes_indices_list = []
discretised_routes_list = []
for i, route in enumerate(possible_routes):
# All possible end positions of route
discretised_edge_matrix = discretise_edge(graph, route[-1, 1:4], d_refine)
if route.shape[0] == 1:
discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, 0] >= particle[-1, 4]]
discretised_edge_matrix[:, -1] -= discretised_edge_matrix[-1, -1]
else:
discretised_edge_matrix[:, -1] += route[-2, -1]
discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, -1] < d_max + 1e-5]
# Track route index and append to list
if discretised_edge_matrix is not None and len(discretised_edge_matrix) > 0:
discretised_routes_indices_list += [np.ones(discretised_edge_matrix.shape[0], dtype=int) * i]
discretised_routes_list += [discretised_edge_matrix]
# Concatenate into numpy.ndarray
discretised_routes_indices = np.concatenate(discretised_routes_indices_list)
discretised_routes = np.concatenate(discretised_routes_list)
if len(discretised_routes) == 0 or (len(discretised_routes) == 1 and discretised_routes[0][-1] == 0):
if only_norm_const:
return 0
if resample_fails:
return None, 0., 0.
else:
sampled_dis_route = discretised_routes[0]
# Append sampled route to old particle
sampled_route = possible_routes[0]
proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
full_smoothing)
return proposal_out, 0., 0.
# Distance prior evals
distances = discretised_routes[:, -1]
distance_prior_evals = mm_model.distance_prior_evaluate(distances, time_interval)
# Deviation prior evals
deviation_prior_evals = mm_model.deviation_prior_evaluate(particle[-1, 5:7],
discretised_routes[:, 1:3],
| |
0x120
0x10, 0x00, 0x04, 0x03, # 0x124
0x04, 0x01, 0x02, 0x03, # 0x128
0x07, 0x01, 0x24, 0x00, # 0x12c
0x01, 0x00, 0x11, 0x00, # 0x130
0x00, 0x02, None, None, # 0x134
None, None, 0x64, 0x00, # 0x138
0x02, 0x03, 0x00, 0x80, # 0x13c
0x00, 0x00, 0x00, 0x80, # 0x140
None, None, None, None, # 0x144
0x64, 0x00, 0x02, 0x03, # 0x148
0x00, 0x80, 0x00, 0x00, # 0x14c
0x00, 0x80, 0xff, 0xff, # 0x150
0xff, 0xff, 0xff] # 0x154
# Where is the boot block?
if product_no[-1] == "T":
boot_block = "top"
elif product_no[-1] == "B":
boot_block = "bottom"
else:
return ("The product no (" + product_no + ") should end with TQ0/T00 "
"(for top) or BQ0/B00 (for bottom), not '"
+ product_no[-3] + "'")
# Chip size?
if product_no[3:6] == "640": # 64 Mbit
blocks = 64
config['device-id'] = iff(boot_block == "bottom", 0x881a, 0x8817)
device_geometry = [0x17, 0x01, 0x00, 0x06, 0x00, 0x02]
if boot_block == "bottom":
device_geometry += [0x03, 0x00, 0x80, 0x00, 0x3e, 0x00, 0x00, 0x02]
else:
device_geometry += [0x3e, 0x00, 0x00, 0x02, 0x03, 0x00, 0x80, 0x00]
device_geometry += [0x00, 0x00, 0x00, 0x00]
if boot_block == "bottom":
config['cfi-query'][0x136:0x13a] = [0x03, 0x00, 0x80, 0x00]
config['cfi-query'][0x144:0x148] = [0x3e, 0x00, 0x00, 0x02]
else:
config['cfi-query'][0x136:0x13a] = [0x3e, 0x00, 0x00, 0x02]
config['cfi-query'][0x144:0x148] = [0x03, 0x00, 0x80, 0x00]
elif product_no[3:6] == "128": # 128 Mbit
blocks = 128
config['device-id'] = iff(boot_block == "bottom", 0x881b, 0x8818)
device_geometry = [0x18, 0x01, 0x00, 0x06, 0x00, 0x02]
if boot_block == "bottom":
device_geometry += [0x03, 0x00, 0x80, 0x00, 0x7e, 0x00, 0x00, 0x02]
else:
device_geometry += [0x7e, 0x00, 0x00, 0x02, 0x03, 0x00, 0x80, 0x00]
device_geometry += [0x00, 0x00, 0x00, 0x00]
if boot_block == "bottom":
config['cfi-query'][0x136:0x13a] = [0x03, 0x00, 0x80, 0x00]
config['cfi-query'][0x144:0x148] = [0x7e, 0x00, 0x00, 0x02]
else:
config['cfi-query'][0x136:0x13a] = [0x7e, 0x00, 0x00, 0x02]
config['cfi-query'][0x144:0x148] = [0x03, 0x00, 0x80, 0x00]
elif product_no[3:6] == "256": # 256 Mbit
blocks = 256
config['device-id'] = iff(boot_block == "bottom", 0x891c, 0x8919)
device_geometry = [0x19, 0x01, 0x00, 0x06, 0x00, 0x02]
if boot_block == "bottom":
device_geometry += [0x03, 0x00, 0x80, 0x00, 0xfe, 0x00, 0x00, 0x02]
else:
device_geometry += [0xfe, 0x00, 0x00, 0x02, 0x03, 0x00, 0x80, 0x00]
device_geometry += [0x00, 0x00, 0x00, 0x00]
if boot_block == "bottom":
config['cfi-query'][0x136:0x13a] = [0x03, 0x00, 0x00, 0x80]
config['cfi-query'][0x144:0x148] = [0xfe, 0x00, 0x00, 0x02]
else:
config['cfi-query'][0x136:0x13a] = [0xfe, 0x00, 0x00, 0x02]
config['cfi-query'][0x144:0x148] = [0x03, 0x00, 0x00, 0x80]
else:
return ("The product no (" + product_no + ") should contain a valid "
"size specification (640/128/256), not '"
+ product_no[3:6] + "'")
size = 1 << device_geometry[0]
for i in range(0x27, 0x39):
config['cfi-query'][i] = device_geometry[i - 0x27]
if boot_block == "top":
config['unit-size'] = [0x20000] * (blocks - 1) + [0x8000] * 4
else:
config['unit-size'] = [0x8000] * 4 + [0x20000] * (blocks - 1)
return (config, size)
#
# Completion function for:
# Am29DL323GB
# Am29DL323GT
#
# finish(product_no, config) -> (config_updated, size of one flash chip, in bytes)
def finish_config_Am29DL323G_(product_no, config):
    # Completion for Am29DL323GB / Am29DL323GT: the product number's final
    # letter selects whether the boot block sits at the top or the bottom
    # of the address space.
    suffix = product_no[-1]
    if suffix == "T":
        # Top boot: 63 large (64 KiB) sectors followed by 8 small (8 KiB) ones
        config['device-id'] = 0x2250
        config['unit-size'] = [0x10000]*63 + [0x2000]*8
        config["cfi-query"][0x4f] = 0x03
    elif suffix == "B":
        # Bottom boot: 8 small (8 KiB) sectors followed by 63 large (64 KiB) ones
        config['device-id'] = 0x2253
        config['unit-size'] = [0x2000]*8 + [0x10000]*63
        config["cfi-query"][0x4f] = 0x02
    else:
        return "The product no (" + product_no + ") should end with T (for top) or B (for bottom), not '" + product_no[-1] + "'"
    return finish_default(product_no, config)
#
# Completion function for:
# S29GL128N
# S29GL256N
# S29GL512N
#
def finish_config_S29GL___N(product_no, config):
    # Completion for S29GL128N / S29GL256N / S29GL512N: characters 5-7 of
    # the product number give the capacity in Mbit.
    size_field = product_no[5:8]
    mbit_by_field = {"128": 128, "256": 256, "512": 512}
    if size_field not in mbit_by_field:
        return "The product no (" + product_no + ") is not supported. Only 128,256 or 512 Mbit are supported."
    size = mbit_by_field[size_field]
    # Uniform 128 KiB erase sectors, one per Mbit of capacity
    config['unit-size'] = [128*1024]*size
    # CFI device-size byte (0x27) and erase-block-count words (0x2d/0x2e)
    if size == 128:
        config["cfi-query"][0x27] = 0x18
        config["cfi-query"][0x2d] = 0x7f
        config["cfi-query"][0x2e] = 0x00
    elif size == 256:
        config["cfi-query"][0x27] = 0x19
        config["cfi-query"][0x2d] = 0xff
        config["cfi-query"][0x2e] = 0x00
    else:
        config["cfi-query"][0x27] = 0x1a
        config["cfi-query"][0x2d] = 0xff
        config["cfi-query"][0x2e] = 0x01
    # not sure on this one
    config["cfi-query"][0x4f] = 0x04  # bottom WP protect
    #config["cfi-query"][0x4f] = 0x05 # top WP protect
    return finish_default(product_no, config)
#
# list of completion functions
#
# Maps a product-number pattern (underscore characters act as wildcards)
# to the function that finalises that chip family's config.  Entries
# mapped to finish_default require no per-variant adjustments.
complete_functions = {
    "28F___C3_" : finish_config_28F___C3_,
    "28F___J3A" : finish_config_28F___J3A,
    "28F___J3" : finish_config_28F___J3,
    "28F___S3" : finish_config_28F___S3,
    "28F___P30_" : finish_config_28F___P30_,
    "82802-8" : finish_default,
    "Am29F040B" : finish_default,
    "Am29F016D" : finish_default,
    "Am29SL160CT": finish_default,
    "Am29LV640MH": finish_default,
    "Am29LV64_D": finish_default,
    "Am29LV160MB": finish_default,
    "SG29GL064M": finish_default,
    "Am29DL323B": finish_default,
    "Am29DL323G_": finish_config_Am29DL323G_,
    "MBM29LV650UE": finish_default,
    "S29GL___N": finish_config_S29GL___N,
    "AT49BV001A": finish_default,
    "AT49BV001AT": finish_default,
    "Am29DL163D": finish_default,
    }
#
# static description of flash memory chips
#
flash_descriptions = {
"28F___C3_" : {
"cfi-query" : [0x89, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x03, # 0x10
0x00, 0x35, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0xB4, 0xC6, 0x05, # 0x1C
0x00, 0x0A, 0x00, 0x04, # 0x20
0x00, 0x03, 0x00, None, # 0x24
None, None, None, None, # 0x28
None, None, None, None, # 0x2C
None, None, None, None, # 0x30
None, # 0x34
0x50, 0x52, 0x49, 0x31, # 0x35 Extended Query
0x30, 0x66, 0x00, 0x00, # 0x39
0x00, 0x01, 0x03, 0x00, # 0x3D
0x33, 0xC0, 0x01, 0x80, # 0x41
0x00, 0x03, 0x03], # 0x45
"device-id" : None,
"manufacturer-id" : 0x0089, # intel
"max-chip-width" : 16, # 16-bits chips
"unit-size" : None,
"intel_write_buffer" : 0, # no write-buffer in C3
"intel_protection_program" : 1,
"intel_configuration" : 1,
"intel_lock" : 2 # advanced locking
},
"28F___P30_" : {
"cfi-query" : [0x89, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0c
0x51, 0x52, 0x59, 0x01, # 0x10
0x00, 0x0a, 0x01, 0x00, # 0x14
0x00, 0x00, 0x00, 0x17, # 0x18
0x20, 0x85, 0x95, 0x08, # 0x1c
0x09, 0x0a, 0x00, 0x01, # 0x20
0x01, 0x02, 0x00, None, # 0x24
0x01, 0x00, 0x06, 0x00, # 0x28
# Device geometry - filled in by complete function
None, None, None, None, # 0x2c
None, None, None, None, # 0x30
None, None, None, None, # 0x34
None],
"device-id" : None,
"manufacturer-id" : 0x0089, # Intel
"max-chip-width" : 16,
"unit-size" : None,
# TODO: verify these
"intel_write_buffer" : 1,
"intel_protection_program" : 1,
"intel_configuration" : 1,
"intel_lock" : 2 # Advanced locking
},
"28F___S3" : {
"cfi-query" : [0xb0, 0x00, 0x00, 0x00, # 0x00 Sharp Manufacturer ID
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x01, # 0x10
0x00, 0x31, 0x00, 0x00, # 0x14 0x15 is Pointer to Extended Query
0x00, 0x00, 0x00, 0x27, # 0x18
0x55, 0x27, 0x55, 0x03, # 0x1C
0x06, 0x0A, 0x0f, 0x04, # 0x20
0x04, 0x04, 0x04, None, # 0x24
None, None, None, None, # 0x28
None, None, None, None, # 0x2C
None,
0x50, 0x52, 0x49, 0x31, # 0x31 Extended Query
0x30, 0x0f, 0x00, 0x00, # 0x35
0x00, 0x01, 0x03, 0x00, # 0x39
0x50, 0x50], # 0x3D
"device-id" : None, #
"manufacturer-id" : 0x00b0, # Sharp Manufacturer ID is verbatim from Intel docs.
"max-chip-width" : 16, # 16-bits chips
"unit-size" : None,
"intel_write_buffer" : 1,
"intel_protection_program" : 0, # No protection command on S3
"intel_configuration" : 1,
"intel_lock" : 1 # Simple locking
},
"28F___J3A" : {
"cfi-query" : [0x89, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x01, # 0x10
0x00, 0x31, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0x00, 0x00, 0x07, # 0x1C
0x07, 0x0A, 0x00, 0x04, # 0x20
0x04, 0x04, 0x00, None, # 0x24
None, None, None, None, # 0x28
None, None, None, None, # 0x2C
None,
0x50, 0x52, 0x49, 0x31, # 0x31 Extended Query
0x31, 0x0A, 0x00, 0x00, # 0x35
0x00, 0x01, 0x01, 0x00, # 0x39
0x33, 0x00, 0x01, 0x00, # 0x3D
| |
#!/usr/bin/env ipython
# Functions related to loading, saving, processing datasets
import tensorflow.keras.datasets as datasets
from tensorflow.keras import Model
import numpy as np
import pandas as pd
import os
from pathlib import Path
from scipy.stats import entropy
from scipy.spatial.distance import cosine
from sklearn.random_projection import GaussianRandomProjection
from sklearn.decomposition import PCA
import ipdb
from cfg_utils import load_cfg
from model_utils import build_model
# CONSTANTS
FOREST_PATH = os.path.join('data', 'covtype.data')
ADULT_PATH = os.path.join('data', 'adult.data')
ADULT_TEST_PATH = os.path.join('data', 'adult.test')
CIFAR10_PRETRAIN_PATH = os.path.join('data', 'cifar10_pretrain.npy')
def min_max_rescale(df_train, df_test, good_columns=None):
    """
    Drop zero-variance columns and rescale the remaining features to [0, 1].

    Scaling parameters (per-column min and range) are computed from the
    training set only and applied to both sets.

    :param df_train: 2-d numpy array of training features
    :param df_test: 2-d numpy array of test features (same columns as train)
    :param good_columns: optional boolean column mask; if None, keep columns
        whose training range is strictly positive
    :return: (rescaled df_train, rescaled df_test)
    """
    if good_columns is None:
        spread = df_train.max(axis=0) - df_train.min(axis=0)
        good_columns = (spread > 0)
    n_dropped = df_train.shape[1] - sum(good_columns)
    print('Deleting', n_dropped, 'columns for not exhibiting variability')
    df_train = df_train[:, good_columns]
    df_test = df_test[:, good_columns]
    print('Rescaling to [0, 1]...')
    lo = df_train.min(axis=0)
    hi = df_train.max(axis=0)
    rng = np.float32(hi - lo)
    # A surviving zero-range column (possible via a caller-supplied mask)
    # is mapped to ~0.5 instead of dividing by zero
    zero = (rng == 0)
    rng[zero] = 2 * hi[zero] + 1e-5
    df_train = (df_train - lo) / rng
    df_test = (df_test - lo) / rng
    assert np.isnan(df_train).sum() == 0
    assert np.isnan(df_test).sum() == 0
    return df_train, df_test
def load_data(options, replace_index):
    """
    Load a dataset and split it into train/validation/test arrays.

    :param options: dict of shared options; must contain 'name', one of
        {'mnist', 'cifar10', 'cifar10_pretrain', 'cifar100', 'forest',
        'adult'}, plus dataset-specific keys ('flatten', 'binary', 'subset',
        'preprocessing') as used below
    :param replace_index: forwarded to validation_split; if truthy, training
        element `replace_index` is replaced by a copy of element 0
    :return: x_train, y_train, x_vali, y_vali, x_test, y_test, all float32
    :raises ValueError: for an unknown dataset name
    """
    # these are shared options
    data_type = options['name']
    data_privacy = 'all'
    print('WARNING: Data privacy is fixed to all right now')
    if data_type == 'mnist':
        flatten = options['flatten']
        binary = options['binary']
        # only care about enforcing the max norm for binary classification atm
        enforce_max_norm = bool(binary)
        # BUG FIX: previously an unrecognised options['preprocessing'] value
        # left project/pca/crop unassigned (the else only covered a missing
        # key), raising NameError below.  Default everything to False and
        # switch on only the recognised values.
        project = False
        pca = False
        crop = False
        preprocessing = options.get('preprocessing')
        if preprocessing == 'PCA':
            project = True
            pca = True
        elif preprocessing == 'GRP':
            project = True
        elif preprocessing == 'crop':
            crop = True
        x_train, y_train, x_test, y_test = load_mnist(binary=binary,
                                                      enforce_max_norm=enforce_max_norm,
                                                      flatten=flatten,
                                                      data_privacy=data_privacy,
                                                      project=project,
                                                      crop=crop,
                                                      pca=pca)
    elif data_type == 'cifar10':
        flatten = options['flatten']
        binary = options['binary']
        subset = options['subset']
        enforce_max_norm = bool(binary)
        # flattened CIFAR-10 is always projected with PCA
        project = bool(flatten)
        pca = bool(flatten)
        x_train, y_train, x_test, y_test = load_cifar10(binary=binary,
                                                        enforce_max_norm=enforce_max_norm,
                                                        flatten=flatten,
                                                        data_privacy=data_privacy,
                                                        project=project,
                                                        pca=pca,
                                                        subset=subset)
    elif data_type == 'cifar10_pretrain':
        binary = options['binary']
        enforce_max_norm = bool(binary)
        x_train, y_train, x_test, y_test = load_cifar10_pretrain(binary=binary,
                                                                 enforce_max_norm=enforce_max_norm)
    elif data_type == 'cifar100':
        # No options here
        x_train, y_train, x_test, y_test = load_cifar100()
    elif data_type == 'forest':
        x_train, y_train, x_test, y_test = load_forest(data_privacy=data_privacy)
    elif data_type == 'adult':
        pca = False
        if 'preprocessing' in options and options['preprocessing'] == 'PCA':
            print('WARNING: When are we doing PCA with adult?')
            pca = True
        x_train, y_train, x_test, y_test = load_adult(data_privacy=data_privacy, pca=pca)
    else:
        raise ValueError(data_type)
    x_train, y_train, x_vali, y_vali, x_test, y_test = validation_split(x_train, y_train, x_test, y_test, replace_index)
    # Convert everything to float32
    x_train = np.float32(x_train)
    y_train = np.float32(y_train)
    x_vali = np.float32(x_vali)
    y_vali = np.float32(y_vali)
    x_test = np.float32(x_test)
    y_test = np.float32(y_test)
    return x_train, y_train, x_vali, y_vali, x_test, y_test
def validation_split(x_train, y_train, x_test, y_test, replace_index):
    """
    Carve a validation set off the front of the training data.

    The first 10% of training rows become the validation set.  If
    replace_index is truthy, training element `replace_index` is overwritten
    with a copy of element 0 and element 0 is then dropped, so experiments
    with different replace_index values avoid an unusually low-variance
    first batch.

    :return: x_train, y_train, x_vali, y_vali, x_test, y_test
    """
    n_total = x_train.shape[0]
    n_vali = int(0.1 * n_total)
    vali_idx = range(n_vali)
    train_idx = [i for i in range(n_total) if i not in vali_idx]
    assert len(set(vali_idx).intersection(set(train_idx))) == 0
    x_vali, y_vali = x_train[vali_idx], y_train[vali_idx]
    x_train, y_train = x_train[train_idx], y_train[train_idx]
    if replace_index:
        replace_index = int(replace_index)
        # Always replace with element 0 (wlog, ish) and then drop that row,
        # so low replace_index values don't see a low-variance first batch
        x_train[replace_index] = x_train[0]
        y_train[replace_index] = y_train[0]
        x_train = np.delete(x_train, 0, axis=0)
        y_train = np.delete(y_train, 0, axis=0)
    return x_train, y_train, x_vali, y_vali, x_test, y_test
def load_forest(data_privacy='all'):
    """
    Load the (binary) forest covertype dataset, building and caching it
    from the raw CSV on first use.

    :param data_privacy: 'all', 'public' or 'private' (see public_private_split)
    :return: x_train, y_train, x_test, y_test
        features are rescaled to [0, 1] then projected to the unit sphere;
        labels are 0/1
    """
    path = os.path.join('data', 'forest_' + data_privacy + '.npy')
    try:
        # Fast path: load the previously-built cache
        data = np.load(path, allow_pickle=True).item()
        x_train = data['x_train']
        x_test = data['x_test']
        y_train = data['y_train']
        y_test = data['y_test']
    except FileNotFoundError:
        # Slow path: build the dataset from the raw CSV and cache it
        print('Loading...')
        all_data = pd.read_csv(FOREST_PATH, header=None)
        # select just types 1 and 2 (these are the most common)
        print('Selecting classes 1 and 2')
        binary_data = all_data.loc[all_data.iloc[:, -1].isin({1, 2}), :]
        # split into features and labels
        y = binary_data.iloc[:, -1].values
        # rescale to 0 and 1!
        y = y - 1
        assert set(y) == set([0, 1])
        features = binary_data.iloc[:, :-1].values
        assert features.shape[1] == 54
        N = features.shape[0]
        print('Resulting number of examples:', N)
        # test-train split
        # NOTE(review): the split is randomised and not seeded here, so the
        # cached file on disk is what makes results reproducible
        print('Doing test-train split')
        train_frac = 0.85
        n_train = int(N*train_frac)
        train_idx = np.random.choice(N, n_train, replace=False)
        test_idx = [x for x in range(N) if x not in train_idx]
        print('n train:', n_train, 'n test:', len(test_idx))
        x_train = features[train_idx, :]
        x_test = features[test_idx, :]
        y_train = y[train_idx]
        y_test = y[test_idx]
        # need to keep this to make sure the columns are all the same... when we do public/private split
        x_train_orig = x_train.copy()
        # do public/private split
        x_train, y_train, x_test, y_test = public_private_split('forest', data_privacy,
                                                                x_train, y_train,
                                                                x_test, y_test)
        # now we need to normalise this
        # rescale to 0-1 first
        # column mask is computed on the full (pre-split) training data so
        # that public/private variants keep the same columns
        col_mins = x_train_orig.min(axis=0)
        col_maxs = x_train_orig.max(axis=0)
        col_ranges = col_maxs - col_mins
        good_columns = (col_ranges > 0)
        del x_train_orig
        x_train, x_test = min_max_rescale(x_train, x_test, good_columns=good_columns)
        # and NOW we project to the unit sphere
        print('Projecting to sphere...')
        x_train = x_train / np.linalg.norm(x_train, axis=1).reshape(-1, 1)
        x_test = x_test / np.linalg.norm(x_test, axis=1).reshape(-1, 1)
        assert np.all(np.abs(np.linalg.norm(x_train, axis=1) - 1) < 1e-6)
        assert np.all(np.abs(np.linalg.norm(x_test, axis=1) - 1) < 1e-6)
        data = {'x_train': x_train,
                'x_test': x_test,
                'y_train': y_train,
                'y_test': y_test}
        print('Saving...')
        np.save(path, data)
    return x_train, y_train, x_test, y_test
def public_private_split(dataset, data_privacy, x_train, y_train, x_test, y_test):
    """
    Optionally split a dataset into public and private halves.

    When data_privacy is 'all' the data is returned unchanged.  Otherwise a
    50/50 split of train and test indices is loaded from (or created and
    saved to) data/<dataset>_public_private_split.npy, and the half named by
    data_privacy ('public' or 'private') is returned.

    :param dataset: dataset name used to locate the cached split file
    :param data_privacy: 'all', 'public' or 'private'
    :return: x_train, y_train, x_test, y_test (possibly subset)
    """
    if data_privacy == 'all':
        print('Including all data')
    else:
        print('Splitting data into public/private!')
        split_path = os.path.join('data', dataset + '_public_private_split.npy')
        try:
            # Reuse the persisted split so public/private sets stay disjoint
            # and stable across runs
            split = np.load(split_path, allow_pickle=True).item()
            print('Loaded pre-computed split from', split_path)
            public_train_idx = split['public_train_idx']
            public_test_idx = split['public_test_idx']
            private_train_idx = split['private_train_idx']
            private_test_idx = split['private_test_idx']
        except FileNotFoundError:
            # First run: draw a random 50/50 split and persist it
            print('No pre-defined split found!')
            N_train = x_train.shape[0]
            N_test = x_test.shape[0]
            public_train_idx = np.random.choice(N_train, int(0.5*N_train), replace=False)
            public_test_idx = np.random.choice(N_test, int(0.5*N_test), replace=False)
            private_train_idx = np.array([i for i in range(N_train) if i not in public_train_idx])
            private_test_idx = np.array([i for i in range(N_test) if i not in public_test_idx])
            assert len(set(public_train_idx).intersection(set(private_train_idx))) == 0
            assert len(set(public_test_idx).intersection(set(private_test_idx))) == 0
            split = {'public_train_idx': public_train_idx,
                     'public_test_idx': public_test_idx,
                     'private_train_idx': private_train_idx,
                     'private_test_idx': private_test_idx}
            np.save(split_path, split)
            print('Saved split to', split_path)
        if data_privacy == 'public':
            x_train = x_train[public_train_idx]
            y_train = y_train[public_train_idx]
            x_test = x_test[public_test_idx]
            y_test = y_test[public_test_idx]
        elif data_privacy == 'private':
            x_train = x_train[private_train_idx]
            y_train = y_train[private_train_idx]
            x_test = x_test[private_test_idx]
            y_test = y_test[private_test_idx]
    return x_train, y_train, x_test, y_test
def load_mnist(binary=False, enforce_max_norm=False, flatten=True,
data_privacy='all', project=True, pca=False, crop=False):
dataset_identifier = 'mnist' + '_' + data_privacy + '_binary'*binary + '_maxnorm'*enforce_max_norm + '_square'*(not flatten) + '_pca'*pca + '_crop'*crop + '.npy'
dataset_string = os.path.join('data', dataset_identifier)
try:
data = np.load(dataset_string, allow_pickle=True).item()
x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']
print('Loaded data from', dataset_string)
except FileNotFoundError:
print('Couldn\'t load data from', dataset_string)
# cant load from file, build it up again
mnist = datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, y_train, x_test, y_test = public_private_split('mnist', data_privacy, x_train, y_train, x_test, y_test)
if binary:
# keep only 3 and 5 (I chose these randomly)
keep_train = (y_train == 3) | (y_train == 5)
keep_test = (y_test == 3) | (y_test == 5)
x_train = x_train[keep_train]
x_test = x_test[keep_test]
y_train = y_train[keep_train]
y_test = y_test[keep_test]
# convert to binary (5 is 1, 3 is 0)
y_train[y_train == 5] = 1
y_train[y_train == 3] = 0
y_test[y_test == 5] = 1
y_test[y_test == 3] = 0
# sanity check
assert set(y_train) == {1, 0}
assert set(y_test) == {1, 0}
# typical normalisation
x_train, x_test = x_train/255.0, x_test/255.0
if crop:
assert x_train.shape[1:] == (28, 28)
assert x_test.shape[1:] == (28, 28)
x_train = x_train[:, 9:19, 9:19]
x_test = x_test[:, 9:19, 9:19]
side_length = 10
else:
side_length = 28
if flatten:
x_train = x_train.reshape(-1, side_length*side_length)
x_test = x_test.reshape(-1, side_length*side_length)
if project:
# you can only project flattened data
# by default we do gaussian random projections
if pca:
# do PCA down to 50
# in the Abadi paper they do 60 dimensions, but to help comparison with Wu I'd rather do 50 here
transformer | |
be patient!'.format(col))
fig = plt.figure(figsize=(20, 15))
sns.distplot(data, bins=bins, kde=False, rug=True)
plt.title('Histograms of {}'.format(col), fontsize=20)
plt.xlabel('{}'.format(col), fontsize=20)
plt.ylabel('number of counts', fontsize=20)
plt.savefig(out_path + '/02-hist/' + "{}.png".format(col))
if display:
plt.show()
plt.clf()
plt.close(fig)
if tracking:
print('Histograms plots are DONE!!!')
if tracking:
end = time.time()
print('Generate histograms plots took = ' + str(end - start) + ' s')
else:
print('Caution: no numerical features in the dataset!!!')
def bar_plot(df_in, top_n=20, rotation=True, output_dir=None, display=False, tracking=False):
    """
    Bar plot for the categorical features in the rdd data frame.

    One figure per categorical/date column, showing its top_n most frequent
    values, is written to <output_dir>/Audited/03-Bar_plots.pdf.

    :param df_in: the input rdd data frame
    :param top_n: the number of the most frequent feature to show in the bar plot
    :param rotation: the flag for rotating the xticks in the plot, the default value is True
    :param output_dir: the out put directory, the default value is the current working directory
    :param display: the flag for displaying the figures, the default value is False
    :param tracking: the flag for displaying CPU time, the default value is False
    """
    _, _, cat_fields, date_fields, _ = dtypes_class(df_in)
    cat_fields = cat_fields + date_fields
    if not cat_fields:
        print('Caution: no categorical features in the dataset!!!')
        return
    df_in = df_in.select(cat_fields)
    base_dir = os.getcwd() if output_dir is None else output_dir
    out_path = base_dir + '/Audited'
    mkdir(out_path)
    print('================================================================')
    print('The Bar plot Bar_plots.pdf was located at:')
    print(out_path)
    if tracking:
        start = time.time()
    pdf = PdfPages(out_path + '/03-Bar_plots.pdf')
    for col in df_in.columns:
        # Top-n value counts for this column (nulls dropped)
        p_data = df_in.select(col).na.drop().groupBy(col).count().sort(F.desc('count')).limit(top_n).toPandas()
        if tracking:
            print(f'Plotting barplot of {col}.... Please be patient!')
        plt.ioff()
        fig = plt.figure(figsize=(20, 15))
        sns.barplot(x=col, y="count", data=p_data)
        plt.title(f'Barplot of {col}', fontsize=20)
        plt.xlabel(f'{col}', fontsize=20)
        plt.ylabel('number of counts', fontsize=20)
        if rotation:
            plt.xticks(rotation=90)
        pdf.savefig(fig)
        if display:
            plt.show()
        plt.close(fig)
    if tracking:
        print('Bar plots are DONE!!!')
    pdf.close()
    if tracking:
        end = time.time()
        print('Generate bar plots took = ' + str(end - start) + ' s')
def trend_plot(df_in, types='day', d_time=None, rotation=True, output_dir=None, display=False, tracking=False):
    """
    Trend plot for the aggregated time series data if the rdd data frame has date features and numerical features.

    For every numerical feature, plots the per-period mean and sum into
    <output_dir>/Audited/04-Trend_plots.pdf.

    :param types: the types for time feature aggregation: day, month, year, the default value is day
    :param d_time: the specific feature name of the date feature, the default value
                   is the first date feature in the rdd data frame
    :param rotation: the flag for rotating the xticks in the plot, the default value is True
    :param output_dir: the out put directory, the default value is the current working directory
    :param display: the flag for displaying the figures, the default value is False
    :param tracking: the flag for displaying CPU time, the default value is False
    :raises ValueError: if types is not one of 'day', 'month', 'year'
    """
    # BUG FIX: an unrecognised `types` previously left ts_format unassigned
    # and crashed with NameError deep inside the plotting loop; validate up
    # front and fail fast with a clear error instead.
    ts_formats = {'day': 'yyyy-MM-dd', 'month': 'yyyy-MM', 'year': 'yyyy'}
    if types not in ts_formats:
        raise ValueError("types must be one of 'day', 'month' or 'year', got {!r}".format(types))
    ts_format = ts_formats[types]
    _, num_fields, _, date_fields, _ = dtypes_class(df_in)
    if date_fields and num_fields:
        df_in = df_in.select(date_fields+num_fields)
        if d_time is None:
            # default to the first date feature
            d_time = date_fields[0]
        if output_dir is None:
            out_path = os.getcwd() + '/Audited'
        else:
            out_path = output_dir + '/Audited'
        mkdir(out_path)
        print('================================================================')
        print('The Trend plot Trend_plots.pdf was located at:')
        print(out_path)
        if tracking:
            start = time.time()
        pdf = PdfPages(out_path + '/04-Trend_plots.pdf')
        for col in num_fields:
            # Aggregate this numerical column per time period
            p_data = df_in.select(F.date_format(d_time, ts_format).alias(types), col) \
                .groupBy(types).agg(F.mean(col).alias("mean"), F.sum(col).alias("sum")).toPandas()
            if tracking:
                print('Plotting trend plot of {}.... Please be patient!'.format(col))
            plt.ioff()
            sns.set(style="ticks", rc={"lines.linewidth": 2})
            fig, axes = plt.subplots(1, 2, figsize=(20, 10))
            sns.lineplot(x=types, y="mean", data=p_data, ax=axes[0])
            axes[0].set_title('Mean trend of {} in {}'.format(col, types))
            sns.lineplot(x=types, y="sum", data=p_data, ax=axes[1])
            axes[1].set_title('Sum trend of {} in {}'.format(col, types))
            if rotation:
                for ax in fig.axes:
                    plt.sca(ax)
                    plt.xticks(rotation=90, fontsize=8)
            pdf.savefig(fig)
            if display:
                plt.show()
            plt.close(fig)
        if tracking:
            print('Trend plots are DONE!!!')
        pdf.close()
        if tracking:
            end = time.time()
            print('Generate trend plots took = ' + str(end - start) + ' s')
    else:
        print('Caution: no date features in the dataset!!!')
def dataset_summary(df_in, tracking=False):
    """
    The data set basics summary.

    Combines sample/feature counts, per-row null/empty/zero statistics,
    field-class counts and Spark dtype counts into a single two-column table.

    :param df_in: the input rdd data frame
    :param tracking: the flag for displaying CPU time, the default value is False
    :return: data set summary in pandas data frame with columns ['summary', 'value']
    """
    if tracking:
        start = time.time()
    sample_size = df_in.count()
    col_names = ['summary', 'value']
    all_fields, num_fields, cat_fields, date_fields, unsupported_fields = dtypes_class(df_in)
    # NOTE(review): d_types is computed but never included in the final
    # concat below — it appears to be superseded by types_summary; confirm
    # before removing.
    d_types = all_fields.groupby('DataType').count().reset_index()
    d_types.columns = col_names
    cols = df_in.columns
    # The builtin sum() here folds per-column Spark Column expressions with
    # `+`, yielding one Column per statistic counting zeros/nulls/empty
    # strings in each row
    mask = df_in.withColumn('zero_count', sum(F.when(F.col(c) == 0, 1).otherwise(0) for c in cols)) \
        .withColumn('null_count', sum(F.col(c).isNull().cast('int') for c in cols)) \
        .withColumn('empty_count', sum(F.when(F.trim(F.col(c)) == '', 1).otherwise(0) for c in cols)) \
        .select(['null_count', 'empty_count', 'zero_count'])
    # number of rows containing at least one null / empty string / zero
    row_w_null = mask.filter(F.col('null_count') > 0).count()
    row_w_empty = mask.filter(F.col('empty_count') > 0).count()
    row_w_zero = mask.filter(F.col('zero_count') > 0).count()
    # average per-row counts, transposed into the two-column summary layout
    r_avg = mask.agg(*[F.avg(c).alias('row_avg_' + c) for c in mask.columns]).toPandas().transpose().reset_index()
    r_avg.columns = col_names
    # number of features whose non-null values are all identical
    single_unique_feature = sum([df_in.na.drop(subset=[c]).select(c).distinct().count() == 1 for c in cols])
    size_names = ['sample_size', 'feature_size', 'single_unique_feature', 'row_w_null', 'row_w_empty', 'row_w_zero']
    size_values = [sample_size, len(cols), single_unique_feature, row_w_null, row_w_empty, row_w_zero]
    size_summary = pd.DataFrame({'summary': size_names, 'value': size_values})
    avg_summary = r_avg
    field_names = ['numerical_fields', 'categorical_fields', 'date_fields', 'unsupported_fields']
    field_values = [len(num_fields), len(cat_fields), len(date_fields), len(unsupported_fields)]
    field_summary = pd.DataFrame({'summary': field_names, 'value': field_values})
    types_summary = pd.DataFrame(df_in.dtypes, columns=['value','dtypes'])\
        .groupby('dtypes').count().rename_axis('summary').reset_index()
    summary = pd.concat([size_summary, avg_summary, field_summary, types_summary], axis=0)
    if tracking:
        end = time.time()
        print('Generate data set summary took = ' + str(end - start) + ' s')
    return summary
def numeric_summary(df_in, columns=None, deciles=False, top_n=5, tracking=False):
    """
    The auditing function for numerical rdd data frame.

    :param df_in: the input rdd data frame
    :param columns: the specific feature columns, the default value is None
    :param deciles: the flag for generate the deciles
    :param top_n: the number of the most frequent item
    :param tracking: the flag for displaying CPU time, the default value is False
    :return: the audited results for the numerical features in pandas data frame,
             or None when the frame has no numerical features
    """
    _, num_fields, _, _, _ = dtypes_class(df_in)
    # Guard clause: nothing to audit without numerical columns.
    if not num_fields:
        print('Caution: no numerical features in the dataset!!!')
        return None
    if tracking:
        start = time.time()
    num = df_in.select(num_fields)
    if columns:
        num = num.select(columns)
    # Collect every per-feature audit piece, then merge them on 'feature'.
    frames = [
        data_types(num, tracking=tracking),
        counts(num, tracking=tracking),
        describe(num, columns=columns, tracking=tracking),
        percentiles(num, deciles=deciles, tracking=tracking),
        feature_len(num, tracking=tracking),
        freq_items(num, top_n=top_n, tracking=tracking),
        rates(num, columns=columns, numeric=True, tracking=tracking),
    ]
    num_summary = df_merge(frames, 'feature').drop(['count'], axis=1)
    if tracking:
        end = time.time()
        print('Auditing numerical data took = ' + str(end - start) + ' s')
    return num_summary
else:
print('Caution: no numerical features in the dataset!!!')
def category_summary(df_in, columns=None, top_n=5, tracking=False):
    """
    The auditing function for categorical rdd data frame.

    :param df_in: the input rdd data frame
    :param columns: the specific feature columns, the default value is None
    :param top_n: the number of the most frequent item
    :param tracking: the flag for displaying CPU time, the default value is False
    :return: the audited results for the categorical features in pandas data frame,
             or None when the data frame has no categorical features
    """
    _, _, cat_fields, _, _ = dtypes_class(df_in)
    if cat_fields:
        if tracking:
            start = time.time()
        cat = df_in.select(cat_fields)
        if columns:
            cat = cat.select(columns)
        # Per-feature audit pieces, merged on the 'feature' column.
        d_types = data_types(cat, tracking=tracking)
        f_counts = counts(cat, tracking=tracking)
        f_len = feature_len(cat, tracking=tracking)
        freq = freq_items(cat, top_n=top_n, tracking=tracking)
        rate = rates(cat, columns=columns, numeric=False, tracking=tracking)
        data_frames = [d_types, f_counts, f_len, freq, rate]
        cat_summary = df_merge(data_frames, 'feature')
        if tracking:
            end = time.time()
            print('Auditing categorical data took = ' + str(end - start) + ' s')
        return cat_summary
    else:
        # Bug fix: this warning previously said "numerical" features,
        # copied from numeric_summary; this function audits categorical ones.
        print('Caution: no categorical features in the dataset!!!')
def fig_plots(df_in, output_dir=None, bins=50, top_n=20, types='day', d_time=None,
rotation=True, sample_size=None, display=False, tracking=False):
"""
The wrapper for the plot functions.
:param df_in: the input rdd data frame
:param output_dir: the out put directory, the default value is the current working directory
:param bins: the number of bins for generate the bar plots
:param top_n: the number of the most frequent feature to show in the bar plot
:param types: the types for time feature aggregation: day, month, year, the default value is day
:param d_time: the specific feature name of the date feature, the default value
is the first date feature in the rdd data frame
:param rotation: the flag for rotating the xticks in the plot, the default value is True
:param sample_size: the size for generate the subset from the rdd data frame, the
default value none
:param display: the flag for displaying the figures, the default value is False
:param tracking: the flag for displaying | |
"""Derived agent class."""
from swarms.lib.agent import Agent
import numpy as np
from swarms.utils.bt import BTConstruct
# from swarms.utils.results import Results
from py_trees import Behaviour, Blackboard
# import copy
from py_trees.meta import inverter
import py_trees
from py_trees.composites import Sequence, Selector
from py_trees.trees import BehaviourTree
from swarms.behaviors.sbehaviors import (
NeighbourObjects, IsVisitedBefore,
IsCarrying, IsInPartialAttached, RandomWalk, Move, AvoidSObjects
# ObjectsOnGrid, IsAgentDead,
)
from swarms.behaviors.scbehaviors import (
CompositeDrop, CompositeSingleCarry, MoveTowards,
Explore, CompositeDropPartial, CompositeMultipleCarry,
NewExplore, NewMoveAway, NewMoveTowards
# , AgentDead, AvoidTrap, ObstacleStuck
)
# import py_trees
from ponyge.operators.initialisation import initialisation
from ponyge.fitness.evaluation import evaluate_fitness
from ponyge.operators.crossover import crossover
from ponyge.operators.mutation import mutation
from ponyge.operators.replacement import replacement
from ponyge.operators.selection import selection
class SimForgAgentWithout(Agent):
    """Simulation agent (without avoidance behaviors).
    A minimalistic, hand-built behavior tree for a swarm agent
    implementing carry and drop (foraging) behavior: drop food at the
    hub, pick food up at a site, otherwise move toward a known
    site/hub or explore.
    """
    def __init__(self, name, model, xmlstring=None):
        # name: agent id; model: simulation model (RNG, grid, hub);
        # xmlstring: optional phenotype kept for interface parity with
        # evolved agents — the tree below is built by hand instead.
        super().__init__(name, model)
        self.location = ()
        # Random initial heading in [0, 2*pi)
        self.direction = model.random.rand() * (2 * np.pi)
        self.speed = 2
        self.radius = 3
        self.moveable = True
        self.shared_content = dict()
        self.carryable = False
        self.passable = True
        # Define a BTContruct object
        self.bt = BTConstruct(None, self)
        # Minimal stand-in for a GE individual so this agent exposes the
        # same `individual[0].phenotype` interface as evolved agents.
        class DummyIndividual:
            def __init__(self):
                self.phenotype = None
        dummyind = DummyIndividual()
        self.individual = [dummyind]
        self.individual[0].phenotype = xmlstring
        # self.bt.xmlstring = xmlstring
        # self.bt.construct()
        #
        # neighobst = NeighbourObjects('NeighbourObjects_Obstacles')
        # neighobst.setup(0, self, 'Obstacles')
        # Drop branch: drop carried food when next to the hub
        dseq = py_trees.composites.Sequence('DSequence')
        iscarrying = IsCarrying('IsCarrying_Food')
        iscarrying.setup(0, self, 'Food')
        neighhub = NeighbourObjects('NeighbourObjects_Hub')
        neighhub.setup(0, self, 'Hub')
        # NOTE(review): notneighhub is constructed but never attached
        # to the tree below — looks like dead code; confirm intent.
        notneighhub = py_trees.meta.inverter(NeighbourObjects)(
            'NeighbourObjects_Hub')
        notneighhub.setup(0, self, 'Hub')
        drop = CompositeDrop('CompositeDrop_Food')
        drop.setup(0, self, 'Food')
        dseq.add_children([neighhub, drop])
        # Carry branch: pick up food found at a site when not carrying
        cseq = py_trees.composites.Sequence('CSequence')
        neighsite = NeighbourObjects('NeighbourObjects_Sites')
        neighsite.setup(0, self, 'Sites')
        neighfood = NeighbourObjects('NeighbourObjects_Food')
        neighfood.setup(0, self, 'Food')
        invcarrying = py_trees.meta.inverter(IsCarrying)('IsCarrying_Food')
        invcarrying.setup(0, self, 'Food')
        carry = CompositeSingleCarry('CompositeSingleCarry_Food')
        carry.setup(0, self, 'Food')
        cseq.add_children([neighsite, neighfood, invcarrying, carry])
        # Locomotion branch
        # Move to site (only when a site is known and not carrying)
        siteseq = py_trees.composites.Sequence('SiteSeq')
        sitefound = IsVisitedBefore('IsVisitedBefore_Sites')
        sitefound.setup(0, self, 'Sites')
        gotosite = MoveTowards('MoveTowards_Sites')
        gotosite.setup(0, self, 'Sites')
        # siteseq.add_children([neighobst, neightrap, sitefound, invcarrying, gotosite])
        siteseq.add_children([sitefound, invcarrying, gotosite])
        # siteseq.add_children([invcarrying])
        # Move to hub (only while carrying)
        hubseq = py_trees.composites.Sequence('HubSeq')
        gotohub = MoveTowards('MoveTowards_Hub')
        gotohub.setup(0, self, 'Hub')
        # hubseq.add_children([neighobst, neightrap, iscarrying, gotohub])
        hubseq.add_children([iscarrying, gotohub])
        # NOTE(review): sitenotfound is constructed but never attached
        # to the tree — only the commented-out randwalk used it.
        sitenotfound = py_trees.meta.inverter(IsVisitedBefore)(
            'IsVisitedBefore_Sites')
        sitenotfound.setup(0, self, 'Sites')
        explore = Explore('Explore')
        explore.setup(0, self)
        # randwalk = py_trees.composites.Sequence('Randwalk')
        # randwalk.add_children([neighobst, neightrap, sitenotfound, explore])
        # randwalk.add_children([sitenotfound, explore])
        locoselect = py_trees.composites.Selector('Move')
        locoselect.add_children([siteseq, hubseq, explore])
        # locoselect.add_children([hubseq, randwalk])
        # Root selector: priority order is drop > carry > locomotion
        select = py_trees.composites.Selector('Main')
        select.add_children([dseq, cseq, locoselect])
        self.behaviour_tree = py_trees.trees.BehaviourTree(select)
        # py_trees.display.render_dot_tree(
        #     self.behaviour_tree.root, name=model.pname + '/forgehc')
        # py_trees.logging.level = py_trees.logging.Level.DEBUG
        # py_trees.display.print_ascii_tree(select)
    def step(self):
        """Tick the behavior tree once per simulation step."""
        # self.bt.behaviour_tree.tick()
        self.behaviour_tree.tick()
    def advance(self):
        """Second phase of a staged activation; nothing to do here."""
        pass
class SimForgAgentWith(Agent):
    """Simulation agent (variant using the `New*` movement behaviors).
    A minimalistic, hand-built behavior tree for a swarm agent
    implementing carry and drop (foraging) behavior. Differs from
    SimForgAgentWithout only in using NewMoveTowards/NewExplore for
    locomotion.
    """
    def __init__(self, name, model, xmlstring=None):
        # name: agent id; model: simulation model (RNG, grid, hub);
        # xmlstring: optional phenotype kept for interface parity with
        # evolved agents — the tree below is built by hand instead.
        super().__init__(name, model)
        self.location = ()
        # Random initial heading in [0, 2*pi)
        self.direction = model.random.rand() * (2 * np.pi)
        self.speed = 2
        self.radius = 3
        self.moveable = True
        self.shared_content = dict()
        self.carryable = False
        self.passable = True
        # Define a BTContruct object
        self.bt = BTConstruct(None, self)
        # Minimal stand-in for a GE individual so this agent exposes the
        # same `individual[0].phenotype` interface as evolved agents.
        class DummyIndividual:
            def __init__(self):
                self.phenotype = None
        dummyind = DummyIndividual()
        self.individual = [dummyind]
        self.individual[0].phenotype = xmlstring
        # self.bt.xmlstring = xmlstring
        # self.bt.construct()
        #
        # neighobst = NeighbourObjects('NeighbourObjects_Obstacles')
        # neighobst.setup(0, self, 'Obstacles')
        # Drop branch: drop carried food when next to the hub
        dseq = py_trees.composites.Sequence('DSequence')
        iscarrying = IsCarrying('IsCarrying_Food')
        iscarrying.setup(0, self, 'Food')
        neighhub = NeighbourObjects('NeighbourObjects_Hub')
        neighhub.setup(0, self, 'Hub')
        # NOTE(review): notneighhub is constructed but never attached
        # to the tree below — looks like dead code; confirm intent.
        notneighhub = py_trees.meta.inverter(NeighbourObjects)(
            'NeighbourObjects_Hub')
        notneighhub.setup(0, self, 'Hub')
        drop = CompositeDrop('CompositeDrop_Food')
        drop.setup(0, self, 'Food')
        dseq.add_children([neighhub, drop])
        # ## Obstacles and Trap
        # neighobs = NeighbourObjects('NeighbourObjects_Obs')
        # neighobs.setup(0, self, 'Obstacle')
        # neightrap = NeighbourObjects('NeighbourObjects_Trap')
        # neightrap.setup(0, self, 'Traps')
        # avoidobstacle = AvoidSObjects('Obstacle')
        # avoidobstacle.setup(0, agent)
        # avoidtrap = AvoidSObjects('Trap')
        # avoidtrap.setup(0, agent, item='Traps')
        # otseq = py_trees.composites.Sequence('OTSequence')
        # otseq.add_children([neighobs, avoidobstacle, neightrap, avoidtrap])
        # Carry branch: pick up food found at a site when not carrying
        cseq = py_trees.composites.Sequence('CSequence')
        neighsite = NeighbourObjects('NeighbourObjects_Sites')
        neighsite.setup(0, self, 'Sites')
        neighfood = NeighbourObjects('NeighbourObjects_Food')
        neighfood.setup(0, self, 'Food')
        invcarrying = py_trees.meta.inverter(IsCarrying)('IsCarrying_Food')
        invcarrying.setup(0, self, 'Food')
        carry = CompositeSingleCarry('CompositeSingleCarry_Food')
        carry.setup(0, self, 'Food')
        cseq.add_children([neighsite, neighfood, invcarrying, carry])
        # Locomotion branch
        # Move to site (only when a site is known and not carrying)
        siteseq = py_trees.composites.Sequence('SiteSeq')
        sitefound = IsVisitedBefore('IsVisitedBefore_Sites')
        sitefound.setup(0, self, 'Sites')
        gotosite = NewMoveTowards('NewMoveTowards_Sites')
        gotosite.setup(0, self, 'Sites')
        # siteseq.add_children([neighobst, neightrap, sitefound, invcarrying, gotosite])
        siteseq.add_children([sitefound, invcarrying, gotosite])
        # siteseq.add_children([invcarrying])
        # Move to hub (only while carrying)
        hubseq = py_trees.composites.Sequence('HubSeq')
        gotohub = NewMoveTowards('NewMoveTowards_Hub')
        gotohub.setup(0, self, 'Hub')
        # hubseq.add_children([neighobst, neightrap, iscarrying, gotohub])
        hubseq.add_children([iscarrying, gotohub])
        # NOTE(review): sitenotfound is constructed but never attached
        # to the tree — only the commented-out randwalk used it.
        sitenotfound = py_trees.meta.inverter(IsVisitedBefore)(
            'IsVisitedBefore_Sites')
        sitenotfound.setup(0, self, 'Sites')
        explore = NewExplore('NewExplore')
        explore.setup(0, self)
        # randwalk = py_trees.composites.Sequence('Randwalk')
        # randwalk.add_children([neighobst, neightrap, sitenotfound, explore])
        # randwalk.add_children([sitenotfound, avoidt, explore])
        locoselect = py_trees.composites.Selector('Move')
        locoselect.add_children([siteseq, hubseq, explore])
        # locoselect.add_children([hubseq, randwalk])
        # Root selector: priority order is drop > carry > locomotion
        select = py_trees.composites.Selector('Main')
        select.add_children([dseq, cseq, locoselect])
        self.behaviour_tree = py_trees.trees.BehaviourTree(select)
        # py_trees.display.render_dot_tree(
        #     self.behaviour_tree.root, name=model.pname + '/forgehc')
        # py_trees.logging.level = py_trees.logging.Level.DEBUG
        # py_trees.display.print_ascii_tree(select)
    def step(self):
        """Tick the behavior tree once per simulation step."""
        # self.bt.behaviour_tree.tick()
        self.behaviour_tree.tick()
    def advance(self):
        """Second phase of a staged activation; nothing to do here."""
        pass
class EvolAgent(Agent):
"""An minimalistic swarm agent."""
    def __init__(self, name, model):
        """Initialize the agent.

        Sets movement/physics attributes, fitness bookkeeping, and
        builds the initial behavior tree from a freshly initialised
        Grammatical Evolution individual.
        """
        super().__init__(name, model)
        self.location = ()
        # Mapping phenotype string -> fitness, refreshed each step.
        self.phenotypes = dict()
        self.direction = model.random.rand() * (2 * np.pi)
        self.speed = 2
        self.radius = 3
        self.results = "file"  # This can take 2 values. db or file
        # self.exchange_time = model.random.randint(2, 4)
        # This doesn't help. Maybe only perform genetic operations when
        # an agents meet 10% of its total population
        # """
        # Number of stored genomes required before genetic operations run.
        self.operation_threshold = 2
        self.genome_storage = []
        # Define a BTContruct object
        self.bt = BTConstruct(None, self)
        # self.blackboard = Blackboard()
        # self.blackboard.shared_content = dict()
        self.shared_content = dict()
        # self.shared_content = dict(
        self.carryable = False
        self.passable = True
        # Decay weight for the delayed reward used in overall_fitness;
        # grows with step_count in step().
        self.beta = 0.0001
        self.food_collected = 0
        # Grammatical Evolution part
        from ponyge.algorithm.parameters import Parameters
        parameter = Parameters()
        parameter_list = ['--parameters', '../..,' + model.parm]
        # Comment when different results is desired.
        # Else set this for testing purpose
        # parameter.params['RANDOM_SEED'] = name
        # # np.random.randint(1, 99999999)
        parameter.params['POPULATION_SIZE'] = self.operation_threshold // 2
        parameter.set_params(parameter_list)
        self.parameter = parameter
        # Create and evaluate a single initial GE individual, then build
        # this agent's behavior tree from its phenotype.
        individual = initialisation(self.parameter, 1)
        individual = evaluate_fitness(individual, self.parameter)
        self.individual = individual
        self.bt.xmlstring = self.individual[0].phenotype
        self.bt.construct()
        self.diversity_fitness = self.individual[0].fitness
        self.delayed_reward = 0
        # Location history
        self.location_history = set()
        self.timestamp = 0
        self.step_count = 0
        self.fitness_name = True
def get_food_in_hub(self):
"""Return food in the hub."""
grid = self.model.grid
hub_loc = self.model.hub.location
neighbours = grid.get_neighborhood(hub_loc, 10)
food_objects = grid.get_objects_from_list_of_grid('Food', neighbours)
agent_food_objects = []
for food in food_objects:
if (
food.agent_name == self.name and
food.phenotype == self.individual[0].phenotype):
agent_food_objects.append(food)
return agent_food_objects
def detect_food_carrying(self):
"""Detect if the agent is carrying food."""
if len(self.attached_objects) > 0:
print('Food carying', self.name, self.attached_objects)
output = py_trees.display.ascii_tree(self.bt.behaviour_tree.root)
print(output)
def store_genome(self, cellmates):
"""Store the genome from neighbours."""
# cellmates.remove(self)
# self.genome_storage += [agent.individual[0] for agent in cellmates]
for agent in cellmates:
if agent.food_collected > 0:
self.genome_storage += agent.individual
elif len(agent.attached_objects) > 0:
self.genome_storage += agent.individual
elif agent.exploration_fitness() > 10:
self.genome_storage += agent.individual
def exchange_chromosome(self,):
"""Perform genetic operations."""
# print('from exchange', self.name)
individuals = self.genome_storage
parents = selection(self.parameter, individuals)
cross_pop = crossover(self.parameter, parents)
new_pop = mutation(self.parameter, cross_pop)
new_pop = evaluate_fitness(new_pop, self.parameter)
individuals = replacement(self.parameter, new_pop, individuals)
individuals.sort(reverse=False)
self.individual = [individuals[0]]
self.individual[0].fitness = 0
self.genome_storage = []
    def genetic_step(self):
        """Additional procedures called after genetic step."""
        # Remember the pre-exchange fitness as the delayed reward, then
        # replace this agent's genome via the GE operators.
        self.delayed_reward = self.individual[0].fitness
        self.exchange_chromosome()
        # Rebuild the behaviour tree from the new phenotype.
        self.bt.xmlstring = self.individual[0].phenotype
        self.bt.construct()
        # Reset per-generation bookkeeping.
        self.food_collected = 0
        self.location_history = set()
        self.timestamp = 0
        self.diversity_fitness = self.individual[0].fitness
def overall_fitness(self):
"""Compute complete fitness.
Goals are represented by objective function. We use combination of
objective function to define overall fitness of the agents
performance.
"""
# Use a decyaing function to generate fitness
# Use two step decaying function
# First block gives importance to exploration and when as soon
# food has been found, the next block will focus on dropping
# the food on hub
self.individual[0].fitness = (1 - self.beta) * self.delayed_reward \
+ self.exploration_fitness() + self.carrying_fitness() \
+ self.food_collected
def carrying_fitness(self):
"""Compute carrying fitness.
This fitness supports the carrying behavior of
the agents.
"""
return len(self.attached_objects) * (self.timestamp)
def exploration_fitness(self):
"""Compute the exploration fitness."""
# Use exploration space as fitness values
return len(self.location_history) - 1
# New Agent methods for behavior based robotics
    def sense(self):
        """Sense included in behavior tree."""
        # Perception happens inside the behaviour-tree nodes, so this
        # hook is intentionally a no-op.
        pass
    def plan(self):
        """Plan not required for now."""
        # Placeholder to satisfy the sense-plan-act agent interface.
        pass
def step(self):
"""Agent action at a single time step."""
# py_trees.logging.level = py_trees.logging.Level.DEBUG
# output = py_trees.display.ascii_tree(self.bt.behaviour_tree.root)
# Couting variables
self.timestamp += 1
self.step_count += 1
# Increase beta
self.beta = self.step_count / self.model.iter
self.location_history.add(self.location)
# Compute the behavior tree
self.bt.behaviour_tree.tick()
# Find the no.of food collected from the BT execution
self.food_collected = len(self.get_food_in_hub())
# Computes overall fitness using Beta function
self.overall_fitness()
self.phenotypes = dict()
self.phenotypes[self.individual[0].phenotype] = (
self.individual[0].fitness)
cellmates = self.model.grid.get_objects_from_grid(
type(self).__name__, self.location)
# Create a results instance and save it to a file
"""
self.results | |
<filename>egret/model_library/transmission/tx_calc.py<gh_stars>0
# ___________________________________________________________________________
#
# EGRET: Electrical Grid Research and Engineering Tools
# Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
"""
This module collects some helper functions useful for performing
different computations for transmission models
"""
import math
import weakref
import collections.abc as abc
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg
import networkx as nx
from math import cos, sin
from egret.model_library.defn import BasePointType, ApproximationType
from egret.common.log import logger
def calculate_conductance(branch):
    """Series conductance g = r / (r**2 + x**2) of a branch record."""
    r = branch['resistance']
    x = branch['reactance']
    return r / (r**2 + x**2)
def calculate_susceptance(branch):
    """Series susceptance b = -x / (r**2 + x**2) of a branch record."""
    r = branch['resistance']
    x = branch['reactance']
    return -x / (r**2 + x**2)
def calculate_y_matrix_from_branch(branch):
    """Build the branch admittance dict from a branch record.

    Transformer branches use their tap ratio and phase shift; plain
    lines default to tau=1, shift=0.
    """
    tau, shift = 1.0, 0.0
    if branch['branch_type'] == 'transformer':
        tau = branch['transformer_tap_ratio']
        shift = branch['transformer_phase_shift']
    return calculate_y_matrix(branch['resistance'],
                              branch['reactance'],
                              branch['charging_susceptance'],
                              tau, shift)
def calculate_y_matrix(rs, xs, bc, tau, shift):
    """
    Compute the y matrix from various branch properties.

    Parameters
    ----------
    rs : float
        Branch resistance
    xs : float
        Branch reactance
    bc : float
        Branch charging susceptance
    tau : float
        Branch transformer tap ratio
    shift : float
        Branch transformer phase shift (degrees)

    Returns
    -------
    dict : maps (current, voltage) label pairs to the y-matrix entry,
        e.g. y[('ifr', 'vfr')], for currents ifr/ifj/itr/itj and
        voltages vfr/vfj/vtr/vtj.
    """
    bc = bc/2
    # Tap ratio resolved into rectangular components via the shift angle.
    tr = tau * math.cos(math.radians(shift))
    tj = tau * math.sin(math.radians(shift))
    mag = rs**2 + xs**2
    # Eight distinct coefficients; the 16 entries are built from them
    # by sign symmetry.
    c1 = rs/(tau**2*mag)
    c2 = (1/tau**2) * (xs/mag - bc)
    c3 = (-rs*tr - xs*tj)/(tau**2 * mag)
    c4 = (rs*tj - xs*tr)/(tau**2 * mag)
    c5 = rs/mag
    c6 = xs/mag - bc
    c7 = (xs*tj - rs*tr)/(tau**2 * mag)
    c8 = (-rs*tj - xs*tr)/(tau**2 * mag)
    return {
        ('ifr', 'vfr'): c1, ('ifr', 'vfj'): c2,
        ('ifr', 'vtr'): c3, ('ifr', 'vtj'): c4,
        ('ifj', 'vfr'): -c2, ('ifj', 'vfj'): c1,
        ('ifj', 'vtr'): -c4, ('ifj', 'vtj'): c3,
        ('itr', 'vfr'): c7, ('itr', 'vfj'): c8,
        ('itr', 'vtr'): c5, ('itr', 'vtj'): c6,
        ('itj', 'vfr'): -c8, ('itj', 'vfj'): c7,
        ('itj', 'vtr'): -c6, ('itj', 'vtj'): c5,
    }
def calculate_ifr(vfr, vfj, vtr, vtj, y_matrix):
    """
    Compute ifr from voltages and the y_matrix (computed
    from the branch properties using :py:meth:`calculate_branch_y_matrix`)
    """
    volts = {'vfr': vfr, 'vfj': vfj, 'vtr': vtr, 'vtj': vtj}
    return sum(y_matrix['ifr', name] * value for name, value in volts.items())
def calculate_ifj(vfr, vfj, vtr, vtj, y_matrix):
    """
    Compute ifj from voltages and the y_matrix (computed
    from the branch properties using :py:meth:`calculate_branch_y_matrix`)
    """
    volts = {'vfr': vfr, 'vfj': vfj, 'vtr': vtr, 'vtj': vtj}
    return sum(y_matrix['ifj', name] * value for name, value in volts.items())
def calculate_itr(vfr, vfj, vtr, vtj, y_matrix):
    """
    Compute itr from voltages and the y_matrix (computed
    from the branch properties using :py:meth:`calculate_branch_y_matrix`)
    """
    volts = {'vfr': vfr, 'vfj': vfj, 'vtr': vtr, 'vtj': vtj}
    return sum(y_matrix['itr', name] * value for name, value in volts.items())
def calculate_itj(vfr, vfj, vtr, vtj, y_matrix):
    """
    Compute itj from voltages and the y_matrix (computed
    from the branch properties using :py:meth:`calculate_branch_y_matrix`)
    """
    volts = {'vfr': vfr, 'vfj': vfj, 'vtr': vtr, 'vtj': vtj}
    return sum(y_matrix['itj', name] * value for name, value in volts.items())
def calculate_ir(p, q, vr, vj):
    """
    Compute ir (real current) from power flows and voltages
    """
    vmag2 = vj**2 + vr**2
    return (q*vj+p*vr) / vmag2
def calculate_ij(p, q, vr, vj):
    """
    Compute ij (imaginary current) from power flows and voltages
    """
    vmag2 = vj**2 + vr**2
    return (p*vj-q*vr) / vmag2
def calculate_p(ir, ij, vr, vj):
    """
    Compute real power flow P = vr*ir + vj*ij from currents and voltages
    """
    return ir * vr + ij * vj
def calculate_q(ir, ij, vr, vj):
    """
    Compute reactive power flow Q = vj*ir - vr*ij from currents and voltages
    """
    return ir * vj - ij * vr
def calculate_vr_from_vm_va(vm, va):
    """
    Compute the value of vr (real voltage part) from vm and va.

    Parameters
    ----------
    vm : float
        The value of voltage magnitude (per)
    va : float
        The value of voltage angle (degrees)

    Returns
    -------
    float : the value of vr, or None if either vm or va (or both) is None
    """
    if vm is None or va is None:
        return None
    return vm * math.cos(va*math.pi/180)
def calculate_vj_from_vm_va(vm, va):
    """
    Compute the value of vj (imaginary voltage part) from vm and va.

    Parameters
    ----------
    vm : float
        The value of voltage magnitude (per)
    va : float
        The value of voltage angle (degrees)

    Returns
    -------
    float : the value of vj, or None if either vm or va (or both) is None
    """
    if vm is None or va is None:
        return None
    return vm * math.sin(va*math.pi/180)
def calculate_vm_from_vj_vr(vj,vr):
    """
    Compute the voltage magnitude vm from vj and vr.

    Parameters
    ----------
    vj : float
        The value of the imaginary part of the voltage phasor (per)
    vr : float
        The value of the real part of the voltage phasor (per)

    Returns
    -------
    float : the voltage magnitude, or None if either vj or vr
        (or both) is None
    """
    if vj is None or vr is None:
        return None
    return math.sqrt(vj**2 + vr**2)
def calculate_va_from_vj_vr(vj, vr):
    """
    Compute the voltage angle va (degrees) from vj and vr.

    Uses math.atan2 instead of atan(vj/vr): the previous form raised
    ZeroDivisionError for vr == 0 and returned the wrong quadrant for
    vr < 0; atan2 handles both correctly and is identical for vr > 0.

    Parameters
    ----------
    vj : float
        The value of the imaginary part of the voltage phasor (per)
    vr : float
        The value of the real part of the voltage phasor (per)

    Returns
    -------
    float : the voltage angle va in degrees or None if
        either vj or vr (or both) is None
    """
    if vj is None or vr is None:
        return None
    return math.degrees(math.atan2(vj, vr))
def _get_susceptance(branch, approximation_type):
    """Branch susceptance used by the PTDF approximations.

    Transformer branches are scaled by their tap ratio; the lossless
    PTDF form uses -1/(tau*x) while PTDF_LOSSES uses the full series
    susceptance divided by tau.
    """
    if branch['branch_type'] == 'transformer':
        tau = branch['transformer_tap_ratio']
    else:
        tau = 1.
    if approximation_type == ApproximationType.PTDF:
        return -1./(tau*branch['reactance'])
    if approximation_type == ApproximationType.PTDF_LOSSES:
        return calculate_susceptance(branch)/tau
    raise RuntimeError("Could not find appropriate susceptance value")
def _calculate_J11(branches,buses,index_set_branch,index_set_bus,mapping_bus_to_idx,base_point=BasePointType.FLATSTART,approximation_type=ApproximationType.PTDF):
    """
    Compute the power flow Jacobian for partial derivative of real power flow to voltage angle
    (branches x buses, returned as CSC).
    """
    n_bus = len(index_set_bus)
    n_branch = len(index_set_branch)
    rows, cols, vals = [], [], []
    for idx_row, branch_name in enumerate(index_set_branch):
        branch = branches[branch_name]
        from_bus = branch['from_bus']
        to_bus = branch['to_bus']
        b = _get_susceptance(branch, approximation_type)
        if base_point == BasePointType.FLATSTART:
            entry = -b
        elif base_point == BasePointType.SOLUTION:  # TODO: check that we are loading the correct values (or results)
            vn = buses[from_bus]['vm']
            vm = buses[to_bus]['vm']
            tn = buses[from_bus]['va']
            tm = buses[to_bus]['va']
            entry = -b * vn * vm * cos(tn - tm)
        # +entry in the from-bus column, -entry in the to-bus column.
        rows.extend((idx_row, idx_row))
        cols.extend((mapping_bus_to_idx[from_bus], mapping_bus_to_idx[to_bus]))
        vals.extend((entry, -entry))
    J11 = sp.coo_matrix((vals, (rows, cols)), shape=(n_branch, n_bus))
    return J11.tocsc()
def _calculate_Bd(branches,index_set_branch,base_point=BasePointType.FLATSTART,approximation_type=ApproximationType.PTDF,buses=None):
    """
    Compute the diagonal matrix of branch susceptances Bd (branch x branch, CSC).

    Parameters
    ----------
    branches : dict
        Branch records keyed by branch name.
    index_set_branch : iterable
        Ordered branch names defining the row/column order.
    base_point : BasePointType
        FLATSTART uses the raw susceptance; SOLUTION scales it by the
        solved voltage magnitudes and angle difference.
    approximation_type : ApproximationType
        Passed through to _get_susceptance.
    buses : dict, optional
        Bus records; required when base_point == BasePointType.SOLUTION.
        (Bug fix: the original referenced an undefined name ``buses``
        on that path, which raised NameError.)
    """
    _len_branch = len(index_set_branch)
    data = []
    row = []
    col = []
    for idx_row, branch_name in enumerate(index_set_branch):
        branch = branches[branch_name]
        from_bus = branch['from_bus']
        to_bus = branch['to_bus']
        b = _get_susceptance(branch, approximation_type)
        if base_point == BasePointType.FLATSTART:
            val = b
        elif base_point == BasePointType.SOLUTION:  # TODO: check that we are loading the correct values (or results)
            if buses is None:
                raise ValueError("buses must be provided when base_point is BasePointType.SOLUTION")
            vn = buses[from_bus]['vm']
            vm = buses[to_bus]['vm']
            tn = buses[from_bus]['va']
            tm = buses[to_bus]['va']
            val = b * vn * vm * cos(tn - tm)
        # Diagonal entry for this branch.
        data.append(val)
        row.append(idx_row)
        col.append(idx_row)
    Bd = sp.coo_matrix( (data, (row,col)), shape=(_len_branch, _len_branch))
    return Bd.tocsc()
def _calculate_L11(branches,buses,index_set_branch,index_set_bus,mapping_bus_to_idx,base_point=BasePointType.FLATSTART):
"""
Compute the power flow Jacobian for partial derivative | |
<gh_stars>0
from __future__ import division
from builtins import range
from future.utils import with_metaclass
import numpy as np
from numpy import newaxis as na
import abc
import copy
from scipy.special import logsumexp
from pyhsmm.util.stats import sample_discrete
try:
from pyhsmm.util.cstats import sample_markov, count_transitions
except ImportError:
from pyhsmm.util.stats import sample_markov, count_transitions
from pyhsmm.util.general import rle
######################
# Mixins and bases #
######################
class _StatesBase(with_metaclass(abc.ABCMeta, object)):
    """Abstract base for per-sequence state containers.

    Holds one observation sequence, its discrete state sequence, and
    cached per-state observation log-likelihoods; model parameters are
    read through the shared ``model`` object.
    """
    def __init__(
        self,
        model,
        T=None,
        data=None,
        stateseq=None,
        generate=True,
        initialize_from_prior=True,
        fixed_stateseq=False,
    ):
        self.model = model
        # T defaults to the number of observation rows when data is given.
        self.T = T if T is not None else data.shape[0]
        self.data = data
        self.clear_caches()
        self.fixed_stateseq = fixed_stateseq
        if fixed_stateseq:
            assert (
                stateseq is not None
            ), "fixed_stateseq requires a stateseq to be supplied"
        if stateseq is not None:
            self.stateseq = np.array(stateseq, dtype=np.int32)
        elif generate:
            # Either resample conditioned on the data or draw from the prior.
            if data is not None and not initialize_from_prior:
                self.resample()
            else:
                self.generate_states()
    def copy_sample(self, newmodel):
        """Shallow-copy this object, re-pointing it at *newmodel*."""
        new = copy.copy(self)
        new.clear_caches()  # saves space, though may recompute later for likelihoods
        new.model = newmodel
        new.stateseq = self.stateseq.copy()
        return new
    _kwargs = {}  # used in subclasses for joblib stuff
    ### model properties
    @property
    def obs_distns(self):
        return self.model.obs_distns
    @property
    def trans_matrix(self):
        return self.model.trans_distn.trans_matrix
    @property
    def pi_0(self):
        return self.model.init_state_distn.pi_0
    @property
    def num_states(self):
        return self.model.num_states
    ### convenience properties
    @property
    def stateseq_norep(self):
        # Run-length-encoded state labels (consecutive repeats collapsed).
        return rle(self.stateseq)[0]
    @property
    def durations(self):
        # Length of each run in the state sequence.
        return rle(self.stateseq)[1]
    ### generation
    @abc.abstractmethod
    def generate_states(self):
        pass
    ### messages and likelihoods
    # some cached things depends on model parameters, so caches should be
    # cleared when the model changes (e.g. when parameters are updated)
    def clear_caches(self):
        self._aBl = self._mf_aBl = None
        self._normalizer = None
    @property
    def aBl(self):
        """(T, num_states) matrix of per-state observation log-likelihoods,
        computed lazily and cached until clear_caches()."""
        if self._aBl is None:
            data = self.data
            aBl = self._aBl = np.empty((data.shape[0], self.num_states))
            for idx, obs_distn in enumerate(self.obs_distns):
                aBl[:, idx] = obs_distn.log_likelihood(data).ravel()
            # Rows containing NaNs contribute zero log-likelihood.
            aBl[np.isnan(aBl).any(1)] = 0.0
        return self._aBl
    @abc.abstractmethod
    def log_likelihood(self):
        pass
class _SeparateTransMixin(object):
    """Mixin routing transition/initial distributions through a group id.

    Sequences in different groups use different transition models held
    in ``model.trans_distns`` / ``model.init_state_distns``.
    """
    def __init__(self, group_id, **kwargs):
        assert not isinstance(group_id, np.ndarray)
        self.group_id = group_id
        self._kwargs = dict(self._kwargs, group_id=group_id)
        super(_SeparateTransMixin, self).__init__(**kwargs)
        # access these to be sure they're instantiated
        self.trans_matrix
        self.pi_0
    @property
    def trans_matrix(self):
        return self.model.trans_distns[self.group_id].trans_matrix
    @property
    def pi_0(self):
        return self.model.init_state_distns[self.group_id].pi_0
    @property
    def mf_trans_matrix(self):
        # Floored at 1e-3, presumably for numerical stability of the
        # mean-field messages — TODO confirm.
        return np.maximum(
            self.model.trans_distns[self.group_id].exp_expected_log_trans_matrix, 1e-3
        )
    @property
    def mf_pi_0(self):
        return self.model.init_state_distns[
            self.group_id
        ].exp_expected_log_init_state_distn
class _PossibleChangepointsMixin(object):
    """Mixin restricting state changes to fixed changepoint segments.

    The sequence is handled at "block" resolution: each (start, stop)
    segment shares one state, and per-block likelihoods are sums of the
    per-frame likelihoods inside the segment.
    """
    def __init__(self, model, data, changepoints=None, **kwargs):
        # Default: every frame is its own segment (no restriction).
        changepoints = (
            changepoints
            if changepoints is not None
            else [(t, t + 1) for t in range(data.shape[0])]
        )
        self.changepoints = changepoints
        self.segmentstarts = np.array(
            [start for start, stop in changepoints], dtype=np.int32
        )
        self.segmentlens = np.array(
            [stop - start for start, stop in changepoints], dtype=np.int32
        )
        # Segments must be non-empty, cover the data, and span it exactly.
        assert all(l > 0 for l in self.segmentlens)
        assert sum(self.segmentlens) == data.shape[0]
        assert (
            self.changepoints[0][0] == 0 and self.changepoints[-1][-1] == data.shape[0]
        )
        self._kwargs = dict(self._kwargs, changepoints=changepoints)
        # T passed up the MRO is the number of blocks, not frames.
        super(_PossibleChangepointsMixin, self).__init__(
            model, T=len(changepoints), data=data, **kwargs
        )
    def clear_caches(self):
        self._aBBl = self._mf_aBBl = None
        self._stateseq = None
        super(_PossibleChangepointsMixin, self).clear_caches()
    @property
    def Tblock(self):
        # Number of changepoint segments.
        return len(self.changepoints)
    @property
    def Tfull(self):
        # Number of raw observation frames.
        return self.data.shape[0]
    @property
    def stateseq(self):
        # Frame-level view: repeat each block state over its segment.
        if self._stateseq is None:
            self._stateseq = self.blockstateseq.repeat(self.segmentlens)
        return self._stateseq
    @stateseq.setter
    def stateseq(self, stateseq):
        # Accepts either a block-level or a frame-level sequence.
        assert len(stateseq) == self.Tblock or len(stateseq) == self.Tfull
        if len(stateseq) == self.Tblock:
            self.blockstateseq = stateseq
        else:
            self.blockstateseq = stateseq[self.segmentstarts]
        self._stateseq = None
    def _expected_states(self, *args, **kwargs):
        expected_states = super(_PossibleChangepointsMixin, self)._expected_states(
            *args, **kwargs
        )
        # Expand block-level expectations back to frame resolution.
        return expected_states.repeat(self.segmentlens, axis=0)
    @property
    def aBl(self):
        # Block likelihood = sum of frame likelihoods within the segment.
        if self._aBBl is None:
            aBl = super(_PossibleChangepointsMixin, self).aBl
            aBBl = self._aBBl = np.empty((self.Tblock, self.num_states))
            for idx, (start, stop) in enumerate(self.changepoints):
                aBBl[idx] = aBl[start:stop].sum(0)
        return self._aBBl
    @property
    def mf_aBl(self):
        # Mean-field analogue of aBl at block resolution.
        if self._mf_aBBl is None:
            aBl = super(_PossibleChangepointsMixin, self).mf_aBl
            aBBl = self._mf_aBBl = np.empty((self.Tblock, self.num_states))
            for idx, (start, stop) in enumerate(self.changepoints):
                aBBl[idx] = aBl[start:stop].sum(0)
        return self._mf_aBBl
    def plot(self, *args, **kwargs):
        from matplotlib import pyplot as plt
        super(_PossibleChangepointsMixin, self).plot(*args, **kwargs)
        plt.xlim((0, self.Tfull))
# TODO do generate() and generate_states() actually work?
####################
# States classes #
####################
class HMMStatesPython(_StatesBase):
### generation
def generate_states(self):
    """Sample a length-T state sequence from the Markov chain prior.

    Draws the first state from pi_0, then each subsequent state from the
    transition-matrix row of the current state. Stores the sample on the
    instance and returns it.
    """
    A = self.trans_matrix
    stateseq = np.zeros(self.T, dtype=np.int32)
    distn = self.pi_0  # distribution over the first state
    for t in range(self.T):
        stateseq[t] = sample_discrete(distn)
        distn = A[stateseq[t]]  # condition on the state just drawn
    self.stateseq = stateseq
    return stateseq
### message passing
def log_likelihood(self):
    """Return the log marginal likelihood of the data, computing it if needed."""
    if self._normalizer is None:
        self.messages_forwards_normalized()  # NOTE: sets self._normalizer
    return self._normalizer
def _messages_log(self, trans_matrix, init_state_distn, log_likelihoods):
    """Run both log-space message passes; return (forward, backward) messages."""
    forward = self._messages_forwards_log(
        trans_matrix, init_state_distn, log_likelihoods
    )
    backward = self._messages_backwards_log(trans_matrix, log_likelihoods)
    return forward, backward
def messages_log(self):
    """Run both log-space message passes using this instance's parameters."""
    return self._messages_log(self.trans_matrix, self.pi_0, self.aBl)
@staticmethod
def _messages_backwards_log(trans_matrix, log_likelihoods):
errs = np.seterr(over="ignore")
Al = np.log(trans_matrix)
aBl = log_likelihoods
betal = np.zeros_like(aBl)
for t in range(betal.shape[0] - 2, -1, -1):
betal[t] = logsumexp(Al + betal[t + 1] + aBl[t + 1], axis=1)
np.seterr(**errs)
return betal
def messages_backwards_log(self):
    """Backward log messages for this instance; also caches the log normalizer."""
    betal = self._messages_backwards_log(self.trans_matrix, self.aBl)
    assert not np.isnan(betal).any()
    # log p(data) marginalizes pi_0 against the first likelihoods and messages.
    self._normalizer = logsumexp(np.log(self.pi_0) + betal[0] + self.aBl[0])
    return betal
@staticmethod
def _messages_forwards_log(trans_matrix, init_state_distn, log_likelihoods):
errs = np.seterr(over="ignore")
Al = np.log(trans_matrix)
aBl = log_likelihoods
alphal = np.zeros_like(aBl)
alphal[0] = np.log(init_state_distn) + aBl[0]
for t in range(alphal.shape[0] - 1):
alphal[t + 1] = logsumexp(alphal[t] + Al.T, axis=1) + aBl[t + 1]
np.seterr(**errs)
return alphal
def messages_forwards_log(self):
    """Forward log messages for this instance; also caches the log normalizer."""
    alphal = self._messages_forwards_log(self.trans_matrix, self.pi_0, self.aBl)
    assert not np.any(np.isnan(alphal))
    # log p(data) marginalizes the final forward message over states.
    self._normalizer = logsumexp(alphal[-1])
    return alphal
@staticmethod
def _messages_backwards_normalized(trans_matrix, init_state_distn, log_likelihoods):
aBl = log_likelihoods
A = trans_matrix
T = aBl.shape[0]
betan = np.empty_like(aBl)
logtot = 0.0
betan[-1] = 1.0
for t in range(T - 2, -1, -1):
cmax = aBl[t + 1].max()
betan[t] = A.dot(betan[t + 1] * np.exp(aBl[t + 1] - cmax))
norm = betan[t].sum()
logtot += cmax + np.log(norm)
betan[t] /= norm
cmax = aBl[0].max()
logtot += cmax + np.log(
(np.exp(aBl[0] - cmax) * init_state_distn * betan[0]).sum()
)
return betan, logtot
def messages_backwards_normalized(self):
    """Normalized backward messages for this instance; caches the normalizer."""
    betan, self._normalizer = self._messages_backwards_normalized(
        self.trans_matrix, self.pi_0, self.aBl
    )
    return betan
@staticmethod
def _messages_forwards_normalized(trans_matrix, init_state_distn, log_likelihoods):
aBl = log_likelihoods
A = trans_matrix
T = aBl.shape[0]
alphan = np.empty_like(aBl)
logtot = 0.0
in_potential = init_state_distn
for t in range(T):
cmax = aBl[t].max()
alphan[t] = in_potential * np.exp(aBl[t] - cmax)
norm = alphan[t].sum()
if norm != 0:
alphan[t] /= norm
logtot += np.log(norm) + cmax
else:
alphan[t:] = 0.0
return alphan, -np.inf
in_potential = alphan[t].dot(A)
return alphan, logtot
def messages_forwards_normalized(self):
    """Normalized forward messages for this instance; caches the normalizer."""
    alphan, self._normalizer = self._messages_forwards_normalized(
        self.trans_matrix, self.pi_0, self.aBl
    )
    return alphan
### Gibbs sampling
def resample_log(self):
    """Gibbs step using log-space messages: backward pass, then forward sampling."""
    betal = self.messages_backwards_log()
    self.sample_forwards_log(betal)
def resample_normalized(self):
    """Gibbs step using normalized messages: forward pass, then backward sampling."""
    alphan = self.messages_forwards_normalized()
    self.sample_backwards_normalized(alphan)
def resample(self):
    """Resample the state sequence unless it is pinned via fixed_stateseq."""
    if not self.fixed_stateseq:
        return self.resample_normalized()
@staticmethod
def _sample_forwards_log(betal, trans_matrix, init_state_distn, log_likelihoods):
    """Sample a state sequence forwards, conditioning on backward log messages.

    At each step the sampling weights are proportional to
    prior(state) * exp(betal[t] + aBl[t]); when every weight underflows to
    -inf the step falls back to sampling from the prior alone.
    """
    A = trans_matrix
    aBl = log_likelihoods
    T = aBl.shape[0]
    stateseq = np.empty(T, dtype=np.int32)
    prior = init_state_distn
    for t in range(T):
        logdomain = betal[t] + aBl[t]
        logdomain[prior == 0] = -np.inf  # zero-prior states are unreachable
        if np.any(np.isfinite(logdomain)):
            # Shift by the max before exponentiating for numerical stability.
            weights = prior * np.exp(logdomain - np.amax(logdomain))
            stateseq[t] = sample_discrete(weights)
        else:
            stateseq[t] = sample_discrete(prior)
        prior = A[stateseq[t]]
    return stateseq
def sample_forwards_log(self, betal):
    """Sample and store a state sequence given backward log messages."""
    self.stateseq = self._sample_forwards_log(
        betal, self.trans_matrix, self.pi_0, self.aBl
    )
@staticmethod
def _sample_forwards_normalized(
    betan, trans_matrix, init_state_distn, log_likelihoods
):
    """Sample a state sequence forwards, conditioning on normalized backward messages.

    At step t the sampling weights are proportional to
    prior(state) * betan[t] * likelihood(state), mirroring _sample_forwards_log.
    """
    A = trans_matrix
    aBl = log_likelihoods
    T = aBl.shape[0]
    stateseq = np.empty(T, dtype=np.int32)
    nextstate_unsmoothed = init_state_distn
    for idx in range(T):
        # BUGFIX: copy so the -inf masking below does not mutate the
        # caller's aBl row in place (the original assigned a view).
        logdomain = aBl[idx].copy()
        logdomain[nextstate_unsmoothed == 0] = -np.inf
        # BUGFIX: use this step's backward message betan[idx]; the original
        # multiplied by the whole (T, N) betan array, broadcasting the
        # weights to 2-D instead of a length-N distribution.
        stateseq[idx] = sample_discrete(
            nextstate_unsmoothed
            * betan[idx]
            * np.exp(logdomain - np.amax(logdomain))
        )
        nextstate_unsmoothed = A[stateseq[idx]]
    return stateseq
def sample_forwards_normalized(self, betan):
    """Sample and store a state sequence given normalized backward messages."""
    self.stateseq = self._sample_forwards_normalized(
        betan, self.trans_matrix, self.pi_0, self.aBl
    )
@staticmethod
def _sample_backwards_normalized(alphan, trans_matrix_transpose):
    """Sample a state sequence backwards from normalized forward messages.

    Draws the final state from alphan[T-1], then each earlier state with
    weights alphan[t] * A[state[t], state[t+1]] (read from the transposed
    transition matrix).
    """
    AT = trans_matrix_transpose
    T = alphan.shape[0]
    stateseq = np.empty(T, dtype=np.int32)
    successor_weights = np.ones(AT.shape[0])
    for t in reversed(range(T)):
        stateseq[t] = sample_discrete(successor_weights * alphan[t])
        successor_weights = AT[stateseq[t]]
    return stateseq
def sample_backwards_normalized(self, alphan):
    """Sample and store a state sequence given normalized forward messages.

    The transition matrix's last two axes are transposed (and copied —
    presumably for contiguity; behavior is identical either way) before
    being handed to the backward sampler.
    """
    self.stateseq = self._sample_backwards_normalized(
        alphan, np.swapaxes(self.trans_matrix, -1, -2).copy()
    )
### Mean Field
@property
def mf_aBl(self):
    """Mean-field expected log-likelihoods per frame and state, cached.

    Rows containing any NaN are zeroed so they contribute nothing.
    """
    if self._mf_aBl is None:
        T = self.data.shape[0]
        aBl = np.empty((T, self.num_states))
        for col, distn in enumerate(self.obs_distns):
            aBl[:, col] = distn.expected_log_likelihood(self.data).ravel()
        aBl[np.isnan(aBl).any(1)] = 0.0
        self._mf_aBl = aBl
    return self._mf_aBl
@property
def mf_trans_matrix(self):
    """Exp of the expected log transition matrix under the variational posterior."""
    return self.model.trans_distn.exp_expected_log_trans_matrix
@property
def mf_pi_0(self):
    """Exp of the expected log initial-state distribution under the variational posterior."""
    return self.model.init_state_distn.exp_expected_log_init_state_distn
@property
def all_expected_stats(self):
    """Tuple of (expected_states, expected_transcounts, normalizer)."""
    return self.expected_states, self.expected_transcounts, self._normalizer
@all_expected_stats.setter
def all_expected_stats(self, vals):
    """Set all expected statistics at once; also refresh the MAP state sequence."""
    self.expected_states, self.expected_transcounts, self._normalizer = vals
    self.stateseq = self.expected_states.argmax(1).astype("int32")  # for plotting
def meanfieldupdate(self):
    """Mean-field E-step: recompute expected stats and snapshot the parameters used."""
    self.clear_caches()
    trans, pi0, aBl = self.mf_trans_matrix, self.mf_pi_0, self.mf_aBl
    # The setter also refreshes self._normalizer, used in the snapshot below.
    self.all_expected_stats = self._expected_statistics(trans, pi0, aBl)
    # Snapshot the (log) parameters and normalizer for later VLB bookkeeping.
    self._mf_param_snapshot = (np.log(trans), np.log(pi0), aBl, self._normalizer)
def _init_mf_from_gibbs(self):
    """Seed mean-field expected statistics from the current Gibbs state sequence."""
    one_hot = np.eye(self.num_states)[self.stateseq]
    transcounts = count_transitions(self.stateseq, self.num_states)
    # -inf normalizer flags that no real mean-field pass has run yet.
    self.all_expected_stats = one_hot, transcounts, -np.inf
def get_vlb(self, most_recently_updated=False):
if (
(self._normalizer is None)
or (self._mf_param_snapshot is None)
or not hasattr(self, "expected_states")
or not hasattr(self, "expected_transcounts")
):
self.meanfieldupdate()
# see https://github.com/mattjj/pyhsmm/issues/45#issuecomment-102721960
if most_recently_updated:
return self._normalizer
else:
# TODO TODO something wrong in here
_, _, new_normalizer = self._expected_statistics(
self.mf_trans_matrix, self.mf_pi_0, self.mf_aBl
)
new_params = np.log(self.mf_trans_matrix), np.log(self.mf_pi_0), self.mf_aBl
old_params, old_normalizer = (
self._mf_param_snapshot[:3],
self._mf_param_snapshot[-1],
)
E_stats = (
self.expected_transcounts,
self.expected_states[0],
self.expected_states,
)
linear_term = sum(
np.dot(np.ravel(a - b), np.ravel(c))
for | |
<reponame>AndrewSpano/UC_Berkeley_AI_Projects
# search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).

    Concrete problems supply the state space, goal test, successor function and
    action costs that the generic search algorithms in this module rely on.

    You do not need to change anything in this class, ever.
    """

    def getStartState(self):
        """
        Returns the start state for the search problem.
        """
        util.raiseNotDefined()

    def isGoalState(self, state):
        """
        state: Search state

        Returns True if and only if the state is a valid goal state.
        """
        util.raiseNotDefined()

    def getSuccessors(self, state):
        """
        state: Search state

        For a given state, this should return a list of triples, (successor,
        action, stepCost), where 'successor' is a successor to the current
        state, 'action' is the action required to get there, and 'stepCost' is
        the incremental cost of expanding to that successor.
        """
        util.raiseNotDefined()

    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take

        This method returns the total cost of a particular sequence of actions.
        The sequence must be composed of legal moves.
        """
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """
    Returns a sequence of moves that solves tinyMaze. For any other maze, the
    sequence of moves will be incorrect, so only use this for tinyMaze.
    """
    from game import Directions

    south, west = Directions.SOUTH, Directions.WEST
    return [south, south, west, south, west, west, south, west]
def depthFirstSearch(problem):
    """Search the deepest nodes in the search tree first (graph search).

    Returns a list of actions leading from the start state to a goal state,
    [] when the start is already a goal, or None when no goal is reachable.
    """
    start = problem.getStartState()
    if problem.isGoalState(start):
        return []

    # LIFO frontier: the most recently discovered state is expanded first.
    from util import Stack
    frontier = Stack()
    frontier.push(start)
    # States already expanded (popped), for O(1) membership tests.
    visited = {}
    # path[s] is the list of actions taken to reach state s.
    path = {start: []}

    while not frontier.isEmpty():
        state = frontier.pop()
        # A state can sit on the stack more than once before its first
        # expansion. BUGFIX: skip re-expanding an already-visited state
        # (the original marked visited here but never checked it, so popped
        # duplicates were expanded again, wasting work).
        if state in visited:
            continue
        visited[state] = True
        if problem.isGoalState(state):
            return path[state]
        # Successor triples are (state, action, stepCost); cost is unused here.
        for successor, action, _cost in problem.getSuccessors(state):
            if successor not in visited:
                # Record (possibly overwriting) the path used to reach it.
                path[successor] = path[state] + [action]
                frontier.push(successor)
    # Frontier exhausted: the goal is unreachable from the start state.
    return None
def breadthFirstSearch(problem):
    """Search the shallowest nodes in the search tree first (graph search).

    Returns a list of actions leading from the start state to a goal state,
    [] when the start is already a goal, or None when no goal is reachable.
    """
    start = problem.getStartState()
    if problem.isGoalState(start):
        return []

    # FIFO frontier: states are expanded in order of discovery.
    from util import Queue
    frontier = Queue()
    frontier.push(start)
    # States already discovered (enqueued), so each is enqueued at most once.
    visited = {start: True}
    # path[s] is the list of actions taken to reach state s.
    path = {start: []}

    while not frontier.isEmpty():
        state = frontier.pop()
        if problem.isGoalState(state):
            return path[state]
        # Successor triples are (state, action, stepCost); cost is unused here.
        for successor, action, _cost in problem.getSuccessors(state):
            if successor not in visited:
                visited[successor] = True
                # First discovery in BFS is via a shortest (fewest-actions) path.
                path[successor] = path[state] + [action]
                frontier.push(successor)
    # Frontier exhausted: the goal is unreachable from the start state.
    return None
def uniformCostSearch(problem):
# check if the starting state is a solution
if problem.isGoalState(problem.getStartState()):
return []
# import the Priority Queue class which will be used to pop the state with the lowest cost
from util import PriorityQueue
priority_queue = PriorityQueue()
# the starting state has a cost of 0
priority_queue.push(problem.getStartState(), 0)
# a dictionary (more like hash table) that is used to check if a state has already beem visited in O(1) time
visited = {problem.getStartState(): True}
# a dictionary (more like hash table) to store the path taken to reach every state
path = {problem.getStartState(): []}
# a dictionary (more like hash table) to store the predecessor of every state
# this dictionary is not needed in dfs and bfs because in those searches the predecessor
# of a state is always the variable currentState
predecessor = {problem.getStartState(): None}
# a dictionary (more like hash table) to store lowest cost needed to reach every state
# this dictionary was not used in the previous searches for the same reasons as above
cost = {problem.getStartState(): 0}
# the current state of the problem becomes the starting state
currentState = problem.getStartState()
while True:
# if the priority queue is empty, then we have explored all states reachable from the StartState
# and we did not get to the goal State. Therefore it is unreachable. So we return None.
if priority_queue.isEmpty():
return None
# the new current state will become the successor state with the smallest priority (cost)
currentState = priority_queue.pop()
# check if the currentState is the goal State. If it is it means we have found a minimum cost
# solution. Return the path we have built for it.
if problem.isGoalState(currentState):
return path.get(currentState);
# get the successors states of the currentState
successors = problem.getSuccessors(currentState)
# REMEMBER: tuple[0] is the state, tuple[1] is the action and tuple[2] is the cost of the action
for tuple in successors:
if visited.get(tuple[0], None) == None:
# mark state as visited
visited[tuple[0]] = True
# the predecessor of the state tuple[0] is the state from which we got the tuple, which is currentState
predecessor[tuple[0]] = currentState
# the cost of the state tuple[0] is equal to the cost to get to the previous state + the cost of the action
cost[tuple[0]] = cost[predecessor[tuple[0]]] + tuple[2]
# make the path
temp_list = path.get(currentState)[:]
temp_list.append(tuple[1])
path[tuple[0]] = temp_list
# push the state in the priority queue with its cost, which we calculated above
priority_queue.push(tuple[0], cost[tuple[0]])
else:
# we have an already visited state, so we must check if the cost to get to it can | |
def winEnumHandler(hwnd, ctx):
    # EnumWindows callback: print the handle (hex) and title of every
    # currently visible window. `ctx` is the unused lParam argument.
    if win32gui.IsWindowVisible(hwnd):
        print(hex(hwnd), win32gui.GetWindowText(hwnd))


# List all visible top-level windows (useful for finding the game window name).
win32gui.EnumWindows(winEnumHandler, None)
def grab_hpbar_locations(gamename=False):
    """Locate enemy health bars on screen and return their center points.

    Captures the game window when `gamename` is given, otherwise uses a
    bundled test image. Returns a list of (x, y) centers, or False when
    fewer than two contours are found.
    """
    if gamename:
        wincap = WindowCapture(gamename, [100, 135, 1223, 688])
        source = wincap.get_screenshot()
    else:
        test_path = os.path.dirname(
            os.path.abspath(__file__)) + "/testimages/healthbars.jpg"
        source = cv2.imread(test_path)
    # Isolate the health-bar color range, then blur and binarize.
    hp_filter = HsvFilter(20, 174, 245, 26, 193, 255, 0, 0, 0, 0)
    processed = BotUtils.filter_blackwhite_invert(hp_filter, source, True)
    processed = cv2.blur(processed, (2, 2))
    _, thresh = cv2.threshold(processed, 127, 255, 0)
    contours, _ = cv2.findContours(
        thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    if len(contours) < 2:
        return False
    # Drop the largest contour (the surrounding region outline).
    contours.pop(0)
    rectangles = []
    for contour in contours:
        (cx, cy), _ = cv2.minEnclosingCircle(contour)
        # Append each rect twice so groupRectangles (groupThreshold=1) keeps it.
        rectangles.append([cx - 10, cy, 20, 5])
        rectangles.append([cx - 10, cy, 20, 5])
    rectangles, _ = cv2.groupRectangles(
        rectangles, groupThreshold=1, eps=0.8)
    points = []
    for (x, y, w, h) in rectangles:
        points.append((x + int(w / 2), y + int(h / 2)))
    return points
def grab_character_location(player_name, gamename=False):
    """OCR the screen for `player_name` and return its (x, y) center.

    Captures the game window when `gamename` is given, otherwise uses a
    bundled test image. Falls back to a fixed default point (640, 382)
    when OCR or fuzzy matching fails.
    """
    player_chars = "".join(set(player_name))
    if gamename:
        wincap = WindowCapture(gamename, [200, 235, 1123, 688])
        original_image = wincap.get_screenshot()
    else:
        original_image = cv2.imread(os.path.dirname(
            os.path.abspath(__file__)) + "/testimages/test_sensitive.jpg")
    filter = HsvFilter(0, 0, 119, 179, 49, 255, 0, 0, 0, 0)
    output_image = BotUtils.filter_blackwhite_invert(
        filter, original_image, return_gray=True)
    rgb = cv2.cvtColor(output_image, cv2.COLOR_GRAY2RGB)
    # Restrict tesseract to the name's characters (psm 6 = uniform text block).
    tess_config = '--psm 6 --oem 3 -c tessedit_char_whitelist=' + player_chars
    results = pytesseract.image_to_data(
        rgb, output_type=pytesseract.Output.DICT, lang='eng', config=tess_config)
    try:
        best_match, _ = process.extractOne(
            player_name, results["text"], score_cutoff=0.8)
        i = results["text"].index(best_match)
        x = int(results["left"][i] + (results["width"][i]/2))
        y = int(results["top"][i] + (results["height"][i]/2))
        # Account for the capture rect's offset within the window.
        x += 200
        y += 235
        return x, y
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. extractOne returns None below the score cutoff
        # (TypeError on unpacking); fall back to the default position.
        return 640, 382
def shift_channel(c, amount):
    """Shift a uint8 image channel by `amount`, saturating at 0 and 255.

    Mutates `c` in place and returns it. Positive amounts brighten (values
    that would exceed 255 clamp to 255); negative amounts darken (values
    that would drop below 0 clamp to 0); zero is a no-op.
    """
    if amount > 0:
        cutoff = 255 - amount
        c[c >= cutoff] = 255       # would overflow: saturate high
        c[c < cutoff] += amount
    elif amount < 0:
        magnitude = -amount
        c[c <= magnitude] = 0      # would underflow: saturate low
        c[c > magnitude] -= magnitude
    return c
def filter_blackwhite_invert(filter: HsvFilter, existing_image, return_gray=False, threshold=67, max=255):
    """Apply an HSV filter, binarize the result, and invert it.

    Returns an inverted black-and-white image: single-channel grayscale
    when `return_gray` is True, otherwise converted back to 3-channel BGR.
    """
    hsv = cv2.cvtColor(existing_image, cv2.COLOR_BGR2HSV)
    # Shift the saturation/value channels by the filter's add/sub amounts.
    h, s, v = cv2.split(hsv)
    s = BotUtils.shift_channel(s, filter.sAdd)
    s = BotUtils.shift_channel(s, -filter.sSub)
    v = BotUtils.shift_channel(v, filter.vAdd)
    v = BotUtils.shift_channel(v, -filter.vSub)
    hsv = cv2.merge([h, s, v])
    # Keep only pixels inside the filter's HSV window.
    lower = np.array([filter.hMin, filter.sMin, filter.vMin])
    upper = np.array([filter.hMax, filter.sMax, filter.vMax])
    mask = cv2.inRange(hsv, lower, upper)
    masked = cv2.bitwise_and(hsv, hsv, mask=mask)
    # HSV -> BGR -> grayscale -> binary threshold -> inverted.
    bgr = cv2.cvtColor(masked, cv2.COLOR_HSV2BGR)
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    _, black_and_white = cv2.threshold(gray, threshold, max, cv2.THRESH_BINARY)
    inverted = 255 - black_and_white
    if return_gray:
        return inverted
    return cv2.cvtColor(inverted, cv2.COLOR_GRAY2BGR)
def convert_pynput_to_pag(button):
    """Translate a pynput key name into its pyautogui equivalent.

    e.g. 'Key.alt_l' -> 'altleft', 'Key.F9' -> 'F9', plain 'w' -> 'w'.
    """
    PYNPUT_SPECIAL_CASE_MAP = {
        'alt_l': 'altleft',
        'alt_r': 'altright',
        'alt_gr': 'altright',
        'caps_lock': 'capslock',
        'ctrl_l': 'ctrlleft',
        'ctrl_r': 'ctrlright',
        'page_down': 'pagedown',
        'page_up': 'pageup',
        'shift_l': 'shiftleft',
        'shift_r': 'shiftright',
        'num_lock': 'numlock',
        'print_screen': 'printscreen',
        'scroll_lock': 'scrolllock',
    }
    # Strip the pynput 'Key.' prefix, then map specials through the table;
    # anything not in the table passes through unchanged.
    cleaned_key = button.replace('Key.', '')
    return PYNPUT_SPECIAL_CASE_MAP.get(cleaned_key, cleaned_key)
def detect_player_name(gamename):
    """OCR the player-name UI region and return the longest detected token.

    Returns False when OCR produces no non-empty text.
    """
    name_rect = [165, 45, 320, 65]
    name_wincap = WindowCapture(gamename, name_rect)
    name_filter = HsvFilter(0, 0, 103, 89, 104, 255, 0, 0, 0, 0)
    # Grab and pre-process the name region before handing it to tesseract.
    image = name_wincap.get_screenshot()
    image = BotUtils.apply_hsv_filter(image, name_filter)
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = pytesseract.image_to_data(
        rgb, output_type=pytesseract.Output.DICT, lang='eng')
    # Keep the longest token; stays False if every token is empty.
    name = False
    longest = 0
    for token in results["text"]:
        if len(token) > longest:
            name = token
            longest = len(token)
    return name
def detect_level_name(gamename):
    """OCR the level-name UI strip and return it as alphanumeric text."""
    wincap = WindowCapture(gamename, [1121, 31, 1248, 44])
    screenshot = wincap.get_screenshot()
    level_filter = HsvFilter(0, 0, 0, 169, 34, 255, 0, 0, 0, 0)
    filtered = BotUtils.apply_hsv_filter(screenshot, level_filter)
    gray = cv2.cvtColor(filtered, cv2.COLOR_BGR2GRAY)
    _, black_and_white = cv2.threshold(
        gray, 129, 255, cv2.THRESH_BINARY)
    # Invert so tesseract sees dark text on a light background.
    inverted = 255 - black_and_white
    bgr = cv2.cvtColor(inverted, cv2.COLOR_GRAY2BGR)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    # psm 7: treat the strip as a single text line; alphanumerics only.
    tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
    # [:-2] trims the trailing characters tesseract appends to its output.
    return pytesseract.image_to_string(
        rgb, lang='eng', config=tess_config)[:-2]
def apply_hsv_filter(original_image, hsv_filter: HsvFilter):
    """Apply an HsvFilter's channel shifts and HSV window to an image.

    Returns the masked result converted back to BGR for display.
    """
    hsv = cv2.cvtColor(original_image, cv2.COLOR_BGR2HSV)
    # Shift the saturation/value channels by the filter's add/sub amounts.
    h, s, v = cv2.split(hsv)
    s = BotUtils.shift_channel(s, hsv_filter.sAdd)
    s = BotUtils.shift_channel(s, -hsv_filter.sSub)
    v = BotUtils.shift_channel(v, hsv_filter.vAdd)
    v = BotUtils.shift_channel(v, -hsv_filter.vSub)
    hsv = cv2.merge([h, s, v])
    # Zero out everything outside the filter's HSV window.
    lower = np.array([hsv_filter.hMin, hsv_filter.sMin, hsv_filter.vMin])
    upper = np.array([hsv_filter.hMax, hsv_filter.sMax, hsv_filter.vMax])
    mask = cv2.inRange(hsv, lower, upper)
    masked = cv2.bitwise_and(hsv, hsv, mask=mask)
    return cv2.cvtColor(masked, cv2.COLOR_HSV2BGR)
def detect_sect_clear(gamename=False):
    """Return True when the 'section clear' banner pixels are bright.

    Samples both ends of a one-pixel-high strip; each sampled pixel's
    summed channels must exceed 700 (near-white).
    """
    if not gamename:
        with open("gamename.txt") as f:
            gamename = f.readline()
    wincap = WindowCapture(gamename, custom_rect=[
        464+156, 640, 464+261, 641])
    image = wincap.get_screenshot()
    left_total = sum(int(ch) for ch in image[0][0])
    right_total = sum(int(ch) for ch in image[0][-1])
    return left_total > 700 and right_total > 700
def detect_boss_healthbar(gamename=False):
    """Return True when the boss health bar's red pixel signature is present."""
    if not gamename:
        with open("gamename.txt") as f:
            gamename = f.readline()
    wincap = WindowCapture(gamename, custom_rect=[
        415+97, 105+533, 415+98, 105+534])
    image = wincap.get_screenshot()
    # Pixels are BGR: the bar reads as strong red with little blue/green.
    b1, g1, r1 = [int(ch) for ch in image[0][0]]
    b2, g2, r2 = [int(ch) for ch in image[0][-1]]
    return r1 + r2 > 440 and b1 + g1 + b2 + g2 < 80
def detect_xprompt(gamename=False):
    """Return True when the 'press X' prompt pixels match the expected colors."""
    if not gamename:
        with open("gamename.txt") as f:
            gamename = f.readline()
    wincap = WindowCapture(gamename, custom_rect=[
        1137, 694, 1163, 695])
    image = wincap.get_screenshot()
    # First two channels (blue/green, assuming OpenCV BGR order) must be
    # bright while the combined third (red) channel is exactly 140.
    b1, g1, r1 = [int(ch) for ch in image[0][0]]
    b2, g2, r2 = [int(ch) for ch in image[0][-1]]
    return b1 + g1 + b2 + g2 > 960 and r1 + r2 == 140
def grab_player_pos(gamename=False, map_rect=None, rect_rel=False):
    """Find the player's map marker and return its coordinates.

    Returns (False, False) when the marker template is not found. With the
    default rect the point is translated to absolute screen coordinates;
    with a custom rect it is offset either by the rect itself
    (rect_rel=True) or by the captured window's position.
    """
    if not gamename:
        with open("gamename.txt") as f:
            gamename = f.readline()
    capture_rect = map_rect if map_rect else [561, 282, 1111, 666]
    wincap = WindowCapture(gamename, capture_rect)
    marker_filter = HsvFilter(34, 160, 122, 50, 255, 255, 0, 0, 0, 0)
    screenshot = wincap.get_screenshot()
    processed = BotUtils.filter_blackwhite_invert(marker_filter, screenshot)
    vision = Vision('plyr.jpg')
    rectangles = vision.find(
        processed, threshold=0.31, epsilon=0.5)
    if len(rectangles) < 1:
        return False, False
    x, y = vision.get_click_points(rectangles)[0]
    if not map_rect:
        # Default rect: translate back to absolute screen coordinates.
        return x + 561, y + 282
    if rect_rel:
        # Caller wants coordinates relative to its own rect's origin.
        return x + map_rect[0], y + map_rect[1]
    # Otherwise translate by the captured window's on-screen position.
    return x + wincap.window_rect[0], y + wincap.window_rect[1]
def grab_level_rects():
    """Build a mapping from cleaned level names to their 'FM' map rect strings.

    Reads 'lvl_name_num.txt' (number-name pairs) and 'catalogue.txt'
    (number-area-rect triples). Each FM rect is keyed by the level name
    with spaces removed, plus aliases for common OCR confusions
    ('1' -> 'L', 'ri' -> 'n').
    """
    with open("lvl_name_num.txt") as f:
        num_names = [line.split("-") for line in f.readlines()]
    with open("catalogue.txt") as f:
        nums_rects = [line.split("-") for line in f.readlines()]
    rects = {}
    for number, name in num_names:
        for num, area, rect in nums_rects:
            if area == "FM" and num == number:
                cleaned = name.rstrip().replace(" ", "")
                rect_str = rect.rstrip()
                rects[cleaned] = rect_str
                # OCR aliases: '1' is often read as 'L', 'ri' as 'n'.
                if "1" in name:
                    rects[cleaned.replace("1", "L")] = rect_str
                if "ri" in name:
                    rects[cleaned.replace("ri", "n").replace("1", "L")] = rect_str
                break
    return rects
def grab_level_rects_and_speeds():
    """Like grab_level_rects, but also map level names to movement speeds.

    Additionally reads 'lvl_speed.txt' (number|speed pairs). Returns
    (rects, speeds), both keyed by the cleaned level names and the same
    OCR-confusion aliases ('1' -> 'L', 'ri' -> 'n').
    """
    with open("lvl_name_num.txt") as f:
        num_names = [line.split("-") for line in f.readlines()]
    with open("catalogue.txt") as f:
        nums_rects = [line.split("-") for line in f.readlines()]
    with open("lvl_speed.txt") as f:
        num_speeds = [line.split("|") for line in f.readlines()]
    rects = {}
    speeds = {}
    for number, name in num_names:
        cleaned = name.rstrip().replace(" ", "")
        for num, area, rect in nums_rects:
            if area == "FM" and num == number:
                rect_str = rect.rstrip()
                rects[cleaned] = rect_str
                # OCR aliases: '1' is often read as 'L', 'ri' as 'n'.
                if "1" in name:
                    rects[cleaned.replace("1", "L")] = rect_str
                if "ri" in name:
                    rects[cleaned.replace("ri", "n").replace("1", "L")] = rect_str
                break
        for num, speed in num_speeds:
            if num == number:
                value = float(speed.rstrip())
                speeds[cleaned] = value
                if "1" in name:
                    speeds[cleaned.replace("1", "L")] = value
                if "ri" in name:
                    speeds[cleaned.replace("ri", "n").replace("1", "L")] = value
                break
    return rects, speeds
def string_to_rect(string: str):
# This converts the rect from catalogue into int list
return | |
error:
# TODO: Do not just catch Exception. Do narrower scope.
if hasattr(error, 'errno'):
log.error('Failed to connect to "%s" due to errno=%d. Exception was %s. Closing connection, '
'will re-attempt', self.__full_address, error.errno, str(error),
error_code='client/requestFailed')
else:
log.exception('Failed to send request due to exception. Closing connection, will re-attempt',
error_code='requestFailed')
return 'requestFailed', len(body_str), response
log.log(scalyr_logging.DEBUG_LEVEL_5, 'Response was received with body \"%s\"', response)
# If we got back an empty result, that often means the connection has been closed or reset.
if len(response) == 0:
log.error('Received empty response, server may have reset connection. Will re-attempt',
error_code='emptyResponse')
return 'emptyResponse', len(body_str), response
# Try to parse the response
# noinspection PyBroadException
try:
response_as_json = json_lib.parse(response)
except Exception:
# TODO: Do not just catch Exception. Do narrower scope. Also, log error here.
log.exception('Failed to parse response of \'%s\' due to exception. Closing connection, will '
're-attempt', scalyr_util.remove_newlines_and_truncate(response, 1000),
error_code='parseResponseFailed')
return 'parseResponseFailed', len(body_str), response
self.__last_success = current_time
if 'status' in response_as_json:
status = response_as_json['status']
if status == 'success':
was_success = True
elif status == 'error/client/badParam':
log.error('Request to \'%s\' failed due to a bad parameter value. This may be caused by an '
'invalid write logs api key in the configuration', self.__full_address,
error_code='error/client/badParam')
else:
log.error('Request to \'%s\' failed due to an error. Returned error code was \'%s\'',
self.__full_address, status, error_code='error/client/badParam')
return status, len(body_str), response
else:
log.error('No status message provided in response. Unknown error. Response was \'%s\'',
scalyr_util.remove_newlines_and_truncate(response, 1000), error_code='unknownError')
return 'unknownError', len(body_str), response
finally:
self.total_request_latency_secs += (time.time() - current_time)
if not was_success:
self.total_requests_failed += 1
self.close(current_time=current_time)
self.total_response_bytes_received += bytes_received
def close(self, current_time=None):
    """Closes the underlying connection to the Scalyr server.

    No-op when there is no open connection.

    @param current_time: If not None, the time to use for the current time. Used for testing purposes.
    @type current_time: float or None
    """
    if self.__connection is None:
        return
    self.__connection.close()
    self.__connection = None
    # Record when the connection was closed so reconnect logic can consult it.
    self.__last_connection_close = (
        current_time if current_time is not None else time.time()
    )
def add_events_request(self, session_info=None, max_size=1*1024*1024*1024):
    """Creates and returns a new AddEventsRequest that can be later sent by this session.

    The caller is expected to add events to this request and then submit it for
    transmission using the 'send' method.

    @param session_info: The session info for this session, which is basically any attributes that
        should be added to all events uploaded by this agent, such as server attributes from the
        config file.
    @param max_size: The maximum number of bytes to send in this request.
    @type session_info: dict
    @type max_size: int

    @return: The request that can be populated.
    @rtype: AddEventsRequest
    """
    # Base request body; the 'events' array itself is filled in incrementally
    # by the AddEventsRequest object.
    body = {'token': self.__api_key, 'session': self.__session_id, 'threads': []}
    if session_info is not None:
        body['sessionInfo'] = session_info
    return AddEventsRequest(body, max_size=max_size)
@staticmethod
def __get_user_agent(agent_version):
    """Determine the user agent to report in the request headers.

    We construct an agent that gives Scalyr some information about the platform the
    customer is running on, the Python version, and a few other tidbits. This is used
    to make decisions about support issues.

    @param agent_version: The agent version number.
    @type agent_version: str

    @return: The user agent string, e.g. 'Linux-redhat-7.0;python-2.7.2;agent-2.0.1;ssllib'.
    @rtype: str
    """
    version_info = sys.version_info
    if len(version_info) >= 5:
        python_version_str = 'python-%s.%s.%s' % (
            version_info[0], version_info[1], version_info[2])
    else:
        python_version_str = 'python-unknown'

    # Platform detection: try a Linux distribution name first (doesn't work on
    # some systems, e.g. Amazon AMIs), then Mac, then a generic terse platform
    # string which should print out something reasonable for Windows.
    # NOTE(review): platform.dist() was removed in Python 3.8; on such
    # interpreters the broad except below makes this fall through to the
    # other branches.
    platform_value = None
    # noinspection PyBroadException
    try:
        distribution = platform.dist()
        if len(distribution[0]) > 0:
            platform_value = 'Linux-%s-%s' % (distribution[0], distribution[1])
    except Exception:
        platform_value = None
    if platform_value is None:
        # noinspection PyBroadException
        try:
            mac_ver = platform.mac_ver()[0]
            if len(mac_ver) > 0:
                platform_value = 'MacOS-%s' % mac_ver
        except Exception:
            platform_value = None
    if platform_value is None:
        platform_value = platform.platform(terse=1)

    # Record whether a true ssl library is available, i.e. whether the client
    # can do server certificate verification.
    ssl_str = 'ssllib' if __has_ssl__ else 'nossllib'

    return '%s;%s;agent-%s;%s;' % (platform_value, python_version_str, agent_version, ssl_str)
class AddEventsRequest(object):
"""Used to construct an AddEventsRequest to eventually send.
This abstraction has three key features. First, it uses a generally more efficient scheme to build
up the string to eventually use as the body for an add_events request. Secondly, it does not require all events
at construction time. Instead, you can incrementally add more events before the request is actually sent. This
leads to better memory utilization when combined with an abstraction that is incrementally reading events from disk.
It will also prevent you from exceeding the maximum request size. Third, you may undo the effect of adding events
to the request before it is sent. This is useful to rollback the request state to a previous state if some
problem occurs.
"""
    def __init__(self, base_body, max_size=1*1024*1024):
        """Initializes the instance.

        @param base_body: A JsonObject or dict containing the information to send as the body of the add_events
            request, with the exception of the events field. The events and client_timestamp fields must not be
            included because they will be added later. Note, base_body must have some fields set, such as 'ts' which is
            required by the server.
        @param max_size: The maximum number of bytes this request can consume when it is serialized to JSON.
        """
        assert len(base_body) > 0, "The base_body object must have some fields defined."
        assert not 'events' in base_body, "The base_body object cannot already have 'events' set."
        assert not 'client_time' in base_body, "The base_body object cannot already have 'client_time' set."
        # As an optimization, we use a StringIO object to serialize the request.  We also
        # do a little bit of the JSON object assembly by hand.  Specifically, we serialize the request
        # to JSON without the 'events' field, but then delete the last '}' so that we can manually
        # add in the 'events: [ ... ]' ourselves.  This way we can watch the size of the buffer as
        # we build up events.
        string_buffer = StringIO()
        json_lib.serialize(base_body, output=string_buffer, use_fast_encoding=True)
        # Now go back and find the last '}' and delete it so that we can open up the JSON again.
        # Scan backwards from the end of the buffer one character at a time.
        location = string_buffer.tell()
        while location > 0:
            location -= 1
            string_buffer.seek(location)
            if string_buffer.read(1) == '}':
                break
        # Now look for the first non-white character.  We need to add in a comma after it.
        last_char = None
        while location > 0:
            location -= 1
            string_buffer.seek(location)
            last_char = string_buffer.read(1)
            if not last_char.isspace():
                break
        # If the character happened to be a comma, back up over that since we want to write our own comma.
        if location > 0 and last_char == ',':
            location -= 1
        # NOTE(review): both scan loops leave location >= 0, so this guard looks
        # unreachable; it is kept as a defensive check -- confirm before removing.
        if location < 0:
            raise Exception('Could not locate trailing "}" and non-whitespace in base JSON for add events request')
        # Now chop off everything after the character at the location, leaving the JSON open.
        location += 1
        string_buffer.seek(location)
        string_buffer.truncate()
        # Append the start of our events field.
        # NOTE(review): the 'events' key is written unquoted, which is not strict
        # JSON -- presumably json_lib/the server accept this relaxed form; verify.
        string_buffer.write(', events: [')
        # The string that must be appended after all of the events to terminate the JSON.  We will
        # later replace TIMESTAMP with the real timestamp.
        self.__post_fix = '], client_time: TIMESTAMP }'
        # The time that will be sent as the 'client_time' parameter for the addEvents request.
        # This may be later updated using the set_client_time method in the case where the same AddEventsRequest
        # is being reused to send the events again.
        self.__client_time = time.time()
        # The partially assembled request body (open 'events' array at the end).
        self.__buffer = string_buffer
        # Hard cap, in bytes, on the serialized request size.
        self.__max_size = max_size
        # Current serialized size = buffer content plus the terminating postfix.
        self.__current_size = self.__buffer.tell() + len(self.__get_post_fix(self.__client_time))
        # Number of events appended so far.
        self.__events_added = 0
        # If we have finished serializing the body, it is stored here until the close() method is invoked.
        self.__body = None
def add_event(self, event, timestamp=None):
"""Adds the serialized JSON for event if it does not cause the maximum request size to be exceeded.
It will automatically add in a 'ts' field to event containing a new timestamp | |
neighboring bins.
INPUT: drive, single frequency drive signal, sampled with some dt
resp, arbitrary response to be 'binned'
dt, sample spacing in seconds [s]
nbins, number of samples in the final resp(drive)
nharmonics, number of harmonics to include in filter
harms, list of desired harmonics (overrides nharmonics)
width, filter width in Hertz [Hz]
sg_filter, boolean value indicating use of a Savitsky-Golay
filter for final smoothing of resp(drive)
sg_params, parameters of the savgol filter
(see scipy.signal.savgol_filter for explanation)
verbose, usual boolean switch for printing
maxfreq, top-hat filter cutoff
           add_mean, boolean switch to add back the mean of each signal
correct_phase_shift, boolean switch to adjust the phase of
of the response to match the drive
grad_sign, -1, 0 or 1 to indicate the sign of the drive's
derivative to include in order to select either
'forward-going' or 'backward-going' data
OUTPUT: drivevec, vector of drive values, monotonically increasing
respvec, resp as a function of drivevec'''
nsamp = len(drive)
if len(resp) != nsamp:
if verbose:
print("Data Error: x(t) and f(t) don't have the same length")
sys.stdout.flush()
return
### Generate t array
t = np.linspace(0, len(drive) - 1, len(drive)) * dt
### Generate FFTs for filtering
drivefft = np.fft.rfft(drive)
respfft = np.fft.rfft(resp)
freqs = np.fft.rfftfreq(len(drive), d=dt)
### Find the drive frequency, ignoring the DC bin
maxind = np.argmin( np.abs(freqs - maxfreq) )
fund_ind = np.argmax( np.abs(drivefft[1:maxind]) ) + 1
drive_freq = freqs[fund_ind]
mindrive = np.min(drive)
maxdrive = np.max(drive)
meanresp = np.mean(resp)
### Build the notch filter
drivefilt = np.zeros_like(drivefft) #+ np.random.randn(len(drivefft))*1.0e-3
drivefilt[fund_ind] = 1.0 + 0.0j
errfilt = np.zeros_like(drivefilt)
noise_bins = (freqs > 10.0) * (freqs < 100.0)
errfilt[noise_bins] = 1.0+0.0j
errfilt[fund_ind] = 0.0+0.0j
#plt.loglog(freqs, np.abs(respfft))
#plt.loglog(freqs, np.abs(respfft)*errfilt)
#plt.show()
### Error message triggered by verbose option
if verbose:
if ( (np.abs(drivefft[fund_ind-1]) > 0.03 * np.abs(drivefft[fund_ind])) or \
(np.abs(drivefft[fund_ind+1]) > 0.03 * np.abs(drivefft[fund_ind])) ):
print("More than 3% power in neighboring bins: spatial binning may be suboptimal")
sys.stdout.flush()
plt.loglog(freqs, np.abs(drivefft))
plt.loglog(freqs[fund_ind], np.abs(drivefft[fund_ind]), '.', ms=20)
plt.show()
### Expand the filter to more than a single bin. This can introduce artifacts
### that appear like lissajous figures in the resp vs. drive final result
if width:
lower_ind = np.argmin(np.abs(drive_freq - 0.5 * width - freqs))
upper_ind = np.argmin(np.abs(drive_freq + 0.5 * width - freqs))
drivefilt[lower_ind:upper_ind+1] = drivefilt[fund_ind]
### Generate an array of harmonics
if not len(harms):
harms = np.array([x+2 for x in range(nharmonics)])
### Loop over harmonics and add them to the filter
for n in harms:
harm_ind = np.argmin( np.abs(n * drive_freq - freqs) )
drivefilt[harm_ind] = 1.0+0.0j
errfilt[harm_ind] = 0.0+0.0j
if width:
h_lower_ind = harm_ind - (fund_ind - lower_ind)
h_upper_ind = harm_ind + (upper_ind - fund_ind)
drivefilt[h_lower_ind:h_upper_ind+1] = drivefilt[harm_ind]
if correct_phase_shift:
phase_shift = np.angle(respfft[fund_ind]) - np.angle(drivefft[fund_ind])
drivefilt2 = drivefilt * np.exp(-1.0j * phase_shift)
else:
drivefilt2 = np.copy(drivefilt)
if add_mean:
drivefilt[0] = 1.0+0.0j
drivefilt2[0] = 1.0+0.0j
### Apply the filter to both drive and response
drivefft_filt = drivefilt * drivefft
respfft_filt = drivefilt2 * respfft
errfft_filt = errfilt * respfft
### Reconstruct the filtered data
drive_r = np.fft.irfft(drivefft_filt)
resp_r = np.fft.irfft(respfft_filt)
err_r = np.fft.irfft(errfft_filt)
### Sort reconstructed data, interpolate and resample
mindrive = np.min(drive_r)
maxdrive = np.max(drive_r)
grad = np.gradient(drive_r)
sortinds = drive_r.argsort()
drive_r = drive_r[sortinds]
resp_r = resp_r[sortinds]
err_r = err_r[sortinds]
if grad_sign < 0:
ginds = grad[sortinds] < 0
elif grad_sign > 0:
ginds = grad[sortinds] > 0
elif grad_sign == 0.0:
ginds = np.ones(len(grad[sortinds]), dtype=np.bool)
bin_spacing = (maxdrive - mindrive) * (1.0 / nbins)
drivevec = np.linspace(mindrive+0.5*bin_spacing, maxdrive-0.5*bin_spacing, nbins)
### This part is slow, don't really know the best way to fix that....
respvec = []
errvec = []
for bin_loc in drivevec:
inds = (drive_r[ginds] >= bin_loc - 0.5*bin_spacing) * \
(drive_r[ginds] < bin_loc + 0.5*bin_spacing)
val = np.mean( resp_r[ginds][inds] )
err_val = np.mean( err_r[ginds][inds] )
respvec.append(val)
errvec.append(err_val)
respvec = np.array(respvec)
errvec = np.array(errvec)
#plt.plot(drive_r, resp_r)
#plt.plot(drive_r[ginds], resp_r[ginds], linewidth=2)
#plt.plot(drive_r[np.invert(ginds)], resp_r[np.invert(ginds)], linewidth=2)
#plt.plot(drivevec, respvec, linewidth=5)
#plt.show()
if sg_filter:
respvec = signal.savgol_filter(respvec, sg_params[0], sg_params[1])
if plot:
plt.figure()
drive_asd = np.abs(drivefft)
resp_asd = np.abs(respfft)
plt.loglog(freqs, drive_asd / np.max(drive_asd), label='Drive')
plt.loglog(freqs, resp_asd / np.max(resp_asd), label='Response')
plt.loglog(freqs[np.abs(drivefilt)>0], \
resp_asd[np.abs(drivefilt)>0] / np.max(resp_asd),
'X', label='Filter', ms=10)
plt.xlabel('Frequency [Hz]')
plt.ylabel('ASD [arb.]')
plt.legend()
plt.figure()
plt.errorbar(drivevec, respvec, yerr=errvec, ls='', marker='o', ms=6)
plt.xlabel('Drive units')
plt.ylabel('Response units')
plt.show()
return drivevec, respvec, errvec
def rebin(xvec, yvec, errs=None, nbin=500, plot=False, correlated_errs=False):
    '''Slow and derpy function to re-bin based on averaging. Works
       with any value of nbins, but can be slow since it's a for loop.

       INPUT:  xvec, abscissa to define the bins
               yvec, ordinate to be averaged within each bin
               errs, optional per-point uncertainties on yvec
               nbin, requested number of bins (capped at len(xvec)/4)
               plot, boolean switch to show a diagnostic plot
               correlated_errs, if True use the bin std directly instead
                   of the standard error of the mean

       OUTPUT: xvec_new, bin centers
               yvec_new, mean of yvec within each bin
               errs_new, uncertainty on each bin mean'''
    # Use None as the default to avoid the shared-mutable-default-argument
    # pitfall of 'errs=[]'; behavior is unchanged for all callers.
    if errs is None:
        errs = []
    if len(errs):
        assert len(errs) == len(yvec), 'error vec is not the right length'
    # Cap the bin count so every bin averages at least ~4 samples.
    if nbin > 0.25 * len(xvec):
        nbin = int(0.25 * len(xvec))
    lenx = np.max(xvec) - np.min(xvec)
    dx = lenx / nbin
    # Bin centers, inset from either end by half a bin.
    xvec_new = np.linspace(np.min(xvec)+0.5*dx, np.max(xvec)-0.5*dx, nbin)
    yvec_new = np.zeros_like(xvec_new)
    errs_new = np.zeros_like(xvec_new)
    for xind, x in enumerate(xvec_new):
        # The last bin is closed on the right so the maximum point is included.
        if x != xvec_new[-1]:
            inds = (xvec >= x - 0.5*dx) * (xvec < x + 0.5*dx)
        else:
            inds = (xvec >= x - 0.5*dx) * (xvec <= x + 0.5*dx)
        if len(errs):
            # Propagate the supplied uncertainties in quadrature.
            errs_new[xind] = np.sqrt( np.mean(errs[inds]**2))
        else:
            if correlated_errs:
                errs_new[xind] = np.std(yvec[inds])
            else:
                # Standard error of the mean within the bin.
                errs_new[xind] = np.std(yvec[inds]) / np.sqrt(np.sum(inds))
        yvec_new[xind] = np.mean(yvec[inds])
    if plot:
        plt.scatter(xvec, yvec, color='C0')
        plt.errorbar(xvec_new, yvec_new, yerr=errs_new, fmt='o', color='C1')
        plt.show()
    return xvec_new, yvec_new, errs_new
def rebin_mean(a, *args):
    '''Vectorized re-binning by block averaging.

    An arbitrarily shaped array a is rebinned into the shape given by
    *args, i.e. (args[0], args[1], ...), with the caveat that the ratio
    (points in the oversampled array) / (points in the rebinned array)
    must be an integer along every axis, much like a downsampling
    factor. Elements of the rebinned array are the mean of points
    within the appropriate window.

    Needs to be applied separately for xvec and yvec.

    Note: this previously built and eval()'d a string expression (the
    scipy cookbook "evList" trick,
    https://scipy-cookbook.readthedocs.io/items/Rebinning.html); the
    same reshape-and-reduce is now done directly, which is safer and
    produces identical results.'''
    shape = a.shape
    factor = (np.asarray(shape) / np.asarray(args)).astype(int)
    # Interleave target sizes and window lengths:
    # (args[0], factor[0], args[1], factor[1], ...)
    interleaved = []
    for size, fac in zip(args, factor):
        interleaved.extend([size, fac])
    out = a.reshape(interleaved)
    # Collapse each window axis in turn; after each mean() the next
    # window axis sits at position (i+1), matching the original
    # .mean(1).mean(2)... chain.
    for i in range(len(shape)):
        out = out.mean(i + 1)
    return out
def rebin_std(a, *args):
    '''Vectorized standard-error estimate over re-binning windows.

    Refer to the rebin_mean() docstring for the shape/factor caveats.
    Each stage takes the std over one window axis and divides by
    sqrt(factor) to give the standard error of the corresponding mean.

    The previous eval()-based implementation failed for >1D input
    because operator precedence attached '.std(2)' to the
    'np.sqrt(factor[0])' scalar rather than to the running array (the
    "trouble with more than 1D input arrays" its docstring mentioned).
    This direct form applies each reduction to the running array as
    intended; 1D results are unchanged.'''
    shape = a.shape
    factor = (np.asarray(shape) / np.asarray(args)).astype(int)
    # Interleave target sizes and window lengths, as in rebin_mean().
    interleaved = []
    for size, fac in zip(args, factor):
        interleaved.extend([size, fac])
    out = a.reshape(interleaved)
    for i in range(len(shape)):
        out = out.std(i + 1) / np.sqrt(factor[i])
    return out
def rebin_vectorized(xvec, yvec, nbin, model=None):
    '''Rebin a 1D numpy array pair (xvec, yvec) down to size nbin,
    with the caveats stated in the rebin_mean() and rebin_std()
    docstrings. If the underlying data should follow a model, the data
    are first fit to that model and the residuals are rebinned to
    determine the appropriate rebinned error array.'''
    n = int(nbin)
    xvec_rb = rebin_mean(xvec, n)
    yvec_rb = rebin_mean(yvec, n)
    if model is None:
        # No model: estimate errors directly from the data scatter.
        yvec_err_rb = rebin_std(yvec, n)
    else:
        # Fit the rebinned data, then rebin the residuals of the full-
        # resolution data about the best-fit model.
        popt, pcov = opti.curve_fit(model, np.arange(n), yvec_rb)
        resid = yvec - model(np.linspace(0, n-1, len(yvec)), *popt)
        yvec_err_rb = rebin_std(resid, n)
    return xvec_rb, yvec_rb, yvec_err_rb
def correlation(drive, response, fsamp, fdrive, filt = False, band_width = 1):
'''Compute the full correlation between drive and response,
correctly normalized for use in step-calibration.
INPUTS: drive, drive signal as a function of time
response, resposne signal as a function of time
fsamp, sampling frequency
fdrive, predetermined drive frequency
filt, boolean switch for bandpass filtering
band_width, bandwidth in [Hz] of filter
OUTPUTS: corr_full, full and correctly normalized correlation'''
### First subtract of mean of signals to avoid correlating dc
drive = drive-np.mean(drive)
response = response-np.mean(response)
### bandpass filter around drive frequency if desired.
if filt:
b, a = signal.butter(3, [2.*(fdrive-band_width/2.)/fsamp, \
2.*(fdrive+band_width/2.)/fsamp ], btype = 'bandpass')
drive = signal.filtfilt(b, a, drive)
response = signal.filtfilt(b, a, response)
### Compute the number of points and drive amplitude to normalize correlation
lentrace = len(drive)
drive_amp = np.sqrt(2)*np.std(drive)
### Define the correlation vector which will be populated later
corr = np.zeros(int(fsamp/fdrive))
### Zero-pad the response
response = np.append(response, np.zeros(int(fsamp / fdrive) - | |
assert_pint_array_equal(arr1, arr2):
assert_array_equal(arr1.magnitude, arr2.magnitude)
assert str(arr1.units) == str(arr2.units)
if isinstance(_pint.UnitRegistry, NotAModule):
return
ureg = _pint.UnitRegistry()
p_arr = np.arange(10) * ureg.km / ureg.year
yt_arr = unyt_array(np.arange(10), "km/yr")
yt_arr2 = unyt_array.from_pint(p_arr)
p_quan = 10.0 * ureg.g ** 0.5 / (ureg.mm ** 3)
yt_quan = unyt_quantity(10.0, "sqrt(g)/mm**3")
yt_quan2 = unyt_quantity.from_pint(p_quan)
assert_pint_array_equal(p_arr, yt_arr.to_pint())
assert_array_equal(yt_arr, unyt_array.from_pint(p_arr))
assert_array_equal(yt_arr, yt_arr2)
assert_pint_array_equal(p_quan, yt_quan.to_pint())
assert_equal(yt_quan, unyt_quantity.from_pint(p_quan))
assert_equal(yt_quan, yt_quan2)
assert_array_equal(yt_arr, unyt_array.from_pint(yt_arr.to_pint()))
assert_equal(yt_quan, unyt_quantity.from_pint(yt_quan.to_pint()))
def test_subclass():
    # Subclassing unyt_array must survive arithmetic, slicing, conversion
    # and copying: the subclass type should be preserved wherever sensible.
    class unyt_a_subclass(unyt_array):
        def __new__(
            cls, input_array, units=None, registry=None, bypass_validation=None
        ):
            return super(unyt_a_subclass, cls).__new__(
                cls,
                input_array,
                units,
                registry=registry,
                bypass_validation=bypass_validation,
            )

    grams = unyt_a_subclass([4, 5, 6], "g")
    kilos = unyt_a_subclass([7, 8, 9], "kg")
    dimless = unyt_a_subclass([10, 11, 12], "")
    plain_arr = np.array([3, 4, 5])
    mg_arr = unyt_array([6, 7, 8], "mg")
    quantity_list = [unyt_quantity(6, "mg"), unyt_quantity(7, "mg"), unyt_quantity(8, "mg")]
    cm_quan = unyt_quantity(4, "cm")
    np_float = np.float64(3)

    def check_both_orders(op, lhs, rhs, expected_class):
        # The expected class must win regardless of operand order.
        assert_isinstance(op(lhs, rhs), expected_class)
        assert_isinstance(op(rhs, lhs), expected_class)

    for op in (operator.mul, operator.truediv):
        for other in (kilos, cm_quan, np_float, mg_arr, plain_arr, quantity_list):
            check_both_orders(op, grams, other, unyt_a_subclass)
        check_both_orders(op, cm_quan, plain_arr, unyt_array)
        check_both_orders(op, cm_quan, mg_arr, unyt_array)
    for op in (operator.add, operator.sub):
        check_both_orders(op, dimless, plain_arr, unyt_a_subclass)
        check_both_orders(op, grams, kilos, unyt_a_subclass)
        check_both_orders(op, grams, mg_arr, unyt_a_subclass)
        check_both_orders(op, grams, quantity_list, unyt_a_subclass)
    # Indexing yields quantities; slicing preserves the subclass.
    assert_isinstance(grams[0], unyt_quantity)
    assert_isinstance(grams[:], unyt_a_subclass)
    assert_isinstance(grams[:2], unyt_a_subclass)
    # Construction from a plain unyt_array, conversion and copies all
    # keep the subclass type.
    assert_isinstance(unyt_a_subclass(mg_arr), unyt_a_subclass)
    assert_isinstance(grams.to("kg"), unyt_a_subclass)
    assert_isinstance(grams.copy(), unyt_a_subclass)
    assert_isinstance(copy.deepcopy(grams), unyt_a_subclass)
    with pytest.raises(RuntimeError):
        grams + "hello"
def test_h5_io():
    # Skip silently when h5py is not installed.
    if isinstance(_h5py.__version__, NotAModule):
        return
    workdir = tempfile.mkdtemp()
    startdir = os.getcwd()
    os.chdir(workdir)
    reg = UnitRegistry()
    reg.add("code_length", 10.0, dimensions.length)
    written = unyt_array(np.random.random((256, 256)), "code_length", registry=reg)
    written.write_hdf5("test.h5")
    read_back = unyt_array.from_hdf5("test.h5")
    assert_equal(written, read_back)
    # Custom units must round-trip through the on-disk registry.
    assert_equal(written.units.registry["code_length"], read_back.units.registry["code_length"])
    # test code to overwrite existing dataset
    written.write_hdf5("test.h5")
    read_back = unyt_array.from_hdf5("test.h5")
    assert_equal(written, read_back)
    # test code to overwrite existing dataset with data that has a different
    # shape
    written = unyt_array(np.random.random((255, 255)), "code_length", registry=reg)
    written.write_hdf5("test.h5")
    read_back = unyt_array.from_hdf5("test.h5")
    assert_equal(written, read_back)
    os.remove("test.h5")
    # write to a group that doesn't exist
    written.write_hdf5(
        "test.h5", dataset_name="test_dset", group_name="/arrays/test_group"
    )
    read_back = unyt_array.from_hdf5(
        "test.h5", dataset_name="test_dset", group_name="/arrays/test_group"
    )
    assert_equal(written, read_back)
    os.remove("test.h5")
    # write to a group that does exist
    with _h5py.File("test.h5", "a") as handle:
        handle.create_group("/arrays/test_group")
    written.write_hdf5(
        "test.h5", dataset_name="test_dset", group_name="/arrays/test_group"
    )
    read_back = unyt_array.from_hdf5(
        "test.h5", dataset_name="test_dset", group_name="/arrays/test_group"
    )
    assert_equal(written, read_back)
    os.remove("test.h5")
    os.chdir(startdir)
    shutil.rmtree(workdir)
def test_equivalencies():
import unyt as u
# equivalence is ignored if the conversion doesn't need one
data = 12.0 * u.g
data.convert_to_equivalent("kg", None)
assert data.value == 0.012
assert data.units == u.kg
data = 12.0 * u.g
data = data.to_equivalent("kg", None)
assert data.value == 0.012
assert data.units == u.kg
# incorrect usage of an equivalence raises errors
with pytest.raises(InvalidUnitEquivalence):
data.convert_to_equivalent("erg", "thermal")
with pytest.raises(InvalidUnitEquivalence) as excinfo:
data.convert_to_equivalent("m", "mass_energy")
assert (
str(excinfo.value)
== "The unit equivalence 'mass_energy: mass <-> energy' does not "
"exist for units 'kg' to convert to a new unit with dimensions "
"'(length)'."
)
with pytest.raises(InvalidUnitEquivalence):
data.to_equivalent("erg", "thermal")
with pytest.raises(InvalidUnitEquivalence):
data.to_equivalent("m", "mass_energy")
# Mass-energy
mp = u.mp.copy()
mp.convert_to_units("keV", "mass_energy")
assert_allclose_units(u.mp.in_units("keV", "mass_energy"), mp)
assert_allclose_units(mp, u.mp * u.clight * u.clight)
assert_allclose_units(u.mp, mp.in_units("g", "mass_energy"))
mp.convert_to_units("g", "mass_energy")
assert_allclose_units(u.mp, mp)
# Thermal
T = 1e8 * u.K
E = T.in_units("W*hr", "thermal")
assert_allclose_units(E, (u.kboltz * T).in_units("W*hr"))
assert_allclose_units(T, E.in_units("K", "thermal"))
T.convert_to_units("W*hr", "thermal")
assert_allclose_units(E, T)
T.convert_to_units("K", "thermal")
assert_allclose_units(T, 1e8 * u.K)
# Spectral
# wavelength to frequency
lam = 4000 * u.angstrom
nu = lam.in_units("Hz", "spectral")
assert_allclose_units(nu, u.clight / lam)
lam.convert_to_units("MHz", "spectral")
assert_allclose_units(lam, nu)
assert lam.units == u.MHz.units
assert nu.units == u.Hz.units
# wavelength to photon energy
lam = 4000 * u.angstrom
hnu = lam.in_units("erg", "spectral")
assert_allclose_units(hnu, u.h_mks * u.clight / lam)
lam.convert_to_units("eV", "spectral")
assert_allclose_units(lam, hnu)
assert lam.units == u.eV.units
assert hnu.units == u.erg.units
# wavelength to spatial frequency
lam = 4000 * u.angstrom
nubar = lam.in_units("1/angstrom", "spectral")
assert_allclose_units(nubar, 1 / lam)
lam.convert_to_units("1/cm", "spectral")
assert_allclose_units(lam, nubar)
assert lam.units == (1 / u.cm).units
assert nubar.units == (1 / u.angstrom).units
# frequency to wavelength
nu = 1.0 * u.MHz
lam = nu.to("km", "spectral")
assert_allclose_units(lam, u.clight / nu)
nu.convert_to_units("m", "spectral")
assert_allclose_units(lam, nu)
assert lam.units == u.km.units
assert nu.units == u.m.units
# frequency to spatial frequency
nu = 1.0 * u.MHz
nubar = nu.to("1/km", "spectral")
assert_allclose_units(nubar, nu / u.clight)
nu.convert_to_units("1/m", "spectral")
assert_allclose_units(nubar, nu)
assert nubar.units == (1 / u.km).units
assert nu.units == (1 / u.m).units
# frequency to photon energy
nu = 1.0 * u.MHz
E = nu.to("erg", "spectral")
assert_allclose_units(E, u.h_mks * nu)
nu.convert_to_units("J", "spectral")
assert_allclose_units(nu, E)
assert nu.units == u.J.units
assert E.units == u.erg.units
# photon energy to frequency
E = 13.6 * u.eV
nu = E.to("Hz", "spectral")
assert_allclose_units(nu, E / u.h_mks)
E.convert_to_units("MHz", "spectral")
assert_allclose_units(nu, E)
assert E.units == u.MHz.units
assert nu.units == u.Hz.units
# photon energy to wavelength
E = 13.6 * u.eV
lam = E.to("nm", "spectral")
assert_allclose_units(lam, u.h_mks * u.clight / E)
E.convert_to_units("angstrom", "spectral")
assert_allclose_units(E, lam)
assert E.units == u.angstrom.units
assert lam.units == u.nm.units
# photon energy to spatial frequency
E = 13.6 * u.eV
nubar = E.to("1/nm", "spectral")
assert_allclose_units(nubar, E / (u.h_mks * u.clight))
E.convert_to_units("1/angstrom", "spectral")
assert_allclose_units(E, nubar)
assert E.units == (1 / u.angstrom).units
assert nubar.units == (1 / u.nm).units
# spatial frequency to frequency
nubar = 1500.0 / u.cm
nu = nubar.to("Hz", "spectral")
assert_allclose_units(nu, nubar * u.clight)
nubar.convert_to_units("MHz", "spectral")
assert_allclose_units(nu, nubar)
assert nubar.units == u.MHz.units
assert nu.units == u.Hz.units
# spatial frequency to wavelength
nubar = 1500.0 / u.cm
lam = nubar.to("nm", "spectral")
assert_allclose_units(lam, 1 / nubar)
nubar.convert_to_units("angstrom", "spectral")
assert_allclose_units(nubar, lam)
assert nubar.units == u.angstrom.units
assert lam.units == u.nm.units
# spatial frequency to photon energy
nubar = 1500.0 / u.cm
E = nubar.to("erg", "spectral")
assert_allclose_units(E, u.h_mks * u.clight * nubar)
nubar.convert_to_units("J", "spectral")
assert_allclose_units(nubar, E)
assert nubar.units == u.J.units
assert E.units == u.erg.units
# Sound-speed
# tempearature <-> velocity
mu = 0.6
gg = 5.0 / 3.0
T = 1e8 * u.K
c_s = T.in_units("km/s", equivalence="sound_speed")
assert_allclose_units(c_s, np.sqrt(gg * u.kboltz * T / (mu * u.mh)))
assert_allclose_units(T, c_s.in_units("K", "sound_speed"))
T.convert_to_units("m/s", "sound_speed")
assert_allclose_units(c_s, T)
assert T.units == u.m.units / u.s.units
assert c_s.units == u.km.units / u.s.units
mu = 0.5
gg = 4.0 / 3.0
T = 1e8 * u.K
c_s = T.in_units("km/s", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(c_s, np.sqrt(gg * u.kboltz * T / (mu * u.mh)))
assert_allclose_units(T, c_s.in_units("K", "sound_speed", mu=mu, gamma=gg))
T.convert_to_units("m/s", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(c_s, T)
assert T.units == u.m.units / u.s.units
assert c_s.units == u.km.units / u.s.units
# tempearture <-> energy
mu = 0.5
gg = 4.0 / 3.0
T = 1e8 * u.K
kT = T.in_units("eV", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(kT, u.kboltz * T)
T.convert_to_units("erg", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(T, kT)
assert T.units == u.erg.units
assert kT.units == u.eV.units
assert_allclose_units(T.in_units("K", "sound_speed", mu=mu, gamma=gg), 1e8 * u.K)
kT.convert_to_units("K", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(kT, 1e8 * u.K)
# velocity <-> energy
c_s = 300 * u.m / u.s
kT = c_s.in_units("erg", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(kT, c_s ** 2 * mu * u.mh / gg)
c_s.convert_to_units("J", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(c_s, kT)
assert c_s.units == u.J.units
assert kT.units == u.erg.units
assert_allclose_units(
kT.in_units("m/s", "sound_speed", mu=mu, gamma=gg), 300 * u.m / u.s
)
c_s.convert_to_units("m/s", "sound_speed", mu=mu, gamma=gg)
assert_allclose_units(c_s, 300 * u.m / u.s)
# Lorentz
v = 0.8 * u.clight
g = v.in_units("dimensionless", "lorentz")
g2 = unyt_quantity(1.0 / np.sqrt(1.0 - 0.8 * 0.8), "dimensionless")
assert_allclose_units(g, g2)
v.convert_to_units("", "lorentz")
assert_allclose_units(v, g2)
v.convert_to_units("c", "lorentz")
v2 = g2.in_units("mile/hr", "lorentz")
assert_allclose_units(v2, v.in_units("mile/hr"))
# Schwarzschild
msun = 1.0 * u.unit_symbols.Msun
msun.convert_to_equivalent("km", "schwarzschild")
R = u.mass_sun_mks.in_units("kpc", "schwarzschild")
assert_allclose_units(msun, R)
assert_allclose_units(R.in_mks(), 2 * u.G * u.mass_sun_mks / (u.clight ** 2))
assert_allclose_units(u.mass_sun_mks, R.in_units("kg", "schwarzschild"))
R.convert_to_units("Msun", "schwarzschild")
assert_allclose_units(u.mass_sun_mks, R)
assert R.units == u.unit_symbols.Msun.units
assert msun.units == u.km.units
# Compton
me = 1.0 * u.me
me.convert_to_units("nm", "compton")
length = u.me.in_units("angstrom", "compton")
assert_allclose_units(length, me)
assert_allclose_units(length, u.h_mks / (u.me * u.clight))
assert_allclose_units(u.me, length.in_units("g", "compton"))
assert me.units == u.nm.units
assert length.units == u.angstrom.units
me.convert_to_units("me", "compton")
assert_almost_equal(me.value, 1.0)
# Number density
rho = u.mp / u.m ** 3
n = rho.in_units("m**-3", "number_density")
assert_allclose_units(n, rho / (u.mh * 0.6))
assert_allclose_units(rho, n.in_units("kg/m**3", "number_density"))
rho.convert_to_units("cm**-3", "number_density")
assert rho.units == (1 / u.cm ** 3).units
assert n.units == (1 / u.m ** 3).units
assert_allclose_units(n, rho)
rho.convert_to_units("kg/m**3", "number_density")
assert_allclose_units(u.mp / u.m ** 3, rho)
assert rho.units == (u.kg / u.m ** 3).units
rho = u.mp / u.m ** 3
n = rho.in_units("m**-3", equivalence="number_density", mu=0.75)
assert_allclose_units(n, rho / (u.mh * 0.75))
assert_allclose_units(
| |
# -*- python -*-
import os.path
import identifier
import string
from typeinfo import GetTypeInformation, VoidType, ternary
from omniidl import idlast, idlvisitor, idlutil, idltype, output
# For compatibility with older Pythons...
def zip(a1, a2):
    """List-returning pairwise zip of two sequences, truncated to the
    shorter one. Deliberately shadows the builtin so the rest of this
    generator always gets a concrete list back."""
    shortest = min(len(a1), len(a2))
    return [(a1[i], a2[i]) for i in range(shortest)]
class CGRSWalker(idlvisitor.AstVisitor):
"""Walks over the AST once and writes the CGRS code in the process"""
    def visitAST(self, node):
        """Visit the top-level AST and emit the generated C++ output.

        Walks every top-level declaration, then emits an
        init_cgrsmodule_<module>() function that registers the generated
        interfaces, bootstrap objects and enum types with the generics
        service.

        NOTE(review): this relies on Python 2 semantics -- `reduce` as a
        builtin and `filter`/`map` returning lists (see the
        `newMods != []` test and list concatenations below).
        """
        self.outputInNamespaces = []
        self.inputInNamespaces = ['CDA', 'CGRS']
        self.cxx.out('// This file is automatically generated; do not edit.')
        # The IDL file's basename is used as the module name throughout.
        self.module = node.filebase
        self.cxx.out('#include <exception>')
        self.cxx.out('#include "Utilities.hxx"')
        self.cxx.out('#include "IfaceCGRS.hxx"')
        self.cxx.out('#include "CGRSImplementation.hpp"')
        self.cxx.out('#include "CGRSBootstrap.hpp"')
        self.cxx.out('#include "Iface' + self.module + '.hxx"')
        # Additional includes registered specifically for this module.
        for [k, v] in self.specialIncludes:
            if k == self.module:
                self.cxx.out('#include "%s"' % v)
        # Emit per-declaration code first...
        for n in node.declarations():
            n.accept(self)
        self.leaveAllNamespaces()
        # ...then the module registration entry point.
        self.cxx.out('void init_cgrsmodule_%s()' % self.module)
        self.cxx.out('{')
        self.cxx.inc_indent()
        self.cxx.out('ObjRef<CDA_GenericsService> cgs = CreateGenericsServiceInternal();')
        # Flatten the module tree level by level to collect every module.
        allMods = []
        newMods = filter(lambda x: isinstance(x, idlast.Module), node.declarations())
        allMods = allMods + newMods
        while newMods != []:
            newMods = reduce(lambda x, y: x + y, \
                             map(lambda x: filter(lambda y: (isinstance(y, idlast.Module)), \
                                 x.definitions()), newMods), [])
            allMods = allMods + newMods
        # Only modules from the main IDL file are registered.
        allMods = filter(lambda x: x.mainFile(), allMods)
        # Every interface defined directly inside one of those modules.
        allIfaces = reduce(lambda x, y: x + y, \
                           map(lambda x: filter(lambda y: isinstance(y, idlast.Interface), x.definitions()), \
                               allMods), [])
        allIfaceNames = map(lambda x: x.corbacxxscoped, allIfaces)
        # Register each generated interface wrapper with the generics service.
        for n in allIfaces:
            self.cxx.out('{')
            self.cxx.inc_indent()
            self.cxx.out('RETURN_INTO_OBJREF(iftmp, CDA::CGRS::%s, new CDA::CGRS::%s());' % (n.corbacxxscoped, n.corbacxxscoped))
            self.cxx.out('cgs->registerInterface("%s", iftmp);' % n.corbacxxscoped)
            self.cxx.dec_indent()
            self.cxx.out('}')
        # Register bootstrap objects whose interface appears in this module.
        # Each entry n is (interface scoped name, bootstrap factory name).
        for n in self.bootstrapSpecials:
            if n[0] in allIfaceNames:
                self.cxx.out('{')
                self.cxx.inc_indent()
                self.cxx.out('ObjRef<iface::%s> bstmp = %s();' % (n[0], n[1]))
                self.cxx.out('ObjRef<iface::CGRS::GenericValue> objtmp = cgs->makeObject(bstmp);')
                self.cxx.out('cgs->registerBootstrap("%s", objtmp);' % n[1])
                self.cxx.dec_indent()
                self.cxx.out('}')
        # Enums can live either directly in a module or inside an interface.
        allEnums = reduce(lambda x, y: x + y, \
                          map(lambda x: filter(lambda y: isinstance(y, idlast.Enum), x.definitions()), \
                              allMods) +\
                          map(lambda x: filter(lambda y: isinstance(y, idlast.Enum), x.declarations()), \
                              allIfaces), [])
        for n in allEnums:
            self.cxx.out('{')
            self.cxx.inc_indent()
            self.cxx.out('RETURN_INTO_OBJREF(typ, CDA::CGRS::%s, new CDA::CGRS::%s());' %\
                         (n.corbacxxscoped, n.corbacxxscoped))
            self.cxx.out('cgs->registerType(typ);')
            self.cxx.dec_indent()
            self.cxx.out('}')
        self.cxx.dec_indent()
        self.cxx.out('}')
def visitModule(self, node):
"""Visit all the definitions in a module."""
self.inputInNamespaces.append(node.simplename)
for n in node.definitions():
if n.mainFile():
n.accept(self)
self.inputInNamespaces.pop()
def enterAllNamespaces(self):
if self.inputInNamespaces == self.outputInNamespaces:
return
try:
keepDepth = map(lambda (a, b): a != b, zip(self.inputInNamespaces, self.outputInNamespaces)).index(1)
except ValueError:
keepDepth = min(len(self.inputInNamespaces), len(self.outputInNamespaces))
for i in range (0, len(self.outputInNamespaces) - keepDepth):
self.cxx.dec_indent()
self.cxx.out('};')
for ns in self.inputInNamespaces[keepDepth:]:
self.cxx.out('namespace %s' % ns)
self.cxx.out('{')
self.cxx.inc_indent()
self.outputInNamespaces = self.inputInNamespaces
def leaveAllNamespaces(self):
for i in self.outputInNamespaces:
self.cxx.dec_indent()
self.cxx.out('};')
self.outputInNamespaces = []
    def visitForward(self, node):
        """Forward declarations produce no generated code; ignore them."""
        pass
def recursivelyFindAncestralBases(self, node):
ret = []
for inh in node.inherits():
# It seems inherits can return other things...
if isinstance(inh, idlast.Interface):
ret.append(inh)
ret = ret + self.recursivelyFindAncestralBases(inh)
return ret
    def visitInterface(self, node):
        """Emit the CGRS generic-interface wrapper class for one IDL interface.

        Generates, inside the appropriate C++ namespace:
          * an optional ``Callback<Name>`` proxy class (only when the
            interface carries the "user-callback" pragma);
          * a ``<Name>`` class derived from CDA_GenericInterfaceBase that
            exposes the interface's bases, attributes and operations both by
            index and by name;
          * an out-of-line ``makeCallbackProxy()`` definition after the
            namespaces are closed.
        """
        self.iface = node.corbacxxscoped
        self.enterAllNamespaces()
        # The interface supports user callbacks iff any pragma says so.
        hasCallback = 0
        for p in node.pragmas(): hasCallback = hasCallback or (p.text() == "user-callback")
        if hasCallback:
            # Proxy class forwarding native C++ calls onto the generic
            # CallbackObjectValue supplied by the scripting side.
            self.cxx.out('class Callback%s' % node.simplename)
            self.cxx.out(' : public %s, public CGRSCallback' % node.simplecxxscoped)
            self.cxx.out('{')
            self.cxx.out('public:')
            self.cxx.inc_indent()
            self.cxx.out('Callback%s(iface::CGRS::CallbackObjectValue* aValue)' % node.simplename)
            self.cxx.out(' : mValue(aValue)')
            self.cxx.out('{')
            self.cxx.out('}')
            self.cxx.out('already_AddRefd<iface::CGRS::CallbackObjectValue> unwrap() { mValue->add_ref(); return mValue.getPointer(); }')
            self.generateCallbackFunctions(node, {})
            self.cxx.dec_indent()
            self.cxx.out('private:')
            self.cxx.inc_indent()
            self.cxx.out('ObjRef<iface::CGRS::CallbackObjectValue> mValue;')
            self.cxx.dec_indent()
            self.cxx.out('};')
        # The generic-interface description class itself.
        self.cxx.out('class %s' % node.simplename)
        self.cxx.out(' : public CDA_GenericInterfaceBase')
        self.cxx.out('{')
        self.cxx.out('public:')
        self.cxx.inc_indent()
        self.cxx.out('%s() {}' % node.simplename)
        self.cxx.out('~%s() {}' % node.simplename)
        self.cxx.out('CDA_IMPL_ID;')
        self.cxx.out('CDA_IMPL_REFCOUNT;')
        self.cxx.out('CDA_IMPL_QI1(CGRS::GenericInterface);')
        # Ancestral bases are exposed by numeric index via getBase().
        bases = self.recursivelyFindAncestralBases(node)
        self.cxx.out('int32_t baseCount() throw() { return %d; }' % len(bases))
        self.cxx.out('already_AddRefd<iface::CGRS::GenericInterface> getBase(int32_t aBaseName)')
        self.cxx.out(' throw(std::exception&)')
        self.cxx.out('{')
        self.cxx.inc_indent()
        self.cxx.out('ObjRef<CDA_GenericsService> cgs = CreateGenericsServiceInternal();')
        self.cxx.out('switch (aBaseName) {')
        self.cxx.inc_indent()
        for (i, b) in zip(range(0,len(bases)), bases):
            self.cxx.out('case %d: return cgs->getInterfaceByName("%s");' % (i, b.simplecxxscoped))
        self.cxx.out('default: throw iface::CGRS::CGRSError();')
        self.cxx.dec_indent()
        self.cxx.out('}') # Close switch
        self.cxx.dec_indent()
        self.cxx.out('}') # Close getBase()
        # Do nested enums before we declare remainder private:
        for n in node.contents():
            if isinstance(n, idlast.Enum):
                n.accept(self)
        # Now define classes (within this class) for attributes and operations...
        self.cxx.dec_indent()
        self.cxx.out('private:')
        self.cxx.inc_indent()
        # Visiting the attributes/operations emits their attr*/meth* helper
        # classes and records their names in these lists as a side effect.
        self.supportedAttributes = []
        self.supportedOperations = []
        for n in node.contents():
            if isinstance(n, idlast.Attribute) or isinstance(n, idlast.Operation):
                n.accept(self)
        self.cxx.dec_indent()
        self.cxx.out('public:')
        self.cxx.inc_indent()
        self.cxx.out('int32_t attributeCount() throw(std::exception&) { return %d; }' % len(self.supportedAttributes))
        self.cxx.out('int32_t operationCount() throw(std::exception&) { return %d; }' % len(self.supportedOperations))
        # Lookup by index: attributes...
        self.cxx.out('already_AddRefd<iface::CGRS::GenericAttribute>')
        self.cxx.out('getAttributeByIndex(int32_t aAttributeNumber) throw(std::exception&)')
        self.cxx.out('{')
        self.cxx.inc_indent()
        self.cxx.out('switch (aAttributeNumber)')
        self.cxx.out('{')
        self.cxx.inc_indent()
        for (i, b) in zip(range(0,len(self.supportedAttributes)), self.supportedAttributes):
            self.cxx.out('case %d: return new CDA::CGRS::%s::attr%s();' % (i, node.corbacxxscoped, b))
        self.cxx.out('default: throw iface::CGRS::CGRSError();')
        self.cxx.dec_indent()
        self.cxx.out('}')
        self.cxx.dec_indent()
        self.cxx.out('}')
        # ...and operations.
        self.cxx.out('already_AddRefd<iface::CGRS::GenericMethod>')
        self.cxx.out('getOperationByIndex(int32_t aOperationNumber) throw(std::exception&)')
        self.cxx.out('{')
        self.cxx.inc_indent()
        self.cxx.out('switch (aOperationNumber)')
        self.cxx.out('{')
        self.cxx.inc_indent()
        for (i, b) in zip(range(0,len(self.supportedOperations)), self.supportedOperations):
            self.cxx.out('case %d: return new CDA::CGRS::%s::meth%s();' % (i, node.corbacxxscoped, b))
        self.cxx.out('default: throw iface::CGRS::CGRSError();')
        self.cxx.dec_indent()
        self.cxx.out('}')
        self.cxx.dec_indent()
        self.cxx.out('}')
        # Lookup by name: the first comparison is a plain "if", the rest
        # become "else if" via the elseV prefix.
        self.cxx.out('already_AddRefd<iface::CGRS::GenericAttribute>')
        self.cxx.out('getAttributeByName(const std::string& aAttributeName) throw(std::exception&)')
        self.cxx.out('{')
        self.cxx.inc_indent()
        elseV = ''
        for b in self.supportedAttributes:
            self.cxx.out('%sif (aAttributeName == "%s") return new CDA::CGRS::%s::attr%s();' % (elseV, b, node.corbacxxscoped, b))
            elseV = 'else '
        self.cxx.out('throw iface::CGRS::CGRSError();')
        self.cxx.dec_indent()
        self.cxx.out('}')
        self.cxx.out('already_AddRefd<iface::CGRS::GenericMethod>')
        self.cxx.out('getOperationByName(const std::string& aOperationName) throw(std::exception&)')
        self.cxx.out('{')
        self.cxx.inc_indent()
        elseV = ''
        for b in self.supportedOperations:
            self.cxx.out('%sif (aOperationName == "%s") return new CDA::CGRS::%s::meth%s();' % (elseV, b, node.corbacxxscoped, b))
            elseV = 'else '
        self.cxx.out('throw iface::CGRS::CGRSError();')
        self.cxx.dec_indent()
        self.cxx.out('}')
        self.cxx.out('void* makeCallbackProxy(iface::CGRS::CallbackObjectValue* aValue);')
        self.cxx.dec_indent()
        self.cxx.out('};') # Close class
        self.leaveAllNamespaces()
        # makeCallbackProxy is defined out-of-line, after the namespaces are
        # closed; it returns a new callback proxy when supported, else NULL.
        self.cxx.out('void* CDA::CGRS::%s::makeCallbackProxy(iface::CGRS::CallbackObjectValue* aValue)' % node.corbacxxscoped)
        self.cxx.out('{')
        if hasCallback:
            self.cxx.out(' return reinterpret_cast<void*>(static_cast<%s*>(new Callback%s(aValue)));' %\
                (node.simplecxxscoped, node.simplename))
        else:
            self.cxx.out(' return NULL;')
        self.cxx.out('}')
def generateCallbackFunctions(self, node, seen):
if (seen.has_key(node.simplecxxscoped)): return
seen[node.simplecxxscoped] = 1
for d in node.inherits():
self.generateCallbackFunctions(d, seen)
blacklist = []
if node.simplecxxscoped == "iface::XPCOM::IObject":
self.cxx.out('CDA_IMPL_REFCOUNT;')
self.cxx.out('CDA_IMPL_ID;')
self.cxx.out('void* query_interface(const std::string& aTarget) throw()')
self.cxx.out('{')
self.cxx.inc_indent()
# Firstly check with the target...
self.cxx.out('std::vector<iface::CGRS::GenericValue*> valseq;')
self.cxx.out('bool wasExcept;')
self.cxx.out('ObjRef<iface::CGRS::GenericValue> qiRet = ')
self.cxx.out(' mValue->invokeOnInterface("XPCOM::IObject", "query_interface", valseq, valseq, &wasExcept);')
self.cxx.out('if (wasExcept) return NULL;')
self.cxx.out('ObjRef<CDA_GenericsService> cgs = CreateGenericsServiceInternal();')
self.cxx.out('ObjRef<iface::CGRS::GenericValue> vv = cgs->makeVoid();\n')
self.cxx.out('if (!CDA_objcmp(vv, qiRet)) return NULL;')
self.cxx.out('ObjRef<iface::CGRS::GenericInterface> gi = cgs->getInterfaceByName(aTarget);\n')
self.cxx.out('if (gi == NULL) return NULL;')
self.cxx.out('return static_cast<CDA_GenericInterfaceBase*>(gi.getPointer())->makeCallbackProxy(mValue);')
self.cxx.dec_indent()
self.cxx.out('}')
blacklist = ['add_ref', 'release_ref', 'query_interface', 'objid']
for d in node.callables():
if isinstance(d, idlast.Operation):
if d.simplename in blacklist: continue
global dv
dv = d
exception = ternary(d.raises() == [], lambda: "std::exception", lambda: dv.raises()[0].simplecxxscoped)
self.generateCallbackFunction(\
node.corbacxxscoped, d.simplename, exception, d.returnType(),\
map(lambda x: (x.paramType(), x.is_in(), x.is_out()),\
d.parameters()))
elif isinstance(d, idlast.Attribute):
for a in d.declarators():
if a.simplename in blacklist: continue
self.generateCallbackFunction(\
node.corbacxxscoped, a.simplename, 'std::exception', d.attrType(), [])
if not d.readonly():
self.generateCallbackFunction(\
node.corbacxxscoped, a.simplename, 'std::exception', None, [(d.attrType(), 1, 0)])
    def generateCallbackFunction(self, ifaceName, name, exception, ret, argInfo):
        """Emit one proxy member function that forwards a native C++ call
        through invokeOnInterface() on the wrapped CallbackObjectValue.

        Args:
            ifaceName: scoped interface name passed to invokeOnInterface.
            name: method/attribute name (also the generic operation name).
            exception: C++ exception type thrown when the callback reports one.
            ret: IDL return type, or None for void (e.g. attribute setters).
            argInfo: list of (paramType, is_in, is_out) triples.
        """
        global retv # Workaround for Python 1.x
        retv = ret
        rtype = ternary(ret == None, lambda: None, lambda: GetTypeInformation(retv))
        global rtypev
        rtypev = rtype
        rname = ternary(rtype == None, lambda: "void", lambda: rtypev.cppReturnSignatureType)
        # Build the C++ parameter list; out-params use the out signature type.
        argsig = ''
        for (i, (argType, argIn, argOut)) in zip(range(0, len(argInfo)), argInfo):
            argt = GetTypeInformation(argType)
            if argsig != '':
                argsig = argsig + ', '
            global argtv
            argtv = argt
            argsig = '%s%s arg%d' % (argsig, ternary(argOut, lambda: argtv.cppOutSignatureType, lambda: argtv.cppInSignatureType), i)
        self.cxx.out('%s %s(%s)' % (rname, name, argsig))
        self.cxx.out(' throw(std::exception&)')
        self.cxx.out('{')
        self.cxx.inc_indent()
        self.cxx.out('ObjRef<CDA_GenericsService> cgs = CreateGenericsServiceInternal();')
        # In-parameters are converted to generic values and collected into
        # inValSeq; scoped_destroy releases them on scope exit.
        self.cxx.out('std::vector<iface::CGRS::GenericValue*> inValSeq;')
        self.cxx.out('scoped_destroy<std::vector<iface::CGRS::GenericValue*> > inValSeqReleaser(inValSeq, ' +\
                     'new container_destructor<std::vector<iface::CGRS::GenericValue*> >(new objref_destructor<iface::CGRS::GenericValue>()));')
        for (i, (argType, argIn, argOut)) in zip(range(0, len(argInfo)), argInfo):
            if argIn:
                self.cxx.out('{')
                self.cxx.inc_indent()
                argt = GetTypeInformation(argType)
                # Note: Objects can actually have one of two incompatible interfaces - callback or normal.
                self.cxx.out('iface::%s* genval;' % argt.genericIface)
                self.cxx.out(argt.convertNativeToGeneric('%sarg%d' % (argt.deref(argOut), i), 'genval'))
                self.cxx.out('inValSeq.push_back(genval);')
                self.cxx.dec_indent()
                self.cxx.out('}')
        self.cxx.out('std::vector<iface::CGRS::GenericValue*> outValSeq;')
        self.cxx.out('scoped_destroy<std::vector<iface::CGRS::GenericValue*> > outValSeqReleaser(outValSeq, new container_destructor<std::vector<iface::CGRS::GenericValue*> >(new objref_destructor<iface::CGRS::GenericValue>()));')
        self.cxx.out('bool wasException = false;')
        self.cxx.out('ObjRef<iface::CGRS::GenericValue> genret = mValue->invokeOnInterface("%s", "%s", inValSeq, outValSeq, &wasException);' % (ifaceName, name))
        self.cxx.out('if (wasException) throw %s();' % exception)
        # Out-parameters come back, in declaration order, in outValSeq.
        self.cxx.out('std::vector<iface::CGRS::GenericValue*>::iterator outVali = outValSeq.begin();')
        for (i, (argType, argIn, argOut)) in zip(range(0, len(argInfo)), argInfo):
            if argOut:
                argt = GetTypeInformation(argType)
                self.cxx.out('DECLARE_QUERY_INTERFACE_OBJREF(genout%d, (*outVali), %s);' % (i, argt.genericIface))
                self.cxx.out(argt.convertGenericToNative('genout%d' % i, '%sarg%d' % (argt.deref(1), i)))
                self.cxx.out('outVali++;')
        if rtype != None and not isinstance(rtype, VoidType):
            # Convert the generic return value back to the native C++ type.
            self.cxx.out('DECLARE_QUERY_INTERFACE_OBJREF(genreti, genret, %s);' % rtype.genericIface)
            self.cxx.out(rtype.makeStorage('retval'))
            self.cxx.out(rtype.convertGenericToNative('genreti', 'retval'))
            self.cxx.out(rtype.returnStorage('retval'))
        self.cxx.dec_indent()
        self.cxx.out('}')
    def visitTypedef(self, node):
        # Typedefs need no generic wrapper; the underlying type is used.
        pass
    def visitMember(self, node):
        # Struct members would only arise inside structs, which are rejected
        # by visitStruct; nothing to do here.
        pass
def visitStruct(self, node):
raise "Structs are not supported"
def visitUnion(self, node):
raise "Unions are not supported"
    def visitEnumerator(self, node):
        # Individual enumerators are handled inside visitEnum; nothing to do.
        pass
def visitEnum(self, node):
self.enterAllNamespaces()
self.cxx.out('class %s' % node.simplename)
self.cxx.out(' : public iface::CGRS::EnumType')
self.cxx.out('{')
self.cxx.out('public:')
self.cxx.inc_indent()
self.cxx.out('%s()' % node.simplename)
self.cxx.out('{')
self.cxx.inc_indent()
for n in node.enumerators():
self.cxx.out('mNameToInt.insert(std::pair<std::string, uint32_t>("%s", %lu));' %\
(n.identifier(), n.value()))
self.cxx.out('mIntToName.insert(std::pair<uint32_t, std::string>(%lu, "%s"));' %\
(n.value(), n.identifier()))
self.cxx.dec_indent()
self.cxx.out('}')
self.cxx.out('CDA_IMPL_ID;')
self.cxx.out('CDA_IMPL_REFCOUNT;')
self.cxx.out('CDA_IMPL_QI2(CGRS::EnumType, CGRS::GenericType);')
self.cxx.out('std::string asString() throw() { return "%s"; }' % node.corbacxxscoped)
self.cxx.out('int32_t maxIndex() throw() { return %d; }' % (len(node.enumerators()) - 1))
self.cxx.out('std::string indexToName(int32_t aIndex) throw(std::exception&)')
self.cxx.out('{')
self.cxx.inc_indent()
self.cxx.out('std::map<uint32_t, std::string>::iterator i = mIntToName.find(aIndex);')
self.cxx.out('if (i == mIntToName.end()) throw iface::CGRS::CGRSError();')
self.cxx.out('return (*i).second;')
self.cxx.dec_indent()
self.cxx.out('}')
self.cxx.out('int32_t nameToIndex(const std::string& aName) throw(std::exception&)')
self.cxx.out('{')
self.cxx.inc_indent()
self.cxx.out('std::map<std::string, uint32_t>::iterator i = | |
0.01960784, 0.03921569,
0.05882353, 0.07843137, 0.09803922, 0.11764706, 0.1372549 ,
0.15686275, 0.17647059, 0.19607843, 0.21568627, 0.23529412,
0.25490196, 0.2745098 , 0.29411765, 0.31372549, 0.33333333,
0.35294118, 0.37254902, 0.39215686, 0.41176471, 0.43137255,
0.45098039, 0.47058824, 0.49019608, 0.50980392, 0.52941176,
0.54901961, 0.56862745, 0.58823529, 0.60784314, 0.62745098,
0.64705882, 0.66666667, 0.68627451, 0.70588235, 0.7254902 ,
0.74509804, 0.76470588, 0.78431373, 0.80392157, 0.82352941,
0.84313725, 0.8627451 , 0.88235294, 0.90196078, 0.92156863,
0.94117647, 0.96078431, 0.98039216, 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 1. , 1. ,
1. , 1. , 1. , 0.97564338, 0.94316789,
0.9106924 , 0.87821691, 0.84574142, 0.81326593, 0.78079044,
0.74831495, 0.71583946, 0.68336397, 0.65088848, 0.61841299,
0.5859375 ]),
array([ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.00995711, 0.01991422, 0.02987132,
0.03982843, 0.04978554, 0.05974265, 0.06969975, 0.07965686,
0.08961397, 0.09957108, 0.10952819, 0.11948529, 0.1294424 ,
0.13939951, 0.14935662, 0.15931373, 0.16927083, 0.17922794,
0.18918505, 0.19914216, 0.20909926, 0.21905637, 0.22901348,
0.23897059, 0.2489277 , 0.2588848 , 0.26884191, 0.27879902,
0.28875613, 0.29871324, 0.30867034, 0.31862745, 0.32858456,
0.33854167, 0.34849877, 0.35845588, 0.36841299, 0.3783701 ,
0.38832721, 0.39828431, 0.40824142, 0.41819853, 0.42815564,
0.43811275, 0.44806985, 0.45802696, 0.46798407, 0.47794118,
0.48789828, 0.49785539, 0.5078125 , 0.51746324, 0.52711397,
0.53676471, 0.54641544, 0.55606618, 0.56571691, 0.57536765,
0.58501838, 0.59466912, 0.60431985, 0.61397059, 0.62362132,
0.63327206, 0.64292279, 0.65257353, 0.66222426, 0.671875 ,
0.68152574, 0.69117647, 0.70082721, 0.71047794, 0.72012868,
0.72977941, 0.73943015, 0.74908088, 0.75873162, 0.76838235,
0.77803309, 0.78768382, 0.79733456, 0.80698529, 0.81663603,
0.82628676, 0.8359375 , 0.84558824, 0.85523897, 0.86488971,
0.87454044, 0.88419118, 0.89384191, 0.90349265, 0.91314338,
0.92279412, 0.93244485, 0.94209559, 0.95174632, 0.96139706,
0.97104779, 0.98069853, 0.99034926, 1. , 0.99019608,
0.98039216, 0.97058824, 0.96078431, 0.95098039, 0.94117647,
0.93137255, 0.92156863, 0.91176471, 0.90196078, 0.89215686,
0.88235294, 0.87254902, 0.8627451 , 0.85294118, 0.84313725,
0.83333333, 0.82352941, 0.81372549, 0.80392157, 0.79411765,
0.78431373, 0.7745098 , 0.76470588, 0.75490196, 0.74509804,
0.73529412, 0.7254902 , 0.71568627, 0.70588235, 0.69607843,
0.68627451, 0.67647059, 0.66666667, 0.65686275, 0.64705882,
0.6372549 , 0.62745098, 0.61764706, 0.60784314, 0.59803922,
0.58823529, 0.57843137, 0.56862745, 0.55882353, 0.54901961,
0.53921569, 0.52941176, 0.51960784, 0.50980392, 0.5 ,
0.49019608, 0.48039216, 0.47058824, 0.46078431, 0.45098039,
0.44117647, 0.43137255, 0.42156863, 0.41176471, 0.40196078,
0.39215686, 0.38235294, 0.37254902, 0.3627451 , 0.35294118,
0.34313725, 0.33333333, 0.32352941, 0.31372549, 0.30392157,
0.29411765, 0.28431373, 0.2745098 , 0.26470588, 0.25490196,
0.24509804, 0.23529412, 0.2254902 , 0.21568627, 0.20588235,
0.19607843, 0.18627451, 0.17647059, 0.16666667, 0.15686275,
0.14705882, 0.1372549 , 0.12745098, 0.11764706, 0.10784314,
0.09803922, 0.08823529, 0.07843137, 0.06862745, 0.05882353,
0.04901961, 0.03921569, 0.02941176, 0.01960784, 0.00980392, 0. ]),
array([ 3.12500000e-01, 3.23223039e-01, 3.33946078e-01,
3.44669118e-01, 3.55392157e-01, 3.66115196e-01,
3.76838235e-01, 3.87561275e-01, 3.98284314e-01,
4.09007353e-01, 4.19730392e-01, 4.30453431e-01,
4.41176471e-01, 4.51899510e-01, 4.62622549e-01,
4.73345588e-01, 4.84068627e-01, 4.94791667e-01,
5.05514706e-01, 5.16237745e-01, 5.26960784e-01,
5.37683824e-01, 5.48406863e-01, 5.59129902e-01,
5.69852941e-01, 5.80575980e-01, 5.91299020e-01,
6.02022059e-01, 6.12745098e-01, 6.23468137e-01,
6.34191176e-01, 6.44914216e-01, 6.55637255e-01,
6.66360294e-01, 6.77083333e-01, 6.87806373e-01,
6.98529412e-01, 7.09252451e-01, 7.19975490e-01,
7.30698529e-01, 7.41421569e-01, 7.52144608e-01,
7.62867647e-01, 7.73590686e-01, 7.84313725e-01,
7.95036765e-01, 8.05759804e-01, 8.16482843e-01,
8.27205882e-01, 8.37928922e-01, 8.48651961e-01,
8.59375000e-01, 8.42524510e-01, 8.25674020e-01,
8.08823529e-01, 7.91973039e-01, 7.75122549e-01,
7.58272059e-01, 7.41421569e-01, 7.24571078e-01,
7.07720588e-01, 6.90870098e-01, 6.74019608e-01,
6.57169118e-01, 6.40318627e-01, 6.23468137e-01,
6.06617647e-01, 5.89767157e-01, 5.72916667e-01,
5.56066176e-01, 5.39215686e-01, 5.22365196e-01,
5.05514706e-01, 4.88664216e-01, 4.71813725e-01,
4.54963235e-01, 4.38112745e-01, 4.21262255e-01,
4.04411765e-01, 3.87561275e-01, 3.70710784e-01,
3.53860294e-01, 3.37009804e-01, 3.20159314e-01,
3.03308824e-01, 2.86458333e-01, 2.69607843e-01,
2.52757353e-01, 2.35906863e-01, 2.19056373e-01,
2.02205882e-01, 1.85355392e-01, 1.68504902e-01,
1.51654412e-01, 1.34803922e-01, 1.17953431e-01,
1.01102941e-01, 8.42524510e-02, 6.74019608e-02,
5.05514706e-02, 3.37009804e-02, 1.68504902e-02,
0.00000000e+00, 1.53186275e-03, 3.06372549e-03,
4.59558824e-03, 6.12745098e-03, 7.65931373e-03,
9.19117647e-03, 1.07230392e-02, 1.22549020e-02,
1.37867647e-02, 1.53186275e-02, 1.68504902e-02,
1.83823529e-02, 1.99142157e-02, 2.14460784e-02,
2.29779412e-02, 2.45098039e-02, 2.60416667e-02,
2.75735294e-02, 2.91053922e-02, 3.06372549e-02,
3.21691176e-02, 3.37009804e-02, 3.52328431e-02,
3.67647059e-02, 3.82965686e-02, 3.98284314e-02,
4.13602941e-02, 4.28921569e-02, 4.44240196e-02,
4.59558824e-02, 4.74877451e-02, 4.90196078e-02,
5.05514706e-02, 5.20833333e-02, 5.36151961e-02,
5.51470588e-02, 5.66789216e-02, 5.82107843e-02,
5.97426471e-02, 6.12745098e-02, 6.28063725e-02,
6.43382353e-02, 6.58700980e-02, 6.74019608e-02,
6.89338235e-02, 7.04656863e-02, 7.19975490e-02,
7.35294118e-02, 7.50612745e-02, 7.65931373e-02,
7.81250000e-02, 7.73590686e-02, 7.65931373e-02,
7.58272059e-02, 7.50612745e-02, 7.42953431e-02,
7.35294118e-02, 7.27634804e-02, 7.19975490e-02,
7.12316176e-02, 7.04656863e-02, 6.96997549e-02,
6.89338235e-02, 6.81678922e-02, 6.74019608e-02,
6.66360294e-02, 6.58700980e-02, 6.51041667e-02,
6.43382353e-02, 6.35723039e-02, 6.28063725e-02,
6.20404412e-02, 6.12745098e-02, 6.05085784e-02,
5.97426471e-02, 5.89767157e-02, 5.82107843e-02,
5.74448529e-02, 5.66789216e-02, 5.59129902e-02,
5.51470588e-02, 5.43811275e-02, 5.36151961e-02,
5.28492647e-02, 5.20833333e-02, 5.13174020e-02,
5.05514706e-02, 4.97855392e-02, 4.90196078e-02,
4.82536765e-02, 4.74877451e-02, 4.67218137e-02,
4.59558824e-02, 4.51899510e-02, 4.44240196e-02,
4.36580882e-02, 4.28921569e-02, 4.21262255e-02,
4.13602941e-02, 4.05943627e-02, 3.98284314e-02,
3.90625000e-02, 3.82965686e-02, 3.75306373e-02,
3.67647059e-02, 3.59987745e-02, 3.52328431e-02,
3.44669118e-02, 3.37009804e-02, 3.29350490e-02,
3.21691176e-02, 3.14031863e-02, 3.06372549e-02,
2.98713235e-02, 2.91053922e-02, 2.83394608e-02,
2.75735294e-02, 2.68075980e-02, 2.60416667e-02,
2.52757353e-02, 2.45098039e-02, 2.37438725e-02,
2.29779412e-02, 2.22120098e-02, 2.14460784e-02,
2.06801471e-02, 1.99142157e-02, 1.91482843e-02,
1.83823529e-02, 1.76164216e-02, 1.68504902e-02,
1.60845588e-02, 1.53186275e-02, 1.45526961e-02,
1.37867647e-02, 1.30208333e-02, 1.22549020e-02,
1.14889706e-02, 1.07230392e-02, 9.95710784e-03,
9.19117647e-03, 8.42524510e-03, 7.65931373e-03,
6.89338235e-03, 6.12745098e-03, 5.36151961e-03,
4.59558824e-03, 3.82965686e-03, 3.06372549e-03,
2.29779412e-03, 1.53186275e-03, 7.65931373e-04,
0.00000000e+00]),
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1.]),
)
### gist_stern ###
color_map_luts['gist_stern'] = \
(
array([ 0. , 0.0716923 , 0.14338459, 0.21507689, 0.28676919,
0.35846148, 0.43015378, 0.50184608, 0.57353837, 0.64523067,
0.71692297, 0.78861526, 0.86030756, 0.93199986, 0.99899382,
0.97945625, 0.95991869, 0.94038112, 0.92084356, 0.90130599,
0.88176843, 0.86223087, 0.8426933 , 0.82315574, 0.80361817,
0.78408061, 0.76454305, 0.74500548, 0.72546792, 0.70593035,
0.68639279, 0.66685523, 0.64731766, 0.6277801 , 0.60824253,
0.58870497, 0.5691674 , 0.54962984, 0.53009228, 0.51055471,
0.49101715, 0.47147958, 0.45194202, 0.43240446, 0.41286689,
0.39332933, 0.37379176, 0.3542542 , 0.33471664, 0.31517907,
0.29564151, 0.27610394, 0.25656638, 0.23702881, 0.21749125,
0.19795369, 0.17841612, 0.15887856, 0.13934099, 0.11980343,
0.10026587, 0.0807283 , 0.06119074, 0.04165317, 0.25098039,
0.25490196, 0.25882353, 0.2627451 , 0.26666667, 0.27058824,
0.2745098 , 0.27843137, 0.28235294, 0.28627451, 0.29019608,
0.29411765, 0.29803922, 0.30196078, 0.30588235, 0.30980392,
0.31372549, 0.31764706, 0.32156863, 0.3254902 , 0.32941176,
0.33333333, 0.3372549 , 0.34117647, 0.34509804, 0.34901961,
0.35294118, 0.35686275, 0.36078431, 0.36470588, 0.36862745,
0.37254902, 0.37647059, 0.38039216, 0.38431373, 0.38823529,
0.39215686, 0.39607843, 0.4 , 0.40392157, 0.40784314,
0.41176471, 0.41568627, 0.41960784, 0.42352941, 0.42745098,
0.43137255, 0.43529412, 0.43921569, 0.44313725, 0.44705882,
0.45098039, 0.45490196, 0.45882353, 0.4627451 , 0.46666667,
0.47058824, 0.4745098 , 0.47843137, 0.48235294, 0.48627451,
0.49019608, 0.49411765, 0.49803922, 0.50196078, 0.50588235,
0.50980392, 0.51372549, 0.51764706, 0.52156863, 0.5254902 ,
0.52941176, 0.53333333, 0.5372549 , 0.54117647, 0.54509804,
0.54901961, 0.55294118, 0.55686275, 0.56078431, 0.56470588,
0.56862745, 0.57254902, 0.57647059, 0.58039216, 0.58431373,
0.58823529, 0.59215686, 0.59607843, 0.6 , 0.60392157,
0.60784314, 0.61176471, 0.61568627, 0.61960784, 0.62352941,
0.62745098, 0.63137255, 0.63529412, 0.63921569, 0.64313725,
0.64705882, 0.65098039, 0.65490196, 0.65882353, 0.6627451 | |
# Source repository: yarshure/CoreImagePython
"""
pycoreimage
Copyright 2018 Apple Inc. All rights reserved.
# Install
1. pip install pyobjc --ignore-installed --user
2. pip install numpy --ignore-installed --user
3. pip install scikit-image --user
"""
from pycoreimage.pyci import *
def demo_metadata(fpath):
    """Load an image plus its depth and matte channels and display all three."""
    rgb = cimg.fromFile(fpath)
    depth_map = cimg.fromFile(fpath, useDepth=True)
    matte_map = cimg.fromFile(fpath, useMatte=True)
    # Dump the metadata dictionary embedded in the file.
    print(rgb.ciimage.properties())
    # Lay the three layers out on a 2x2 grid (bottom-right pair for aux data).
    for layer, cell, label in ((rgb, 221, 'RGB'),
                               (depth_map, 223, 'depth'),
                               (matte_map, 224, 'matte')):
        show(layer, at=cell, title='{} {}x{}'.format(label, *layer.size))
def demo_filters(fpath):
    """Example: filters, loading, saving."""
    # Most common image file types are supported, including raw.
    image = cimg.fromFile(fpath)
    # Inspect the wrapper type and the backing CIImage.
    print(type(image))
    print(image.ciimage)
    # List every available Core Image filter.
    for index, filter_name in enumerate(cimg.filters()):
        print('{:3d} {}'.format(index, filter_name))
    # Show more info (including inputs) for one particular filter.
    print(cimg.inputs('gaussianBlur'))
    radii = [10, 50, 100]
    for position, radius in enumerate(radii):
        # "gaussianBlur" is shorthand for the full name "CIGaussianBlur".
        blurred = image.gaussianBlur(radius=radius)
        # Save to disk, then display on screen in a 1xN grid.
        blurred.save(fpath + '.CIGaussianBlur{}.jpg'.format(radius))
        show(blurred, at='1{}{}'.format(len(radii), position + 1),
             title='blur with radius={}'.format(radius))
def demo_generators():
    """Example: CoreImage generators."""
    # Procedurally generated images: QR code, checkerboard, stripes.
    generated = [
        cimg.fromGenerator('CIQRCodeGenerator', message='robot barf'),
        cimg.fromGenerator('CICheckerboardGenerator', crop=1024),
        cimg.fromGenerator('CIStripesGenerator', crop=1024),
    ]
    show(generated, title=['QR code', 'Checkboard', 'Stripes'], interpolation='nearest')
def demo_numpy_to(fpath):
    """Example: from CoreImage to NumPy."""
    import numpy as np
    # Apply a non-trivial effect, then render it into an ndarray.
    source = cimg.fromFile(fpath)
    vibrant = source.vibrance(amount=1.0)
    pixels = vibrant.render()
    # Plain NumPy operations work from here on.
    print(pixels[0, 0, 0])
    print(pixels.min())
    # Rec.601 luma weights; contract the channel axis against them.
    weights = 0.299, 0.587, 0.114
    luminance = np.tensordot(pixels, weights, axes=([2, 0]))
    show([source, vibrant, luminance], title=['input', 'vibrance', 'luminance'])
def demo_numpy_from():
    """Example: from NumPy to CoreImage."""
    import numpy as np
    # Sparse random noise: zero out everything below the top quartile.
    noise = np.random.rand(512, 512, 3)
    noise[noise < 0.75] = 0
    show(noise, title='NumPy', interpolation='nearest')
    # Wrap the ndarray in the Core Image convenience type.
    wrapped = cimg(noise)
    print(type(wrapped))
    # Chain several filters on the wrapped image.
    styled = wrapped.discBlur(radius=10).photoEffectChrome()
    styled = styled.lightTunnel(center=(256, 256), radius=64)
    styled = styled.exposureAdjust(EV=2)
    styled = styled.gammaAdjust(power=2)
    show(styled, title='NumPy to Core Image')
def demo_slices(fpath):
    """Example: NumPy-style slicing of a CIImage.

    Builds a composite by rendering five horizontal bands of the input,
    each processed with a different filter and labelled on the left.
    Slicing the wrapped CIImage limits rendering to just that band.
    """
    import numpy as np
    img = cimg.fromFile(fpath)
    # Resize (longest edge to 1024, keeping aspect ratio).
    s = img.size
    img = img.resize(1024, preserveAspect=1)
    show(img, title='Resized from {} to {}'.format(s, img.size))
    # Blank RGB canvas, widened on the left to make room for the labels.
    labelWidth = 400
    composite = np.zeros((img.size[1], img.size[0] + labelWidth, 3))
    # Height of each of the five bands.
    rows = img.size[1] // 5
    show(composite)
    # Band processing closure; mutates `composite` in place.
    def addBand(i, name, args):
        # Row range of band i.
        lo, hi = i * rows, (i + 1) * rows
        # Apply filter via explicit name and args
        band = img.applyFilter(name, **args)
        # Create a label with the filter name
        label = cimg.fromGenerator('CITextImageGenerator',
                                   text=name,
                                   fontName='HelveticaNeue',
                                   fontSize=40,
                                   scaleFactor=1)
        # Make the text red
        label = label.colorInvert().whitePointAdjust(color=color(1, 0, 0, 1))
        # Translate to left hand side
        label = label.translate(-labelWidth, composite.shape[0] - hi)
        # Composite over the image
        band = label.over(band)
        # Slice the CIImage here: render only happens in that band
        composite[lo:hi, ...] = band[lo:hi, ...]
        show(composite)
    # Create composite bands using various filters
    addBand(0, 'pixellate', {'center': (0, 0), 'scale': 10})
    addBand(1, 'edgeWork', {'radius': 3})
    addBand(2, 'gaussianBlur', {'radius': 5})
    addBand(3, 'comicEffect', {})
    addBand(4, 'hexagonalPixellate', {'center': (0, 0), 'scale': 10})
def demo_gpu_color(fpath):
    """Example: GPU color kernel.

    Applies a custom Core Image color kernel (source below) that darkens
    pixels whose red channel falls between the two threshold arguments.
    """
    img = cimg.fromFile(fpath)
    # GPU shader written in the Core Image Kernel Language
    # This is a Color Kernel, since only the color fragment is processed
    src = """
    kernel vec4 crush_red(__sample img, float a, float b) {
        // Crush shadows from red
        img.rgb *= smoothstep(a, b, img.r);
        return img;
    }
    """
    # Apply: positional args after the source map to the kernel's
    # float parameters (a, b) in order.
    img2 = img.applyKernel(src,  # kernel source code
                           0.25,  # kernel arg 1
                           0.9)  # kernel arg 2
    show([img, img2], title=['input', 'GPU color kernel'])
def demo_gpu_general(fpath):
    """Example: GPU general kernel.

    Runs a bilateral filter written in the Core Image Kernel Language,
    then uses its output to build a detail layer and a sharpened result.
    Unlike a color kernel, a general kernel samples neighbouring pixels,
    so a region-of-interest callback must be supplied.
    """
    img = cimg.fromFile(fpath)
    img = img.resize(512, preserveAspect=2)
    # Bilateral filter written in the Core Image Kernel Language
    src = """
    kernel vec4 bilateral(sampler u, float k, float colorInv, float spatialInv)
    {
        vec2 dc = destCoord();
        vec2 pu = samplerCoord(u);
        vec2 uDelta = samplerTransform(u, dc+vec2(1.0)) - pu;
        vec4 u_0 = sample(u, pu);
        vec4 C = vec4(0.0);
        float W = 0.0;
        for (float x = -k; x <= k; x++) {
            for (float y = -k; y <= k; y++){
                float ws = exp(-(x*x+y*y) * spatialInv);
                vec4 u_xy = sample(u, pu + vec2(x,y)*uDelta);
                vec3 diff = u_xy.rgb-u_0.rgb;
                float wc = exp(-dot(diff,diff) * colorInv);
                W += ws * wc;
                C += ws * wc * u_xy;
            }
        }
        return W < 0.0001 ? u_0 : C / W;
    }
    """
    # Apply
    sigmaSpatial = 20
    sigmaColor = 0.15
    # Kernel args: window half-size k, then the inverse-variance weights.
    bil = img.applyKernel(src,  # kernel source
                          3 * sigmaSpatial,  # kernel arg 1
                          sigmaColor ** -2,  # kernel arg 2
                          sigmaSpatial ** -2,  # kernel arg 3
                          # region of interest (ROI) callback: the kernel
                          # reads up to 3*sigmaSpatial pixels around r.
                          roi=lambda index, r: inset(r,
                                                     -3 * sigmaSpatial,
                                                     -3 * sigmaSpatial))
    show([img, bil], title=['input', 'bilateral'])
    # Create the detail layer (input minus its edge-preserving smooth).
    details = img.render() - bil.render()
    show((details - details.min()) / (details.max() - details.min()) ** 1.5, title='detail layer')
    # Bilateral sharpen: add back boosted details.
    result = img.render() + 1.5 * details
    show([img, result], title=['input', 'bilateral sharpening'])
def demo_geometry(paths):
    """Example: Affine transformations.

    Scatters n randomly scaled/rotated/translated copies of the input
    images onto a square canvas, blurring earlier layers slightly so the
    most recent copies stand out; the final copy is centered.
    """
    import numpy as np
    # Load our images in
    imgs = [cimg.fromFile(path) for path in paths]
    # Composite params
    n = 100
    size = 1024
    composite = None
    # Fixed seed so the collage is reproducible.
    np.random.seed(3)
    # Utility randomization function
    def randomize(bar, atCenter=None):
        # Apply random scale
        scale = 0.025 + 0.075 * np.random.rand()
        if atCenter:
            scale = 0.1
        bar = bar.scale(scale, scale)
        # Zero origin
        w, h = bar.size
        # NOTE(review): size unpacks as (w, h) above but the shift passes
        # (-h/2, -w/2) — looks transposed; confirm against cimg.translate.
        bar = bar.translate(-h / 2, -w / 2)
        # Apply random rotation
        angle = np.random.rand() * 2.0 * np.pi
        bar = bar.rotate(angle)
        # Apply random translation
        tx = np.random.rand() * size
        ty = np.random.rand() * size
        if atCenter:
            tx, ty = atCenter
        bar = bar.translate(tx, ty)
        return bar
    # Create the composite
    for i in range(n):
        if composite:
            # Soften everything already placed so newer copies pop.
            composite = composite.gaussianBlur(radius=1.5)
        # Pick next image
        bar = imgs[np.random.randint(0, len(imgs))]
        # The very last copy is pinned to the canvas center.
        bar = randomize(bar, atCenter=(size / 2, size / 2) if i == n - 1 else None)
        # Composite over the image
        composite = bar if not composite else bar.over(composite)
    # Crop to input size
    composite = composite.crop(size, size)
    show(composite)
def demo_depth(fpath):
    """Example: depth processing.

    Builds a soft foreground mask by thresholding the depth channel, then
    blends a brightened foreground over a desaturated background.
    """
    import numpy as np
    # Load image
    foo = cimg.fromFile(fpath)
    W, H = foo.size
    # Load depth (typically lower resolution than the RGB image).
    depth = cimg.fromFile(fpath, useDepth=True)
    w, h = depth.size
    # Diagonal of the depth map, used to scale the blur/morphology radii.
    d = np.sqrt(w ** 2 + h ** 2)
    # Params
    blur = min(100, 0.2 * d)
    morpho = blur / 6.0
    perc = 20
    # Threshold depth at the `perc`-th percentile: nearer pixels (smaller
    # depth values) become the foreground mask.
    depth_img = depth.render()
    p = np.percentile(depth_img, perc)
    mask = depth_img < p
    mask = cimg(mask.astype(np.float32))
    # Grow the mask, then feather its edge.
    mask = mask.morphologyMaximum(radius=morpho)
    mask = mask.gaussianBlur(radius=blur)
    mask = mask.render()
    # Downscale original to the depth map's resolution.
    ds = cimg(foo).scale(w / float(W), h / float(H))
    # Desaturate the background
    bg = ds.photoEffectNoir()
    # Make the foreground stand out
    fg = ds.exposureAdjust(EV=0.5).colorControls(saturation=0.8, contrast=1.4)
    # Render
    bg = bg.render()
    fg = fg.render()
    # Blend foreground over background using the soft mask.
    result = mask * fg + (1 - mask) * bg
    # Show on screen
    show(ds, at=221, title='input')
    show(depth, at=222, color='jet', title='depth')
    show(result, at=223, title='result')
    show(mask[..., 0], at=224, color='gray', title='mask')
def demo_depth_blur(fpath):
    """Example: depth blur."""
    image = cimg.fromFile(fpath)
    matte = cimg.fromFile(fpath, useMatte=True)
    disparity = cimg.fromFile(fpath, useDisparity=True)
    # Re-render the same scene at several simulated apertures.
    for f_stop in [2, 6, 22]:
        result = image.depthBlurEffect(
            inputDisparityImage=disparity,
            inputMatteImage=matte,
            inputAperture=f_stop
        )
        show(result, title='Aperture = {}'.format(f_stop))
if __name__ == '__main__':
import argparse, os
# Support file formats for dataset demo
exts = '.jpg', '.jpeg', '.heic', '.tiff', '.png'
# print('Syntax: pycoreimage_sandbox.py img.jpg imgDepthMatte.heic /path/to/directory/')
parser = argparse.ArgumentParser()
parser.add_argument('image', help='input image', type=str)
parser.add_argument('directory', help='directory containing images {}'.format(exts), type=str)
parser.add_argument('--imageWithDepth', help='input image containing depth and Portrait Effects Matte', type=str)
parser.add_argument('--tree', action='store_true', help='enable CI_PRINT_TREE=4')
args = parser.parse_args()
# CI_PRINT_TREE needs to be set before the first render call
if args.tree:
set_print_tree(4)
# Input check
abort = False
if not os.path.exists(args.image):
abort = True
print('Image not found:', args.image)
if args.imageWithDepth:
if not os.path.exists(args.imageWithDepth):
abort = True
print('Depth+matte image not found:', args.imageWithDepth)
if not os.path.exists(args.directory):
abort | |
case with sbml
- could need creating in this local namespace (i.e. internal to the RDF graph),
such as the case with CellML
Users need to know which use case they need for their annotation language.
Args:
about: The string to use as the metaid. When `type` is eUriType.MODEL_URI
then this string must be an existing metaid in the xml. When
`type` is eUriType.LOCAL_URI, this name can be arbitrary, as long
as its unique within the RDF graph.
type: either eUriType.MODEL_URI or eUriType.LOCAL_URI
Returns:
:class:`PhysicalEntity`. Reference to self
"""
self._obj = _pyom.physical_entity_about(self.get_ptr(), about.encode(), type)
propagate_omexmeta_error(self._obj)
return self
def is_part_of(self, is_part_of: str, type: eUriType = eUriType.IDENTIFIERS_URI) -> PhysicalEntity:
    """Append a bqbiol:isPartOf predicate to this :class:`PhysicalEntity`.

    May be called any number of times; each call adds another entry.
    By convention, successive entries describe ever smaller levels of
    organisation (start big, get smaller).

    Args:
        is_part_of: Resource for the isPartOf triple. A "name:id" string is
            expanded to "https://identifiers.org/name:id" when `type` is
            eUriType.IDENTIFIERS_URI; a string starting with "https" is
            used verbatim.
        type: A eUriType.

    Returns:
        :class:`PhysicalEntity`. Reference to self
    """
    result = _pyom.physical_entity_is_part_of(self.get_ptr(), is_part_of.encode(), type)
    self._obj = result
    propagate_omexmeta_error(result)
    return self
def has_part(self, part: str) -> PhysicalEntity:
    """Append a bqbiol:hasPart predicate to this :class:`PhysicalEntity`.

    May be called any number of times, e.g. when adding the members of a
    complex.

    Args:
        part: Resource portion of the hasPart triple. A string of the form
            "name:id" is expanded into an identifiers.org identifier.

    Returns:
        :class:`PhysicalEntity`. Reference to self
    """
    result = _pyom.physical_entity_has_part(self.get_ptr(), part.encode())
    self._obj = result
    propagate_omexmeta_error(result)
    return self
class PhysicalProcess(_PropertyBearer):
"""Interface for creating PhysicalProcess type composite annotations
From section 2.3.7.2 of the OmexMeta specification:
The example annotation above is for a physical property of a physical entity. However,
models also include variables for the rates of physical processes, such as chemical reactions,
transport of solutes, flow of fluids in vessels, etc., For these entities, we recommend the
use of a composite annotation, where a custom physical process is instantiated and linked to
its participants: the energetic sources and sinks as well as mediators whose amounts modulate
the magnitude of the physical process property.
Sources, sinks and mediators are the physical entities that participate in the process: source
amounts are consumed by the process, sink amounts are produced, and mediator amounts remain
unchanged. In the biochemical domain, sources and sinks correspond to "reactants" and "products",
but the former terms are much broader. Below, we provide an example of a composite annotation
for the rate of a chemical reaction with one source, one sink, and one mediator.
First, we assert statements indicating that the model includes a variable that represents
a physical property of a process that is a chemical flow rate (opb:OPB 00592).
myOMEX:MyModel.xml#property_metaid_0
bqbiol:isPropertyOf local:process_0 ;
bqbiol:isVersionOf opb:OPB_00592 .
The above annotation would be appropriate for a CellML model, where property metaid 0
points to the CellML variable representing the chemical flow rate. However, in SBML models,
processes are indicated by reactions, and there is no separate entity of variable for the
flow rate. Therefore, an annotation about chemical flow rate for an SBML model would have
to be defined within the RDF file, using the local: namespace. That annotation would make
reference to the process, which would have a corresponding entity (the reaction) in the SBML file.
We next assert statements that indicate the physical entity participants in the process: the
energetic sources, sinks, and mediators. In this case, there is one source, one sink, and one
mediator. Optionally, the sources and sinks can include stoichiometry statements (using the
semsim:hasMultiplier qualifier) to indicate the production/consumption ratios for the process
participants (mediator participation statements cannot include stoichiometries).
Optionally, we can also name or describe the process itself. Often, there is no knowledge
resource for naming processes, but within the realm of biochemical reactions, many are
named by resources such as the Gene Ontology (process branch) and RHEA (for metabolic
reactions).
local:process_0
semsim:hasSourceParticipant local:source_0 ;
semsim:hasSinkParticipant local:sink_0 ;
semsim:hasMediatorParticipant local:mediator_0 ;
bqbiol:isVersionOf <https://identifiers.org/GO:0004022> .
local:source_0
semsim:hasMultiplier 1.0 ;
semsim:hasPhysicalEntityReference myOMEX:MyModel.xml#species_metaid_0 .
local:sink_0
semsim:hasMultiplier 2.0 ;
semsim:hasPhysicalEntityReference myOMEX:MyModel.xml#species_metaid_1 .
local:mediator_0
semsim:hasPhysicalEntityReference myOMEX:MyModel.xml#species_metaid_2 .
RDF statements indicating the biological identity of the chemical species that participate
in the process (the resources with the species metaid * URI fragments in this example)
would be included elsewhere in the RDF. For SBML models where these chemical species are
explicitly represented using <species> elements, the metadata IDs should point to those
elements in the SBML code. For other formats, such as CellML, that do not support such
elements, the metadata IDs should point to physical entities instantiated elsewhere in
the RDF, using the local: namespace.
We recognize that creating composite annotations for biological processes in
this manner can duplicate information that is present in an SBML model’s
reaction network structure. However, because annotations are stored in
a separate location from the source code, these composite annotations are
necessary, so that all biological features represented in a model are exposed
in the RDF metadata. This way, the community can more easily apply RDF-processing
tools to analyze, query, and reason over semantic metadata in COMBINE archives,
in a manner that is independent of the source code used by the model.
"""
def __init__(self, physical_process_ptr: ct.c_int64):
    """Wrap an existing PhysicalProcess pointer.

    Not intended for direct use; obtain instances of
    :class:`PhysicalProcess` from an :class:`Editor`.

    Args:
        physical_process_ptr: ctypes int64 memory address (pointer) of the
            underlying PhysicalProcess.
    """
    self._obj = physical_process_ptr
    super().__init__("physical_process", physical_process_ptr)
def get_ptr(self) -> ct.c_int64:
    """Return the memory address (a ctypes int64 pointer) of the underlying
    PhysicalProcess object wrapped by this instance."""
    return self._obj
def add_source(self, physical_entity_reference: str, uri_type: eUriType,
               multiplier: float = 1.0) -> PhysicalProcess:
    """Register an energetic source (e.g. a reactant) with this :class:`PhysicalProcess`.

    Args:
        physical_entity_reference: Metaid string for the energetic source.
            With eUriType.MODEL_URI (e.g. SBML) it must be an existing
            metaid on an xml element; with eUriType.LOCAL_URI (e.g. CellML)
            any string unique within the RDF document works.
        uri_type: One of eUriType.LOCAL_URI or eUriType.MODEL_URI
        multiplier: stoichiometry of the source

    Returns:
        :class:`PhysicalProcess`. Reference to self.
    """
    ptr = _pyom.physical_process_add_source(
        self._obj, physical_entity_reference.encode(), uri_type, multiplier)
    self._obj = ptr
    propagate_omexmeta_error(ptr)
    return self
def add_sink(self, physical_entity_reference: str, uri_type: eUriType, multiplier: float = 1.0) -> PhysicalProcess:
    """Register an energetic sink (e.g. a product) with this :class:`PhysicalProcess`.

    Args:
        physical_entity_reference: Metaid string for the energetic sink.
            With eUriType.MODEL_URI (e.g. SBML) it must be an existing
            metaid on an xml element; with eUriType.LOCAL_URI (e.g. CellML)
            any string unique within the RDF document works.
        uri_type: One of eUriType.LOCAL_URI or eUriType.MODEL_URI
        multiplier: stoichiometry of the sink

    Returns:
        :class:`PhysicalProcess`. Reference to self.
    """
    ptr = _pyom.physical_process_add_sink(
        self._obj, physical_entity_reference.encode(), uri_type, multiplier)
    self._obj = ptr
    propagate_omexmeta_error(ptr)
    return self
def add_mediator(self, physical_entity_reference: str, uri_type: eUriType) -> PhysicalProcess:
"""Adds an energetic mediator to this :class:`PhysicalProcess`, such as a enzyme or other catalyst in a reaction
Args:
physical_entity_reference: The string of the metaid for the energetic mediator.
If `uri_type` is eUriType.MODEL_URI (for instance when
annotating sbml), this string needs to exist as a metaid
on an element of xml. If `uri_type` is eUriType.LOCAL_URI
(i.e. CellML) then this identifier can be string that is unique
in the rdf document
uri_type: One of eUriType.LOCAL_URI or eUriType.MODEL_URI
multiplier: int representing the stoichiometry of the mediator
Returns:
:class:`PhysicalProcess`. Reference | |
<reponame>joshcoales/pyparsing
#
# test_unit.py
#
# Unit tests for pyparsing module
#
# Copyright 2002-2020, <NAME>
#
#
import contextlib
import datetime
import sys
from io import StringIO
from unittest import TestCase
import pyparsing as pp
from examples.jsonParser import jsonObject
from pyparsing import ParseException
from pyparsing import ParserElement
from tests.json_parser_tests import test1, test2, test3, test4, test5
import platform
ppc = pp.pyparsing_common
ppt = pp.pyparsing_test
# see which Python implementation we are running
python_impl = platform.python_implementation()
CPYTHON_ENV = python_impl == "CPython"
IRON_PYTHON_ENV = python_impl == "IronPython"
JYTHON_ENV = python_impl == "Jython"
PYPY_ENV = python_impl == "PyPy"
# simple utility for flattening nested lists
def flatten(L):
    """Recursively flatten arbitrarily nested lists into one flat list.

    A non-list argument is wrapped in a single-element list. Uses
    ``isinstance`` (idiomatic; also accepts list subclasses) and an
    extend-loop instead of the original ``flatten(L[0]) + flatten(L[1:])``
    recursion, which copied list tails at every step (quadratic).
    """
    if not isinstance(L, list):
        return [L]
    result = []
    for item in L:
        result.extend(flatten(item))
    return result
class resetting:
    """Context manager that snapshots named attributes of an object and
    restores them on exit.

    Usage::

        with resetting(obj, "attr1", "attr2"):
            obj.attr1 = ...   # temporary; restored on block exit

    Fixes: ``__enter__`` now returns ``self`` per the context-manager
    convention (backward compatible — the previous implicit ``None`` return
    was unusable), and the signature names its parameters instead of
    hand-unpacking ``*args`` (call-compatible for all positional uses).
    """

    def __init__(self, ob, *attrnames):
        # Snapshot current values so __exit__ can restore them.
        self.ob = ob
        self.save_attrs = attrnames
        self.save_values = [getattr(ob, attrname) for attrname in attrnames]

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Restore every saved attribute; exceptions propagate (returns None).
        for attr, value in zip(self.save_attrs, self.save_values):
            setattr(self.ob, attr, value)
class Test1_PyparsingTestInit(TestCase):
    """Smoke test that reports the pyparsing and Python versions under test."""

    def runTest(self):
        from pyparsing import __version__ as pyparsingVersion
        from pyparsing import __versionTime__ as pyparsingVersionTime

        print("Beginning test of pyparsing, version",
              pyparsingVersion,
              pyparsingVersionTime)
        print("Python version", sys.version)
class Test2_WithoutPackrat(ppt.TestParseResultsAsserts, TestCase):
suite_context = None
save_suite_context = None
def setUp(self):
    # Restore the shared pyparsing state snapshot before each test, so
    # global-state changes made by a previous test cannot leak into this one.
    self.suite_context.restore()
@contextlib.contextmanager
def assertRaises(self, expected_exception_type, msg=None):
    """
    Simple wrapper to print out the exceptions raised after assertRaises
    """
    try:
        # Delegate to unittest's assertRaises context manager; the yield
        # hands control to the caller's `with` body.
        with super().assertRaises(expected_exception_type, msg=msg) as ar:
            yield
    finally:
        # Runs whether or not the expected exception occurred (including
        # when the inner assertRaises fails), to log the outcome.
        if getattr(ar, "exception", None) is not None:
            print(
                "Raised expected exception: {}: {}".format(
                    type(ar.exception).__name__, str(ar.exception)
                )
            )
        else:
            print(
                "Expected {} exception not raised".format(
                    expected_exception_type.__name__
                )
            )
    # Returning from a @contextmanager generator just ends the generator;
    # the value is not delivered to the caller.
    return ar
def test000_assert_packrat_status(self):
    """Sanity check: this suite must run with packrat parsing disabled."""
    packrat_on = ParserElement._packratEnabled
    print("Packrat enabled:", packrat_on)
    self.assertFalse(packrat_on, "packrat enabled")
def testScanStringWithOverlap(self):
    """scanString skips past each match by default; overlap=True re-scans
    from every position, yielding overlapping matches too."""
    word3 = pp.Word(pp.alphas, exact=3)
    source = "ABCDEFGHI"
    no_overlap = sum(
        tokens for tokens, _, _ in word3.scanString(source)
    ).asList()
    self.assertEqual(
        ["ABC", "DEF", "GHI"],
        no_overlap,
        msg="scanString without overlaps failed",
    )
    overlapping = sum(
        tokens for tokens, _, _ in word3.scanString(source, overlap=True)
    ).asList()
    self.assertEqual(
        ["ABC", "BCD", "CDE", "DEF", "EFG", "FGH", "GHI"],
        overlapping,
        msg="scanString with overlaps failed",
    )
def testTransformString(self):
    """transformString: commify integers, flatten parenthesized groups,
    and delete lower-case words (with trailing whitespace)."""
    commified_int = ppc.integer().addParseAction(
        lambda toks: "{:,}".format(toks[0])
    )
    lowercase_word = pp.Word(pp.alphas.lower(), asKeyword=True) + pp.Optional(
        pp.White()
    )
    parenthesized = pp.nestedExpr().addParseAction(pp.ParseResults.asList)
    # Order matters: try the numeric and nested forms before suppressing words.
    transformer = commified_int | parenthesized | lowercase_word.suppress()
    in_string = (
        "I wish to buy 12345 shares of Acme Industries (as a gift to my (ex)wife)"
    )
    print(in_string)
    out_string = transformer.transformString(in_string)
    print(out_string)
    self.assertEqual(
        "I 12,345 Acme Industries asagifttomyexwife",
        out_string,
        msg="failure in transformString",
    )
def testUpdateDefaultWhitespace(self):
    """setDefaultWhitespaceChars should retroactively update pre-built
    expressions, except those with copyDefaultWhiteChars disabled."""
    # Snapshot the global default so it can be restored afterwards.
    prev_default_whitespace_chars = pp.ParserElement.DEFAULT_WHITE_CHARS
    try:
        # Opt dblQuotedString out of whitespace updates; it should keep
        # the previous default while sglQuotedString picks up the new one.
        pp.dblQuotedString.copyDefaultWhiteChars = False
        pp.ParserElement.setDefaultWhitespaceChars(" \t")
        self.assertEqual(
            set(" \t"),
            set(pp.sglQuotedString.whiteChars),
            "setDefaultWhitespaceChars did not update sglQuotedString",
        )
        self.assertEqual(
            set(prev_default_whitespace_chars),
            set(pp.dblQuotedString.whiteChars),
            "setDefaultWhitespaceChars updated dblQuotedString but should not",
        )
    finally:
        # Restore global parser state regardless of assertion outcome.
        pp.dblQuotedString.copyDefaultWhiteChars = True
        pp.ParserElement.setDefaultWhitespaceChars(prev_default_whitespace_chars)
    self.assertEqual(
        set(prev_default_whitespace_chars),
        set(pp.dblQuotedString.whiteChars),
        "setDefaultWhitespaceChars updated dblQuotedString",
    )
    with ppt.reset_pyparsing_context():
        pp.ParserElement.setDefaultWhitespaceChars(" \t")
        self.assertNotEqual(
            set(prev_default_whitespace_chars),
            set(pp.dblQuotedString.whiteChars),
            "setDefaultWhitespaceChars updated dblQuotedString but should not",
        )
        # Mini BASIC-like grammar: with '\n' removed from the default
        # whitespace, EOL becomes significant and each line parses
        # as its own group.
        EOL = pp.LineEnd().suppress().setName("EOL")
        # Identifiers is a string + optional $
        identifier = pp.Combine(pp.Word(pp.alphas) + pp.Optional("$"))
        # Literals (number or double quoted string)
        literal = ppc.number | pp.dblQuotedString
        expression = literal | identifier
        # expression.setName("expression").setDebug()
        # ppc.number.setDebug()
        # ppc.integer.setDebug()
        line_number = ppc.integer
        # Keywords
        PRINT = pp.CaselessKeyword("print")
        print_stmt = PRINT - pp.ZeroOrMore(expression | ";")
        statement = print_stmt
        code_line = pp.Group(line_number + statement + EOL)
        program = pp.ZeroOrMore(code_line)
        test = """\
        10 print 123;
        20 print 234; 567;
        30 print 890
        """
        parsed_program = program.parseString(test)
        print(parsed_program.dump())
        self.assertEqual(
            3,
            len(parsed_program),
            "failed to apply new whitespace chars to existing builtins",
        )
def testUpdateDefaultWhitespace2(self):
    """Repetition across newlines: with '\\n' in the default whitespace the
    three input lines merge into one group; without it, each line is its
    own group."""
    with ppt.reset_pyparsing_context():
        expr_tests = [
            (pp.dblQuotedString, '"abc"'),
            (pp.sglQuotedString, "'def'"),
            (ppc.integer, "123"),
            (ppc.number, "4.56"),
            (ppc.identifier, "a_bc"),
        ]
        NL = pp.LineEnd()

        def run_all(expected_len):
            # Parse three newline-separated copies of each sample and check
            # how many groups result under the current whitespace settings.
            for expr, test_str in expr_tests:
                parser = pp.Group(expr[1, ...] + pp.Optional(NL))[1, ...]
                test_string = "\n".join([test_str] * 3)
                result = parser.parseString(test_string, parseAll=True)
                print(result.dump())
                self.assertEqual(
                    expected_len, len(result), "failed {!r}".format(test_string)
                )

        run_all(1)  # default whitespace includes '\n'
        pp.ParserElement.setDefaultWhitespaceChars(" \t")
        run_all(3)  # newline now significant: one group per line
        pp.ParserElement.setDefaultWhitespaceChars(" \n\t")
        run_all(1)  # '\n' back in whitespace: single group again
def testParseFourFn(self):
    """Evaluate the fourFn example calculator on a table of expressions.
    An expected value of None means evaluation must raise."""
    import examples.fourFn as fourFn
    import math

    def check(expression, expected):
        # Reset the shared evaluation stack before each parse.
        fourFn.exprStack[:] = []
        fourFn.BNF().parseString(expression)
        try:
            value = fourFn.evaluate_stack(fourFn.exprStack)
        except Exception:
            self.assertIsNone(
                expected, "exception raised for expression {!r}".format(expression)
            )
        else:
            self.assertEqual(
                expected,
                value,
                "failed to evaluate {}, got {:f}".format(expression, value),
            )
            print(expression, "->", value)

    cases = [
        ("9", 9),
        ("-9", -9),
        ("--9", 9),
        ("-E", -math.e),
        ("9 + 3 + 6", 9 + 3 + 6),
        ("9 + 3 / 11", 9 + 3.0 / 11),
        ("(9 + 3)", (9 + 3)),
        ("(9+3) / 11", (9 + 3.0) / 11),
        ("9 - 12 - 6", 9 - 12 - 6),
        ("9 - (12 - 6)", 9 - (12 - 6)),
        ("2*3.14159", 2 * 3.14159),
        ("3.1415926535*3.1415926535 / 10", 3.1415926535 * 3.1415926535 / 10),
        ("PI * PI / 10", math.pi * math.pi / 10),
        ("PI*PI/10", math.pi * math.pi / 10),
        ("PI^2", math.pi ** 2),
        ("round(PI^2)", round(math.pi ** 2)),
        ("6.02E23 * 8.048", 6.02e23 * 8.048),
        ("e / 3", math.e / 3),
        ("sin(PI/2)", math.sin(math.pi / 2)),
        ("10+sin(PI/4)^2", 10 + math.sin(math.pi / 4) ** 2),
        ("trunc(E)", int(math.e)),
        ("trunc(-E)", int(-math.e)),
        ("round(E)", round(math.e)),
        ("round(-E)", round(-math.e)),
        ("E^PI", math.e ** math.pi),
        ("exp(0)", 1),
        ("exp(1)", math.e),
        ("2^3^2", 2 ** 3 ** 2),
        ("(2^3)^2", (2 ** 3) ** 2),
        ("2^3+2", 2 ** 3 + 2),
        ("2^3+5", 2 ** 3 + 5),
        ("2^9", 2 ** 9),
        ("sgn(-2)", -1),
        ("sgn(0)", 0),
        ("sgn(0.1)", 1),
        ("foo(0.1)", None),
        ("round(E, 3)", round(math.e, 3)),
        ("round(PI^2, 3)", round(math.pi ** 2, 3)),
        ("sgn(cos(PI/4))", 1),
        ("sgn(cos(PI/2))", 0),
        ("sgn(cos(PI*3/4))", -1),
        ("+(sgn(cos(PI/4)))", 1),
        ("-(sgn(cos(PI/4)))", -1),
    ]
    for expression, expected in cases:
        check(expression, expected)
def testParseSQL(self):
    """Parse sample SQL queries with the simpleSQL example grammar,
    checking token counts for valid input and error locations for bad."""
    import examples.simpleSQL as simpleSQL

    def check(query, num_expected_toks, expected_errloc=-1):
        try:
            tokens = flatten(simpleSQL.simpleSQL.parseString(query).asList())
            print(query, tokens, len(tokens))
            self.assertEqual(
                num_expected_toks,
                len(tokens),
                "invalid parsed tokens, expected {}, found {} ({})".format(
                    num_expected_toks, len(tokens), tokens
                ),
            )
        except ParseException as pe:
            # Only verify the location when the caller expects a failure.
            if expected_errloc >= 0:
                self.assertEqual(
                    expected_errloc,
                    pe.loc,
                    "expected error at {}, found at {}".format(
                        expected_errloc, pe.loc
                    ),
                )

    check("SELECT * from XYZZY, ABC", 6)
    check("select * from SYS.XYZZY", 5)
    check("Select A from Sys.dual", 5)
    check("Select A,B,C from Sys.dual", 7)
    check("Select A, B, C from Sys.dual", 7)
    check("Select A, B, C from Sys.dual, Table2 ", 8)
    check("Xelect A, B, C from Sys.dual", 0, 0)
    check("Select A, B, C frox Sys.dual", 0, 15)
    check("Select", 0, 6)
    check("Select &&& frox Sys.dual", 0, 7)
    check("Select A from Sys.dual where a in ('RED','GREEN','BLUE')", 12)
    check(
        "Select A from Sys.dual where a in ('RED','GREEN','BLUE') and b in (10,20,30)",
        20,
    )
    check(
        "Select A,b from table1,table2 where table1.id eq table2.id -- test out comparison operators",
        10,
    )
def testParseConfigFile(self):
    """Parse two sample .ini files and spot-check values reachable through
    dotted attribute paths on the parse results."""
    from examples import configParse

    def check(ini_path, num_expected_toks, expected_values):
        print("Parsing", ini_path, "...", end=" ")
        with open(ini_path) as infile:
            contents = "\n".join(infile.read().splitlines())
        parsed = configParse.inifile_BNF().parseString(contents)
        print(len(flatten(parsed.asList())))
        print(list(parsed.keys()))
        self.assertEqual(
            num_expected_toks,
            len(flatten(parsed.asList())),
            "file %s not parsed correctly" % ini_path,
        )
        # Walk dotted paths like "users.K" through the parse results.
        for dotted_key, expected in expected_values:
            value = parsed
            for attr in dotted_key.split("."):
                value = getattr(value, attr)
            print(dotted_key, value, expected)
            self.assertEqual(
                expected,
                value,
                "ParseConfigFileTest: failed to parse ini {!r} as expected {!r}, found {}".format(
                    dotted_key, expected, value
                ),
            )
        print("OK")

    check(
        "tests/karthik.ini",
        23,
        [("users.K", "8"), ("users.mod_scheme", "'QPSK'"), ("users.Na", "K+2")],
    )
    check(
        "examples/Setup.ini",
        125,
        [
            ("Startup.audioinf", "M3i"),
            ("Languages.key1", "0x0003"),
            ("test.foo", "bar"),
        ],
    )
def testParseJSONData(self):
expected = [
[
[
"glossary",
[
["title", "example glossary"],
[
"GlossDiv",
[
["title", "S"],
[
"GlossList",
[
[
["ID", "SGML"],
["SortAs", "SGML"],
[
"GlossTerm",
"Standard Generalized Markup Language",
],
["Acronym", "SGML"],
["LargestPrimeLessThan100", 97],
["AvogadroNumber", 6.02e23],
["EvenPrimesGreaterThan2", None],
["PrimesLessThan10", [2, 3, 5, 7]],
["WMDsFound", False],
["IraqAlQaedaConnections", None],
["Abbrev", "ISO 8879:1986"],
[
"GlossDef",
"A meta-markup language, used to create markup languages such as "
"DocBook.",
],
["GlossSeeAlso", ["GML", "XML", "markup"]],
["EmptyDict", []],
["EmptyList", [[]]],
]
],
],
],
],
],
]
],
[
[
"menu",
[
["id", "file"],
["value", "File:"],
[
"popup",
[
[
"menuitem",
[
[
["value", "New"],
["onclick", "CreateNewDoc()"],
],
[["value", "Open"], ["onclick", "OpenDoc()"]],
[["value", "Close"], ["onclick", "CloseDoc()"]],
],
]
],
],
],
]
],
[
[
"widget",
[
["debug", "on"],
[
"window",
[
["title", "Sample Konfabulator Widget"],
["name", "main_window"],
["width", 500],
["height", 500],
],
],
[
"image",
[
["src", "Images/Sun.png"],
["name", "sun1"],
["hOffset", 250],
["vOffset", 250],
["alignment", "center"],
],
],
[
"text",
[
["data", "Click Here"],
["size", 36],
["style", "bold"],
["name", "text1"],
["hOffset", 250],
["vOffset", 100],
["alignment", "center"],
[
"onMouseUp",
"sun1.opacity = (sun1.opacity / 100) * 90;",
],
],
],
],
| |
<reponame>datosgobar/pydatajson
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import os
import re
import json
try:
from mock import patch, MagicMock, ANY
except ImportError:
from unittest.mock import patch, MagicMock, ANY
from .context import pydatajson
from pydatajson.federation import *
from pydatajson.helpers import is_local_andino_resource
from ckanapi.errors import NotFound, CKANAPIError
SAMPLES_DIR = os.path.join("tests", "samples")
class FederationSuite(unittest.TestCase):
    """Base test case for federation tests; resolves sample catalog files."""
    @classmethod
    def get_sample(cls, sample_filename):
        # Sample catalogs live under SAMPLES_DIR (tests/samples).
        return os.path.join(SAMPLES_DIR, sample_filename)
@patch('pydatajson.federation.RemoteCKAN')
class PushDatasetTestCase(FederationSuite):
@classmethod
def setUpClass(cls):
    # Full sample catalog plus the ids used throughout the suite.
    cls.catalog = pydatajson.DataJson(cls.get_sample('full_data.json'))
    # Fall back to a slugified, lower-cased title when the catalog has no
    # explicit identifier.
    cls.catalog_id = cls.catalog.get('identifier', re.sub(
        r'[^a-z-_]+', '', cls.catalog['title']).lower())
    cls.dataset = cls.catalog.datasets[0]
    cls.dataset_id = cls.dataset['identifier']
    cls.distribution = cls.catalog.distributions[0]
    cls.distribution_id = cls.distribution['identifier']
    # Minimal sample catalog, used by the "without license" style tests.
    cls.minimum_catalog = pydatajson.DataJson(
        cls.get_sample('minimum_data.json'))
    cls.minimum_catalog_id = cls.minimum_catalog.get('identifier', re.sub(
        r'[^a-z-_]+', '', cls.minimum_catalog['title']).lower())
    cls.minimum_dataset = cls.minimum_catalog.datasets[0]
    # PATCH: minimum_data lacks the mandatory identifiers. They are added
    # here, but the outdated tests and samples should be refactored to
    # comply with the new profiles.
    cls.minimum_dataset['identifier'] = cls.dataset['identifier']
    cls.minimum_dataset['distribution'][0][
        'identifier'] = cls.dataset['distribution'][0]['identifier']
def test_id_is_created_correctly(self, mock_portal):
    """A dataset unknown to the portal (update raises NotFound) is created,
    and its id is prefixed with the catalog id."""
    def fake_call_action(action, data_dict=None):
        # Simulate a portal that does not know the dataset yet.
        if action == 'package_update':
            raise NotFound
        if action == 'package_create':
            return data_dict
        return []
    mock_portal.return_value.call_action = fake_call_action
    pushed_id = push_dataset_to_ckan(
        self.catalog,
        'owner',
        self.dataset_id,
        'portal',
        'key',
        catalog_id=self.catalog_id)
    self.assertEqual(self.catalog_id + '_' + self.dataset_id, pushed_id)
def test_id_is_updated_correctly(self, mock_portal):
    """An existing dataset is updated in place — package_create must never
    be reached — and the returned id keeps the catalog prefix."""
    def fake_call_action(action, data_dict=None):
        if action == 'package_update':
            return data_dict
        if action == 'package_create':
            self.fail('should not be called')
        return []
    mock_portal.return_value.call_action = fake_call_action
    pushed_id = push_dataset_to_ckan(
        self.catalog,
        'owner',
        self.dataset_id,
        'portal',
        'key',
        catalog_id=self.catalog_id)
    self.assertEqual(self.catalog_id + '_' + self.dataset_id, pushed_id)
def test_dataset_id_is_preserved_if_catalog_id_is_not_passed(
        self, mock_portal):
    """Without a catalog_id, the pushed dataset keeps its original id
    (no prefix is applied)."""
    def fake_call_action(action, data_dict=None):
        if action == 'package_update':
            return data_dict
        if action == 'package_create':
            self.fail('should not be called')
        return []
    mock_portal.return_value.call_action = fake_call_action
    pushed_id = push_dataset_to_ckan(self.catalog, 'owner', self.dataset_id,
                                     'portal', 'key')
    self.assertEqual(self.dataset_id, pushed_id)
def test_tags_are_passed_correctly(self, mock_portal):
    """CKAN tags must combine the dataset keywords with the sanitized
    labels of its themes."""
    expected_tags = list(self.dataset['keyword'])
    for theme_id in self.dataset['theme']:
        raw_label = self.catalog.get_theme(identifier=theme_id)['label']
        # Strip characters CKAN does not accept in tag names.
        expected_tags.append(
            re.sub(r'[^\w .-]+', '', raw_label, flags=re.UNICODE))

    def fake_call_action(action, data_dict=None):
        if action == 'package_update':
            pushed_tags = [tag['name'] for tag in data_dict['tags']]
            try:
                self.assertItemsEqual(expected_tags, pushed_tags)
            except AttributeError:  # assertItemsEqual is Python 2 only
                self.assertCountEqual(expected_tags, pushed_tags)
            return data_dict
        if action == 'package_create':
            self.fail('should not be called')
        return []

    mock_portal.return_value.call_action = fake_call_action
    pushed_id = push_dataset_to_ckan(
        self.catalog,
        'owner',
        self.dataset_id,
        'portal',
        'key',
        catalog_id=self.catalog_id)
    self.assertEqual(self.catalog_id + '_' + self.dataset_id, pushed_id)
def test_licenses_are_interpreted_correctly(self, mock_portal):
    """When the catalog license matches none of the portal's licenses,
    the pushed dataset falls back to the 'notspecified' license id."""
    portal_licenses = [
        {'title': 'somelicense', 'url': 'somelicense.com', 'id': '1'},
        {'title': 'otherlicense', 'url': 'otherlicense.com', 'id': '2'},
    ]

    def fake_call_action(action, data_dict=None):
        if action == 'license_list':
            return portal_licenses
        if action == 'package_update':
            self.assertEqual('notspecified', data_dict['license_id'])
            return data_dict
        return []

    mock_portal.return_value.call_action = fake_call_action
    push_dataset_to_ckan(self.catalog, 'owner', self.dataset_id,
                         'portal', 'key', catalog_id=self.catalog_id)
def test_dataset_without_license_sets_notspecified(self, mock_portal):
    """A dataset with no license at all is pushed with license_id
    'notspecified'."""
    portal_licenses = [
        {'title': 'somelicense', 'url': 'somelicense.com', 'id': '1'},
        {'title': 'otherlicense', 'url': 'otherlicense.com', 'id': '2'},
    ]

    def fake_call_action(action, data_dict=None):
        if action == 'license_list':
            return portal_licenses
        if action == 'package_update':
            self.assertEqual('notspecified', data_dict['license_id'])
            return data_dict
        return []

    mock_portal.return_value.call_action = fake_call_action
    push_dataset_to_ckan(
        self.minimum_catalog,
        'owner',
        self.minimum_dataset['identifier'],
        'portal',
        'key',
        catalog_id=self.minimum_catalog_id)
def test_dataset_level_wrappers(self, mock_portal):
    """restore_* keeps the original dataset id; harvest_* prefixes it with
    the catalog id."""
    mock_portal.return_value.call_action = (
        lambda action, data_dict=None:
            data_dict if action == 'package_update' else [])
    restored_id = restore_dataset_to_ckan(
        self.catalog, 'owner', self.dataset_id, 'portal', 'key')
    harvested_id = harvest_dataset_to_ckan(
        self.catalog,
        'owner',
        self.dataset_id,
        'portal',
        'key',
        self.catalog_id)
    self.assertEqual(self.dataset_id, restored_id)
    self.assertEqual(self.catalog_id + '_' + self.dataset_id, harvested_id)
def test_harvest_catalog_with_no_optional_parametres(self, mock_portal):
    """Without optional arguments, harvested packages are namespaced by the
    catalog id ('id' prefix, 'name' prefix and owner_org)."""
    def fake_call_action(action, data_dict=None):
        if action != 'package_update':
            return []
        self.assertTrue(data_dict['id'].startswith(self.catalog_id + '_'))
        self.assertTrue(data_dict['name'].startswith(self.catalog_id + '-'))
        self.assertEqual(self.catalog_id, data_dict['owner_org'])
        return data_dict

    mock_portal.return_value.call_action = fake_call_action
    harvested_ids, _ = harvest_catalog_to_ckan(
        self.catalog, 'portal', 'key', self.catalog_id)
    expected_ids = [self.catalog_id + '_' + ds['identifier']
                    for ds in self.catalog.datasets]
    try:
        self.assertItemsEqual(expected_ids, harvested_ids)
    except AttributeError:  # assertItemsEqual is Python 2 only
        self.assertCountEqual(expected_ids, harvested_ids)
def test_harvest_catalog_with_dataset_list(self, mock_portal):
    """dataset_list restricts harvesting to the given identifiers; all
    returned ids carry the catalog prefix."""
    mock_portal.return_value.call_action = (
        lambda action, data_dict=None:
            data_dict if action == 'package_update' else [])

    def check_harvest(dataset_list):
        harvested_ids, _ = harvest_catalog_to_ckan(
            self.catalog, 'portal', 'key',
            self.catalog_id, dataset_list=dataset_list)
        expected_ids = [self.catalog_id + '_' + ds_id
                        for ds_id in dataset_list]
        try:
            self.assertItemsEqual(expected_ids, harvested_ids)
        except AttributeError:  # assertItemsEqual is Python 2 only
            self.assertCountEqual(expected_ids, harvested_ids)

    # A strict subset first, then the full dataset list.
    check_harvest([ds['identifier'] for ds in self.catalog.datasets[:1]])
    check_harvest([ds['identifier'] for ds in self.catalog.datasets])
def test_harvest_catalog_with_owner_org(self, mock_portal):
    """An explicit owner_org overrides the catalog id as the package's
    organization."""
    def fake_call_action(action, data_dict=None):
        if action == 'package_update':
            self.assertEqual('owner', data_dict['owner_org'])
            return data_dict
        return []

    mock_portal.return_value.call_action = fake_call_action
    harvested_ids, _ = harvest_catalog_to_ckan(
        self.catalog, 'portal', 'key', self.catalog_id, owner_org='owner')
    expected_ids = [self.catalog_id + '_' + ds['identifier']
                    for ds in self.catalog.datasets]
    try:
        self.assertItemsEqual(expected_ids, harvested_ids)
    except AttributeError:  # assertItemsEqual is Python 2 only
        self.assertCountEqual(expected_ids, harvested_ids)
def test_harvest_catalog_with_errors(self, mock_portal):
    """Per-dataset failures are collected into the errors dict keyed by the
    original dataset identifier."""
    def fake_call_action(action, data_dict=None):
        if action == 'package_update':
            # Only ids ending in '777' succeed; everything else fails.
            if data_dict['id'].endswith('777'):
                return data_dict
            raise Exception('some message')
        return []

    mock_portal.return_value.call_action = fake_call_action
    _, errors = harvest_catalog_to_ckan(
        self.catalog, 'portal', 'key', self.catalog_id, owner_org='owner')
    self.assertDictEqual(
        {self.catalog.datasets[1]['identifier']: "some message"}, errors)
def test_harvest_catalog_with_empty_list(self, mock_portal):
    # An explicitly empty dataset_list must short-circuit: no portal client
    # is ever constructed and nothing is harvested.
    harvested_ids, _ = harvest_catalog_to_ckan(
        self.catalog, 'portal', 'key', self.catalog_id,
        owner_org='owner', dataset_list=[])
    mock_portal.assert_not_called()
    self.assertEqual([], harvested_ids)
def test_resource_upload_succesfully(self, mock_portal):
    """resources_update patches the matching distribution and returns the
    portal's id for every successful upload."""
    patch_mock = MagicMock(
        return_value={'id': 'pushed_id', 'resource_type': 'file.upload'})
    mock_portal.return_value.action.resource_patch = patch_mock
    resources = {self.distribution_id: 'tests/samples/resource_sample.csv'}
    result = resources_update('portal', 'key', self.catalog.distributions,
                              resources)
    _, _, kwargs = patch_mock.mock_calls[0]
    self.assertEqual(self.distribution_id, kwargs['id'])
    self.assertEqual('Convocatorias abiertas durante el año 2015',
                     kwargs['name'])
    self.assertEqual('file.upload', kwargs['resource_type'])
    self.assertEqual(['pushed_id', 'pushed_id'], result)
def test_resource_upload_error(self, mock_portal):
mock_portal.return_value.action.resource_patch = MagicMock(
side_effect=CKANAPIError('broken resource'))
resources = {self.distribution_id: 'tests/samples/resource_sample.csv'}
res = resources_update('portal', 'key', self.catalog.distributions,
resources)
_, _, kwargs = \
mock_portal.return_value.action.resource_patch.mock_calls[0]
self.assertEqual(self.distribution_id, kwargs['id'])
self.assertEqual('Convocatorias abiertas durante el año 2015',
kwargs['name'])
self.assertEqual('file.upload', kwargs['resource_type'])
self.assertEqual([], res)
@patch('pydatajson.helpers.download_to_file')
def test_push_dataset_upload_strategy(self, mock_download, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'package_update':
return data_dict
else:
return []
mock_portal.return_value.call_action = mock_call_action
push_dataset_to_ckan(
self.catalog,
'owner',
self.dataset_id,
'portal',
'key',
download_strategy=(lambda _, x: x['identifier'] == '1.1'))
_, _, kwargs = \
mock_portal.return_value.action.resource_patch.mock_calls[0]
self.assertEqual('1.1', kwargs['id'])
self.assertEqual('Convocatorias abiertas durante el año 2015',
kwargs['name'])
self.assertEqual('file.upload', kwargs['resource_type'])
def test_push_dataset_upload_empty_strategy(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'package_update':
return data_dict
else:
return []
mock_portal.return_value.call_action = mock_call_action
push_dataset_to_ckan(
self.minimum_catalog,
'owner',
self.dataset_id,
'portal',
'key',
download_strategy=is_local_andino_resource)
mock_portal.return_value.action.resource_patch.not_called()
def test_push_dataset_regenerate_accessurl_all(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'package_update':
return data_dict
else:
return []
mock_portal.return_value.call_action = mock_call_action
identifiers = [dist['identifier'] for dist in
self.dataset['distribution']]
def side_effect(**kwargs):
self.assertTrue(kwargs['id'] in identifiers)
self.assertEqual('', kwargs['accessURL'])
return {'id': kwargs['id']}
mock_portal.return_value.action.resource_patch.side_effect =\
side_effect
pushed = push_dataset_to_ckan(self.catalog, 'owner',
self.dataset_id,
'portal', 'key',
generate_new_access_url=identifiers)
self.assertEqual(self.dataset_id, pushed)
class RemoveDatasetTestCase(FederationSuite):
    """Tests for remove_datasets_from_ckan: purging datasets selected by
    search filters and/or organization, against a mocked RemoteCKAN."""
    @patch('pydatajson.federation.RemoteCKAN', autospec=True)
    def test_empty_search_doesnt_call_purge(self, mock_portal):
        """No filters and no organization: nothing may be purged."""
        mock_portal.return_value.call_action = MagicMock()
        remove_datasets_from_ckan('portal', 'key')
        mock_portal.return_value.call_action.assert_not_called()
    @patch('pydatajson.federation.get_datasets')
    @patch('pydatajson.federation.RemoteCKAN', autospec=True)
    def test_filter_in_datasets(self, mock_portal, mock_search):
        """Each id found through filter_in is purged."""
        mock_portal.return_value.call_action = MagicMock()
        mock_search.return_value = ['some_id']
        filter_in = {'dataset': {'id': 'some_id'}}
        remove_datasets_from_ckan('portal', 'key', filter_in=filter_in)
        mock_portal.return_value.call_action.assert_called_with(
            'dataset_purge', data_dict={'id': 'some_id'})
    @patch('pydatajson.federation.get_datasets')
    @patch('pydatajson.federation.RemoteCKAN', autospec=True)
    def test_filter_in_out_datasets(self, mock_portal, mock_search):
        """Every id returned by the filtered search is purged."""
        mock_portal.return_value.call_action = MagicMock()
        mock_search.return_value = ['some_id', 'other_id']
        filter_out = {'dataset': {'id': 'some_id'}}
        remove_datasets_from_ckan('portal', 'key', filter_out=filter_out)
        mock_portal.return_value.call_action.assert_any_call(
            'dataset_purge', data_dict={'id': 'other_id'})
        mock_portal.return_value.call_action.assert_any_call(
            'dataset_purge', data_dict={'id': 'some_id'})
    @patch('pydatajson.federation.RemoteCKAN', autospec=True)
    def test_query_one_dataset(self, mock_portal):
        """An organization query runs package_search, then purges the hit."""
        mock_portal.return_value.call_action = MagicMock(
            return_value={'count': 1, 'results': [{'id': 'some_id'}]})
        remove_datasets_from_ckan('portal', 'key', organization='some_org')
        data_dict = {'q': 'organization:"some_org"', 'rows': 500, 'start': 0}
        mock_portal.return_value.call_action.assert_any_call(
            'package_search', data_dict=data_dict)
        mock_portal.return_value.call_action.assert_any_call(
            'dataset_purge', data_dict={'id': 'some_id'})
    @patch('pydatajson.federation.RemoteCKAN', autospec=True)
    def test_query_over_500_datasets(self, mock_portal):
        """More than 500 results: the search must paginate in 500-row pages.

        The side_effect list is call-order sensitive: three paginated
        package_search responses first, then one None per dataset_purge.
        """
        count = 1001
        # First, the query results. Then the "dataset_purge" None results
        side_effects = [{'count': count, 'results': [{'id': 'id_1'}]},
                        {'count': count, 'results': [{'id': 'id_2'}]},
                        {'count': count, 'results': [{'id': 'id_3'}]},
                        None, None, None
                        ]
        mock_portal.return_value.call_action = MagicMock(
            side_effect=side_effects)
        remove_datasets_from_ckan('portal', 'key', organization='some_org')
        # One package_search per page: start offsets 0, 500 and 1000.
        for start in range(0, count, 500):
            data_dict = {'q': 'organization:"some_org"',
                         'rows': 500, 'start': start}
            mock_portal.return_value.call_action.assert_any_call(
                'package_search', data_dict=data_dict)
        for x in ['1', '2', '3']:
            mock_portal.return_value.call_action.assert_any_call(
                'dataset_purge', data_dict={'id': 'id_' + x})
    @patch('pydatajson.federation.get_datasets')
    @patch('pydatajson.federation.RemoteCKAN', autospec=True)
    def test_remove_through_filters_and_organization(
            self, mock_portal, mock_search):
        """Filters AND organization: only their intersection is purged."""
        filter_results = ['id_1', 'id_2']
        org_results = [{'id': 'id_2'}, {'id': 'id_3'}]
        mock_search.return_value = filter_results
        mock_portal.return_value.call_action = MagicMock(
            return_value={'count': 2, 'results': org_results})
        remove_datasets_from_ckan(
            'portal', 'key', only_time_series=True, organization='some_org')
        mock_portal.return_value.call_action.assert_called_with(
            'dataset_purge', data_dict={'id': 'id_2'})
@patch('pydatajson.federation.RemoteCKAN', autospec=True)
class PushThemeTestCase(FederationSuite):
    """Tests for push_theme_to_ckan against a mocked CKAN portal."""
    @classmethod
    def setUpClass(cls):
        cls.catalog = pydatajson.DataJson(cls.get_sample('full_data.json'))
    def test_empty_theme_search_raises_exception(self, mock_portal):
        """Neither identifier nor label given: the call must fail."""
        with self.assertRaises(AssertionError):
            push_theme_to_ckan(self.catalog, 'portal_url', 'apikey')
    def test_function_pushes_theme_by_identifier(self, mock_portal):
        """Selecting the theme by identifier returns the created group name."""
        mock_portal.return_value.call_action = MagicMock(
            return_value={'name': 'group_name'})
        result = push_theme_to_ckan(self.catalog, 'portal_url', 'apikey',
                                    identifier='compras')
        self.assertEqual('group_name', result)
    def test_function_pushes_theme_by_label(self, mock_portal):
        """Selecting the theme by label returns the created group name."""
        mock_portal.return_value.call_action = MagicMock(
            return_value={'name': 'other_name'})
        result = push_theme_to_ckan(self.catalog, 'portal_url', 'apikey',
                                    label='Adjudicaciones')
        self.assertEqual('other_name', result)
    def test_ckan_portal_is_called_with_correct_parametres(self, mock_portal):
        """group_create receives name/title/description from the taxonomy."""
        mock_portal.return_value.call_action = MagicMock(
            return_value={'name': u'contrataciones'})
        expected_group = {'name': u'contrataciones',
                          'title': u'Contrataciones',
                          'description': u'Datasets sobre contrataciones.'}
        push_theme_to_ckan(self.catalog, 'portal_url', 'apikey',
                           identifier='contrataciones')
        mock_portal.return_value.call_action.assert_called_once_with(
            'group_create', data_dict=expected_group)
@patch('pydatajson.federation.RemoteCKAN', autospec=True)
class PushCatalogThemesTestCase(FederationSuite):
@classmethod
def setUpClass(cls):
cls.catalog = pydatajson.DataJson(cls.get_sample('full_data.json'))
def test_empty_portal_pushes_every_theme(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'group_list':
return []
elif action == 'group_create':
return {'name': data_dict['name']}
else:
self.fail('should not be called')
mock_portal.return_value.call_action = mock_call_action
res_names = push_new_themes(self.catalog, 'portal_url', 'apikey')
try:
self.assertItemsEqual(
[theme['id'] for theme in self.catalog['themeTaxonomy']],
res_names)
except AttributeError:
self.assertCountEqual(
[theme['id'] for theme in self.catalog['themeTaxonomy']],
res_names)
def test_full_portal_pushes_nothing(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'group_list':
return [theme['id'] for | |
<filename>HebbLearn.py<gh_stars>1-10
import sys
import os
import math
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
try:
import cv2
except:
print('cv2 not available')
pass
# Global parameters
# Nonlinearity options passed to the GHA classes' GetOutput/reconstruction:
LINEAR = 1  # identity activation
TANH = 2  # tanh activation
DIVTANH = 3  # tanh divided by (1 + sum of squared tanh outputs)
# rgb2gray
# luminance preserving rgb2gray conversion
#
def rgb2gray(rgb):
    """Convert an RGB image array to grayscale luminance.

    Bug fix: the blue coefficient was 0.144; the ITU-R BT.601 luma
    weights are 0.299/0.587/0.114 and must sum to 1.0 to preserve
    luminance as the comment above claims.

    rgb : numpy array of shape (..., 3+); extra channels are ignored
    returns : numpy array of shape (...) with the weighted luminance
    """
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
# LoadImage
# Load an image as a numpy array
#
# file_name : directory of file to open
# returns : image as numpy array
def LoadImage(file_name):
    # NOTE(review): scipy.misc.imread was deprecated in SciPy 1.0 and
    # removed in 1.2, so this fails on modern SciPy; imageio.imread is
    # the usual drop-in replacement — confirm the pinned SciPy version.
    return misc.imread(file_name)
# Load Video
# Load a video as a numpy array
#
# file_name : directory of video file to open
# returns : tensor of image (b&w)
def LoadVideo(fn):
    # NOTE(review): uses the OpenCV 2.x `cv2.cv` constant namespace,
    # which was removed in OpenCV 3+ — confirm the pinned cv2 version.
    cap = cv2.VideoCapture(fn)
    num_frames = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    # Read one frame just to learn the frame dimensions.
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    num_rows = np.shape(gray)[0]
    num_cols = np.shape(gray)[1]
    # Rewind so the probe frame is included in the output tensor.
    cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,0)
    vid = np.zeros((num_rows, num_cols, num_frames))
    for f in range(num_frames):
        ret, frame = cap.read()
        vid[:,:,f] = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cap.release()
    return vid
# GenerateVideo
#
# Generates a video from a grayscale numpy array and saves to file
def GenerateVideo(raw, fn):
    """Write a normalized grayscale (rows, cols, frames) array to a video.

    raw : numpy array, values in [0, 1], shape (rows, cols, frames)
    fn  : output file name

    Bug fixes: the original computed an 'XVID' fourcc it never used and
    passed the hard-coded magic number 1196444237 (which is the 'MJPG'
    fourcc) instead; it also passed the frame size as (rows, cols)
    although cv2.VideoWriter expects (width, height) == (cols, rows).
    """
    num_rows = np.shape(raw)[0]
    num_cols = np.shape(raw)[1]
    num_frames = np.shape(raw)[2]
    # Same codec the original actually wrote with (MJPG), derived readably.
    fourcc = cv2.cv.CV_FOURCC(*'MJPG')
    out = cv2.VideoWriter(fn, fourcc, 20, (num_cols, num_rows))
    for f in range(num_frames):
        #can't use normalized image for write
        processed = (raw[:,:,f]*255.0).astype('uint8')
        # Triplicate the gray channel into a BGR frame for the writer.
        processed = np.repeat(processed,3,axis=1)
        processed = processed.reshape(num_rows,num_cols,3)
        out.write(processed)
    out.release()
def DisplayFrames(video):
    """Interactively step through frames of a (rows, cols, frames) array.

    'f' steps forward one frame, 'b' steps back, 'q' quits.

    Bug fix: the original reset ``f = 0`` at the top of every loop
    iteration, so navigation could never actually leave frame 0.
    """
    num_frames = np.shape(video)[2]
    f = 0
    plt.imshow(video[:,:,f], cmap=plt.get_cmap('gray'))
    plt.show(block=False)
    while(True):
        print("\033[A \033[A")
        x = input("Press f: forward, b: back, q: quit : ")
        if (x == "f"):
            if ((f+1) < num_frames):
                f = f+1
        elif (x == "b"):
            if ((f-1) >= 0):
                f = f-1
        elif (x == "q"):
            break
        # Any other key just redraws the current frame.
        plt.imshow(video[:,:,f], cmap=plt.get_cmap('gray'))
        plt.show(block=False)
# KD
# The Kronecker Delta Function - returns 1,
# if inputs are same, returns 0 otherwise
#
# m, n : input numbers to test
def KD(m, n):
    """Kronecker delta: 1 when m equals n, 0 otherwise."""
    return 1 if m == n else 0
# FixedLinearGHA
#
# This class computes a linear generalized hebb
# algorithm - the weight matrix contains
# the weights for each center-surround.
class FixedLinearGHA():
    """Linear Generalized Hebbian Algorithm over fixed, non-overlapping tiles.

    Bug fixes relative to the original:
    * ResizeImage cropped the column axis with the ROW remainder
      (``im.shape[0] % filter_size``), mis-cropping non-square images.
    * Tile counts used Python-2 ``/`` division, which yields floats and
      breaks np.random.rand/range under Python 3; now ``//``.
    * Train looped ``range(num_cols-1) x range(num_rows-1)``, skipping the
      last row/column of tiles and leaving the weight index ``w`` out of
      step with ImageReconstruction, which walks ALL tiles in the same
      column-major order; both now walk the full grid.
    """

    def LinearGHA(self, in_vec, weights, out_vec, LR):
        """One Sanger's-rule update; returns max-normalized new weights."""
        # Nudge all-zero vectors so the final normalization can't divide by 0.
        if (np.max(in_vec) == 0):
            in_vec = in_vec+.0000001
        if (np.max(out_vec) == 0):
            out_vec = out_vec + 0.0000001
        # Lower-triangular term implements the Gram-Schmidt-like decorrelation.
        LT = np.tril(np.dot(out_vec, out_vec.T))
        new_weights = weights + LR * (np.dot(out_vec, in_vec.T) - np.dot(LT,weights))
        return new_weights/np.max(new_weights)

    def ResizeImage(self, im, filter_size):
        """Crop so both dimensions are exact multiples of filter_size."""
        return im[0:(im.shape[0]-(im.shape[0]%filter_size)),
                  0:(im.shape[1]-(im.shape[1]%filter_size))]

    def InitializeWeights(self, im, filter_size, out_dimension):
        """Random weights: one (out_dim x filter_size^2) slab per tile."""
        num_rows = im.shape[0]//filter_size
        num_cols = im.shape[1]//filter_size
        return np.random.rand(out_dimension,(filter_size*filter_size),num_rows*num_cols)

    def Train(self, raw_data, filter_size, out_dimension, LR):
        """Train per-tile weights over every frame of raw_data (r, c, t)."""
        sample_size = np.shape(raw_data)[2]
        # crop video to correct dimensions
        temp = self.ResizeImage(raw_data[:,:,0], filter_size)
        data = np.zeros((np.shape(temp)[0],np.shape(temp)[1],sample_size))
        for t in range(sample_size):
            data[:,:,t] = self.ResizeImage(raw_data[:,:,t], filter_size)
        num_rows = data.shape[0]//filter_size
        num_cols = data.shape[1]//filter_size
        weights = self.InitializeWeights(data[:,:,0], filter_size, out_dimension)
        for f in range(sample_size):
            w = 0
            # Walk tiles column-major, matching ImageReconstruction's order.
            for c in range(num_cols):
                for r in range(num_rows):
                    row_start = r*filter_size
                    row_end = (r+1)*filter_size
                    col_start = c*filter_size
                    col_end = (c+1)*filter_size
                    img = data[row_start:row_end, col_start:col_end, f]
                    in_vec = np.reshape(img,(filter_size*filter_size,1))
                    out_vec = self.GetOutput(in_vec, weights[:,:,w])
                    weights[:,:,w] = self.LinearGHA(in_vec, weights[:,:,w], out_vec, LR)
                    w = w+1
            pc = ((f+.0)/sample_size)*100
            sys.stdout.write("\rTraining is %f percent complete" % pc)
            sys.stdout.flush()
        return weights

    def GetOutput(self, in_vec, weights):
        """Linear projection of the input vector onto the weight rows."""
        return np.dot(weights, in_vec)

    def ImageReconstruction(self, image, weights, filter_size):
        """Project each tile through W then back via W.T; returns the image."""
        # crop video to correct dimensions
        image = self.ResizeImage(image, filter_size)
        num_rows = image.shape[0]//filter_size
        num_cols = image.shape[1]//filter_size
        output = np.zeros((image.shape[0],image.shape[1]))
        w = 0
        for c in range(num_cols):
            for r in range(num_rows):
                row_start = r*filter_size
                row_end = (r+1)*filter_size
                col_start = c*filter_size
                col_end = (c+1)*filter_size
                frame = image[row_start:row_end, col_start:col_end]
                in_vec = np.reshape(frame,(filter_size*filter_size,1))
                out_vec = np.dot(weights[:,:,w].T, np.dot(weights[:,:,w], in_vec))
                output[row_start:row_end,col_start:col_end] = np.reshape(out_vec,(filter_size,filter_size))
                w = w + 1
        return output
# NonlinearGHA
#
# This class computes a nonlinear generalized hebb
# algorithm - the weight matrix contains
# the weights for each receptive-field.
class NonlinearGHA():
    """GHA over (possibly overlapping) receptive fields with a selectable
    nonlinearity (module constants LINEAR, TANH or DIVTANH).

    Bug fixes relative to the original:
    * ResizeImage cropped the column axis with the ROW remainder
      (``im.shape[0] % filter_size``), mis-cropping non-square images.
    * Step counts used Python-2 ``/`` division, which yields floats and
      breaks np.random.rand/range under Python 3; now ``//``.
    """

    def GHA(self, in_vec, weights, out_vec, LR, nonlinearity):
        """One Sanger's-rule update; returns max-normalized new weights.

        The nonlinearity parameter is accepted for interface symmetry but
        the update rule itself is the same for every activation.
        """
        # Nudge all-zero vectors so the final normalization can't divide by 0.
        if (np.max(in_vec) == 0):
            in_vec = in_vec+.0000001
        if (np.max(out_vec) == 0):
            out_vec = out_vec + 0.0000001
        LT = np.tril(np.dot(out_vec, out_vec.T))
        new_weights = weights + LR * (np.dot(out_vec, in_vec.T) - np.dot(LT,weights))
        return new_weights/np.max(new_weights)

    def ResizeImage(self, im, filter_size):
        """Crop so both dimensions are exact multiples of filter_size."""
        return im[0:(im.shape[0]-(im.shape[0]%filter_size)),
                  0:(im.shape[1]-(im.shape[1]%filter_size))]

    def InitializeWeights(self, im, filter_size, step_size, out_dimension):
        """Random weights: one (out_dim x filter_size^2) slab per field."""
        row_steps = ((im.shape[0] - filter_size)//step_size) + 1
        col_steps = ((im.shape[1] - filter_size)//step_size) + 1
        return np.random.rand(out_dimension,(filter_size*filter_size),row_steps*col_steps)

    def GetOutput(self, in_vec, weights, nonlinearity):
        """Project the input through the weights, then apply the activation."""
        if (nonlinearity == LINEAR):
            return np.dot(weights, in_vec)
        elif (nonlinearity == TANH):
            return np.tanh(np.dot(weights, in_vec))
        elif (nonlinearity == DIVTANH):
            tmp = np.tanh(np.dot(weights, in_vec))/(1 + np.sum(np.square(np.tanh(np.dot(weights,in_vec)))))
            return tmp

    def Train(self, raw_data, filter_size, step_size, out_dimension, LR, nonlinearity):
        """Train per-field weights over every frame of raw_data (r, c, t)."""
        sample_size = np.shape(raw_data)[2]
        # crop video to correct dimensions
        temp = self.ResizeImage(raw_data[:,:,0], filter_size)
        data = np.zeros((np.shape(temp)[0],np.shape(temp)[1],sample_size))
        for t in range(sample_size):
            data[:,:,t] = self.ResizeImage(raw_data[:,:,t], filter_size)
        row_steps = ((data.shape[0] - filter_size)//step_size) + 1
        col_steps = ((data.shape[1] - filter_size)//step_size) + 1
        weights = self.InitializeWeights(data[:,:,0], filter_size, step_size, out_dimension)
        for f in range(sample_size):
            w = 0
            # Walk receptive fields column-major, matching reconstruction.
            for c in range(col_steps):
                for r in range(row_steps):
                    row_start = r*step_size
                    row_end = r*step_size + filter_size
                    col_start = c*step_size
                    col_end = c*step_size + filter_size
                    img = data[row_start:row_end, col_start:col_end, f]
                    in_vec = np.reshape(img,(filter_size*filter_size,1))
                    out_vec = self.GetOutput(in_vec, weights[:,:,w], nonlinearity)
                    weights[:,:,w] = self.GHA(in_vec, weights[:,:,w], out_vec, LR, nonlinearity)
                    w = w+1
            pc = ((f+.0)/sample_size)*100
            sys.stdout.write("\rTraining is %f percent complete" % pc)
            sys.stdout.flush()
        return weights

    def ImageReconstruction(self, image, weights, filter_size, step_size, nonlinearity):
        """Reconstruct via W.T(W x) per field; overlapping fields accumulate."""
        # crop video to correct dimensions
        image = self.ResizeImage(image, filter_size)
        row_steps = ((image.shape[0] - filter_size)//step_size) + 1
        col_steps = ((image.shape[1] - filter_size)//step_size) + 1
        output = np.zeros((image.shape[0],image.shape[1]))
        w = 0
        for c in range(col_steps):
            for r in range(row_steps):
                row_start = r*step_size
                row_end = r*step_size + filter_size
                col_start = c*step_size
                col_end = c*step_size + filter_size
                frame = image[row_start:row_end, col_start:col_end]
                in_vec = np.reshape(frame,(filter_size*filter_size,1))
                if (nonlinearity == LINEAR):
                    out_vec = np.dot(weights[:,:,w].T, np.dot(weights[:,:,w], in_vec))
                elif (nonlinearity == TANH):
                    out_vec = np.tanh(np.dot(weights[:,:,w].T, np.dot(weights[:,:,w], in_vec)))
                elif (nonlinearity == DIVTANH):
                    out_vec = np.tanh(np.dot(weights[:,:,w].T, np.dot(weights[:,:,w], in_vec)))/(1 + np.sum(np.square(np.tanh(np.dot(weights[:,:,w].T,np.dot(weights[:,:,w],in_vec))))))
                # Accumulate so overlapping receptive fields sum together.
                output[row_start:row_end,col_start:col_end] = output[row_start:row_end, col_start:col_end] + np.reshape(out_vec,(filter_size,filter_size))
                w = w + 1
        return output
# MultilayerGHA
#
# Initialize with the number of layers.
# All of the following parameters are arrays with length equal to num_layers
#
class MultilayerGHA():
def __init__(self, num_layers=1, filter_size=6, step_size=3, out_dim=10, LR=1, nonlinearity=TANH):
# make sure everything lines up
if (num_layers != len(filter_size)):
print('filter_size must be array with length num_layers')
elif (num_layers != len(step_size)):
print('step_size must be array with length num_layers')
elif (num_layers != len(out_dim)):
print('out_dim must be array with length num_layers')
elif (num_layers != len(LR)):
print('LR must be array with length num_layers')
elif (num_layers != len(nonlinearity)):
print('nonlinearity must be array with length num_layers')
# set up data structure
self.layers = []
for l in range(num_layers):
self.layers.append({'layer': l, 'filter_size': filter_size[l], 'step_size': step_size[l], 'out_dim': out_dim[l], 'LR': LR[l], 'nonlinearity': nonlinearity[l]})
# Train
# Train with the initialized layer structure
#
def Train(self, data):
input_features = data
for l in range(len(self.layers)):
fs = self.layers[l]['filter_size']
ss = self.layers[l]['step_size']
od = self.layers[l]['out_dim']
lr = self.layers[l]['LR']
nl = self.layers[l]['nonlinearity']
print('==> Training Layer ' + str(l))
weights = self.TrainLayer(data, fs, ss, od, lr, nl)
self.layers[l]['weights'] = weights
print('')
print('==> Calculating Output of Layer ' + str(l))
input_features = self.GetLayerOutput(input_features, weights, fs, ss, nl)
pop_mean = np.mean(input_features)
input_features = input_features - pop_mean
pop_std = np.std(input_features)
input_features = input_features/pop_std
return self.layers, input_features
# ImageReconstruction
#
def ImageReconstruction(self, image, layers):
input_features = image
for l in range(len(layers)):
fs = layers[l]['filter_size']
ss = layers[l]['step_size']
od = layers[l]['out_dim']
nl = layers[l]['nonlinearity']
weights = layers[l]['weights']
input_features = self.GetLayerOutput(input_features, weights, fs, ss, nl)
return input_features
# GHA
#
def GHA(self, in_vec, weights, out_vec, LR, nonlinearity):
if (np.max(in_vec) == 0):
in_vec = in_vec+.0000001
if (np.max(out_vec) == 0):
out_vec = out_vec + 0.0000001
LT = np.tril(np.dot(out_vec, out_vec.T))
if (nonlinearity == LINEAR):
new_weights = weights + LR * (np.dot(out_vec, in_vec.T) - np.dot(LT,weights))
elif (nonlinearity == TANH):
#new_weights = weights + LR * np.tanh((np.dot(out_vec, in_vec.T) - np.dot(LT,weights)))
new_weights = weights + LR * (np.dot(out_vec, in_vec.T) - np.dot(LT,weights))
return new_weights/np.max(new_weights)
# ResizeImage
#
def ResizeImage(self, im, filter_size):
return im[0:(im.shape[0]-(im.shape[0]%filter_size)),0:(im.shape[1]-(im.shape[0]%filter_size))]
# initializeWeights
#
def InitializeWeights(self, im, filter_size, step_size, out_dimension):
row_steps = ((im.shape[0] - filter_size)/step_size) + 1
col_steps = ((im.shape[1] - filter_size)/step_size) + 1
return np.random.rand(out_dimension,(filter_size*filter_size),row_steps*col_steps)
# GetOutput
#
def GetOutput(self, in_vec, weights, nonlinearity):
if (nonlinearity == LINEAR):
return np.dot(weights, in_vec)
elif (nonlinearity == TANH):
return np.tanh(np.dot(weights, in_vec))
#GetLayerOutput
#
def GetLayerOutput(self, input, weights, filter_size, step_size, nonlinearity):
try:
sample_size = np.shape(input)[2]
temp = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.