input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# util.py
from __future__ import print_function
from collections import Mapping, OrderedDict
import datetime
import itertools
import random
import warnings
import pandas as pd
np = pd.np
from scipy import integrate
from matplotlib import pyplot as plt
import seaborn
from scipy.optimize import minimize
from scipy.signal import correlate
from titlecase import titlecase
from pug.nlp.util import listify, fuzzy_get, make_timestamp
def dropna(x):
    """Return only the finite values of a sequence (drops NaN, -inf and +inf).

    Arguments:
        x (seq of float): sequence of real values (coerced to a list via listify)

    Returns:
        list: all values in x strictly between -inf and +inf
              (NaN fails both comparisons, so it is dropped too)
    """
    lower, upper = float('-inf'), float('inf')
    finite_values = []
    for value in listify(x):
        if lower < value < upper:
            finite_values.append(value)
    return finite_values
def rms(x):
    """Root Mean Square

    Arguments:
        x (seq of float): A sequence of numerical values

    Returns:
        float: The square root of the average of the squares of the values

            math.sqrt(sum(x_i**2 for x_i in x) / len(x))

        or

            (np.array(x) ** 2).mean() ** 0.5

    >>> rms([0, 2, 4, 4])
    3.0
    """
    try:
        # Fast vectorized path for anything numpy can coerce to an array
        return (np.array(x) ** 2).mean() ** 0.5
    except Exception:
        # BUG FIX: was a bare `except:` (also swallowed KeyboardInterrupt/SystemExit).
        # Fallback for sequences numpy can't reduce directly: drop non-finite
        # entries first, then compute the mean of squares manually.
        x = np.array(dropna(x))
        inv_n = 1.0 / len(x)
        return (sum(inv_n * (x_i ** 2) for x_i in x)) ** .5
def rmse(target, prediction, relative=False, percent=False):
    """Root Mean Square Error

    This seems like a simple formula that you'd never need to create a function for.
    But my mistakes on coding challenges have convinced me that I do need it,
    as a reminder of important tweaks, if nothing else.

    Arguments:
        target (seq of float): the true/reference values
        prediction (seq of float): the estimated values, same length as target
        relative (bool): if True, each error is divided by the target value first
        percent (bool): if True, implies relative=True and scales the result by 100

    Returns:
        float: root mean square of the (optionally relative) errors

    NOTE(review): the relative/percent doctest values below match Python 2
    integer (floor) division for these all-int inputs; under Python 3 true
    division the relative results differ -- confirm which is intended.

    >>> rmse([0, 1, 4, 3], [2, 1, 0, -1])
    3.0
    >>> rmse([0, 1, 4, 3], [2, 1, 0, -1], relative=True) # doctest: +ELLIPSIS
    1.2247...
    >>> rmse([0, 1, 4, 3], [2, 1, 0, -1], percent=True) # doctest: +ELLIPSIS
    122.47...
    """
    relative = relative or percent
    prediction = pd.np.array(prediction)
    target = np.array(target)
    err = prediction - target
    if relative:
        # NOTE(review): `denom` ALIASES the local `target` array (no copy), so the
        # assignment below also mutates `target`. Harmless for `err` (already
        # computed), but the later `target == 0` mask sees the mutated values.
        denom = target
        # Avoid ZeroDivisionError: divide by prediction rather than target where target==0
        denom[denom == 0] = prediction[denom == 0]
        # If the prediction and target are both 0, then the error is 0 and should be included in the RMSE
        # Otherwise, the np.isinf() below would remove all these zero-error predictions from the array.
        denom[(denom == 0) & (target == 0)] = 1
        err = (err / denom)
        # Drop any remaining undefined ratios (NaN from 0/0, inf from x/0)
        err = err[(~ np.isnan(err)) & (~ np.isinf(err))]
    return 100 * rms(err) if percent else rms(err)
def blended_rolling_apply(series, window=2, fun=pd.np.mean):
    """Apply `fun` over a rolling window, "blending" the left edge instead of NaNs.

    A plain rolling apply produces NaN for the first `window - 1` positions;
    here those positions are filled with `fun` applied to the growing prefix
    of the series, so the result has no NaNs and keeps the original index.

    Arguments:
        series (pd.Series): input series
        window (int): rolling window width
        fun (callable): aggregation applied to each window (default pd.np.mean)

    Returns:
        pd.Series: same length and index as `series`

    NOTE(review): relies on `pd.rolling_apply` and `Series.append`, both removed
    in later pandas releases -- confirm the pinned pandas version before reuse.
    """
    # Head: fun() of each prefix series[:1], series[:2], ...; tail: standard rolling apply.
    new_series = pd.Series(np.fromiter((fun(series[:i + 1]) for i in range(window - 1)),
                                       type(series.values[0])), index=series.index[:window - 1]).append(
        pd.rolling_apply(series.copy(), window, fun)[window - 1:])
    # Sanity checks: length preserved and no NaN/None leaked through the blend.
    assert len(series) == len(new_series), (
        "blended_rolling_apply should always return a series of the same length!\n"
        " len(series) = {0} != {1} = len(new_series".format(len(series), len(new_series)))
    assert not any(np.isnan(val) or val is None for val in new_series)
    return new_series
def rolling_latch(series, period=31, decay=1.0):
    """Rolling maximum of `series` over a `period`-wide window, scaled by `decay`.

    FIXME: implement recursive exponential decay filter rather than the
    nonrecursive, derating done here
    """
    def decayed_max(window_values):
        # Latch onto the window maximum, derated by the decay factor
        return decay * pd.np.max(window_values)
    return blended_rolling_apply(series, period, decayed_max)
def clean_dataframe(df):
    """Replace NaNs in a DataFrame: forward-fill, then fill any remainder with 0.0.

    Values that have no preceding non-NaN value (e.g. a leading NaN, or an
    all-NaN column) end up as 0.0.
    """
    return df.fillna(method='ffill').fillna(0.0)
def clean_dataframes(dfs):
    """Fill NaNs in one dataframe or a list of dataframes.

    Each dataframe is forward-filled and any remaining NaNs are replaced with
    0.0 (see `clean_dataframe`).

    TODO:
      Linear interpolation and extrapolation

    Arguments:
      dfs (DataFrame or list of DataFrame): dataframe(s) that contain NaNs to be removed

    Returns:
      list of dataframes: list of dataframes with NaNs replaced
    """
    if isinstance(dfs, list):
        # BUG FIX: the old loop did `for df in dfs: df = clean_dataframe(df)`,
        # which only rebound the loop variable and returned the ORIGINAL,
        # uncleaned dataframes.
        return [clean_dataframe(df) for df in dfs]
    return [clean_dataframe(dfs)]
def get_symbols_from_list(list_name):
    """Retrieve a named (symbol list name) list of strings (symbols)

    If you've installed the QSTK Quantitative analysis toolkit
    `get_symbols_from_list('sp5002012')` will produce a list of the symbols that
    were members of the S&P 500 in 2012.
    Otherwise an ImportError exception will be raised.
    If QSTK is installed but its Yahoo data source can't be initialized,
    an empty list is returned.

    Example:
      >> len(get_symbols_from_list('sp5002012')) in (0, 501)
      True
    """
    try:
        # quant software toolkit has a method for retrieving lists of symbols like S&P500 for 2012 with 'sp5002012'
        import QSTK.qstkutil.DataAccess as da
        dataobj = da.DataAccess('Yahoo')
    except ImportError:
        # QSTK not installed: propagate so callers know support is missing
        raise
    except Exception:
        # BUG FIX: was a bare `except:`; QSTK is installed but the Yahoo data
        # source failed to initialize -- deliberate best-effort empty result.
        return []
    # The old `try: ... except: raise` wrapper here was a no-op and was removed;
    # any lookup failure still propagates to the caller unchanged.
    return dataobj.get_symbols_from_list(list_name)
def make_symbols(symbols, *args):
    """Return a list of uppercase strings like "GOOG", "$SPX, "XOM"...

    Arguments:
      symbols (str or list of str): list of market ticker symbols to normalize
        If `symbols` is a str a get_symbols_from_list() call is used to retrieve the list of symbols

    Returns:
      list of str: list of cananical ticker symbol strings (typically after .upper().strip())

    See Also:
      pug.dj.db.normalize_names

    Examples:
      >>> make_symbols("Goog")
      ['GOOG']
      >>> make_symbols("  $SPX   ", " aaPL ")
      ['$SPX', 'AAPL']
      >>> make_symbols(["$SPX", ["GOOG", "AAPL"]])
      ['GOOG', 'AAPL', '$SPX']
      >>> make_symbols(" $Spy, Goog, aAPL ")
      ['$SPY', 'GOOG', 'AAPL']
    """
    # Empty input (empty iterable, or an empty list/tuple/mapping) -> empty list
    all_falsy_iterable = hasattr(symbols, '__iter__') and not any(symbols)
    empty_container = isinstance(symbols, (list, tuple, Mapping)) and not symbols
    if all_falsy_iterable or empty_container:
        return []
    if isinstance(symbols, basestring):
        # FIXME: find a direct API for listing all possible symbols
        # Comma-separated string: split, then uppercase and strip each token
        parts = symbols.split(',') + [str(extra) for extra in args]
        return [part.upper().strip() for part in parts]
    # Nested sequences: recurse into each element and deduplicate the union
    collected = []
    for sym in list(symbols) + list(args):
        collected.extend(make_symbols(sym))
    return list(set(collected))
def make_time_series(x, t=pd.Timestamp(datetime.datetime(1970, 1, 1)), freq=None):
    """Convert a 2-D array of time/value pairs (or pair of time/value vectors) into a pd.Series time-series

    Arguments:
        x (Series, DataFrame, or sequence): values, a (times, values) vector pair,
            or a sequence of (time, value) pairs
        t (Timestamp or sequence): start time for a generated index, or an
            explicit index for `x`
        freq (str or None): pandas frequency string used when an index must be
            generated; defaults to '15min' with a RuntimeWarning

    Returns:
        pd.Series: series whose index is converted to a DatetimeIndex

    Raises:
        TypeError: if `x` cannot be coerced to a pd.Series

    >>> make_time_series(range(3), freq='15min') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
    1970-01-01 00:00:00 NaN
    1970-01-01 00:15:00 NaN
    1970-01-01 00:30:00 NaN
    dtype: float64
    """
    if isinstance(x, pd.DataFrame):
        # Use only the first column of a DataFrame as the series values
        x = pd.Series(x[x.columns[0]])
    elif not isinstance(x, pd.Series) and (not isinstance(t, (pd.Series, pd.Index, list, tuple)) or not len(t)):
        # warnings.warn("Coercing a non-Series")
        if len(x) == 2:
            # x looks like a (times, values) pair of vectors
            t, x = listify(x[0]), listify(x[1])
        elif len(x) >= 2:
            try:
                # x looks like a sequence of (time, value) pairs
                t, x = zip(*x)
            except (ValueError, IndexError, TypeError):
                # not pairwise data: treat x as plain values
                pass
        x = pd.Series(x)
    else:
        if isinstance(t, (datetime.datetime, pd.Timestamp)):
            t = pd.Timestamp(t)
        else:
            # t is an explicit index for the values in x
            x = pd.Series(listify(x), index=listify(t))
    if not isinstance(x, pd.Series):
        raise TypeError("`pug.invest.util.make_time_series(x, t)` expects x to be a type that"
                        " can be coerced to a Series object, but it's type is: {0}"
                        .format(type(x)))
    # By this point x must be a Series, only question is whether its index needs to be converted to a DatetimeIndex
    if x.index[0] != 0 and isinstance(x.index[0], (datetime.date, datetime.datetime, pd.Timestamp,
                                                   basestring, float, np.int64, int)):
        # Index already looks time-like: keep it as-is
        t = x.index
    elif isinstance(t, (datetime.date, datetime.datetime, pd.Timestamp, basestring, float, np.int64, int)):
        if not freq:
            freq = '15min'
            warnings.warn('Assumed time series freq to be {0} though no freq argument was provided!'
                          .format(freq), RuntimeWarning)
        # Generate a DatetimeIndex of len(x) periods starting at t
        t = pd.date_range(t, periods=len(x), freq=freq)
    x = pd.Series(x, index=t)
    if isinstance(x, pd.Series):
        # Normalize whatever index we ended up with into a DatetimeIndex
        x.index = pd.DatetimeIndex(x.index.values)
    return x
def pandas_mesh(df):
    """Create numpy 2-D "meshgrid" from 3+ columns in a Pandas DataFrame

    Arguments:
      df (DataFrame): Must have 3 or 4 columns of numerical data

    Returns:
      OrderedDict: column labels from the data frame are the keys, values are 2-D matrices
        All matrices have shape NxM, where N = len(set(df.iloc[:,0])) and M = len(set(df.iloc[:,1]))

    >>> pandas_mesh(pd.DataFrame(np.arange(18).reshape(3,6),
    ...                          columns=list('ABCDEF'))).values()  # doctest: +NORMALIZE_WHITESPACE
    [array([[ 0,  6, 12],
            [ 0,  6, 12],
            [ 0,  6, 12]]),
     array([[ 1,  1,  1],
            [ 7,  7,  7],
            [13, 13, 13]]),
     array([[  2.,  nan,  nan],
            [ nan,   8.,  nan],
            [ nan,  nan,  14.]]),
     array([[  3.,  nan,  nan],
            [ nan,   9.,  nan],
            [ nan,  nan,  15.]]),
     array([[  4.,  nan,  nan],
            [ nan,  10.,  nan],
            [ nan,  nan,  16.]]),
     array([[  5.,  nan,  nan],
            [ nan,  11.,  nan],
            [ nan,  nan,  17.]])]
    """
    # One 1-D array per column; the first two columns define the mesh axes.
    xyz = [df[c].values for c in df.columns]
    # Index each remaining column's values by their (x, y) coordinate pair.
    index = pd.MultiIndex.from_tuples(zip(xyz[0], xyz[1]), names=['x', 'y'])
    # print(index)
    series = [pd.Series(values, index=index) for values in xyz[2:]]
    # print(series)
    # Rectangular grid over the sorted unique x and y values.
    X, Y = np.meshgrid(sorted(list(set(xyz[0]))), sorted(list(set(xyz[1]))))
    N, M = X.shape
    Zs = []
    # print(Zs)
    for k, s in enumerate(series):
        # Start each Z matrix as all-NaN; only observed (x, y) cells get filled.
        Z = np.empty(X.shape)
        Z[:] = np.nan
        for i, j in itertools.product(range(N), range(M)):
            # NOTE(review): np.NAN was removed in numpy 2.0 (alias of np.nan) --
            # confirm the pinned numpy version.
            Z[i, j] = s.get((X[i, j], Y[i, j]), np.NAN)
        Zs += [Z]
    return OrderedDict((df.columns[i], m) for i, m in enumerate([X, Y] + Zs))
def integrated_change(ts, integrator=integrate.trapz, clip_floor=None, clip_ceil=float('inf')):
"""Total value * time above the starting value within a TimeSeries"""
integrator = get_integrator(integrator)
if clip_floor is None:
clip_floor = ts[0]
if clip_ceil < clip_floor:
polarity = -1
offset, clip_floor, clip_ceil, = clip_ceil, clip_ceil, clip_floor
else:
polarity, offset = 1, clip_floor
clipped_values = np.clip(ts.values - offset, clip_floor, clip_ceil)
print(polarity, offset, clip_floor, clip_ceil)
print(clipped_values)
integrator_types = set(['trapz', 'cumtrapz', 'simps', 'romb'])
if integrator in integrator_types:
integrator = getattr(integrate, integrator)
integrator = integrator | |
dev.wait_event(['FST-EVENT-IFACE'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-IFACE detached (AP) for " + ifname)
for t in [ "detached", "ifname=" + ifname, "group=" + group ]:
if t not in ev:
raise Exception("Unexpected FST-EVENT-IFACE data (AP): " + ev)
def fst_detach_sta(dev, ifname, group):
    """Detach ifname from an FST group on the STA side and verify the event."""
    dev.dump_monitor()
    if "OK" not in dev.global_request("FST-DETACH " + ifname):
        raise Exception("FST-DETACH (STA) failed for " + ifname)
    ev = dev.wait_global_event(['FST-EVENT-IFACE'], timeout=5)
    if ev is None:
        raise Exception("No FST-EVENT-IFACE detached (STA) for " + ifname)
    expected = ("detached", "ifname=" + ifname, "group=" + group)
    for token in expected:
        if token not in ev:
            raise Exception("Unexpected FST-EVENT-IFACE data (STA): " + ev)
def fst_wait_event_peer_ap(dev, event, ifname, addr):
    """Wait for an FST-EVENT-PEER on the AP side and check its fields."""
    ev = dev.wait_event(['FST-EVENT-PEER'], timeout=5)
    if ev is None:
        raise Exception("No FST-EVENT-PEER connected (AP)")
    expected = (" " + event + " ", "ifname=" + ifname, "peer_addr=" + addr)
    for token in expected:
        if token not in ev:
            raise Exception("Unexpected FST-EVENT-PEER data (AP): " + ev)
def fst_wait_event_peer_sta(dev, event, ifname, addr):
    """Wait for an FST-EVENT-PEER on the STA side and check its fields."""
    ev = dev.wait_global_event(['FST-EVENT-PEER'], timeout=5)
    if ev is None:
        raise Exception("No FST-EVENT-PEER connected (STA)")
    expected = (" " + event + " ", "ifname=" + ifname, "peer_addr=" + addr)
    for token in expected:
        if token not in ev:
            raise Exception("Unexpected FST-EVENT-PEER data (STA): " + ev)
def fst_setup_req(dev, hglobal, freq, dst, req, stie, mbie="", no_wait=False):
    """Send an FST Setup Request action frame and (unless no_wait) wait until
    the AP side reports new_state=SETUP_COMPLETION."""
    frame = req + stie + mbie
    dev.request("MGMT_TX %s %s freq=%d action=%s" % (dst, dst, freq, frame))
    ack = dev.wait_event(['MGMT-TX-STATUS'], timeout=5)
    if ack is None or "result=SUCCESS" not in ack:
        raise Exception("FST Action frame not ACKed")
    if no_wait:
        return
    while True:
        ev = hglobal.wait_event(['FST-EVENT-SESSION'], timeout=5)
        if ev is None:
            raise Exception("No FST-EVENT-SESSION (AP)")
        if "new_state=SETUP_COMPLETION" in ev:
            return
def fst_start_and_connect(apdev, group, sgroup):
    """Start two FST-grouped APs (11a + 11g), attach two STA interfaces, and connect one STA to each AP.

    Returns:
        tuple: (hglobal, wpas, wpas2, hapd, hapd2)

    Raises:
        HwsimSkip: if the FST manager test interface is not supported
        Exception: on any unexpected control-interface response or event
    """
    hglobal = hostapd.HostapdGlobal()
    if "OK" not in hglobal.request("FST-MANAGER TEST_REQUEST IS_SUPPORTED"):
        raise HwsimSkip("No FST testing support")
    # First AP: 5 GHz (11a, channel 36)
    params = { "ssid": "fst_11a", "hw_mode": "a", "channel": "36",
               "country_code": "US" }
    hapd = hostapd.add_ap(apdev[0], params)
    fst_attach_ap(hglobal, apdev[0]['ifname'], group)
    # A second FST-ATTACH for an already-attached interface must be rejected
    cmd = "FST-ATTACH %s %s" % (apdev[0]['ifname'], group)
    if "FAIL" not in hglobal.request(cmd):
        raise Exception("Duplicated FST-ATTACH (AP) accepted")
    # Second AP: 2.4 GHz (11g, channel 1)
    params = { "ssid": "fst_11g", "hw_mode": "g", "channel": "1",
               "country_code": "US" }
    hapd2 = hostapd.add_ap(apdev[1], params)
    fst_attach_ap(hglobal, apdev[1]['ifname'], group)
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")
    fst_attach_sta(wpas, wpas.ifname, sgroup)
    wpas.interface_add("wlan6", set_ifname=False)
    wpas2 = WpaSupplicant(ifname="wlan6")
    # NOTE(review): wlan6 is attached through the wpas global control interface
    # (wpas, not wpas2) -- presumably because both interfaces belong to the same
    # global wpa_supplicant instance; confirm before changing.
    fst_attach_sta(wpas, wpas2.ifname, sgroup)
    # Connect each STA to its AP and verify peer events on both sides
    wpas.connect("fst_11a", key_mgmt="NONE", scan_freq="5180",
                 wait_connect=False)
    wpas.wait_connected()
    fst_wait_event_peer_sta(wpas, "connected", wpas.ifname, apdev[0]['bssid'])
    fst_wait_event_peer_ap(hglobal, "connected", apdev[0]['ifname'],
                           wpas.own_addr())
    wpas2.connect("fst_11g", key_mgmt="NONE", scan_freq="2412",
                  wait_connect=False)
    wpas2.wait_connected()
    fst_wait_event_peer_sta(wpas, "connected", wpas2.ifname, apdev[1]['bssid'])
    fst_wait_event_peer_ap(hglobal, "connected", apdev[1]['ifname'],
                           wpas2.own_addr())
    return hglobal, wpas, wpas2, hapd, hapd2
def test_fst_test_setup(dev, apdev, test_params):
    """FST setup using separate commands"""
    try:
        _test_fst_test_setup(dev, apdev, test_params)
    finally:
        # Restore the world regulatory domain and drop cached scan results
        subprocess.call(['iw', 'reg', 'set', '00'])
        for i in range(2):
            dev[i].flush_scan_cache()
def _test_fst_test_setup(dev, apdev, test_params):
    # Full FST session lifecycle: add -> initiate -> respond -> establish ->
    # remove -> disconnect -> detach, checking events at every step.
    group = "fstg0b"
    sgroup = "fstg1b"
    hglobal, wpas, wpas2, hapd, hapd2 = fst_start_and_connect(apdev, group, sgroup)
    # Create an FST session on the STA side and configure its endpoints.
    sid = wpas.global_request("FST-MANAGER SESSION_ADD " + sgroup).strip()
    if "FAIL" in sid:
        raise Exception("FST-MANAGER SESSION_ADD (STA) failed")
    fst_session_set(wpas, sid, "old_ifname", wpas.ifname)
    fst_session_set(wpas, sid, "old_peer_addr", apdev[0]['bssid'])
    fst_session_set(wpas, sid, "new_ifname", wpas2.ifname)
    fst_session_set(wpas, sid, "new_peer_addr", apdev[1]['bssid'])
    if "OK" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
        raise Exception("FST-MANAGER SESSION_INITIATE failed")
    # Wait for the AP side to reach SETUP_COMPLETION and accept the session.
    while True:
        ev = hglobal.wait_event(['FST-EVENT-SESSION'], timeout=5)
        if ev is None:
            raise Exception("No FST-EVENT-SESSION (AP)")
        if "new_state=SETUP_COMPLETION" in ev:
            f = re.search("session_id=(\d+)", ev)
            if f is None:
                raise Exception("No session_id in FST-EVENT-SESSION")
            sid_ap = f.group(1)
            cmd = "FST-MANAGER SESSION_RESPOND %s accept" % sid_ap
            if "OK" not in hglobal.request(cmd):
                raise Exception("FST-MANAGER SESSION_RESPOND failed on AP")
            break
    # STA side: SETUP_COMPLETION, then EVENT_FST_ESTABLISHED.
    ev = wpas.wait_global_event(["FST-EVENT-SESSION"], timeout=5)
    if ev is None:
        raise Exception("No FST-EVENT-SESSION")
    if "new_state=SETUP_COMPLETION" not in ev:
        raise Exception("Unexpected FST-EVENT-SESSION data: " + ev)
    ev = wpas.wait_global_event(["FST-EVENT-SESSION"], timeout=5)
    if ev is None:
        raise Exception("No FST-EVENT-SESSION")
    if "event_type=EVENT_FST_ESTABLISHED" not in ev:
        raise Exception("Unexpected FST-EVENT-SESSION data: " + ev)
    # Remove the session; both sides must report a return to INITIAL.
    cmd = "FST-MANAGER SESSION_REMOVE " + sid
    if "OK" not in wpas.global_request(cmd):
        raise Exception("FST-MANAGER SESSION_REMOVE failed")
    ev = wpas.wait_global_event(["FST-EVENT-SESSION"], timeout=5)
    if ev is None:
        raise Exception("No FST-EVENT-SESSION")
    if "new_state=INITIAL" not in ev:
        raise Exception("Unexpected FST-EVENT-SESSION data (STA): " + ev)
    ev = hglobal.wait_event(['FST-EVENT-SESSION'], timeout=5)
    if ev is None:
        raise Exception("No FST-EVENT-SESSION (AP)")
    if "new_state=INITIAL" not in ev:
        raise Exception("Unexpected FST-EVENT-SESSION data (AP): " + ev)
    # Removing the same session twice must be rejected.
    if "FAIL" not in wpas.global_request(cmd):
        raise Exception("Duplicated FST-MANAGER SESSION_REMOVE accepted")
    hglobal.request("FST-MANAGER SESSION_REMOVE " + sid_ap)
    # Disconnect both STAs and verify the peer-disconnected events.
    wpas.request("DISCONNECT")
    wpas.wait_disconnected()
    fst_wait_event_peer_sta(wpas, "disconnected", wpas.ifname,
                            apdev[0]['bssid'])
    fst_wait_event_peer_ap(hglobal, "disconnected", apdev[0]['ifname'],
                           wpas.own_addr())
    wpas2.request("DISCONNECT")
    wpas2.wait_disconnected()
    fst_wait_event_peer_sta(wpas, "disconnected", wpas2.ifname,
                            apdev[1]['bssid'])
    fst_wait_event_peer_ap(hglobal, "disconnected", apdev[1]['ifname'],
                           wpas2.own_addr())
    # Detach all interfaces; a duplicate FST-DETACH must be rejected.
    fst_detach_ap(hglobal, apdev[0]['ifname'], group)
    if "FAIL" not in hglobal.request("FST-DETACH " + apdev[0]['ifname']):
        raise Exception("Duplicated FST-DETACH (AP) accepted")
    hapd.disable()
    fst_detach_ap(hglobal, apdev[1]['ifname'], group)
    hapd2.disable()
    fst_detach_sta(wpas, wpas.ifname, sgroup)
    fst_detach_sta(wpas, wpas2.ifname, sgroup)
def test_fst_setup_mbie_diff(dev, apdev, test_params):
    """FST setup and different MBIE in FST Setup Request"""
    try:
        _test_fst_setup_mbie_diff(dev, apdev, test_params)
    finally:
        # Restore the world regulatory domain and drop cached scan results
        subprocess.call(['iw', 'reg', 'set', '00'])
        for i in range(2):
            dev[i].flush_scan_cache()
def _test_fst_setup_mbie_diff(dev, apdev, test_params):
    """Exercise FST Setup Request handling with various malformed/odd MBIEs."""
    group = "fstg0c"
    sgroup = "fstg1c"
    hglobal, wpas, wpas2, hapd, hapd2 = fst_start_and_connect(apdev, group, sgroup)

    # FST Setup Request: Category, FST Action, Dialog Token (non-zero),
    # LLT (32 bits, see 10.32), Session Transition (see 8.4.2.147),
    # Multi-band element (optional, see 8.4.2.140)
    # Session Transition: EID, Len, FSTS ID(4), Session Control,
    # New Band (Band ID, Setup, Operation), Old Band (Band ID, Setup, Operation)
    # Multi-band element: EID, Len, Multi-band Control, Band ID,
    # Operating Class, Channel Number, BSSID (6), Beacon Interval (2),
    # TSF Offset (8), Multi-band Connection Capability, FSTSessionTimeOut,
    # STA MAC Address (6, optional), Pairwise Cipher Suite Count (2, optional),
    # Pairwise Cipher Suite List (4xm, optional)

    # MBIE with the non-matching STA MAC Address:
    req = "1200011a060000"
    stie = "a40b0100000000020001040001"
    mbie = "9e1c0c0200010200000004000000000000000000000000ff0200000006ff"
    fst_setup_req(wpas, hglobal, 5180, apdev[0]['bssid'], req, stie, mbie)

    # MBIE without the STA MAC Address:
    req = "1200011a060000"
    stie = "a40b0100000000020001040001"
    mbie = "9e16040200010200000004000000000000000000000000ff"
    fst_setup_req(wpas, hglobal, 5180, apdev[0]['bssid'], req, stie, mbie)

    # MBIE with unsupported STA Role:
    req = "1200011a060000"
    stie = "a40b0100000000020001040001"
    mbie = "9e16070200010200000004000000000000000000000000ff"
    fst_setup_req(wpas, hglobal, 5180, apdev[0]['bssid'], req, stie, mbie)

    # MBIE with unsupported Band ID:
    req = "1200011a060000"
    stie = "a40b0100000000020001040001"
    mbie = "9e1604ff00010200000004000000000000000000000000ff"
    fst_setup_req(wpas, hglobal, 5180, apdev[0]['bssid'], req, stie, mbie)

    # FST Setup Request without MBIE (different FSTS ID):
    req = "1200011a060000"
    stie = "a40b0200000000020001040001"
    fst_setup_req(wpas, hglobal, 5180, apdev[0]['bssid'], req, stie)

    # MBIE update OOM on AP
    req = "1200011a060000"
    stie = "a40b0100000000020001040001"
    mbie = "9e16040200010200000004000000000000000000000000ff"
    try:
        with alloc_fail(hapd, 1, "mb_ies_by_info"):
            fst_setup_req(wpas, hglobal, 5180, apdev[0]['bssid'], req, stie,
                          mbie, no_wait=True)
    except HwsimSkip:
        # BUG FIX: was `except HwsimSkip, e:` -- Python 2-only syntax that is a
        # SyntaxError under Python 3; the bound exception object was unused.
        # Skip exception to allow proper cleanup
        pass

    # Remove sessions to avoid causing issues to following test cases
    s = hglobal.request("FST-MANAGER LIST_SESSIONS " + group)
    if not s.startswith("FAIL"):
        for sid in s.split(' '):
            if len(sid):
                hglobal.request("FST-MANAGER SESSION_REMOVE " + sid)
def test_fst_many_setup(dev, apdev, test_params):
    """FST setup multiple times"""
    try:
        _test_fst_many_setup(dev, apdev, test_params)
    finally:
        # Restore the world regulatory domain and drop cached scan results
        subprocess.call(['iw', 'reg', 'set', '00'])
        for i in range(2):
            dev[i].flush_scan_cache()
def _test_fst_many_setup(dev, apdev, test_params):
group = "fstg0d"
sgroup = "fstg1d"
hglobal, wpas, wpas2, hapd, hapd2 = fst_start_and_connect(apdev, group, sgroup)
sid = wpas.global_request("FST-MANAGER SESSION_ADD " + sgroup).strip()
if "FAIL" in sid:
raise Exception("FST-MANAGER SESSION_ADD (STA) failed")
fst_session_set(wpas, sid, "old_ifname", wpas.ifname)
fst_session_set(wpas, sid, "old_peer_addr", apdev[0]['bssid'])
fst_session_set(wpas, sid, "new_ifname", wpas2.ifname)
fst_session_set(wpas, sid, "new_peer_addr", apdev[1]['bssid'])
for i in range(257):
if "OK" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("FST-MANAGER SESSION_INITIATE failed")
while True:
ev = hglobal.wait_event(['FST-EVENT-SESSION'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (AP)")
if "new_state=SETUP_COMPLETION" in ev:
f = re.search("session_id=(\d+)", ev)
if f is None:
raise Exception("No session_id in FST-EVENT-SESSION")
sid_ap = f.group(1)
cmd = "FST-MANAGER SESSION_RESPOND %s accept" % sid_ap
if "OK" not in hglobal.request(cmd):
raise Exception("FST-MANAGER SESSION_RESPOND failed on AP")
break
ev = wpas.wait_global_event(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (STA)")
if "new_state=SETUP_COMPLETION" not in ev:
raise Exception("Unexpected FST-EVENT-SESSION data: " + ev)
ev = wpas.wait_global_event(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (STA)")
if "event_type=EVENT_FST_ESTABLISHED" not in ev:
raise Exception("Unexpected FST-EVENT-SESSION data: " + ev)
if "OK" not in wpas.global_request("FST-MANAGER SESSION_TEARDOWN " + sid):
raise Exception("FST-MANAGER SESSION_INITIATE failed")
if i == 0:
if "FAIL" not in wpas.global_request("FST-MANAGER SESSION_TEARDOWN " + sid):
raise Exception("Duplicate FST-MANAGER SESSION_TEARDOWN accepted")
ev = wpas.wait_global_event(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (STA teardown -->initial)")
if "new_state=INITIAL" not in ev:
raise Exception("Unexpected FST-EVENT-SESSION data (STA): " + ev)
ev = hglobal.wait_event(['FST-EVENT-SESSION'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (AP teardown -->initial)")
if "new_state=INITIAL" not in ev:
raise Exception("Unexpected FST-EVENT-SESSION | |
(troop_set_slot, "trp_temp_troop", 3, -1),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_remove_selected_item, 3, ":item_remove"),
(presentation_set_duration, 0),
(assign, "$g_presentation_state", 0),
(start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_5"),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 4),
# (player_get_slot, ":item_remove", ":my_player_no", ":selected_item_index"),
# (player_set_slot, ":my_player_no", ":selected_item_index", -1),
(troop_get_slot, ":item_remove", "trp_temp_troop", 4),
(troop_set_slot, "trp_temp_troop", 4, -1),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_remove_selected_item, 4, ":item_remove"),
(presentation_set_duration, 0),
(assign, "$g_presentation_state", 0),
(start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_6"),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 5),
# (player_get_slot, ":item_remove", ":my_player_no", ":selected_item_index"),
# (player_set_slot, ":my_player_no", ":selected_item_index", -1),
(troop_get_slot, ":item_remove", "trp_temp_troop", 5),
(troop_set_slot, "trp_temp_troop", 5, -1),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_remove_selected_item, 5, ":item_remove"),
(presentation_set_duration, 0),
(assign, "$g_presentation_state", 0),
(start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_7"),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 6),
# (player_get_slot, ":item_remove", ":my_player_no", ":selected_item_index"),
# (player_set_slot, ":my_player_no", ":selected_item_index", -1),
(troop_get_slot, ":item_remove", "trp_temp_troop", 6),
(troop_set_slot, "trp_temp_troop", 6, -1),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_remove_selected_item, 6, ":item_remove"),
(presentation_set_duration, 0),
(assign, "$g_presentation_state", 0),
(start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_8"),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 7),
# (player_get_slot, ":item_remove", ":my_player_no", ":selected_item_index"),
# (player_set_slot, ":my_player_no", ":selected_item_index", -1),
(troop_get_slot, ":item_remove", "trp_temp_troop", 7),
(troop_set_slot, "trp_temp_troop", 7, -1),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_remove_selected_item, 7, ":item_remove"),
(presentation_set_duration, 0),
(assign, "$g_presentation_state", 0),
(start_presentation, "prsnt_coop_item_select"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_9"),
# (eq, "$g_horses_are_avaliable", 1),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 8),
# (player_get_slot, ":item_remove", ":my_player_no", ":selected_item_index"),
# (player_set_slot, ":my_player_no", ":selected_item_index", -1),
(troop_get_slot, ":item_remove", "trp_temp_troop", 8),
(troop_set_slot, "trp_temp_troop", 8, -1),
(multiplayer_send_3_int_to_server, multiplayer_event_coop_send_to_server, coop_event_player_remove_selected_item, 8, ":item_remove"),
(presentation_set_duration, 0),
(assign, "$g_presentation_state", 0),
(start_presentation, "prsnt_coop_item_select"),
(try_end),
(else_try),
(gt, "$g_presentation_state", 0),
(presentation_set_duration, 0),
(assign, "$g_presentation_state", 0),
(start_presentation, "prsnt_coop_item_select"),
(try_end),
(try_end),
(else_try),
(assign, "$g_close_equipment_selection", 0),
(presentation_set_duration, 0),
(try_end),
]),
(ti_on_presentation_run,
[(store_trigger_param_1, ":cur_time"),
(try_begin),
(eq, "$g_close_equipment_selection", 0),
(try_begin),
(key_clicked, key_escape),
(try_begin),
(neq, "$g_current_opened_item_details", -1),
(close_item_details),
(assign, "$g_current_opened_item_details", -1),
(try_end),
(gt, ":cur_time", 200),
(presentation_set_duration, 0),
(try_end),
(else_try),
(assign, "$g_close_equipment_selection", 0),
#daha sonra buraya siege modundaysa ve takimini yeni degistirdigi icin spawn olamiyorsa start_presentation, spawn_counter satirini ekle sdsd.
(presentation_set_duration, 0),
(try_end),
]),
]),
##Doghotel begin
# ("doghotel_configure", prsntf_manual_end_only, mesh_pic_bandits,
# [
# (ti_on_presentation_load,
# [
# (set_fixed_point_multiplier, 1000),
# (presentation_set_duration, 999999),
# (assign, "$g_doghotel_prsnt_configure_close", 0),
# (set_container_overlay, -1),
# (create_mesh_overlay, "$g_presentation_obj_doghotel_42", "mesh_mp_score_b"),
# (overlay_set_alpha, "$g_presentation_obj_doghotel_42", 0xFF),
# (overlay_set_color, "$g_presentation_obj_doghotel_42", 0xFFFFFF),
# (position_set_x, pos1, 788),
# (position_set_y, pos1, 1064),
# (overlay_set_size, "$g_presentation_obj_doghotel_42", pos1),
# (position_set_x, pos1, 20),
# (position_set_y, pos1, 80),
# (overlay_set_position, "$g_presentation_obj_doghotel_42", pos1),
# (str_clear, s0),
# (create_text_overlay, reg0, s0, tf_scrollable_style_2),
# (position_set_x, pos1, 0),
# (position_set_y, pos1, -200),
# (overlay_set_position, reg0, pos1),
# (position_set_x, pos1, 880),
# (position_set_y, pos1, 888),
# (overlay_set_size, reg0, pos1),
# (overlay_set_area_size, reg0, pos1),
# (set_container_overlay, reg0),
# (create_text_overlay, "$g_presentation_obj_doghotel_1", "str_doghotel_config_shortcut_key", tf_right_align|tf_with_outline),
# (position_set_x, pos1, 635),
# (position_set_y, pos1, 660),
# (overlay_set_position, "$g_presentation_obj_doghotel_1", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_1", 0xA9E1BA),
# (create_button_overlay, "$g_presentation_obj_doghotel_41", "str_doghotel_defaults", 65544),
# (position_set_x, pos1, 620),
# (position_set_y, pos1, 140),
# (overlay_set_position, "$g_presentation_obj_doghotel_41", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_41", 0xA9E1BA),
# (create_button_overlay, "$g_presentation_obj_doghotel_40", "str_doghotel_done", 65544),
# (position_set_x, pos1, 620),
# (position_set_y, pos1, 100),
# (overlay_set_position, "$g_presentation_obj_doghotel_40", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_40", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_35", "str_doghotel_enable_brainy_bots", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 56),
# (position_set_y, pos1, 660),
# (overlay_set_position, "$g_presentation_obj_doghotel_35", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_35", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_36", "str_doghotel_enable_only_for_heroes", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 56),
# (position_set_y, pos1, 620),
# (overlay_set_position, "$g_presentation_obj_doghotel_36", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_36", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_37", "str_doghotel_enable_movement_actions", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 56),
# (position_set_y, pos1, 580),
# (overlay_set_position, "$g_presentation_obj_doghotel_37", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_37", 0xA9E1BA),
# (try_begin),
# (neg|game_in_multiplayer_mode),
# (create_text_overlay, "$g_presentation_obj_doghotel_39", "str_doghotel_combat_ai", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, 540),
# (overlay_set_position, "$g_presentation_obj_doghotel_39", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_39", 0xA9E1BA),
# (try_end),
# (create_text_overlay, "$g_presentation_obj_doghotel_12", "str_doghotel_block_chance_range", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, 500),
# (overlay_set_position, "$g_presentation_obj_doghotel_12", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_12", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_13", "str_doghotel_hold_chance_range", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, 460),
# (overlay_set_position, "$g_presentation_obj_doghotel_13", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_13", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_14", "str_doghotel_feint_chance_range", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, 420),
# (overlay_set_position, "$g_presentation_obj_doghotel_14", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_14", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_15", "str_doghotel_kick_chance_range", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, 380),
# (overlay_set_position, "$g_presentation_obj_doghotel_15", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_15", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_16", "str_doghotel_weapon_prof_range", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, 340),
# (overlay_set_position, "$g_presentation_obj_doghotel_16", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_16", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_17", "str_doghotel_hold_time_range", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, 300),
# (overlay_set_position, "$g_presentation_obj_doghotel_17", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_17", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_7", "str_doghotel_max_consecutive_feints", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, 260),
# (overlay_set_position, "$g_presentation_obj_doghotel_7", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_7", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_8", "str_doghotel_renown_bonus", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, 220),
# (overlay_set_position, "$g_presentation_obj_doghotel_8", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_8", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_9", "str_doghotel_renown_min", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, 180),
# (overlay_set_position, "$g_presentation_obj_doghotel_9", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_9", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_10", "str_doghotel_batch_size", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, 140),
# (overlay_set_position, "$g_presentation_obj_doghotel_10", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_10", 0xA9E1BA),
# (create_text_overlay, "$g_presentation_obj_doghotel_11", "str_doghotel_nearby_enemy_radius", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, 100),
# (overlay_set_position, "$g_presentation_obj_doghotel_11", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_11", 0xA9E1BA),
# (create_check_box_overlay, "$g_presentation_obj_doghotel_32", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 42),
# (position_set_y, pos1, 667),
# (overlay_set_position, "$g_presentation_obj_doghotel_32", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_32", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_enable_brainy_bots", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_32", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_32", "$g_doghotel_enable_brainy_bots"),
# (try_end),
# (try_begin),
# (eq, "$g_doghotel_enable_brainy_bots", 1),
# (overlay_set_val, "$g_presentation_obj_doghotel_32", 1),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_32", 0),
# (try_end),
# (create_check_box_overlay, "$g_presentation_obj_doghotel_33", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 42),
# (position_set_y, pos1, 627),
# (overlay_set_position, "$g_presentation_obj_doghotel_33", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_33", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_enable_only_for_heroes", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_33", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_33", "$g_doghotel_enable_only_for_heroes"),
# (try_end),
# (try_begin),
# (eq, "$g_doghotel_enable_only_for_heroes", 1),
# (overlay_set_val, "$g_presentation_obj_doghotel_33", 1),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_33", 0),
# (try_end),
# (create_check_box_overlay, "$g_presentation_obj_doghotel_34", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 42),
# (position_set_y, pos1, 587),
# (overlay_set_position, "$g_presentation_obj_doghotel_34", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_34", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_movement_actions_enabled", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_34", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_34", "$g_doghotel_movement_actions_enabled"),
# (try_end),
# (try_begin),
# (eq, "$g_doghotel_movement_actions_enabled", 1),
# (overlay_set_val, "$g_presentation_obj_doghotel_34", 1),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_34", 0),
# (try_end),
# (try_begin),
# (neg|game_in_multiplayer_mode),
# (options_get_combat_ai, ":var0"),
# (create_combo_button_overlay, "$g_presentation_obj_doghotel_38", 4),
# (position_set_x, pos1, 540),
# (position_set_y, pos1, 540),
# (overlay_set_position, "$g_presentation_obj_doghotel_38", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_38", 0xA9E1BA),
# (try_begin),
# (eq, ":var0", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_38", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_38", ":var0"),
# (try_end),
# (position_set_x, pos1, 800),
# (position_set_y, pos1, 800),
# (overlay_set_size, "$g_presentation_obj_doghotel_38", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_38", 0x000000),
# (overlay_add_item, "$g_presentation_obj_doghotel_38", "str_doghotel_combat_ai_good"),
# (overlay_add_item, "$g_presentation_obj_doghotel_38", "str_doghotel_combat_ai_average"),
# (overlay_add_item, "$g_presentation_obj_doghotel_38", "str_doghotel_combat_ai_poor"),
# (overlay_set_val, "$g_presentation_obj_doghotel_38", ":var0"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_18", 0, 101, 4),
# (position_set_x, pos1, 420),
# (position_set_y, pos1, 500),
# (overlay_set_position, "$g_presentation_obj_doghotel_18", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_18", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_min_block_chance", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_18", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_18", "$g_doghotel_min_block_chance"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_19", 0, 101, 4),
# (position_set_x, pos1, 420),
# (position_set_y, pos1, 460),
# (overlay_set_position, "$g_presentation_obj_doghotel_19", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_19", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_min_hold_chance", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_19", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_19", "$g_doghotel_min_hold_chance"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_20", 0, 101, 4),
# (position_set_x, pos1, 420),
# (position_set_y, pos1, 420),
# (overlay_set_position, "$g_presentation_obj_doghotel_20", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_20", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_min_feint_chance", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_20", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_20", "$g_doghotel_min_feint_chance"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_21", 0, 101, 4),
# (position_set_x, pos1, 420),
# (position_set_y, pos1, 380),
# (overlay_set_position, "$g_presentation_obj_doghotel_21", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_21", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_min_kick_chance", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_21", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_21", "$g_doghotel_min_kick_chance"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_22", 0, 1001, 4),
# (position_set_x, pos1, 420),
# (position_set_y, pos1, 340),
# (overlay_set_position, "$g_presentation_obj_doghotel_22", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_22", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_min_weapon_prof", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_22", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_22", "$g_doghotel_min_weapon_prof"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_23", 0, 10001, 4),
# (position_set_x, pos1, 420),
# (position_set_y, pos1, 300),
# (overlay_set_position, "$g_presentation_obj_doghotel_23", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_23", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_min_hold_msec", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_23", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_23", "$g_doghotel_min_hold_msec"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_2", 0, 11, 4),
# (position_set_x, pos1, 420),
# (position_set_y, pos1, 260),
# (overlay_set_position, "$g_presentation_obj_doghotel_2", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_2", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_max_consecutive_feints", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_2", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_2", "$g_doghotel_max_consecutive_feints"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_3", 0, 101, 4),
# (position_set_x, pos1, 420),
# (position_set_y, pos1, 220),
# (overlay_set_position, "$g_presentation_obj_doghotel_3", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_3", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_renown_block_bonus", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_3", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_3", "$g_doghotel_renown_block_bonus"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_4", 0, 10001, 4),
# (position_set_x, pos1, 420),
# (position_set_y, pos1, 180),
# (overlay_set_position, "$g_presentation_obj_doghotel_4", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_4", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_renown_min", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_4", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_4", "$g_doghotel_renown_min"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_5", 1, 10001, 4),
# (position_set_x, pos1, 420),
# (position_set_y, pos1, 140),
# (overlay_set_position, "$g_presentation_obj_doghotel_5", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_5", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_batch_size", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_5", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_5", "$g_doghotel_batch_size"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_6", 1, 10001, 4),
# (position_set_x, pos1, 420),
# (position_set_y, pos1, 100),
# (overlay_set_position, "$g_presentation_obj_doghotel_6", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_6", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_nearby_enemy_radius", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_6", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_6", "$g_doghotel_nearby_enemy_radius"),
# (try_end),
# (try_begin),
# (game_in_multiplayer_mode),
# (create_text_overlay, "$g_presentation_obj_doghotel_30", "str_doghotel_anti_autoblock", tf_left_align|tf_with_outline),
# (position_set_x, pos1, 56),
# (position_set_y, pos1, 540),
# (overlay_set_position, "$g_presentation_obj_doghotel_30", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_30", 0xA9E1BA),
# (try_end),
# (try_begin),
# (game_in_multiplayer_mode),
# (create_check_box_overlay, "$g_presentation_obj_doghotel_31", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 42),
# (position_set_y, pos1, 547),
# (overlay_set_position, "$g_presentation_obj_doghotel_31", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_31", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_anti_autoblock", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_31", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_31", "$g_doghotel_anti_autoblock"),
# (try_end),
# (try_begin),
# (eq, "$g_doghotel_anti_autoblock", 1),
# (overlay_set_val, "$g_presentation_obj_doghotel_31", 1),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_31", 0),
# (try_end),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_24", 0, 101, 4),
# (position_set_x, pos1, 555),
# (position_set_y, pos1, 500),
# (overlay_set_position, "$g_presentation_obj_doghotel_24", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_24", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_max_block_chance", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_24", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_24", "$g_doghotel_max_block_chance"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_25", 0, 101, 4),
# (position_set_x, pos1, 555),
# (position_set_y, pos1, 460),
# (overlay_set_position, "$g_presentation_obj_doghotel_25", pos1),
# (overlay_set_color, "$g_presentation_obj_doghotel_25", 0xA9E1BA),
# (try_begin),
# (eq, "$g_doghotel_max_hold_chance", -1),
# (overlay_set_val, "$g_presentation_obj_doghotel_25", 0),
# (else_try),
# (overlay_set_val, "$g_presentation_obj_doghotel_25", "$g_doghotel_max_hold_chance"),
# (try_end),
# (create_number_box_overlay, "$g_presentation_obj_doghotel_26", 0, 101, 4),
# (position_set_x, pos1, 555),
# (position_set_y, pos1, 420),
# | |
float. Each coefficient will be set to zero with
probability `1-prob`. Otherwise coefficients will be chosen
randomly from base ring (and may be zero).
- ``*args, **kwds`` - passed on to random_element function of base
ring.
EXAMPLES::
sage: M = FreeModule(ZZ, 3)
sage: M.random_element()
(-1, 2, 1)
sage: M.random_element()
(-95, -1, -2)
sage: M.random_element()
(-12, 0, 0)
Passes extra positional or keyword arguments through::
sage: M.random_element(5,10)
(5, 5, 5)
::
sage: M = FreeModule(ZZ, 16)
sage: M.random_element()
(-6, 5, 0, 0, -2, 0, 1, -4, -6, 1, -1, 1, 1, -1, 1, -1)
sage: M.random_element(prob=0.3)
(0, 0, 0, 0, -3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, -3)
"""
rand = current_randstate().python_random().random
R = self.base_ring()
v = self(0)
prob = float(prob)
for i in range(self.rank()):
if rand() <= prob:
v[i] = R.random_element(*args, **kwds)
return v
###############################################################################
#
# Ambient free modules over an integral domain.
#
###############################################################################
class FreeModule_ambient_domain(FreeModule_ambient):
    """
    Ambient free module over an integral domain.

    Compared with a generic ambient free module, this class knows that the
    base ring embeds in its fraction field, so it can provide an ambient
    vector space and fraction-field coordinates.
    """
    def __init__(self, base_ring, rank, sparse=False):
        """
        Create the ambient free module of given rank over the given
        integral domain.

        EXAMPLES::

            sage: FreeModule(PolynomialRing(GF(5),'x'), 3)
            Ambient free module of rank 3 over the principal ideal domain
            Univariate Polynomial Ring in x over Finite Field of size 5
        """
        FreeModule_ambient.__init__(self, base_ring, rank, sparse)
    def _repr_(self):
        """
        The printing representation of self.

        EXAMPLES::

            sage: R = PolynomialRing(ZZ,'x')
            sage: M = FreeModule(R,7)
            sage: print M
            Ambient free module of rank 7 over the integral domain Univariate Polynomial Ring in x over Integer Ring
            sage: print M._repr_()
            Ambient free module of rank 7 over the integral domain Univariate Polynomial Ring in x over Integer Ring

        The system representation can be overwritten, but leaves _repr_
        unmodified.

        ::

            sage: M.rename('M')
            sage: print M
            M
            sage: print M._repr_()
            Ambient free module of rank 7 over the integral domain Univariate Polynomial Ring in x over Integer Ring

        Sparse modules print this fact.

        ::

            sage: N = FreeModule(R,7,sparse=True)
            sage: print N
            Ambient sparse free module of rank 7 over the integral domain Univariate Polynomial Ring in x over Integer Ring

        (Now clean up again.)

        ::

            sage: M.reset_name()
            sage: print M
            Ambient free module of rank 7 over the integral domain Univariate Polynomial Ring in x over Integer Ring
        """
        # The printed form records whether elements are stored sparsely.
        if self.is_sparse():
            return "Ambient sparse free module of rank %s over the integral domain %s"%(
                self.rank(), self.base_ring())
        else:
            return "Ambient free module of rank %s over the integral domain %s"%(
                self.rank(), self.base_ring())
    def base_field(self):
        """
        Return the fraction field of the base ring of self.

        EXAMPLES::

            sage: M = ZZ^3; M.base_field()
            Rational Field
            sage: M = PolynomialRing(GF(5),'x')^3; M.base_field()
            Fraction Field of Univariate Polynomial Ring in x over Finite Field of size 5
        """
        return self.base_ring().fraction_field()
    def ambient_vector_space(self):
        """
        Returns the ambient vector space, which is this free module
        tensored with its fraction field.

        EXAMPLES::

            sage: M = ZZ^3;
            sage: V = M.ambient_vector_space(); V
            Vector space of dimension 3 over Rational Field

        If an inner product on the module is specified, then this
        is preserved on the ambient vector space.

        ::

            sage: N = FreeModule(ZZ,4,inner_product_matrix=1)
            sage: U = N.ambient_vector_space()
            sage: U
            Ambient quadratic space of dimension 4 over Rational Field
            Inner product matrix:
            [1 0 0 0]
            [0 1 0 0]
            [0 0 1 0]
            [0 0 0 1]
            sage: P = N.submodule_with_basis([[1,-1,0,0],[0,1,-1,0],[0,0,1,-1]])
            sage: P.gram_matrix()
            [ 2 -1  0]
            [-1  2 -1]
            [ 0 -1  2]
            sage: U == N.ambient_vector_space()
            True
            sage: U == V
            False
        """
        # Cache the result: the ambient vector space never changes, so it is
        # computed once and stored in a (name-mangled) private attribute.
        try:
            return self.__ambient_vector_space
        except AttributeError:
            self.__ambient_vector_space = FreeModule(self.base_field(), self.rank(), sparse=self.is_sparse())
            return self.__ambient_vector_space
    def coordinate_vector(self, v, check=True):
        """
        Write `v` in terms of the standard basis for self and
        return the resulting coefficients in a vector over the fraction
        field of the base ring.

        INPUT:

        - ``v`` - vector

        - ``check`` - bool (default: True); if True, also
          verify that v is really in self.

        OUTPUT: a vector over the fraction field of the base ring

        Returns a vector `c` such that if `B` is the basis for self, then

        .. math::

            \sum c_i B_i = v.

        If `v` is not in self, raises an ArithmeticError exception.

        EXAMPLES::

            sage: V = ZZ^3
            sage: v = V.coordinate_vector([1,5,9]); v
            (1, 5, 9)
            sage: v.parent()
            Vector space of dimension 3 over Rational Field
        """
        # NOTE(review): ``check`` is accepted but never consulted here — the
        # input is simply coerced into the ambient vector space.  Confirm
        # whether a membership test was intended before relying on the
        # documented ArithmeticError behavior.
        return self.ambient_vector_space()(v)
    def vector_space(self, base_field=None):
        """
        Returns the vector space obtained from self by tensoring with the
        fraction field of the base ring and extending to the field.

        EXAMPLES::

            sage: M = ZZ^3; M.vector_space()
            Vector space of dimension 3 over Rational Field
        """
        if base_field is None:
            R = self.base_ring()
            return self.change_ring(R.fraction_field())
        else:
            return self.change_ring(base_field)
###############################################################################
#
# Ambient free modules over a principal ideal domain.
#
###############################################################################
class FreeModule_ambient_pid(FreeModule_generic_pid, FreeModule_ambient_domain):
    """
    Ambient free module over a principal ideal domain.
    """
    def __init__(self, base_ring, rank, sparse=False):
        """
        Create the ambient free module of given rank over the given
        principal ideal domain.

        INPUT:

        - ``base_ring`` - a principal ideal domain

        - ``rank`` - a non-negative integer

        - ``sparse`` - bool (default: False)

        EXAMPLES::

            sage: ZZ^3
            Ambient free module of rank 3 over the principal ideal domain Integer Ring
        """
        # All construction work is delegated to the integral-domain parent;
        # this class only mixes in the generic PID behavior.
        FreeModule_ambient_domain.__init__(self, base_ring=base_ring, rank=rank, sparse=sparse)

    def _repr_(self):
        """
        The printing representation of self.

        EXAMPLES::

            sage: M = FreeModule(ZZ,7)
            sage: print M
            Ambient free module of rank 7 over the principal ideal domain Integer Ring
            sage: print M._repr_()
            Ambient free module of rank 7 over the principal ideal domain Integer Ring

        The system representation can be overwritten, but leaves _repr_
        unmodified.

        ::

            sage: M.rename('M')
            sage: print M
            M
            sage: print M._repr_()
            Ambient free module of rank 7 over the principal ideal domain Integer Ring

        Sparse modules print this fact.

        ::

            sage: N = FreeModule(ZZ,7,sparse=True)
            sage: print N
            Ambient sparse free module of rank 7 over the principal ideal domain Integer Ring

        (Now clean up again.)

        ::

            sage: M.reset_name()
            sage: print M
            Ambient free module of rank 7 over the principal ideal domain Integer Ring
        """
        # Sparse and dense forms differ only by the "sparse " tag after
        # "Ambient "; the resulting strings are identical to the classic
        # if/else formulation.
        sparse_tag = "sparse " if self.is_sparse() else ""
        return "Ambient %sfree module of rank %s over the principal ideal domain %s" % (
            sparse_tag, self.rank(), self.base_ring())
###############################################################################
#
# Ambient free modules over a field (i.e., a vector space).
#
###############################################################################
class FreeModule_ambient_field(FreeModule_generic_field, FreeModule_ambient_pid):
    """
    Ambient vector space of finite dimension over a field.
    """
    def __init__(self, base_field, dimension, sparse=False):
        """
        Create the ambient vector space of given dimension over the given
        field.

        INPUT:

        - ``base_field`` - a field

        - ``dimension`` - a non-negative integer

        - ``sparse`` - bool (default: False)

        EXAMPLES::

            sage: QQ^3
            Vector space of dimension 3 over Rational Field
        """
        FreeModule_ambient_pid.__init__(self, base_field, dimension, sparse=sparse)

    def _repr_(self):
        """
        The printing representation of self.

        EXAMPLES::

            sage: V = FreeModule(QQ,7)
            sage: print V
            Vector space of dimension 7 over Rational Field
            sage: print V._repr_()
            Vector space of dimension 7 over Rational Field

        The system representation can be overwritten, but leaves _repr_
        unmodified.

        ::

            sage: V.rename('V')
            sage: print V
            V
            sage: print V._repr_()
            Vector space of dimension 7 over Rational Field

        Sparse modules print this fact.

        ::

            sage: U = FreeModule(QQ,7,sparse=True)
            sage: print U
            Sparse vector space of dimension 7 over Rational Field

        (Now clean up again.)

        ::

            sage: V.reset_name()
            sage: print V
            Vector space of dimension 7 over Rational Field
        """
        # Same strings as the classic if/else: only the leading word
        # changes between the sparse and dense cases.
        prefix = "Sparse vector" if self.is_sparse() else "Vector"
        return "%s space of dimension %s over %s" % (
            prefix, self.dimension(), self.base_ring())

    def ambient_vector_space(self):
        """
        Returns self as the ambient vector space.

        EXAMPLES::

            sage: M = QQ^3
            sage: M.ambient_vector_space()
            Vector space of dimension 3 over Rational Field
        """
        # A vector space over a field is already its own ambient space.
        return self

    def base_field(self):
        """
        Returns the base field of this vector space.

        EXAMPLES::

            sage: M = QQ^3
            sage: M.base_field()
            Rational Field
        """
        # Over a field, the base ring and the base field coincide.
        return self.base_ring()

    def __call__(self, e, coerce=True, copy=True, check=True):
        """
        Create an element of this vector space.

        EXAMPLE::

            sage: k.<a> = GF(3^4)
            sage: VS = k.vector_space()
            sage: VS(a)
            (0, 1, 0, 0)
        """
        # Special case: an element of a finite field over the same base
        # with matching degree is converted through its vector
        # representation.  An AttributeError anywhere along the way (e.g.
        # ``e`` has no parent) falls through to the generic conversion.
        try:
            parent = e.parent()
            from_matching_finite_field = (
                finite_field.is_FiniteField(parent)
                and parent.base_ring() == self.base_ring()
                and parent.degree() == self.degree()
            )
            if from_matching_finite_field:
                return self(e._vector_())
        except AttributeError:
            pass
        return FreeModule_generic_field.__call__(self, e)
###############################################################################
#
# R-Submodule of K^n where K is the | |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/always-newbie161/pyprobml/blob/issue_hermes78/notebooks/logreg_ucb_admissions_numpyro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="bZmPN5Gu1zna"
# # Binomial logistic regression for UCB admissions
#
# We illustrate binary logistic regression on 2 discrete inputs using the example in sec 11.1.4 of [Statistical Rethinking ed 2](https://xcelab.net/rm/statistical-rethinking/).
# The numpyro code is from [Du Phan's site](https://fehiepsi.github.io/rethinking-numpyro/11-god-spiked-the-integers.html)
#
#
# + id="_y0aLBbR1zMh" colab={"base_uri": "https://localhost:8080/"} outputId="8b715628-06e4-4994-aea4-c4b61a03c17e"
# !pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro
# !pip install -q arviz
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="cehGSuboWUCj" outputId="bfbcc046-0ff1-4e78-a8b4-5bf423fd711c"
import arviz as az
# The bare expression is the cell's displayed output: a sanity check that
# the pip installs above picked up a working arviz.
az.__version__
# + colab={"base_uri": "https://localhost:8080/"} id="WKy5McPCB8R_" outputId="63dcc8d2-50b8-4073-8947-2b5a21dc8905"
# !pip install causalgraphicalmodels
# + id="q2Nn5H_nDK7P"
# #!pip install -U daft
# + colab={"base_uri": "https://localhost:8080/"} id="HxnMvcA72EPS" outputId="18693280-d54d-485a-d0c6-135293ce1be2"
import numpy as np
np.set_printoptions(precision=3)
import matplotlib.pyplot as plt
import math
import os
import warnings
import pandas as pd
import jax
# Report which JAX build and backend (cpu/gpu/tpu) this runtime is using.
print("jax version {}".format(jax.__version__))
print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform))
import jax.numpy as jnp
from jax import random, vmap
from jax.scipy.special import expit
# JAX randomness is explicit: keep a key and split it for independent streams.
rng_key = random.PRNGKey(0)
rng_key, rng_key_ = random.split(rng_key)
import numpyro
import numpyro.distributions as dist
from numpyro.distributions import constraints
from numpyro.distributions.transforms import AffineTransform
from numpyro.diagnostics import hpdi, print_summary
from numpyro.infer import Predictive
from numpyro.infer import MCMC, NUTS
from numpyro.infer import SVI, Trace_ELBO, init_to_value
from numpyro.infer.autoguide import AutoLaplaceApproximation
import numpyro.optim as optim
import daft
from causalgraphicalmodels import CausalGraphicalModel
from sklearn.preprocessing import StandardScaler
# + colab={"base_uri": "https://localhost:8080/"} id="oKZlhvtKcDVs" outputId="55136941-f974-42e8-97b7-287ec3bd2f78"
# Number of JAX devices visible to this process.
n = jax.local_device_count()
print(n)
# + [markdown] id="JK9wTe4b2MBq"
# # Data
# + colab={"base_uri": "https://localhost:8080/", "height": 392} id="DmV4wYiI2F1c" outputId="dfc5ec61-7d1d-4188-bd01-6fc16e2cc011"
url = 'https://raw.githubusercontent.com/fehiepsi/rethinking-numpyro/master/data/UCBadmit.csv'
# UCB admissions data; rows pair up as (male, female) per department below.
UCBadmit = pd.read_csv(url, sep=";")
d = UCBadmit
display(d)
# + colab={"base_uri": "https://localhost:8080/"} id="WNXUcstfgMb7" outputId="28361766-63e7-4ecc-81e3-ffab529019be"
print(d.to_latex(index=False))
# + colab={"base_uri": "https://localhost:8080/"} id="4s0itZ74AVD8" outputId="221a6709-2672-46f2-aea3-46fffb76ae7a"
# gid: 0 = male, 1 = female (any gender other than "male" maps to 1).
dat_list = dict(
    admit=d.admit.values,
    applications=d.applications.values,
    gid=(d["applicant.gender"] != "male").astype(int).values,
)
# Department index 0..5, repeated once for each row of the male/female pair.
dat_list["dept_id"] = jnp.repeat(jnp.arange(6), 2)
print(dat_list)
# + colab={"base_uri": "https://localhost:8080/"} id="awteD7M-Asri" outputId="59c78248-6254-47f8-9007-baf41c2b802e"
# extract number of applications for dept 2 (C)
# NOTE(review): the ``.copy()`` before the comparison is redundant — the
# comparison does not mutate the array.
d.applications[dat_list["dept_id"].copy() == 2]
# + colab={"base_uri": "https://localhost:8080/"} id="tso5iKGVZ1A3" outputId="6ffa821c-ca79-4f23-c78e-4c78a7f70ce1"
d.applications[dat_list["dept_id"].copy() == 2].sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 251} id="rjKvZC7w9F_t" outputId="b94f80b7-1f9e-4fb9-ca93-da19a2982c07"
# application rate per department
# The two per-department tables below share the same shape (2 genders x 6
# departments) and differ only in the per-department values, so build them
# with one helper instead of two copy-pasted jnp.stack pipelines.  The
# original ``.copy()`` calls on the dept_id array before comparison were
# redundant (comparison does not mutate) and are dropped.
dept_id = dat_list["dept_id"]

def _gender_by_dept_table(rates_for_dept):
    """Return a 2x6 DataFrame (rows male/female, columns departments).

    ``rates_for_dept(k)`` must return the length-2 array of values for
    department ``k``, male row first (the row order of ``d``).
    """
    stacked = jnp.stack([rates_for_dept(k) for k in range(6)], axis=0).T
    return pd.DataFrame(stacked, index=["male", "female"], columns=d.dept.unique())

# Share of each department's applications coming from each gender.
pg = _gender_by_dept_table(
    lambda k: jnp.divide(
        d.applications[dept_id == k].values,
        d.applications[dept_id == k].sum(),
    )
)
display(pg.round(2))
print(pg.to_latex())
# + colab={"base_uri": "https://localhost:8080/", "height": 251} id="OLoWrfLyaZrw" outputId="4cfd5397-f780-4cf2-e679-4d8f3a3b0eb2"
# admissions rate per department
pg = _gender_by_dept_table(
    lambda k: jnp.divide(
        d.admit[dept_id == k].values,
        d.applications[dept_id == k].values,
    )
)
display(pg.round(2))
print(pg.to_latex())
# + [markdown] id="lIH__8Bz2Vhf"
# # Model 1
# + colab={"base_uri": "https://localhost:8080/"} id="DCchW_SRb2tJ" outputId="7dd337c0-b177-46ac-f5a3-649b888c818d"
# Rebuild the model inputs *without* dept_id: the model defined next
# conditions on gender only.
dat_list = dict(
    admit=d.admit.values,
    applications=d.applications.values,
    gid=(d["applicant.gender"] != "male").astype(int).values,  # 0 = male, 1 = female
)
def model(gid, applications, admit=None):
    """Binomial GLM: one admission-logit intercept per gender index."""
    intercept = numpyro.sample("a", dist.Normal(0, 1.5).expand([2]))
    numpyro.sample(
        "admit", dist.Binomial(applications, logits=intercept[gid]), obs=admit
    )
# Fit model 1 with NUTS.
m11_7 = MCMC(NUTS(model), num_warmup=500, num_samples=500, num_chains=4)
m11_7.run(random.PRNGKey(0), **dat_list)
m11_7.print_summary(0.89)
# + colab={"base_uri": "https://localhost:8080/"} id="YrkAgHHH2zvJ" outputId="4195c61e-908c-4975-b786-f1d7e7fb5467"
# Gender contrast on the logit scale (diff_a) and probability scale (diff_p).
post = m11_7.get_samples()
diff_a = post["a"][:, 0] - post["a"][:, 1]
diff_p = expit(post["a"][:, 0]) - expit(post["a"][:, 1])
print_summary({"diff_a": diff_a, "diff_p": diff_p}, 0.89, False)
# + [markdown] id="aIEJ5zrH288g"
# # Posterior predictive check
# + id="jY1URpYA4_TJ"
def ppc(mcmc_run, model_args):
    """Posterior-predictive check plot for the UCB admissions models.

    Samples the posterior predictive for `model_args` and plots the predicted
    admit-rate mean/spread against the observed per-case rates.

    NOTE(review): relies on the module-level DataFrame `d` (assumed 12 rows:
    6 departments x 2 genders) rather than on `model_args` alone — confirm
    callers keep `d` in sync with the supplied arguments.
    """
    post = mcmc_run.get_samples()
    pred = Predictive(mcmc_run.sampler.model, post)(random.PRNGKey(2), **model_args)
    admit_pred = pred["admit"]
    admit_rate = admit_pred / d.applications.values
    # Mean +/- half a standard deviation of the predicted admit rate per case.
    plt.errorbar(
        range(1, 13),
        jnp.mean(admit_rate, 0),
        jnp.std(admit_rate, 0) / 2,
        fmt="o",
        c="k",
        mfc="none",
        ms=7,
        elinewidth=1,
    )
    # 89% interval endpoints drawn as "+" markers.
    plt.plot(range(1, 13), jnp.percentile(admit_rate, 5.5, 0), "k+")
    plt.plot(range(1, 13), jnp.percentile(admit_rate, 94.5, 0), "k+")
    # draw lines connecting points from same dept
    for i in range(1, 7):
        x = 1 + 2 * (i - 1)  # 1,3,5,7,9,11
        y1 = d.admit.iloc[x - 1] / d.applications.iloc[x - 1]  # male
        y2 = d.admit.iloc[x] / d.applications.iloc[x]  # female
        plt.plot((x, x + 1), (y1, y2), "bo-")
        plt.annotate(
            d.dept.iloc[x], (x + 0.5, (y1 + y2) / 2 + 0.05), ha="center", color="royalblue"
        )
    plt.gca().set(ylim=(0, 1), xticks=range(1, 13), ylabel="admit", xlabel="case")
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="FyrzH_Yi30N2" outputId="d2180a68-4b0b-4a22-e300-9f04427b8e6c"
# Posterior-predictive check for model 1 ('admit' must be excluded so the
# model samples the predictive distribution).
ppc(m11_7, {'gid': dat_list["gid"], 'applications': dat_list["applications"]})
plt.savefig('admissions_ppc.pdf', dpi=300)
plt.show()
# + [markdown] id="KlJGu63T4Jew"
# # Model 2 (departmental-specific offset)
# + colab={"base_uri": "https://localhost:8080/"} id="EgepyNLf4E9H" outputId="bbd5aef4-990c-42c9-b1a1-0d14de4e1b8e"
dat_list["dept_id"] = jnp.repeat(jnp.arange(6), 2)
def model(gid, dept_id, applications, admit=None):
    """Binomial GLM with gender intercepts plus department-specific offsets."""
    a = numpyro.sample("a", dist.Normal(0, 1.5).expand([2]))
    delta = numpyro.sample("delta", dist.Normal(0, 1.5).expand([6]))
    numpyro.sample(
        "admit",
        dist.Binomial(applications, logits=a[gid] + delta[dept_id]),
        obs=admit,
    )
# Fit model 2 (gender intercepts + department offsets) with NUTS.
m11_8 = MCMC(NUTS(model), num_warmup=2000, num_samples=2000, num_chains=4)
m11_8.run(random.PRNGKey(0), **dat_list)
m11_8.print_summary(0.89)
# + colab={"base_uri": "https://localhost:8080/"} id="8CYrU2uN4nli" outputId="57cb1fd2-3db5-4cb1-b7cc-b7d1e6069597"
# Gender contrast, now controlling for department.
post = m11_8.get_samples()
diff_a = post["a"][:, 0] - post["a"][:, 1]
diff_p = expit(post["a"][:, 0]) - expit(post["a"][:, 1])
print_summary({"diff_a": diff_a, "diff_p": diff_p}, 0.89, False)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="BFXYpYYh4gZL" outputId="12e334c8-6689-48ad-ef94-3c3fbab577db"
data_dict = {'gid': dat_list["gid"],
             'dept_id': dat_list["dept_id"],
             'applications': dat_list["applications"]}
ppc(m11_8, data_dict)
#ppc(m11_8, dat_list) # must exclude 'admit' for predictive distribution
plt.savefig('admissions_ppc_per_dept.pdf', dpi=300)
plt.show()
# + [markdown] id="fTyU9j5gWpG5"
# # Poisson regression
#
# We now show we can emulate binomial regression using 2 Poisson regressions,
# following sec 11.3.3 of rethinking. We use a simplified model that just predicts outcomes, and has no features (just an offset term).
# + colab={"base_uri": "https://localhost:8080/"} id="K6zWmx1LXdrj" outputId="0740c417-7657-4329-b55d-a2e15bba106a"
# binomial model of overall admission probability
def model(applications, admit):
    """Intercept-only binomial model of the overall admission probability."""
    intercept = numpyro.sample("a", dist.Normal(0, 1.5))
    numpyro.sample("admit", dist.Binomial(applications, logits=intercept), obs=admit)
# NOTE: the SVI / Laplace-approximation variant below is intentionally
# disabled; it is kept as a bare string literal (dead code) for reference.
'''
m_binom = AutoLaplaceApproximation(model)
svi = SVI(
    model,
    m_binom,
    optim.Adam(1),
    Trace_ELBO(),
    applications=d.applications.values,
    admit=d.admit.values,
)
p_binom, losses = svi.run(random.PRNGKey(0), 1000)
'''
m_binom = MCMC(NUTS(model), num_warmup=500, num_samples=500, num_chains=4)
m_binom.run(random.PRNGKey(0), d.applications.values, d.admit.values)
m_binom.print_summary(0.95)
# + colab={"base_uri": "https://localhost:8080/"} id="pKRQopvFYeSq" outputId="1ea101a6-a230-4676-84bd-0fdee708433c"
# Posterior-mean intercept converted back to a probability.
logit = jnp.mean(m_binom.get_samples()["a"])
print(expit(logit))
# + colab={"base_uri": "https://localhost:8080/"} id="oYMtHurQYArE" outputId="9ff3ba00-d8e0-4c2b-ec87-937edc01c4a5"
def model(rej, admit):
    """Two independent Poisson rates: exp(a[0]) for admits, exp(a[1]) for rejects."""
    log_rates = numpyro.sample("a", dist.Normal(0, 1.5).expand([2]))
    # Keep the original sampling order: "rej" first, then "admit".
    numpyro.sample("rej", dist.Poisson(jnp.exp(log_rates[1])), obs=rej)
    numpyro.sample("admit", dist.Poisson(jnp.exp(log_rates[0])), obs=admit)
m_pois = MCMC(NUTS(model), num_warmup=1000, num_samples=1000, num_chains=3)
m_pois.run(random.PRNGKey(0), d.reject.values, d.admit.values)
m_pois.print_summary(0.95)
# + colab={"base_uri": "https://localhost:8080/"} id="Pwr25iX4ZGZD" outputId="59e577c0-4ede-4ce4-b022-45124c9aadd0"
# Implied admission probability from the two Poisson rates:
# lam1 / (lam1 + lam2) should match the binomial estimate above.
params = jnp.mean(m_pois.get_samples()["a"], 0)
a1 = params[0]
a2 = params[1]
lam1 = jnp.exp(a1)
lam2 = jnp.exp(a2)
print([lam1, lam2])
print(lam1 / (lam1 + lam2))
# + [markdown] id="k7RykjuvbG5F"
# # Beta-binomial regression
#
# Sec 12.1.1 of rethinking.
# Code from snippet 12.2 of [Du Phan's site](https://fehiepsi.github.io/rethinking-numpyro/12-monsters-and-mixtures.html)
#
# + colab={"base_uri": "https://localhost:8080/"} id="rXLcgSYibLs8" outputId="a96b1c01-1b6f-426e-dfe6-ba18c7f22d06"
# Rebuild the data with a gender index column (gid: 0 = male, 1 = female).
d = UCBadmit
d["gid"] = (d["applicant.gender"] != "male").astype(int)
dat = dict(A=d.admit.values, N=d.applications.values, gid=d.gid.values)
def model(gid, N, A=None):
    """Beta-binomial regression: per-gender mean admit rate, shared dispersion."""
    a = numpyro.sample("a", dist.Normal(0, 1.5).expand([2]))
    phi = numpyro.sample("phi", dist.Exponential(1))
    theta = numpyro.deterministic("theta", phi + 2)  # concentration, kept above 2
    mean_rate = expit(a[gid])
    numpyro.sample(
        "A", dist.BetaBinomial(mean_rate * theta, (1 - mean_rate) * theta, N), obs=A
    )
m12_1 = MCMC(NUTS(model), num_warmup=500, num_samples=500, num_chains=4)
m12_1.run(random.PRNGKey(0), **dat)
# + colab={"base_uri": "https://localhost:8080/"} id="DroRyUENbvyD" outputId="eedcc585-f2a4-4d01-d42c-d920476b0c51"
# Recover the deterministic "theta" site via Predictive and add the gender
# contrast "da" before summarising.
post = m12_1.get_samples()
post["theta"] = Predictive(m12_1.sampler.model, post)(random.PRNGKey(1), **dat)["theta"]
post["da"] = post["a"][:, 0] - post["a"][:, 1]
print_summary(post, 0.89, False)
# + id="S9VxiaDZyecO" colab={"base_uri": "https://localhost:8080/"} outputId="3de32392-9a74-46d8-e248-f687986adab4"
post
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="GIwMyYUob2At" outputId="8bdc1adc-c386-4e95-b454-b954c8b149c9"
gid = 1
# draw posterior mean beta distribution
x = jnp.linspace(0, 1, 101)
pbar = jnp.mean(expit(post["a"][:, gid]))
theta = jnp.mean(post["theta"])
plt.plot(x, jnp.exp(dist.Beta(pbar * theta, (1 - pbar) * theta).log_prob(x)))
plt.gca().set(ylabel="Density", xlabel="probability admit", ylim=(0, 3))
# draw 50 beta distributions sampled from posterior
for i in range(50):
    p = expit(post["a"][i, gid])
    theta = post["theta"][i]
    plt.plot(
        x, jnp.exp(dist.Beta(p * theta, (1 - p) * theta).log_prob(x)), "k", alpha=0.2
    )
plt.title("distribution of female admission rates")
plt.savefig('admissions_betabinom_female_rate.pdf')
plt.show()
# + id="P0m8BionvsXR" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="0202f6b7-cd3a-4813-e6b6-3d780fb95778"
# Overlay male and female posterior admission-rate densities on one figure.
fig, ax = plt.subplots()
labels = ['male', 'female']
colors = ['b', 'r']
for gid in [0,1]:
    # draw posterior mean beta distribution
    x = jnp.linspace(0, 1, 101)
    pbar = jnp.mean(expit(post["a"][:, gid]))
    theta = jnp.mean(post["theta"])
    y = jnp.exp(dist.Beta(pbar * theta, (1 - pbar) * theta).log_prob(x))
    ax.plot(x, y, label=labels[gid], color=colors[gid])
    ax.set_ylabel("Density")
    ax.set_xlabel("probability admit")
    ax.set_ylim(0, 3)
    # draw some beta distributions sampled from posterior
    # (note: `theta` is rebound per posterior draw inside this loop)
    for i in range(10):
        p = expit(post["a"][i, gid])
        theta = post["theta"][i]
        y =jnp.exp(dist.Beta(p * theta, (1 - p) * theta).log_prob(x))
        plt.plot(x, y, colors[gid], alpha=0.2)
plt.title("distribution of admission rates")
plt.legend()
plt.savefig('admissions_betabinom_rates.pdf')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="LqPtmN_IcLY4" outputId="2519a209-84d0-477d-a13f-49ac03214931"
# Posterior-predictive admit rates for the beta-binomial model: observed rates
# (scatter), predicted mean +/- half a std (errorbar) and 89% interval ("+").
post = m12_1.get_samples()
admit_pred = Predictive(m12_1.sampler.model, post)(
    random.PRNGKey(1), gid=dat["gid"], N=dat["N"]
)["A"]
admit_rate = admit_pred / dat["N"]
plt.scatter(range(1, 13), dat["A"] / dat["N"])
plt.errorbar(
    range(1, 13),
    jnp.mean(admit_rate, 0),
    jnp.std(admit_rate, 0) / 2,
    fmt="o",
    c="k",
    mfc="none",
    ms=7,
    elinewidth=1,
)
plt.plot(range(1, 13), jnp.percentile(admit_rate, 5.5, 0), "k+")
plt.plot(range(1, 13), jnp.percentile(admit_rate, 94.5, 0), "k+")
plt.savefig('admissions_betabinom_post_pred.pdf')
plt.show()
# + [markdown] id="WASI8v5XHUhi"
# # Mixed effects model with joint prior
#
# This code is from https://numpyro.readthedocs.io/en/latest/examples/ucbadmit.html.
# + id="peOoI8OLHZFs"
from numpyro.examples.datasets import UCBADMIT, load_dataset
def glmm(dept, male, applications, admit=None):
    """Mixed-effects binomial model with correlated per-department
    (intercept, male-effect) pairs, in a non-centered parameterization.

    When `admit` is None the model runs predictively and records the admit
    probabilities via a Delta site named "probs".
    """
    # Population-level means for (intercept, male effect).
    v_mu = numpyro.sample("v_mu", dist.Normal(0, jnp.array([4.0, 1.0])))
    sigma = numpyro.sample("sigma", dist.HalfNormal(jnp.ones(2)))
    L_Rho = numpyro.sample("L_Rho", dist.LKJCholesky(2, concentration=2))
    # Cholesky factor of the 2x2 covariance of the two random effects.
    scale_tril = sigma[..., jnp.newaxis] * L_Rho
    # non-centered parameterization
    num_dept = len(np.unique(dept))
    z = numpyro.sample("z", dist.Normal(jnp.zeros((num_dept, 2)), 1))
    v = jnp.dot(scale_tril, z.T).T
    logits = v_mu[0] + v[dept, 0] + (v_mu[1] + v[dept, 1]) * male
    if admit is None:
        # we use a Delta site to record probs for predictive distribution
        probs = expit(logits)
        numpyro.sample("probs", dist.Delta(probs), obs=probs)
    numpyro.sample("admit", dist.Binomial(applications, logits=logits), obs=admit)
def run_inference(dept, male, applications, admit, rng_key):
    """Run NUTS on the `glmm` model and return the posterior samples."""
    sampler = MCMC(NUTS(glmm), num_warmup=500, num_samples=1000, num_chains=1)
    sampler.run(rng_key, dept, male, applications, admit)
    return sampler.get_samples()
def print_results(header, preds, dept, male, probs):
    """Print a fixed-width table comparing the actual admit probabilities with
    the 25/50/75% quantiles of the posterior-predictive probabilities."""
    head_fmt = " ".join(["{:>10}"] * 6)
    row_fmt = " ".join(["{:>10.0f}"] * 2 + ["{:>10.2f}"] * 4)
    quantiles = jnp.quantile(preds, jnp.array([0.25, 0.5, 0.75]), axis=0)
    print("\n", header, "\n")
    print(head_fmt.format("Dept", "Male", "ActualProb", "Pred(p25)", "Pred(p50)", "Pred(p75)"))
    for i in range(len(dept)):
        row = row_fmt.format(dept[i], male[i], probs[i], *quantiles[:, i])
        print(row, "\n")
# + id="tGUqrgCgHlPt" colab={"base_uri": "https://localhost:8080/", "height": 961} outputId="8f9e1a80-05e8-4ef3-9b0c-5bf23cf3ebb6"
# Fit the mixed-effects model on the UCBadmit train split and sample the
# posterior-predictive admit probabilities recorded by the "probs" site.
_, fetch_train = load_dataset(UCBADMIT, split="train", shuffle=False)
dept, male, applications, admit = fetch_train()
rng_key, rng_key_predict = random.split(random.PRNGKey(1))
zs = run_inference(dept, male, applications, admit, rng_key)
pred_probs = Predictive(glmm, zs)(rng_key_predict, dept, male, applications)[
    "probs"
]
header = "=" * 30 + "glmm - TRAIN" + "=" * | |
_ArgumentT,
/,
ctx: tanjun_abc.Context = injecting.inject(type=tanjun_abc.Context),
cache: _GuildCacheT = injecting.inject(type=_GuildCacheT),
) -> hikari.Guild:
guild_id = parse_snowflake(argument, message="No valid guild ID found")
if ctx.cache and (guild := ctx.cache.get_guild(guild_id)):
return guild
if cache:
try:
return await cache.get(guild_id)
except async_cache.EntryNotFound:
raise ValueError("Couldn't find guild") from None
except async_cache.CacheMissError:
pass
try:
return await ctx.rest.fetch_guild(guild_id)
except hikari.NotFoundError:
pass
raise ValueError("Couldn't find guild")
# Deprecated alias kept for backwards compatibility.
GuildConverter = ToGuild
"""Deprecated alias of `ToGuild`."""
# Injected async-cache type for invite lookups; None when no cache is registered.
_InviteCacheT = typing.Optional[async_cache.AsyncCache[str, hikari.InviteWithMetadata]]
class ToInvite(BaseConverter[hikari.Invite]):
    """Standard converter for invites."""

    __slots__ = ()

    @property
    def async_caches(self) -> collections.Sequence[typing.Any]:
        # <<inherited docstring from BaseConverter>>.
        return (_InviteCacheT,)

    @property
    def cache_components(self) -> hikari.CacheComponents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.CacheComponents.INVITES

    @property
    def intents(self) -> hikari.Intents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.Intents.GUILD_INVITES

    @property
    def requires_cache(self) -> bool:
        # <<inherited docstring from BaseConverter>>.
        return False

    async def __call__(
        self,
        argument: _ArgumentT,
        /,
        ctx: tanjun_abc.Context = injecting.inject(type=tanjun_abc.Context),
        cache: _InviteCacheT = injecting.inject(type=_InviteCacheT),
    ) -> hikari.Invite:
        """Convert `argument` (an invite code) to an invite object.

        Lookup order: gateway cache, injected async cache, then REST.

        :raises ValueError: If the argument isn't a string or no invite was found.
        """
        if not isinstance(argument, str):
            raise ValueError(f"`{argument}` is not a valid invite code")
        if ctx.cache and (invite := ctx.cache.get_invite(argument)):
            return invite
        if cache:
            try:
                return await cache.get(argument)
            except async_cache.EntryNotFound:
                # The cache authoritatively knows the invite doesn't exist.
                raise ValueError("Couldn't find invite") from None
            except async_cache.CacheMissError:
                # Unknown to the cache; fall back to REST.
                pass
        try:
            return await ctx.rest.fetch_invite(argument)
        except hikari.NotFoundError:
            pass
        raise ValueError("Couldn't find invite")
# Deprecated alias kept for backwards compatibility.
InviteConverter = ToInvite
"""Deprecated alias of `ToInvite`."""
class ToInviteWithMetadata(BaseConverter[hikari.InviteWithMetadata]):
    """Standard converter for invites with metadata.

    For a standard instance of this see `to_invite_with_metadata`.

    .. note::
        Unlike `InviteConverter`, this converter is cache dependent.
    """

    __slots__ = ()

    @property
    def async_caches(self) -> collections.Sequence[typing.Any]:
        # <<inherited docstring from BaseConverter>>.
        return (_InviteCacheT,)

    @property
    def cache_components(self) -> hikari.CacheComponents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.CacheComponents.INVITES

    @property
    def intents(self) -> hikari.Intents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.Intents.GUILD_INVITES

    @property
    def requires_cache(self) -> bool:
        # <<inherited docstring from BaseConverter>>.
        return True

    async def __call__(
        self,
        argument: _ArgumentT,
        /,
        ctx: tanjun_abc.Context = injecting.inject(type=tanjun_abc.Context),
        # _InviteCacheT is already Optional; the previous extra Optional wrapper
        # was redundant and inconsistent with the sibling converters.
        cache: _InviteCacheT = injecting.inject(type=_InviteCacheT),
    ) -> hikari.InviteWithMetadata:
        """Convert `argument` (an invite code) to a cached invite with metadata.

        :raises ValueError: If the argument isn't a string or no invite was found.
        """
        if not isinstance(argument, str):
            raise ValueError(f"`{argument}` is not a valid invite code")
        if ctx.cache and (invite := ctx.cache.get_invite(argument)):
            return invite
        # Pass default=None so a cache miss falls through to the ValueError below
        # instead of leaking async_cache.EntryNotFound/CacheMissError to the
        # caller (matches ToPresence/ToVoiceState).
        if cache and (invite := await cache.get(argument, default=None)):
            return invite
        raise ValueError("Couldn't find invite")
# Deprecated alias kept for backwards compatibility.
InviteWithMetadataConverter = ToInviteWithMetadata
"""Deprecated alias of `ToInviteWithMetadata`."""
# Guild-bound async-cache type for member lookups; None when no cache is registered.
_MemberCacheT = typing.Optional[async_cache.SfGuildBound[hikari.Member]]
class ToMember(BaseConverter[hikari.Member]):
    """Standard converter for guild members.

    For a standard instance of this see `to_member`.

    This converter allows both mentions, raw IDs and partial usernames/nicknames
    and only works within a guild context.
    """

    __slots__ = ()

    @property
    def async_caches(self) -> collections.Sequence[typing.Any]:
        # <<inherited docstring from BaseConverter>>.
        return (_MemberCacheT,)

    @property
    def cache_components(self) -> hikari.CacheComponents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.CacheComponents.MEMBERS

    @property
    def intents(self) -> hikari.Intents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.Intents.GUILD_MEMBERS | hikari.Intents.GUILDS

    @property
    def requires_cache(self) -> bool:
        # <<inherited docstring from BaseConverter>>.
        return False

    async def __call__(
        self,
        argument: _ArgumentT,
        /,
        ctx: tanjun_abc.Context = injecting.inject(type=tanjun_abc.Context),
        cache: _MemberCacheT = injecting.inject(type=_MemberCacheT),
    ) -> hikari.Member:
        """Convert `argument` to a member of the current guild.

        Mentions/IDs are resolved through the gateway cache, the injected async
        cache and finally REST; other strings fall back to a member-name search.

        :raises ValueError: If called in a DM context or no member was found.
        """
        if ctx.guild_id is None:
            raise ValueError("Cannot get a member from a DM channel")
        try:
            user_id = parse_user_id(argument, message="No valid user mention or ID found")
        except ValueError:
            # Not an ID or mention: fall back to a username/nickname search.
            if isinstance(argument, str):
                try:
                    return (await ctx.rest.search_members(ctx.guild_id, argument))[0]
                except (hikari.NotFoundError, IndexError):
                    pass
        else:
            if ctx.cache and (member := ctx.cache.get_member(ctx.guild_id, user_id)):
                return member
            if cache:
                try:
                    return await cache.get_from_guild(ctx.guild_id, user_id)
                except async_cache.EntryNotFound:
                    # The cache authoritatively knows there is no such member.
                    raise ValueError("Couldn't find member in this guild") from None
                except async_cache.CacheMissError:
                    # Unknown to the cache; fall through to REST.
                    pass
            try:
                return await ctx.rest.fetch_member(ctx.guild_id, user_id)
            except hikari.NotFoundError:
                pass
        raise ValueError("Couldn't find member in this guild")
# Deprecated alias kept for backwards compatibility.
MemberConverter = ToMember
"""Deprecated alias of `ToMember`."""
# Guild-bound async-cache type for presence lookups; None when no cache is registered.
_PresenceCacheT = typing.Optional[async_cache.SfGuildBound[hikari.MemberPresence]]
class ToPresence(BaseConverter[hikari.MemberPresence]):
    """Standard converter for presences.

    For a standard instance of this see `to_presence`.

    This converter is cache dependent and only works in a guild context.
    """

    __slots__ = ()

    @property
    def async_caches(self) -> collections.Sequence[typing.Any]:
        # <<inherited docstring from BaseConverter>>.
        return (_PresenceCacheT,)

    @property
    def cache_components(self) -> hikari.CacheComponents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.CacheComponents.PRESENCES

    @property
    def intents(self) -> hikari.Intents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.Intents.GUILD_PRESENCES | hikari.Intents.GUILDS

    @property
    def requires_cache(self) -> bool:
        # <<inherited docstring from BaseConverter>>.
        return True

    async def __call__(
        self,
        argument: _ArgumentT,
        /,
        ctx: tanjun_abc.Context = injecting.inject(type=tanjun_abc.Context),
        cache: _PresenceCacheT = injecting.inject(type=_PresenceCacheT),
    ) -> hikari.MemberPresence:
        """Convert `argument` to the presence of a member in the current guild.

        :raises ValueError: If called in a DM context or no presence was found.
        """
        guild_id = ctx.guild_id
        if guild_id is None:
            raise ValueError("Cannot get a presence from a DM channel")

        user_id = parse_user_id(argument, message="No valid member mention or ID found")

        # Gateway cache first.
        if ctx.cache:
            found = ctx.cache.get_presence(guild_id, user_id)
            if found:
                return found

        # Then the injected async cache, treating a miss as "not found".
        if cache:
            found = await cache.get_from_guild(guild_id, user_id, default=None)
            if found:
                return found

        raise ValueError("Couldn't find presence in current guild")
# Deprecated alias kept for backwards compatibility.
PresenceConverter = ToPresence
"""Deprecated alias of `ToPresence`."""
# Async-cache type for role lookups; None when no cache is registered.
_RoleCacheT = typing.Optional[async_cache.SfCache[hikari.Role]]
class ToRole(BaseConverter[hikari.Role]):
    """Standard converter for guild roles.

    For a standard instance of this see `to_role`.
    """

    __slots__ = ()

    @property
    def async_caches(self) -> collections.Sequence[typing.Any]:
        # <<inherited docstring from BaseConverter>>.
        return (_RoleCacheT,)

    @property
    def cache_components(self) -> hikari.CacheComponents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.CacheComponents.ROLES

    @property
    def intents(self) -> hikari.Intents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.Intents.GUILDS

    @property
    def requires_cache(self) -> bool:
        # <<inherited docstring from BaseConverter>>.
        return False

    async def __call__(
        self,
        argument: _ArgumentT,
        /,
        ctx: tanjun_abc.Context = injecting.inject(type=tanjun_abc.Context),
        cache: _RoleCacheT = injecting.inject(type=_RoleCacheT),
    ) -> hikari.Role:
        """Convert `argument` (a role mention or ID) to a role object.

        :raises ValueError: If no valid ID was given or no role was found.
        """
        role_id = parse_role_id(argument, message="No valid role mention or ID found")

        # Gateway cache first.
        if ctx.cache:
            cached = ctx.cache.get_role(role_id)
            if cached:
                return cached

        # Then the injected async cache.
        if cache:
            try:
                return await cache.get(role_id)
            except async_cache.EntryNotFound:
                raise ValueError("Couldn't find role") from None
            except async_cache.CacheMissError:
                pass

        # Finally scan the guild's roles over REST (guild context only).
        if ctx.guild_id:
            roles = await ctx.rest.fetch_roles(ctx.guild_id)
            found = next((role for role in roles if role.id == role_id), None)
            if found is not None:
                return found

        raise ValueError("Couldn't find role")
# Deprecated alias kept for backwards compatibility.
RoleConverter = ToRole
"""Deprecated alias of `ToRole`."""
# Async-cache type for user lookups; None when no cache is registered.
_UserCacheT = typing.Optional[async_cache.SfCache[hikari.User]]
class ToUser(BaseConverter[hikari.User]):
    """Standard converter for users.

    For a standard instance of this see `to_user`.
    """

    __slots__ = ()

    @property
    def async_caches(self) -> collections.Sequence[typing.Any]:
        # <<inherited docstring from BaseConverter>>.
        return (_UserCacheT,)

    @property
    def cache_components(self) -> hikari.CacheComponents:
        # <<inherited docstring from BaseConverter>>.
        # NOTE(review): returns NONE even though __call__ consults
        # ctx.cache.get_user — presumably deliberate; confirm upstream intent.
        return hikari.CacheComponents.NONE

    @property
    def intents(self) -> hikari.Intents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.Intents.GUILDS | hikari.Intents.GUILD_MEMBERS

    @property
    def requires_cache(self) -> bool:
        # <<inherited docstring from BaseConverter>>.
        return False

    async def __call__(
        self,
        argument: _ArgumentT,
        /,
        ctx: tanjun_abc.Context = injecting.inject(type=tanjun_abc.Context),
        cache: _UserCacheT = injecting.inject(type=_UserCacheT),
    ) -> hikari.User:
        """Convert `argument` (a user mention or ID) to a user object.

        :raises ValueError: If no valid ID was given or no user was found.
        """
        # TODO: search by name if this is a guild context
        user_id = parse_user_id(argument, message="No valid user mention or ID found")
        if ctx.cache and (user := ctx.cache.get_user(user_id)):
            return user
        if cache:
            try:
                return await cache.get(user_id)
            except async_cache.EntryNotFound:
                # The cache authoritatively knows the user doesn't exist.
                raise ValueError("Couldn't find user") from None
            except async_cache.CacheMissError:
                # Unknown to the cache; fall back to REST.
                pass
        try:
            return await ctx.rest.fetch_user(user_id)
        except hikari.NotFoundError:
            pass
        raise ValueError("Couldn't find user")
# Deprecated alias kept for backwards compatibility.
UserConverter = ToUser
"""Deprecated alias of `ToUser`."""
# Guild-bound async-cache type for voice-state lookups; None when no cache is registered.
_VoiceStateCacheT = typing.Optional[async_cache.SfGuildBound[hikari.VoiceState]]
class ToVoiceState(BaseConverter[hikari.VoiceState]):
    """Standard converter for voice states.

    For a standard instance of this see `to_voice_state`.

    .. note::
        This converter is cache dependent and only works in a guild context.
    """

    __slots__ = ()

    @property
    def async_caches(self) -> collections.Sequence[typing.Any]:
        # <<inherited docstring from BaseConverter>>.
        return (_VoiceStateCacheT,)

    @property
    def cache_components(self) -> hikari.CacheComponents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.CacheComponents.VOICE_STATES

    @property
    def intents(self) -> hikari.Intents:
        # <<inherited docstring from BaseConverter>>.
        return hikari.Intents.GUILD_VOICE_STATES | hikari.Intents.GUILDS

    @property
    def requires_cache(self) -> bool:
        # <<inherited docstring from BaseConverter>>.
        return True

    async def __call__(
        self,
        argument: _ArgumentT,
        /,
        ctx: tanjun_abc.Context = injecting.inject(type=tanjun_abc.Context),
        cache: _VoiceStateCacheT = injecting.inject(type=_VoiceStateCacheT),
    ) -> hikari.VoiceState:
        """Convert `argument` to the voice state of a member in the current guild.

        :raises ValueError: If called in a DM context or no voice state was found.
        """
        guild_id = ctx.guild_id
        if guild_id is None:
            raise ValueError("Cannot get a voice state from a DM channel")

        user_id = parse_user_id(argument, message="No valid user mention or ID found")

        # Gateway cache first.
        if ctx.cache:
            found = ctx.cache.get_voice_state(guild_id, user_id)
            if found:
                return found

        # Then the injected async cache, treating a miss as "not found".
        if cache:
            found = await cache.get_from_guild(guild_id, user_id, default=None)
            if found:
                return found

        raise ValueError("Voice state couldn't be found for current guild")
# Deprecated alias kept for backwards compatibility.
VoiceStateConverter = ToVoiceState
"""Deprecated alias of `ToVoiceState`."""
class _IDMatcherSig(typing.Protocol):
    """Callback protocol for the snowflake parsers built by `_make_snowflake_parser`."""

    def __call__(self, value: _ArgumentT, /, *, message: str = "No valid mention or ID found") -> hikari.Snowflake:
        raise NotImplementedError
def _make_snowflake_parser(regex: re.Pattern[str], /) -> _IDMatcherSig:
def parse(value: _ArgumentT, /, *, message: str = "No valid mention or ID found") -> hikari.Snowflake:
"""Parse a snowflake from a string or int value.
.. note::
This only allows the relevant entity's mention format if applicable.
Parameters
----------
value: str | int
The value to parse (this argument can only be passed positionally).
Other Parameters
----------------
message: str
The error message to raise if the value cannot be parsed.
Returns
-------
hikari.Snowflake
The parsed snowflake.
Raises
------
ValueError
If the value cannot be parsed.
"""
result: typing.Optional[hikari.Snowflake] = None
if isinstance(value, str):
if value.isdigit():
result = hikari.Snowflake(value)
else:
capture = next(regex.finditer(value), None)
result = hikari.Snowflake(capture.groups()[0]) if capture else None
else:
try:
# Technically passing a float here is invalid (typing wise)
# but we handle that by catching TypeError
result | |
# Source: ashishdhngr/baserow — backend/src/baserow/contrib/database/views/handler.py
from collections import defaultdict
from copy import deepcopy
from typing import Dict, Any, List, Optional, Iterable, Tuple
from django.core.exceptions import FieldDoesNotExist, ValidationError
from django.db import models as django_models
from django.db.models import F, Count
from baserow.contrib.database.fields.exceptions import FieldNotInTable
from baserow.contrib.database.fields.field_filters import FilterBuilder
from baserow.contrib.database.fields.field_sortings import AnnotatedOrder
from baserow.contrib.database.fields.models import Field
from baserow.contrib.database.fields.registries import field_type_registry
from baserow.contrib.database.rows.handler import RowHandler
from baserow.contrib.database.rows.signals import row_created
from baserow.core.trash.handler import TrashHandler
from baserow.core.utils import (
extract_allowed,
set_allowed_attrs,
get_model_reference_field_name,
)
from .exceptions import (
ViewDoesNotExist,
ViewNotInTable,
UnrelatedFieldError,
ViewFilterDoesNotExist,
ViewFilterNotSupported,
ViewFilterTypeNotAllowedForField,
ViewSortDoesNotExist,
ViewSortNotSupported,
ViewSortFieldAlreadyExist,
ViewSortFieldNotSupported,
ViewDoesNotSupportFieldOptions,
FieldAggregationNotSupported,
CannotShareViewTypeError,
)
from .models import View, ViewFilter, ViewSort
from .registries import (
view_type_registry,
view_filter_type_registry,
view_aggregation_type_registry,
)
from .signals import (
view_created,
view_updated,
view_deleted,
views_reordered,
view_filter_created,
view_filter_updated,
view_filter_deleted,
view_sort_created,
view_sort_updated,
view_sort_deleted,
view_field_options_updated,
)
from .validators import EMPTY_VALUES
from ..table.models import Table, GeneratedTableModel
class ViewHandler:
    def get_view(self, view_id, view_model=None, base_queryset=None):
        """
        Selects a view and checks if the user has access to that view. If everything
        is fine the view is returned.

        :param view_id: The identifier of the view that must be returned.
        :type view_id: int
        :param view_model: If provided that models objects are used to select the
            view. This can for example be useful when you want to select a GridView
            or other child of the View model.
        :type view_model: Type[View]
        :param base_queryset: The base queryset from where to select the view
            object. This can for example be used to do a `select_related`. Note that
            if this is used the `view_model` parameter doesn't work anymore.
        :type base_queryset: Queryset
        :raises ViewDoesNotExist: When the view with the provided id does not exist.
        :return: The selected view instance.
        :rtype: View
        """
        if not view_model:
            view_model = View
        if base_queryset is None:
            base_queryset = view_model.objects
        try:
            view = base_queryset.select_related("table__database__group").get(
                pk=view_id
            )
        except View.DoesNotExist:
            raise ViewDoesNotExist(f"The view with id {view_id} does not exist.")
        # A view whose table (or the view itself) is trashed is treated exactly
        # like a missing view.
        if TrashHandler.item_has_a_trashed_parent(view.table, check_item_also=True):
            raise ViewDoesNotExist(f"The view with id {view_id} does not exist.")
        return view
def create_view(self, user, table, type_name, **kwargs):
"""
Creates a new view based on the provided type.
:param user: The user on whose behalf the view is created.
:type user: User
:param table: The table that the view instance belongs to.
:type table: Table
:param type_name: The type name of the view.
:type type_name: str
:param kwargs: The fields that need to be set upon creation.
:type kwargs: object
:return: The created view instance.
:rtype: View
"""
group = table.database.group
group.has_user(user, raise_error=True)
# Figure out which model to use for the given view type.
view_type = view_type_registry.get(type_name)
model_class = view_type.model_class
view_values = view_type.prepare_values(kwargs, table, user)
allowed_fields = [
"name",
"filter_type",
"filters_disabled",
] + view_type.allowed_fields
view_values = extract_allowed(view_values, allowed_fields)
last_order = model_class.get_last_order(table)
instance = model_class.objects.create(
table=table, order=last_order, **view_values
)
view_type.view_created(view=instance)
view_created.send(self, view=instance, user=user, type_name=type_name)
return instance
    def update_view(self, user, view, **kwargs):
        """
        Updates an existing view instance.

        :param user: The user on whose behalf the view is updated.
        :type user: User
        :param view: The view instance that needs to be updated.
        :type view: View
        :param kwargs: The fields that need to be updated.
        :type kwargs: object
        :raises ValueError: When the provided view not an instance of View.
        :return: The updated view instance.
        :rtype: View
        """
        if not isinstance(view, View):
            raise ValueError("The view is not an instance of View.")
        group = view.table.database.group
        group.has_user(user, raise_error=True)
        view_type = view_type_registry.get_by_model(view)
        view_values = view_type.prepare_values(kwargs, view.table, user)
        # Keep this list in sync with the one used by `create_view`.
        allowed_fields = [
            "name",
            "filter_type",
            "filters_disabled",
        ] + view_type.allowed_fields
        view = set_allowed_attrs(view_values, allowed_fields, view)
        view.save()
        view_updated.send(self, view=view, user=user)
        return view
def order_views(self, user, table, order):
"""
Updates the order of the views in the given table. The order of the views
that are not in the `order` parameter set set to `0`.
:param user: The user on whose behalf the views are ordered.
:type user: User
:param table: The table of which the views must be updated.
:type table: Table
:param order: A list containing the view ids in the desired order.
:type order: list
:raises ViewNotInTable: If one of the view ids in the order does not belong
to the table.
"""
group = table.database.group
group.has_user(user, raise_error=True)
queryset = View.objects.filter(table_id=table.id)
view_ids = queryset.values_list("id", flat=True)
for view_id in order:
if view_id not in view_ids:
raise ViewNotInTable(view_id)
View.order_objects(queryset, order)
views_reordered.send(self, table=table, order=order, user=user)
    def delete_view(self, user, view):
        """
        Deletes an existing view instance.

        :param user: The user on whose behalf the view is deleted.
        :type user: User
        :param view: The view instance that needs to be deleted.
        :type view: View
        :raises ValueError: When the provided view is not an instance of View.
        """
        if not isinstance(view, View):
            raise ValueError("The view is not an instance of View")
        group = view.table.database.group
        group.has_user(user, raise_error=True)
        # Capture the id before deleting; the signal reports both.
        view_id = view.id
        view.delete()
        view_deleted.send(self, view_id=view_id, view=view, user=user)
    def update_field_options(self, view, field_options, user=None, fields=None):
        """
        Updates the field options with the provided values if the field id exists in
        the table related to the view.

        :param view: The view for which the field options need to be updated.
        :type view: View
        :param field_options: A dict with the field ids as the key and a dict
            containing the values that need to be updated as value.
        :type field_options: dict
        :param user: Optionally the user on whose behalf the request is made. If you
            give a user, the permissions are checked against this user otherwise there
            is no permission checking.
        :type user: User
        :param fields: Optionally a list of fields can be provided so that they don't
            have to be fetched again.
        :type fields: None or list
        :raises ViewDoesNotSupportFieldOptions: When the view has no field options
            relation.
        :raises UnrelatedFieldError: When the provided field id is not related to the
            provided view.
        """
        if user is not None:
            # Permissions are only checked when a user is given. A field options
            # update triggered by a user action has one, but some automatic
            # processing paths run without any user.
            view.table.database.group.has_user(user, raise_error=True)
        if not fields:
            fields = Field.objects.filter(table=view.table)
        try:
            # The through model of the view <-> field m2m holds the options.
            model = view._meta.get_field("field_options").remote_field.through
        except FieldDoesNotExist:
            raise ViewDoesNotSupportFieldOptions(
                "This view does not support field options."
            )
        field_name = get_model_reference_field_name(model, View)
        if not field_name:
            raise ValueError(
                "The model doesn't have a relationship with the View model or any "
                "descendants."
            )
        view_type = view_type_registry.get_by_model(view.specific_class)
        field_options = view_type.before_field_options_update(
            view, field_options, fields
        )
        allowed_field_ids = [field.id for field in fields]
        for field_id, options in field_options.items():
            if int(field_id) not in allowed_field_ids:
                raise UnrelatedFieldError(
                    f"The field id {field_id} is not related to the view."
                )
            model.objects.update_or_create(
                field_id=field_id, defaults=options, **{field_name: view}
            )
        view_field_options_updated.send(self, view=view, user=user)
def field_type_changed(self, field: Field):
"""
This method is called by the FieldHandler when the field type of a field has
changed. It could be that the field has filters or sortings that are not
compatible anymore. If that is the case then those need to be removed.
All view_type `after_field_type_change` of views that are linked to this field
are also called to react on this change.
:param field: The new field object.
:type field: Field
"""
field_type = field_type_registry.get_by_model(field.specific_class)
# If the new field type does not support sorting then all sortings will be
# removed.
if not field_type.check_can_order_by(field):
field.viewsort_set.all().delete()
# Check which filters are not compatible anymore and remove those.
for filter in field.viewfilter_set.all():
filter_type = view_filter_type_registry.get(filter.type)
if not filter_type.field_is_compatible(field):
filter.delete()
# Call view types hook
for view_type in view_type_registry.get_all():
view_type.after_field_type_change(field)
def _get_filter_builder(
self, view: View, model: GeneratedTableModel
) -> FilterBuilder:
"""
Constructs a FilterBuilder object based on the provided view's filter.
:param view: The view where to fetch the fields from.
:param model: The generated model containing all fields.
:return: FilterBuilder object with the view's filter applied.
"""
# The table model has to be dynamically generated
if not hasattr(model, "_field_objects"):
raise ValueError("A queryset of the table model is required.")
filter_builder = FilterBuilder(filter_type=view.filter_type)
for view_filter in view.viewfilter_set.all():
if view_filter.field_id not in model._field_objects:
raise ValueError(
f"The table model does not contain field "
f"{view_filter.field_id}."
)
field_object = model._field_objects[view_filter.field_id]
field_name = field_object["name"]
model_field = model._meta.get_field(field_name)
view_filter_type = view_filter_type_registry.get(view_filter.type)
filter_builder.filter(
view_filter_type.get_filter(
field_name, view_filter.value, model_field, field_object["field"]
)
)
return filter_builder
def apply_filters(self, view, queryset):
"""
Applies the view's filter to the given queryset.
:param | |
processes as possible. If there is a
process where there is only 1 element, this function will adjust the ``lshape_map`` then
redistribute ``arr`` so that there is not a single diagonal element on one process
"""
def adjust_lshape(lshape_mapi, pri, cnti):
if lshape_mapi[..., 0][pri] < cnti:
h = cnti - lshape_mapi[..., 0][pri]
lshape_mapi[..., 0][pri] += h
lshape_mapi[..., 0][pri + 1] -= h
for cnt in col_inds[:-1]: # only need to loop until the second to last one
for pr in range(arr.comm.size - 1):
adjust_lshape(lshape_map, pr, cnt)
negs = torch.where(lshape_map[..., 0] < 0)[0]
if negs.numel() > 0:
for n in negs:
lshape_map[n - 1, 0] += lshape_map[n, 0]
lshape_map[n, 0] = 0
arr.redistribute_(target_map=lshape_map)
last_diag_pr, col_per_proc_list, col_inds, tile_columns = SquareDiagTiles.__create_cols(
arr, lshape_map, tiles_per_proc
)
return last_diag_pr, col_per_proc_list, col_inds, tile_columns
    @staticmethod
    def __create_cols(
        arr: DNDarray, lshape_map: torch.Tensor, tiles_per_proc: int
    ) -> Tuple[torch.Tensor, List[int], List[int], torch.Tensor]:
        """
        Calculates the last diagonal process, then creates a list of the number of tile columns per
        process, then calculates the starting indices of the columns. Also returns the number of tile
        columns.

        Parameters
        ----------
        arr : DNDarray
            DNDarray for which to find the tile columns for
        lshape_map : torch.Tensor
            The map of the local shapes (for more info see: :func:`~heat.core.dndarray.DNDarray.create_lshape_map`)
        tiles_per_proc : int
            The number of divisions per process

        Returns
        -------
        Tuple of (rank of the last process holding diagonal elements, tile columns per
        process, tile-column sizes, total number of tile columns).
        """
        last_tile_cols = tiles_per_proc
        # first rank whose cumulative extent along the split dim reaches the end of the diagonal
        last_dia_pr = torch.where(lshape_map[..., arr.split].cumsum(dim=0) >= min(arr.gshape))[0][0]
        # adjust for small blocks on the last diag pr:
        last_pr_minus1 = last_dia_pr - 1 if last_dia_pr > 0 else 0
        rem_cols_last_pr = abs(
            min(arr.gshape) - lshape_map[..., arr.split].cumsum(dim=0)[last_pr_minus1]
        )
        # this is the number of rows/columns after the last diagonal on the last diagonal pr
        try:
            # torch.div with rounding_mode exists from torch >= 1.8; older versions raise TypeError
            num_after_diag = torch.div(rem_cols_last_pr, last_tile_cols, rounding_mode="floor")
        except TypeError:
            num_after_diag = torch.floor_divide(rem_cols_last_pr, last_tile_cols)
        # NOTE(review): num_after_diag holds an integer value, so ``1 < num_after_diag < 2``
        # can never be true and this loop body looks unreachable -- confirm the intended
        # bounds (the todo below says the upper bound was previously 2).
        while 1 < num_after_diag < 2:
            # todo: determine best value for this (prev at 2)
            # if there cannot be tiles formed which are at list ten items larger than 2
            # then need to reduce the number of tiles
            last_tile_cols -= 1
            if last_tile_cols == 1:
                break
        # create lists of columns and rows for each process
        col_per_proc_list = [tiles_per_proc] * (last_dia_pr.item() + 1)
        col_per_proc_list[-1] = last_tile_cols
        if last_dia_pr < arr.comm.size - 1 and arr.split == 1:
            # this is the case that the gshape[1] >> gshape[0]
            col_per_proc_list.extend([1] * (arr.comm.size - last_dia_pr - 1).item())
        # need to determine the proper number of tile rows/columns
        tile_columns = tiles_per_proc * last_dia_pr + last_tile_cols
        # cumulative positions where the diagonal crosses a process boundary,
        # capped at the end of the diagonal (min of the global shape)
        diag_crossings = lshape_map[..., arr.split].cumsum(dim=0)[: last_dia_pr + 1]
        diag_crossings[-1] = (
            diag_crossings[-1] if diag_crossings[-1] <= min(arr.gshape) else min(arr.gshape)
        )
        dev = arr.larray.device
        diag_crossings = torch.cat((torch.tensor([0], device=dev), diag_crossings), dim=0).tolist()
        # create the tile columns sizes, saved to list
        col_inds = []
        for col in range(tile_columns.item()):
            try:
                off = torch.div(col, tiles_per_proc, rounding_mode="floor").to(dev)
            except TypeError:
                off = torch.floor_divide(col, tiles_per_proc).to(dev)
            # split the span between two adjacent diagonal crossings evenly among
            # the tiles that live on that process
            _, lshape, _ = arr.comm.chunk(
                [diag_crossings[off + 1] - diag_crossings[off]],
                0,
                rank=int(col % tiles_per_proc),
                w_size=tiles_per_proc if off != last_dia_pr else last_tile_cols,
            )
            col_inds.append(lshape[0])
        return last_dia_pr, col_per_proc_list, col_inds, tile_columns
@staticmethod
def __def_end_row_inds_sp0_m_ge_n(
arr: DNDarray,
row_inds: List[int, ...],
last_diag_pr: int,
tiles_per_proc: int,
lshape_map: torch.Tensor,
) -> None:
"""
Adjust the rows on the processes which are greater than the last diagonal processs to have
rows which are chunked evenly into ``tiles_per_proc`` rows.
"""
nz = torch.nonzero(
input=torch.tensor(row_inds, device=arr.larray.device) == 0, as_tuple=False
)
lp_map = lshape_map.tolist()
for i in range(last_diag_pr.item() + 1, arr.comm.size):
# loop over all of the rest of the processes
for t in range(tiles_per_proc):
_, lshape, _ = arr.comm.chunk(lp_map[i], 0, rank=t, w_size=tiles_per_proc)
# row_inds[nz[0].item()] = lshape[0]
if row_inds[-1] == 0:
row_inds[-1] = lshape[0]
else:
row_inds.append(lshape[0])
nz = nz[1:]
    @staticmethod
    def __last_tile_row_adjust_sp1(arr: DNDarray, row_inds: List[int]) -> None:
        """
        Add extra row/s if there is space below the diagonal (``split=1``).

        ``row_inds`` is modified in place.
        """
        if arr.gshape[0] - arr.gshape[1] > 10:  # todo: determine best value for this
            # use chunk and a loop over the however many tiles are desired
            num_ex_row_tiles = 1  # todo: determine best value for this
            # NOTE(review): given the ``> 10`` guard above and the starting value 1,
            # ``(gshape[0] - gshape[1]) // 1 >= 11`` so this loop body never runs;
            # if it ever did, num_ex_row_tiles could reach 0 and the chunk call below
            # would receive w_size=0 -- confirm the intended starting value.
            while (arr.gshape[0] - arr.gshape[1]) // num_ex_row_tiles < 2:
                num_ex_row_tiles -= 1
            for i in range(num_ex_row_tiles):
                _, lshape, _ = arr.comm.chunk(
                    (arr.gshape[0] - arr.gshape[1],), 0, rank=i, w_size=num_ex_row_tiles
                )
                row_inds.append(lshape[0])
        else:
            # if there is no place for multiple tiles, combine the remainder with the last row
            row_inds[-1] = arr.gshape[0] - sum(row_inds[:-1])
    @property
    def arr(self) -> DNDarray:
        """
        Returns the ``DNDarray`` on which the tiles are defined.
        """
        return self.__DNDarray
    @property
    def col_indices(self) -> List[int]:
        """
        Returns a list containing the global start indices of the tile columns.
        """
        return self.__col_inds
    @property
    def lshape_map(self) -> torch.Tensor:
        """
        Returns the map of the lshape tuples for the ``DNDarray`` given.
        Units are ``(rank, lshape)`` (tuple of the local shape).
        """
        return self.__lshape_map
    @property
    def last_diagonal_process(self) -> int:
        """
        Returns the rank of the last process with diagonal elements.
        """
        return self.__last_diag_pr
    @property
    def row_indices(self) -> List[int]:
        """
        Returns a list containing the global start indices of the tile rows.
        """
        return self.__row_inds
    @property
    def tile_columns(self) -> int:
        """
        Returns the number of tile columns.
        """
        return len(self.__col_inds)
    @property
    def tile_columns_per_process(self) -> List[int]:
        """
        Returns a list containing the number of tile columns on each process.
        """
        return self.__col_per_proc_list
    @property
    def tile_map(self) -> torch.Tensor:
        """
        Returns tile_map which contains the sizes of the tiles.
        Units are ``(row, column, start index in each direction, process)``.

        Examples
        --------
        >>> a = ht.zeros((12, 10), split=0)
        >>> a_tiles = tiling.SquareDiagTiles(a, tiles_per_proc=2)
        >>> print(a_tiles.tile_map)
        [(0 & 1)/1] tensor([[[0, 0, 0],
        [(0 & 1)/1]          [0, 3, 0],
        [(0 & 1)/1]          [0, 6, 0],
        [(0 & 1)/1]          [0, 8, 0]],
        [(0 & 1)/1]
        [(0 & 1)/1]         [[3, 0, 0],
        [(0 & 1)/1]          [3, 3, 0],
        [(0 & 1)/1]          [3, 6, 0],
        [(0 & 1)/1]          [3, 8, 0]],
        [(0 & 1)/1]
        [(0 & 1)/1]         [[6, 0, 1],
        [(0 & 1)/1]          [6, 3, 1],
        [(0 & 1)/1]          [6, 6, 1],
        [(0 & 1)/1]          [6, 8, 1]],
        [(0 & 1)/1]
        [(0 & 1)/1]         [[8, 0, 1],
        [(0 & 1)/1]          [8, 3, 1],
        [(0 & 1)/1]          [8, 6, 1],
        [(0 & 1)/1]          [8, 8, 1]]], dtype=torch.int32)
        >>> print(a_tiles.tile_map.shape)
        [0/1] torch.Size([4, 4, 3])
        [1/1] torch.Size([4, 4, 3])
        """
        return self.__tile_map
    @property
    def tile_rows(self) -> int:
        """
        Returns the number of tile rows.
        """
        return len(self.__row_inds)
    @property
    def tile_rows_per_process(self) -> List[int]:
        """
        Returns a list containing the number of tile rows on each process.
        """
        return self.__row_per_proc_list
def get_start_stop(
self, key: Union[int, slice, Tuple[int, slice, ...]]
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Returns the start and stop indices in form of ``(dim0 start, dim0 stop, dim1 start, dim1 stop)``
which correspond to the tile/s which corresponds to the given key. The key MUST use global indices.
Parameters
----------
key : int or Tuple or List or slice
Indices to select the tile
STRIDES ARE NOT ALLOWED, MUST BE GLOBAL INDICES
Examples
--------
>>> a = ht.zeros((12, 10), split=0)
>>> a_tiles = ht.tiling.SquareDiagTiles(a, tiles_per_proc=2) # type: tiling.SquareDiagTiles
>>> print(a_tiles.get_start_stop(key=(slice(0, 2), 2)))
[0/1] (tensor(0), tensor(6), tensor(6), tensor(8))
[1/1] (tensor(0), tensor(6), tensor(6), tensor(8))
>>> print(a_tiles.get_start_stop(key=(0, 2)))
[0/1] (tensor(0), tensor(3), tensor(6), tensor(8))
[1/1] (tensor(0), tensor(3), tensor(6), tensor(8))
>>> print(a_tiles.get_start_stop(key=2))
[0/1] (tensor(0), tensor(2), tensor(0), tensor(10))
[1/1] (tensor(0), tensor(2), tensor(0), tensor(10))
>>> print(a_tiles.get_start_stop(key=(3, 3)))
[0/1] (tensor(2), tensor(6), tensor(8), tensor(10))
[1/1] (tensor(2), tensor(6), tensor(8), tensor(10))
"""
split = self.__DNDarray.split
pr = self.tile_map[key][..., 2].unique()
if pr.numel() > 1:
raise ValueError("Tile/s must be located on one process. currently on: {}".format(pr))
row_inds = self.row_indices + [self.__DNDarray.gshape[0]]
col_inds = self.col_indices + [self.__DNDarray.gshape[1]]
row_start = row_inds[sum(self.tile_rows_per_process[:pr]) if split == 0 else 0]
col_start = col_inds[sum(self.tile_columns_per_process[:pr]) if split == 1 else 0]
if isinstance(key, int):
key = [key]
else:
key = list(key)
if len(key) == 1:
key.append(slice(0, None))
key = list(key)
if isinstance(key[0], int):
st0 = row_inds[key[0]] - row_start
sp0 = row_inds[key[0] + 1] - row_start
elif isinstance(key[0], slice):
start = row_inds[key[0].start] if key[0].start is not None else 0
stop = row_inds[key[0].stop] if key[0].stop is not None else row_inds[-1]
st0, sp0 = start - row_start, stop - | |
<reponame>Annonymous-code-release/BINAS<gh_stars>1-10
#!/usr/bin/env python
import pickle
import sys
import time
from contextlib import suppress
from datetime import datetime
import matplotlib.pyplot as plt
import yaml
from scipy.stats import stats
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from tqdm import tqdm
from accuracy_contribution import validate
from external.nas_parser import *
from nas.nas_utils.general_purpose import extract_structure_param_list, target_time_loss, \
freeze_weights_unfreeze_alphas, get_stage_block_from_name, STAGE_BLOCK_DELIMITER, OptimLike, \
update_alpha_beta_tensorboard
from nas.nas_utils.predictor import Quadratic, Bilinear, MLP, Predictor, construct_predictors, predict
from nas.src.optim.block_frank_wolfe import flatten_attention_latency_grad_alpha_beta_blocks
from nas.src.optim.utils import update_attentions_inplace
from timm import create_model
from timm.data import Dataset, CsvDataset, create_loader, FastCollateMixup, resolve_data_config
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy, JsdCrossEntropy
from timm.models import resume_checkpoint, convert_splitbn_model
from timm.models.mobilenasnet import transform_model_to_mobilenet, measure_time
from timm.optim import create_optimizer_alpha
from timm.utils import *
from timm.utils_new.cuda import ApexScaler, NativeScaler
# Optional NVIDIA Apex support (mixed precision + fused DDP/syncBN).
try:
    from apex import amp
    from apex.parallel import DistributedDataParallel as ApexDDP
    from apex.parallel import convert_syncbn_model

    has_apex = True
except ImportError:
    has_apex = False

# Native torch.cuda.amp is available from PyTorch >= 1.6.
has_native_amp = False
try:
    if getattr(torch.cuda.amp, 'autocast') is not None:
        has_native_amp = True
except AttributeError:
    pass

# Let cudnn benchmark the conv algorithms for fixed input shapes.
# NOTE: this flag was previously assigned twice; the duplicate was removed.
torch.backends.cudnn.benchmark = True

import gc
from tensorboardX import SummaryWriter

np.set_printoptions(threshold=sys.maxsize, suppress=True, precision=6)
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
                    help='YAML config file specifying default arguments')
# The main parser holds every training option; values from the YAML file passed
# via --config become its defaults (see _parse_args).
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset / Model parameters
parser.add_argument('data', metavar='DIR', default=None,
                    help='path to dataset')
parser.add_argument('--csv-file', default='data.csv',
                    help='file name for csv. Expected to be in data folder')
# NOTE: the help text below previously missed its closing parenthesis.
parser.add_argument('--model', default='mobilenasnet', type=str, metavar='MODEL',
                    help='Name of model to train (default: "mobilenasnet")')
parser.add_argument('--pretrained', action='store_true', default=False,
                    help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
                    help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
                    help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=1000, metavar='N',
                    help='number of label classes (default: 1000)')
parser.add_argument('--gp', default='avg', type=str, metavar='POOL',
                    help='Type of global pool, "avg", "max", "avgmax", "avgmaxc" (default: "avg")')
parser.add_argument('--img-size', type=int, default=None, metavar='N',
                    help='Image patch size (default: None => model default)')
parser.add_argument('--crop-pct', default=None, type=float,
                    metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
                    help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
                    help='Image resize interpolation type (overrides model)')
parser.add_argument('--min-crop-factor', type=float, default=0.08,
                    help='minimum size of crop for image transformation in training')
parser.add_argument('--squish', action='store_true', default=False,
                    help='use squish for resize input image')
parser.add_argument('-b', '--batch-size', type=int, default=16, metavar='N',
                    help='input batch size for training (default: 16)')
parser.add_argument('-vb', '--validation-batch-size-multiplier', type=int, default=1, metavar='N',
                    help='ratio of validation batch size to training batch size (default: 1)')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                    help='Dropout rate (default: 0.)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
                    help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
                    help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
                    help='Drop block rate (default: None)')
parser.add_argument('--jsd', action='store_true', default=False,
                    help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                    help='Color jitter factor (default: 0.4)')
# NOTE: the help text previously claimed the default is None while the actual
# default is 'rand-m9-mstd0.5'; a stray trailing comma (a no-op tuple
# expression) after this call was also removed.
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5', metavar='NAME',
                    help='Use AutoAugment policy. "v0" or "original". (default: "rand-m9-mstd0.5")')
parser.add_argument('--aug-splits', type=int, default=0,
                    help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
parser.add_argument('--reprob', type=float, default=0.2, metavar='PCT',
                    help='Random erase prob (default: 0.2)')
parser.add_argument('--remode', type=str, default='pixel',
                    help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
                    help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
                    help='Do not random erase first (clean) augmentation split')
parser.add_argument('--mixup', type=float, default=0.0,
                    help='mixup alpha, mixup enabled if > 0. (default: 0.)')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
                    help='turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
                    help='label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='random',
                    help='Training interpolation (random, bilinear, bicubic default: "random")')
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-tf', action='store_true', default=False,
                    help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')
parser.add_argument('--bn-momentum', type=float, default=None,
                    help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
                    help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
                    help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='',
                    help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
                    help='Enable separate BN layers per augmentation split.')
# Misc
# NOTE: several user-facing help strings below had typos / unbalanced
# parentheses; only the help text was corrected, no option semantics changed.
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
                    help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('-j', '--workers', type=int, default=16, metavar='N',
                    help='how many training processes to use (default: 16)')
parser.add_argument('--num-gpu', type=int, default=1,
                    help='Number of GPUS to use')
parser.add_argument('--save-images', action='store_true', default=False,
                    help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', type=str2bool, nargs='?', const=True, default=True,
                    help='use NVIDIA amp for mixed precision training')
parser.add_argument('--pin-mem', action='store_true', default=False,
                    help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
                    help='disable fast prefetcher')
parser.add_argument('--output', default='./outputs', type=str, metavar='PATH',
                    help='path to output folder (default: none, current dir)')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
                    help='Best metric (default: "top1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
                    help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--nonstrict_checkpoint', type=str2bool, nargs='?', const=True, default=True,
                    help='Ignore mismatch in size when loading model weights. Used for transfer learning')
parser.add_argument('--tensorboard', action='store_true', default=False,
                    help='Write to TensorboardX')
parser.add_argument("--single-view", action='store_true', default=False,
                    help="train only the fc layer")
parser.add_argument("--debug", action='store_true', default=False,
                    help="logging is set to debug")
parser.add_argument("--train_percent", type=int, default=100,
                    help="what percent of data to use for train (don't forget to leave out val)")
parser.add_argument('--resnet_structure', type=int, nargs='+', default=[3, 4, 6, 3], metavar='resnetstruct',
                    help='custom resnet structure')
parser.add_argument('--resnet_block', default='Bottleneck', type=str, metavar='block',
                    help='custom resnet block')
parser.add_argument("--ema_KD", action='store_true', default=False, help="use KD from EMA")
parser.add_argument('--temperature_T', type=float, default=1,
                    help='factor for temperature of the teacher')
parser.add_argument('--temperature_S', type=float, default=1,
                    help='factor for temperature of the student')
parser.add_argument('--keep_only_correct', action='store_true', default=False,
                    help='Hard threshold for training from example')
parser.add_argument('--only_kd', action='store_true', default=False,
                    help='Hard threshold for training from example')
parser.add_argument('--verbose', action='store_true', default=False,
                    help='Verbose mode')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
                    help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--channels-last', action='store_true', default=False,
                    help='Use channels_last memory layout')
parser.add_argument('--apex-amp', action='store_true', default=False,
                    help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
                    help='Use Native Torch AMP mixed precision')
parser.add_argument('--predictor_type', default='bilinear', choices=['bilinear', 'quadratic', 'mlp'],
                    help='The type of the predictor model (default: bilinear)')
parser.add_argument('--predictor_ckpt_filename',
                    help='The filename of the predictor checkpoint')
parser.add_argument('--test_accuracy_lut_filename', default=None,
                    help='The filename of the measured accuracy LUT for test architectures (default: None)')
parser.add_argument('--test_figure_filename', default=None,
                    help='The output filename for the output figures (default: None)')
parser.add_argument('--eval_child_model', action='store_true', default=False,
                    help='Evaluate the generated child model with weights loaded from the supernetwork')
parser.add_argument('--verbose_search', action='store_true', default=False,
                    help='Verbose search mode')
add_nas_to_parser(parser)
def _parse_args():
    """Parse the command line, letting an optional YAML config file override
    the parser defaults. Returns ``(args, args_text)`` where ``args_text`` is
    the YAML serialization of the final arguments (for saving alongside the
    training outputs)."""
    # First pass: only --config, so the YAML file can rewrite the defaults.
    args_config, remaining = config_parser.parse_known_args()
    if args_config.config:
        with open(args_config.config, 'r') as f:
            parser.set_defaults(**yaml.safe_load(f))

    # Second pass: the real command line; explicit flags beat config-file
    # defaults.
    args = parser.parse_args(remaining)

    # Cache the args as a text string to save them in the output dir later.
    args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
    return args, args_text
def get_train_val_dir(basedir):
    """Locate the training and validation sub-directories under ``basedir``.

    The first existing candidate name wins ('train'/'train_set' for training,
    'val'/'validation'/'val_set'/'test' for validation). If either directory is
    missing an error is logged and the process exits with status 1.

    :param basedir: root directory of the dataset
    :return: tuple ``(train_dir, val_dir)`` of absolute/joined paths
    """
    def _find_subdir(candidates, kind):
        # Return the first existing candidate sub-directory, or abort.
        for name in candidates:
            candidate = os.path.join(basedir, name)
            if os.path.exists(candidate):
                return candidate
        logging.error('{} folder does not exist at: {}'.format(kind, basedir))
        exit(1)

    train_dir = _find_subdir(('train', 'train_set'), 'Training')
    val_dir = _find_subdir(('val', 'validation', 'val_set', 'test'), 'Validation')
    return train_dir, val_dir
def main():
args, args_text = _parse_args()
default_level = logging.INFO
if args.debug:
default_level = logging.DEBUG
setup_default_logging(default_level=default_level)
args.prefetcher = not args.no_prefetcher
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed and args.num_gpu > 1:
logging.warning(
'Using more than one GPU per process in distributed mode is not allowed. Setting num_gpu to 1.')
args.num_gpu = 1
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.num_gpu = 1
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
assert args.rank >= 0
DistributedManager.set_args(args)
sys.stdout = FilteredPrinter(filtered_print, sys.stdout, args.rank == 0)
if args.distributed:
logging.info('Distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.rank, args.world_size))
else:
logging.info('A single process on %d GPUs.' % args.num_gpu)
torch.manual_seed(args.seed + args.rank)
if args.eval_child_model:
if os.path.exists(os.path.join(args.data, args.csv_file)):
dataset_eval = CsvDataset(os.path.join(args.data, args.csv_file),
single_view=True, data_percent=10, reverse_order=True)
else:
_, eval_dir = get_train_val_dir(args.data)
dataset_eval = Dataset(eval_dir)
logging.info(f'Evaluation data has {len(dataset_eval)} images')
args.num_classes = len(dataset_eval.class_to_idx)
logging.info(f'Setting num classes to {args.num_classes}')
else:
args.num_classes = 1000
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect,
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
checkpoint_path=args.initial_checkpoint,
strict=not args.nonstrict_checkpoint,
resnet_structure=args.resnet_structure,
resnet_block=args.resnet_block,
heaviest_network=args.heaviest_network,
use_kernel_3=args.use_kernel_3,
exp_r=args.exp_r,
depth=args.depth,
reduced_exp_ratio=args.reduced_exp_ratio,
use_dedicated_pwl_se=args.use_dedicated_pwl_se,
force_sync_gpu=args.force_sync_gpu,
multipath_sampling=args.multipath_sampling,
use_softmax=args.use_softmax,
detach_gs=args.detach_gs,
no_swish=args.no_swish,
search_mode=True
)
if args.force_se and 'mobilenasnet' in args.model:
model.set_force_se(True)
if args.qc_init:
if args.init_to_biggest_alpha:
model.set_all_alpha(er=6, k=5, se=0.25 if args.force_se else 0, use_only=False)
else:
model.set_all_alpha(er=3, k=3, se=0.25 if args.force_se else 0, | |
<reponame>aka-sova/semantic-segmentation-pytorch
import numpy as np
import cv2
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import os
from sklearn import linear_model, datasets
from collections import defaultdict
import shutil
def rgb_2_hsv_pixel(rgb: list):
    """Convert a single RGB triplet to HSV via OpenCV.

    Returns a 1x1x3 uint8 array holding the HSV values.
    """
    # Build a 1x1 "image" so cv2.cvtColor can be used for the conversion.
    single = np.zeros((1, 1, 3))
    single[0, 0] = np.asarray(rgb)
    return cv2.cvtColor(single.astype(np.uint8), cv2.COLOR_RGB2HSV)
def get_color_bounds(pixel, H_margin: list, S_margin: list, V_margin: list):
    """Build lower/upper HSV bounds around ``pixel`` for e.g. cv2.inRange.

    :param pixel: 1x1x3 HSV pixel (as returned by rgb_2_hsv_pixel)
    :param H_margin: [below, above] margins for the hue channel
    :param S_margin: [below, above] margins for the saturation channel
    :param V_margin: [below, above] margins for the value channel
    :return: tuple ``(lower_bound, upper_bound)`` of 3-element arrays
    """
    H = pixel[0][0][0]
    S = pixel[0][0][1]
    V = pixel[0][0][2]
    lower_bound = np.array([H - H_margin[0], S - S_margin[0], V - V_margin[0]])
    upper_bound = np.array([H + H_margin[1], S + S_margin[1], V + V_margin[1]])
    # Clamp both bounds into the valid 8-bit range. The previous code only
    # capped values above 255 and left negative underflow of the lower bound
    # in place; clip both directions instead.
    lower_bound = np.clip(lower_bound, 0, 255)
    upper_bound = np.clip(upper_bound, 0, 255)
    return (lower_bound, upper_bound)
def get_signle_ransac(draw_img: np.ndarray, ransac_img: np.ndarray):
    """Fit a single RANSAC line to the white (255) pixels of ``ransac_img`` and
    draw it on ``draw_img``.

    :param draw_img: image the fitted line is drawn on (modified in place)
    :param ransac_img: single-channel mask; pixels equal to 255 are the samples
    :return: ``draw_img`` with the fitted line drawn on it
    """
    # Vectorized replacement for the original per-pixel double loop:
    # np.where returns (rows, cols) of all white pixels in row-major order,
    # exactly the order the nested loops produced.
    rows, cols = np.where(ransac_img == 255)
    x_coords = cols.reshape(-1, 1)
    y_coords = rows.reshape(-1, 1)

    ransac_line = linear_model.RANSACRegressor(residual_threshold=10)
    ransac_line.fit(x_coords, y_coords)

    # Draw the fitted line across the horizontal extent of the samples.
    line_X = np.arange(x_coords.min(), x_coords.max())[:, np.newaxis]
    line_y = ransac_line.predict(line_X)
    cv2.line(draw_img, (line_X[0], line_y[0]), (line_X[-1], line_y[-1]), (0, 0, 255), 1)
    return draw_img
def draw_hough_lines(draw_img_input: np.ndarray, edges_img: np.ndarray, threshold: int, rho_res: int = 1, theha_res = np.pi/180):
    """Run the standard Hough transform on ``edges_img`` and draw the detected
    lines on a copy of ``draw_img_input``.

    Returns the tuple ``(drawn_image, lines)`` where ``lines`` is the raw
    cv2.HoughLines output (or None when nothing was detected).
    """
    canvas = np.copy(draw_img_input)
    detected = cv2.HoughLines(edges_img, rho=rho_res, theta=theha_res, threshold=threshold)
    draw_hough_lines_on_img(detected, canvas)
    return (canvas, detected)
def draw_hough_lines_on_img(lines: np.ndarray, input_img, color=(0, 0, 255), thickness=1):
    """Draw every (rho, theta) Hough line of ``lines`` onto ``input_img`` in place.

    ``lines`` uses the cv2.HoughLines layout (N, 1, 2); None is tolerated and
    leaves the image untouched. Returns ``input_img``.
    """
    if lines is not None:
        for line in lines:
            rho = line[0][0]
            theta = line[0][1]
            cos_t = np.cos(theta)
            sin_t = np.sin(theta)
            # Point on the line closest to the origin.
            x0 = cos_t * rho
            y0 = sin_t * rho
            # Extend 1000 px in both directions along the line direction.
            pt1 = (int(x0 + 1000 * (-sin_t)), int(y0 + 1000 * (cos_t)))
            pt2 = (int(x0 - 1000 * (-sin_t)), int(y0 - 1000 * (cos_t)))
            cv2.line(input_img, pt1, pt2, color, thickness)
    return input_img
def get_common_lines(lines_base: np.ndarray, lines_mask: np.ndarray, threshold_rho: int, threshold_theta: float):
    """Return the lines of ``lines_base`` that have a close match in ``lines_mask``.

    Two lines match when both their rho difference and their (normalized) theta
    difference are within the given thresholds. Each base line is appended at
    most once, with its theta normalized into (-pi, pi].

    :return: an (N, 1, 2) array in the cv2.HoughLines layout, or None when
        there is no match or either input is None.
    """
    new_lines = []
    lines_arr = None

    if lines_base is not None and lines_mask is not None:
        for line_num_1 in range(lines_base.shape[0]):
            rho_1 = lines_base[line_num_1][0][0]
            theta_1 = lines_base[line_num_1][0][1]
            # Normalize theta into (-pi, pi]; hoisted out of the inner loop
            # since it does not depend on the mask line (the original code
            # recomputed it on every inner iteration).
            theta_1 = np.arctan2(np.sin(theta_1), np.cos(theta_1))
            for line_num_2 in range(lines_mask.shape[0]):
                rho_2 = lines_mask[line_num_2][0][0]
                theta_2 = lines_mask[line_num_2][0][1]
                theta_2 = np.arctan2(np.sin(theta_2), np.cos(theta_2))
                if np.abs(rho_1 - rho_2) <= threshold_rho and np.abs(theta_1 - theta_2) <= threshold_theta:
                    new_lines.append([rho_1, theta_1])
                    break  # one match is enough for this base line

    if len(new_lines) > 0:
        new_lines_arr = np.asarray(new_lines)
        lines_arr = np.expand_dims(new_lines_arr, 1)
    return lines_arr
def non_max_suppression_lines(lines: np.ndarray, threshold_rho: int, threshold_theta: float):
    """Merge near-duplicate Hough lines by averaging their parameters.

    Lines are compared through the Cartesian point (rho*cos(theta),
    rho*sin(theta)) closest to the origin plus their angular difference; lines
    inside the given distance/angle thresholds are folded into the existing
    entry with a running mean. Negative rho lines are normalized to the
    non-negative-rho convention (rho -> |rho|, theta -> theta - pi) first.

    :return: an (N, 1, 2) array of surviving lines, or None when ``lines`` is
        None or empty. (A dead ``new_lines`` local was removed.)
    """
    lines_arr = None
    suppressed_lines = []
    if lines is not None:
        for line_num in range(lines.shape[0]):
            rho = lines[line_num][0][0]
            theta = lines[line_num][0][1]
            met_suppressed_line = False  # if not, create new line
            x0 = rho * np.cos(theta)
            y0 = rho * np.sin(theta)
            line_param = np.array([x0, y0])
            for suppressed_line in suppressed_lines:
                rho_supp = suppressed_line[0]
                theta_supp = suppressed_line[1]
                x0_supp = rho_supp * np.cos(theta_supp)
                y0_supp = rho_supp * np.sin(theta_supp)
                suppressed_params = np.array([x0_supp, y0_supp])
                diff = np.abs(theta - theta_supp)
                # use pi instead of pi*2 since lines looking opposite are same
                angle_diff = np.min([diff, np.pi - diff])
                distance = np.linalg.norm(line_param - suppressed_params)
                if distance <= threshold_rho and angle_diff <= threshold_theta:
                    if rho < 0:
                        rho = np.abs(rho)
                        theta = theta - np.pi  # will yield same line
                    suppressed_line[0] = (rho_supp + rho) / 2  # mean
                    suppressed_line[1] = (theta_supp + theta) / 2  # mean
                    met_suppressed_line = True
            if not met_suppressed_line:
                # bring all lines to one policy - no negative rho!
                if rho < 0:
                    rho = np.abs(rho)
                    theta = theta - np.pi  # will yield same line
                suppressed_lines.append([rho, theta])
    if len(suppressed_lines) > 0:
        new_lines_arr = np.asarray(suppressed_lines)
        lines_arr = np.expand_dims(new_lines_arr, 1)
    return lines_arr
def find_intersections(lines: np.ndarray, original_img):
    """Find intersections of the lines that fall inside the image.

    The lines are first split into two angular groups (nominally vertical and
    horizontal) via k-means, then every cross-group pair is intersected.  A
    debug copy of the image is annotated: each group drawn in its own color,
    intersections as filled blue circles.

    :return: (intersections, segmented line groups, annotated image copy)
    """
    annotated = np.copy(original_img)
    # K-means on the doubled angle separates the two dominant directions.
    groups = segment_by_angle_kmeans(lines)
    points = segmented_intersections(groups)
    palette = [(0, 255, 0), (0, 0, 255)]
    for group, color in zip(groups, palette):
        annotated = draw_hough_lines_on_img(np.asarray(group), annotated,
                                            color, thickness=2)
    for point in points:
        cv2.circle(annotated, (point[0][0], point[0][1]), 10, (255, 0, 0),
                   thickness=-1)
    return points, groups, annotated
def segment_by_angle_kmeans(lines, k=2, **kwargs):
    """Groups lines based on angle with k-means.

    Each line's angle (theta in [0, pi]) is doubled and mapped onto the unit
    circle so that directions pi apart land on the same point; k-means on
    those 2-D coordinates then yields `k` angular clusters.

    Optional kwargs: 'criteria', 'flags', 'attempts' (cv2.kmeans parameters).
    :return: list of `k` lists of lines, one per cluster
    """
    # criteria = (type, max_iter, epsilon)
    default_type = cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER
    criteria = kwargs.get('criteria', (default_type, 10, 1.0))
    flags = kwargs.get('flags', cv2.KMEANS_RANDOM_CENTERS)
    attempts = kwargs.get('attempts', 10)
    # Doubled-angle embedding on the unit circle.
    pts = np.array([[np.cos(2 * line[0][1]), np.sin(2 * line[0][1])]
                    for line in lines], dtype=np.float32)
    labels = cv2.kmeans(pts, k, None, criteria, attempts, flags)[1]
    labels = labels.reshape(-1)  # flatten to a row vector
    # Bucket each line under its cluster label.
    buckets = defaultdict(list)
    for label, line in zip(labels, lines):
        buckets[label].append(line)
    return list(buckets.values())
def intersection(line1, line2):
    """Finds the intersection of two lines given in Hesse normal form.

    Solves the 2x2 system  [cos t, sin t] . [x, y]^T = rho  for both lines.
    Returns closest integer pixel locations as [[x, y]].
    See https://stackoverflow.com/a/383527/5087436
    """
    rho_a, theta_a = line1[0]
    rho_b, theta_b = line2[0]
    coeffs = np.array([
        [np.cos(theta_a), np.sin(theta_a)],
        [np.cos(theta_b), np.sin(theta_b)],
    ])
    rhs = np.array([[rho_a], [rho_b]])
    x, y = np.linalg.solve(coeffs, rhs)
    return [[int(np.round(x)), int(np.round(y))]]
def segmented_intersections(lines):
    """Finds the intersections between groups of lines.

    Intersects every line in each group with every line of every *later*
    group (lines within the same group are never intersected).
    """
    return [intersection(first, second)
            for i, group in enumerate(lines[:-1])
            for other_group in lines[i + 1:]
            for first in group
            for second in other_group]
def draw_polygons(intersection_points: list, original_img):
    """Fill the polygon spanned by the intersection points on a copy of the image.

    NOTE: swaps elements 2 and 3 of the *caller's* list in place so the four
    corners form a convex traversal order before filling.
    """
    canvas = np.copy(original_img)
    intersection_points[2], intersection_points[3] = (
        intersection_points[3], intersection_points[2])
    corners = np.asarray(intersection_points)
    canvas = cv2.fillConvexPoly(img=canvas, points=corners, color=(0, 0, 0))
    return canvas
def get_segmentation(input_img_loc: str, debug: bool=True, debug_folder: str="output_debug"):
"""This process receives an image, and outputs its segmentation"""
if debug:
if os.path.exists(debug_folder):
shutil.rmtree(debug_folder, ignore_errors=True)
os.mkdir(debug_folder)
else:
os.mkdir(debug_folder)
# Main parameters of the process
# 1. filter color
# We are interested mainly in the Hue value, which is quiet saturated. Define a margin of +- 10 and filter the original image
H_margin = [6, 6]
S_Margin = [40, 100]
V_margin = [120, 100]
# 2. Morphological 'open' operation
kernel = np.ones((2, 2), np.uint8)
# 3. Canny edge detector - normal image
canny_threshold_normal_1 = 80
canny_threshold_normal_2 = 120
canny_aperture_size_normal = 3
# 3. Canny edge detector - mask image
canny_threshold_mask_1 = 60
canny_threshold_mask_2 = 120
canny_aperture_size_mask = 3
# 4. Hough on original edges
hough_threshold_normal = 80
# 5. Hough on masked image edges
hough_threshold_masked = 25
# 6. threshold to find common lines
threshold_rho_common = 5
threshold_theta_common = 0.2
# 7. threshold for non max suppression
threshold_rho_max_sup = 45
threshold_theta_max_sup = 1
img_cv = cv2.imread(input_img_loc)
img_mp = mpimg.imread(input_img_loc)
if debug:
cv2.imwrite(os.path.join(debug_folder, '1_input_img.jpg'), img_cv)
img_cv = cv2.resize(img_cv, None, fx=0.2, fy=0.2)
img_mp = cv2.resize(img_mp, None, fx=0.2, fy=0.2)
img2_hsv = cv2.cvtColor(img_mp, cv2.COLOR_RGB2HSV)
# get the yellow pixel in hsv
# the yellow is approximately [193 165 65] in RGB
yellow_pixel = rgb_2_hsv_pixel([193, 165, 65])
# the yellow is approximately [23 169 169] in HSV
(lower_yellow, upper_yellow) = get_color_bounds(yellow_pixel, H_margin, S_Margin, V_margin)
mask_img = cv2.inRange(img2_hsv, lower_yellow, upper_yellow)
if debug:
cv2.imwrite(os.path.join(debug_folder, '2_mask_before_morph.jpg'), mask_img)
# use morphological 'open'
mask_img = cv2.morphologyEx(mask_img, cv2.MORPH_OPEN, kernel)
if debug:
cv2.imwrite(os.path.join(debug_folder, '3_mask_after_morph.jpg'), mask_img)
# Using the RANSAC algorithm to find the lines which resemble the lines of the doors
# Regular algorithms find only one model using the RANSAC algorithm.
# We here have to find numerous lines, which will sum to 3 after the non-max suppression.
# original_img = np.copy(img_cv)
| |
<gh_stars>0
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from tempest.common import image as common_image
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
LOG = log.getLogger(__name__)
class ScenarioTest(tempest.test.BaseTestCase):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary', 'admin']
    @classmethod
    def setup_clients(cls):
        """Wire up the service clients used by scenario tests.

        All clients are taken from the *admin* credential set (``os_admin``).
        Raises ``InvalidConfiguration`` when glance is enabled but neither
        image API version is configured.
        """
        super(ScenarioTest, cls).setup_clients()
        # Clients (in alphabetical order)
        cls.flavors_client = cls.os_admin.flavors_client
        cls.compute_floating_ips_client = (
            cls.os_admin.compute_floating_ips_client)
        if CONF.service_available.glance:
            # Check if glance v1 is available to determine which client to use.
            if CONF.image_feature_enabled.api_v1:
                cls.image_client = cls.os_admin.image_client
            elif CONF.image_feature_enabled.api_v2:
                cls.image_client = cls.os_admin.image_client_v2
            else:
                raise lib_exc.InvalidConfiguration(
                    'Either api_v1 or api_v2 must be True in '
                    '[image-feature-enabled].')
        # Compute image client
        cls.compute_images_client = cls.os_admin.compute_images_client
        cls.keypairs_client = cls.os_admin.keypairs_client
        # Nova security groups client
        cls.compute_security_groups_client = (
            cls.os_admin.compute_security_groups_client)
        cls.compute_security_group_rules_client = (
            cls.os_admin.compute_security_group_rules_client)
        cls.servers_client = cls.os_admin.servers_client
        # Neutron network client
        cls.networks_client = cls.os_admin.networks_client
        cls.ports_client = cls.os_admin.ports_client
        cls.routers_client = cls.os_admin.routers_client
        cls.subnets_client = cls.os_admin.subnets_client
        cls.floating_ips_client = cls.os_admin.floating_ips_client
        cls.security_groups_client = cls.os_admin.security_groups_client
        cls.security_group_rules_client = (
            cls.os_admin.security_group_rules_client)
        # NOTE: these two ifs are not elif, so when both volume API versions
        # are enabled the v1 clients (assigned last) win.
        if CONF.volume_feature_enabled.api_v2:
            cls.volumes_client = cls.os_admin.volumes_v2_client
            cls.snapshots_client = cls.os_admin.snapshots_v2_client
        if CONF.volume_feature_enabled.api_v1:
            cls.volumes_client = cls.os_admin.volumes_client
            cls.snapshots_client = cls.os_admin.snapshots_client
# ## Test functions library
#
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
def _create_port(self, network_id, client=None, namestart='port-quotatest',
**kwargs):
if not client:
client = self.ports_client
name = data_utils.rand_name(namestart)
result = client.create_port(
name=name,
network_id=network_id,
**kwargs)
self.assertIsNotNone(result, 'Unable to allocate port')
port = result['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_port, port['id'])
return port
def create_keypair(self, client=None):
if not client:
client = self.keypairs_client
name = data_utils.rand_name(self.__class__.__name__)
# We don't need to create a keypair by pubkey in scenario
body = client.create_keypair(name=name)
return body['keypair']
def create_server(self, name=None, image_id=None, flavor=None,
net_id=None, key=None, wait_until='ACTIVE',
sec_grps=[], metadata={}, **kwargs):
networks = [{'uuid': net_id}]
server = self.servers_client.create_server(name=name,
imageRef=image_id,
flavorRef=flavor,
key_name=key,
security_groups=sec_grps,
networks=networks,
metadata=metadata,
**kwargs)['server']
server_id = server['id']
waiters.wait_for_server_status(self.servers_client, server_id,
'ACTIVE')
return server
def create_volume(self, size=None, name=None, snapshot_id=None,
imageRef=None, volume_type=None):
if size is None:
size = CONF.volume.volume_size
if imageRef:
image = self.compute_images_client.show_image(imageRef)['image']
min_disk = image.get('minDisk')
size = max(size, min_disk)
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-volume")
kwargs = {'display_name': name,
'snapshot_id': snapshot_id,
'imageRef': imageRef,
'volume_type': volume_type,
'size': size}
volume = self.volumes_client.create_volume(**kwargs)['volume']
self.addCleanup(self.volumes_client.wait_for_resource_deletion,
volume['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.volumes_client.delete_volume, volume['id'])
# NOTE(e0ne): Cinder API v2 uses name instead of display_name
if 'display_name' in volume:
self.assertEqual(name, volume['display_name'])
else:
self.assertEqual(name, volume['name'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
# The volume retrieved on creation has a non-up-to-date status.
# Retrieval after it becomes active ensures correct details.
volume = self.volumes_client.show_volume(volume['id'])['volume']
return volume
def create_volume_type(self, client=None, name=None, backend_name=None):
if not client:
client = self.admin_volume_types_client
if not name:
class_name = self.__class__.__name__
name = data_utils.rand_name(class_name + '-volume-type')
randomized_name = data_utils.rand_name('scenario-type-' + name)
LOG.debug("Creating a volume type: %s on backend %s",
randomized_name, backend_name)
extra_specs = {}
if backend_name:
extra_specs = {"volume_backend_name": backend_name}
body = client.create_volume_type(name=randomized_name,
extra_specs=extra_specs)
volume_type = body['volume_type']
self.assertIn('id', volume_type)
self.addCleanup(client.delete_volume_type, volume_type['id'])
return volume_type
    def image_create(self, name, fmt,
                     disk_format=None, properties=None):
        """Create a glance image and upload the configured image file to it.

        :param name: base name (randomized before use)
        :param fmt: container format; also used as disk format by default
        :param disk_format: optional explicit disk format
        :param properties: extra image properties (v1: 'properties' meta;
            v2: flattened into top-level params)
        :return: the new image's id

        The image file is read from CONF.scenario.img_dir/img_file and
        uploaded via the v1 or v2 API depending on configuration.
        """
        if properties is None:
            properties = {}
        name = data_utils.rand_name('%s-' % name)
        params = {
            'name': name,
            'container_format': fmt,
            'disk_format': disk_format or fmt,
        }
        if CONF.image_feature_enabled.api_v1:
            # v1 passes everything as HTTP headers.
            params['is_public'] = 'False'
            params['properties'] = properties
            params = {'headers': common_image.image_meta_to_headers(**params)}
        else:
            params['visibility'] = 'private'
            # Additional properties are flattened out in the v2 API.
            params.update(properties)
        body = self.image_client.create_image(**params)
        # v1 wraps the image in an 'image' key; v2 returns it directly.
        image = body['image'] if 'image' in body else body
        # NOTE(review): cleanup is deliberately disabled here — the image is
        # not deleted after the test.
        # self.addCleanup(self.image_client.delete_image, image['id'])
        self.assertEqual("queued", image['status'])
        img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
        with open(img_path, 'rb') as image_file:
            if CONF.image_feature_enabled.api_v1:
                self.image_client.update_image(image['id'], data=image_file)
            else:
                self.image_client.store_image_file(image['id'], image_file)
        return image['id']
def rebuild_server(self, server_id, image=None,
preserve_ephemeral=False, wait=True,
rebuild_kwargs=None):
if image is None:
image = CONF.compute.image_ref
rebuild_kwargs = rebuild_kwargs or {}
LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
server_id, image, preserve_ephemeral)
self.servers_client.rebuild_server(
server_id=server_id, image_ref=image,
preserve_ephemeral=preserve_ephemeral,
**rebuild_kwargs)
if wait:
waiters.wait_for_server_status(self.servers_client,
server_id, 'ACTIVE')
def create_floating_ip(self, thing, pool_name=None):
"""Create a floating IP and associates to a server on Nova"""
if not pool_name:
pool_name = CONF.network.floating_network_name
floating_ip = (self.compute_floating_ips_client.
create_floating_ip(pool=pool_name)['floating_ip'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.compute_floating_ips_client.delete_floating_ip,
floating_ip['id'])
self.compute_floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], thing['id'])
return floating_ip
def nova_volume_attach(self, server, volume_to_attach):
volume = self.servers_client.attach_volume(
server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
% CONF.compute.volume_device_name)['volumeAttachment']
self.assertEqual(volume_to_attach['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')
# Return the updated volume after the attachment
return self.volumes_client.show_volume(volume['id'])['volume']
def nova_volume_detach(self, server, volume):
self.servers_client.detach_volume(server['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
volume = self.volumes_client.show_volume(volume['id'])['volume']
self.assertEqual('available', volume['status'])
def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
private_key=None):
ssh_client = self.get_remote_client(ip_address,
private_key=private_key)
if dev_name is not None:
ssh_client.make_fs(dev_name)
ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
mount_path))
cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
ssh_client.exec_command(cmd_timestamp)
timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
% mount_path)
if dev_name is not None:
ssh_client.exec_command('sudo umount %s' % mount_path)
return timestamp
def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
private_key=None):
ssh_client = self.get_remote_client(ip_address,
private_key=private_key)
if dev_name is not None:
ssh_client.mount(dev_name, mount_path)
timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
% mount_path)
if dev_name is not None:
ssh_client.exec_command('sudo umount %s' % mount_path)
return timestamp
    def get_server_ip(self, server):
        """Get the server fixed or floating IP.
        Based on the configuration we're in, return a correct ip
        address for validating that a guest is up.

        :raises: ServerUnreachable when no matching fixed address exists,
            InvalidConfiguration for an unknown connect_method.
        """
        if CONF.validation.connect_method == 'floating':
            # The tests calling this method don't have a floating IP
            # and can't make use of the validation resources. So the
            # method is creating the floating IP there.
            return self.create_floating_ip(server)['ip']
        elif CONF.validation.connect_method == 'fixed':
            # Determine the network name to look for based on config or creds
            # provider network resources.
            if CONF.validation.network_for_ssh:
                addresses = server['addresses'][
                    CONF.validation.network_for_ssh]
            else:
                creds_provider = self._get_credentials_provider()
                net_creds = creds_provider.get_primary_creds()
                network = getattr(net_creds, 'network', None)
                addresses = (server['addresses'][network['name']]
                             if network else [])
            # Pick the first fixed address with the configured IP version.
            for addr in addresses:
                if (addr['version'] == CONF.validation.ip_version_for_ssh and
                        addr['OS-EXT-IPS:type'] == 'fixed'):
                    return addr['addr']
            raise exceptions.ServerUnreachable(server_id=server['id'])
        else:
            raise lib_exc.InvalidConfiguration()
    def get_remote_client(self, ip_address, username=None, private_key=None):
        """Get a SSH client to a remote server
        @param ip_address the server floating or fixed IP address to use
                          for ssh validation
        @param username name of the Linux account on the remote server
        @param private_key the SSH private key to use
        @return a RemoteClient object

        Re-raises any authentication failure after logging it together with
        the server console output.
        """
        if username is None:
            username = CONF.validation.image_ssh_user
        # Set this with 'keypair' or others to log in with keypair or
        # username/password.
        if CONF.validation.auth_method == 'keypair':
            password = None
            if private_key is None:
                # Fall back to the keypair created by the test.
                private_key = self.keypair['private_key']
        else:
            password = CONF.validation.image_ssh_password
            private_key = None
        linux_client = remote_client.RemoteClient(ip_address, username,
                                                  pkey=private_key,
                                                  password=password)
        try:
            linux_client.validate_authentication()
        except Exception as e:
            message = ('Initializing SSH connection to %(ip)s failed. '
                       'Error: %(error)s' % {'ip': ip_address,
                                             'error': e})
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            LOG.exception(message)
            # Dump the console log to help diagnose the SSH failure.
            self._log_console_output()
            raise
        return linux_client
def _default_security_group(self, client=None, tenant_id=None):
"""Get default secgroup for given tenant_id.
:returns: default secgroup for given tenant
"""
if client is None:
client = self.security_groups_client
if not tenant_id:
tenant_id = client.tenant_id
sgs = [
sg for sg in list(client.list_security_groups().values())[0]
if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
]
msg = "No default security group for tenant %s." % tenant_id
self.assertGreater(len(sgs), 0, msg)
return sgs[0]
def _create_security_group(self):
# Create security group
sg_name = data_utils.rand_name(self.__class__.__name__)
sg_desc = sg_name + " description"
secgroup = self.compute_security_groups_client.create_security_group(
name=sg_name, description=sg_desc)['security_group']
self.assertEqual(secgroup['name'], sg_name)
self.assertEqual(secgroup['description'], sg_desc)
self.addCleanup(
test_utils.call_and_ignore_notfound_exc,
self.compute_security_groups_client.delete_security_group,
secgroup['id'])
# Add rules to the security group
self._create_loginable_secgroup_rule(secgroup['id'])
return secgroup
def _create_loginable_secgroup_rule(self, secgroup_id=None):
_client = self.compute_security_groups_client
_client_rules = self.compute_security_group_rules_client
if secgroup_id is None:
sgs = _client.list_security_groups()['security_groups']
for sg in sgs:
if sg['name'] == 'default':
secgroup_id = sg['id']
# These rules are intended to permit inbound ssh and icmp
# traffic from all sources, so no group_id is provided.
# Setting a group_id would only permit traffic from ports
# belonging to the same security | |
SYMBOL-4
1D220 GREEK INSTRUMENTAL NOTATION SYMBOL-5
1D221 GREEK INSTRUMENTAL NOTATION SYMBOL-7
1D222 GREEK INSTRUMENTAL NOTATION SYMBOL-8
1D223 GREEK INSTRUMENTAL NOTATION SYMBOL-11
1D224 GREEK INSTRUMENTAL NOTATION SYMBOL-12
1D225 GREEK INSTRUMENTAL NOTATION SYMBOL-13
1D226 GREEK INSTRUMENTAL NOTATION SYMBOL-14
1D227 GREEK INSTRUMENTAL NOTATION SYMBOL-17
1D228 GREEK INSTRUMENTAL NOTATION SYMBOL-18
1D229 GREEK INSTRUMENTAL NOTATION SYMBOL-19
1D22A GREEK INSTRUMENTAL NOTATION SYMBOL-23
1D22B GREEK INSTRUMENTAL NOTATION SYMBOL-24
1D22C GREEK INSTRUMENTAL NOTATION SYMBOL-25
1D22D GREEK INSTRUMENTAL NOTATION SYMBOL-26
1D22E GREEK INSTRUMENTAL NOTATION SYMBOL-27
1D22F GREEK INSTRUMENTAL NOTATION SYMBOL-29
1D230 GREEK INSTRUMENTAL NOTATION SYMBOL-30
1D231 GREEK INSTRUMENTAL NOTATION SYMBOL-32
1D232 GREEK INSTRUMENTAL NOTATION SYMBOL-36
1D233 GREEK INSTRUMENTAL NOTATION SYMBOL-37
1D234 GREEK INSTRUMENTAL NOTATION SYMBOL-38
1D235 GREEK INSTRUMENTAL NOTATION SYMBOL-39
1D236 GREEK INSTRUMENTAL NOTATION SYMBOL-40
1D237 GREEK INSTRUMENTAL NOTATION SYMBOL-42
1D238 GREEK INSTRUMENTAL NOTATION SYMBOL-43
1D239 GREEK INSTRUMENTAL NOTATION SYMBOL-45
1D23A GREEK INSTRUMENTAL NOTATION SYMBOL-47
1D23B GREEK INSTRUMENTAL NOTATION SYMBOL-48
1D23C GREEK INSTRUMENTAL NOTATION SYMBOL-49
1D23D GREEK INSTRUMENTAL NOTATION SYMBOL-50
1D23E GREEK INSTRUMENTAL NOTATION SYMBOL-51
1D23F GREEK INSTRUMENTAL NOTATION SYMBOL-52
1D240 GREEK INSTRUMENTAL NOTATION SYMBOL-53
1D241 GREEK INSTRUMENTAL NOTATION SYMBOL-54
1D242 COMBINING GREEK MUSICAL TRISEME
1D243 COMBINING GREEK MUSICAL TETRASEME
1D244 COMBINING GREEK MUSICAL PENTASEME
1D245 GREEK MUSICAL LEIMMA
1D300 MONOGRAM FOR EARTH
1D301 DIGRAM FOR HEAVENLY EARTH
1D302 DIGRAM FOR HUMAN EARTH
1D303 DIGRAM FOR EARTHLY HEAVEN
1D304 DIGRAM FOR EARTHLY HUMAN
1D305 DIGRAM FOR EARTH
1D306 TETRAGRAM FOR CENTRE
1D307 TETRAGRAM FOR FULL CIRCLE
1D308 TETRAGRAM FOR MIRED
1D309 TETRAGRAM FOR BARRIER
1D30A TETRAGRAM FOR KEEPING SMALL
1D30B TETRAGRAM FOR CONTRARIETY
1D30C TETRAGRAM FOR ASCENT
1D30D TETRAGRAM FOR OPPOSITION
1D30E TETRAGRAM FOR BRANCHING OUT
1D30F TETRAGRAM FOR DEFECTIVENESS OR DISTORTION
1D310 TETRAGRAM FOR DIVERGENCE
1D311 TETRAGRAM FOR YOUTHFULNESS
1D312 TETRAGRAM FOR INCREASE
1D313 TETRAGRAM FOR PENETRATION
1D314 TETRAGRAM FOR REACH
1D315 TETRAGRAM FOR CONTACT
1D316 TETRAGRAM FOR HOLDING BACK
1D317 TETRAGRAM FOR WAITING
1D318 TETRAGRAM FOR FOLLOWING
1D319 TETRAGRAM FOR ADVANCE
1D31A TETRAGRAM FOR RELEASE
1D31B TETRAGRAM FOR RESISTANCE
1D31C TETRAGRAM FOR EASE
1D31D TETRAGRAM FOR JOY
1D31E TETRAGRAM FOR CONTENTION
1D31F TETRAGRAM FOR ENDEAVOUR
1D320 TETRAGRAM FOR DUTIES
1D321 TETRAGRAM FOR CHANGE
1D322 TETRAGRAM FOR DECISIVENESS
1D323 TETRAGRAM FOR BOLD RESOLUTION
1D324 TETRAGRAM FOR PACKING
1D325 TETRAGRAM FOR LEGION
1D326 TETRAGRAM FOR CLOSENESS
1D327 TETRAGRAM FOR KINSHIP
1D328 TETRAGRAM FOR GATHERING
1D329 TETRAGRAM FOR STRENGTH
1D32A TETRAGRAM FOR PURITY
1D32B TETRAGRAM FOR FULLNESS
1D32C TETRAGRAM FOR RESIDENCE
1D32D TETRAGRAM FOR LAW OR MODEL
1D32E TETRAGRAM FOR RESPONSE
1D32F TETRAGRAM FOR GOING TO MEET
1D330 TETRAGRAM FOR ENCOUNTERS
1D331 TETRAGRAM FOR STOVE
1D332 TETRAGRAM FOR GREATNESS
1D333 TETRAGRAM FOR ENLARGEMENT
1D334 TETRAGRAM FOR PATTERN
1D335 TETRAGRAM FOR RITUAL
1D336 TETRAGRAM FOR FLIGHT
1D337 TETRAGRAM FOR VASTNESS OR WASTING
1D338 TETRAGRAM FOR CONSTANCY
1D339 TETRAGRAM FOR MEASURE
1D33A TETRAGRAM FOR ETERNITY
1D33B TETRAGRAM FOR UNITY
1D33C TETRAGRAM FOR DIMINISHMENT
1D33D TETRAGRAM FOR CLOSED MOUTH
1D33E TETRAGRAM FOR GUARDEDNESS
1D33F TETRAGRAM FOR GATHERING IN
1D340 TETRAGRAM FOR MASSING
1D341 TETRAGRAM FOR ACCUMULATION
1D342 TETRAGRAM FOR EMBELLISHMENT
1D343 TETRAGRAM FOR DOUBT
1D344 TETRAGRAM FOR WATCH
1D345 TETRAGRAM FOR SINKING
1D346 TETRAGRAM FOR INNER
1D347 TETRAGRAM FOR DEPARTURE
1D348 TETRAGRAM FOR DARKENING
1D349 TETRAGRAM FOR DIMMING
1D34A TETRAGRAM FOR EXHAUSTION
1D34B TETRAGRAM FOR SEVERANCE
1D34C TETRAGRAM FOR STOPPAGE
1D34D TETRAGRAM FOR HARDNESS
1D34E TETRAGRAM FOR COMPLETION
1D34F TETRAGRAM FOR CLOSURE
1D350 TETRAGRAM FOR FAILURE
1D351 TETRAGRAM FOR AGGRAVATION
1D352 TETRAGRAM FOR COMPLIANCE
1D353 TETRAGRAM FOR ON THE VERGE
1D354 TETRAGRAM FOR DIFFICULTIES
1D355 TETRAGRAM FOR LABOURING
1D356 TETRAGRAM FOR FOSTERING
1D360 COUNTING ROD UNIT DIGIT ONE
1D361 COUNTING ROD UNIT DIGIT TWO
1D362 COUNTING ROD UNIT DIGIT THREE
1D363 COUNTING ROD UNIT DIGIT FOUR
1D364 COUNTING ROD UNIT DIGIT FIVE
1D365 COUNTING ROD UNIT DIGIT SIX
1D366 COUNTING ROD UNIT DIGIT SEVEN
1D367 COUNTING ROD UNIT DIGIT EIGHT
1D368 COUNTING ROD UNIT DIGIT NINE
1D369 COUNTING ROD TENS DIGIT ONE
1D36A COUNTING ROD TENS DIGIT TWO
1D36B COUNTING ROD TENS DIGIT THREE
1D36C COUNTING ROD TENS DIGIT FOUR
1D36D COUNTING ROD TENS DIGIT FIVE
1D36E COUNTING ROD TENS DIGIT SIX
1D36F COUNTING ROD TENS DIGIT SEVEN
1D370 COUNTING ROD TENS DIGIT EIGHT
1D371 COUNTING ROD TENS DIGIT NINE
1D400 MATHEMATICAL BOLD CAPITAL A
1D401 MATHEMATICAL BOLD CAPITAL B
1D402 MATHEMATICAL BOLD CAPITAL C
1D403 MATHEMATICAL BOLD CAPITAL D
1D404 MATHEMATICAL BOLD CAPITAL E
1D405 MATHEMATICAL BOLD CAPITAL F
1D406 MATHEMATICAL BOLD CAPITAL G
1D407 MATHEMATICAL BOLD CAPITAL H
1D408 MATHEMATICAL BOLD CAPITAL I
1D409 MATHEMATICAL BOLD CAPITAL J
1D40A MATHEMATICAL BOLD CAPITAL K
1D40B MATHEMATICAL BOLD CAPITAL L
1D40C MATHEMATICAL BOLD CAPITAL M
1D40D MATHEMATICAL BOLD CAPITAL N
1D40E MATHEMATICAL BOLD CAPITAL O
1D40F MATHEMATICAL BOLD CAPITAL P
1D410 MATHEMATICAL BOLD CAPITAL Q
1D411 MATHEMATICAL BOLD CAPITAL R
1D412 MATHEMATICAL BOLD CAPITAL S
1D413 MATHEMATICAL BOLD CAPITAL T
1D414 MATHEMATICAL BOLD CAPITAL U
1D415 MATHEMATICAL BOLD CAPITAL V
1D416 MATHEMATICAL BOLD CAPITAL W
1D417 MATHEMATICAL BOLD CAPITAL X
1D418 MATHEMATICAL BOLD CAPITAL Y
1D419 MATHEMATICAL BOLD CAPITAL Z
1D41A MATHEMATICAL BOLD SMALL A
1D41B MATHEMATICAL BOLD SMALL B
1D41C MATHEMATICAL BOLD SMALL C
1D41D MATHEMATICAL BOLD SMALL D
1D41E MATHEMATICAL BOLD SMALL E
1D41F MATHEMATICAL BOLD SMALL F
1D420 MATHEMATICAL BOLD SMALL G
1D421 MATHEMATICAL BOLD SMALL H
1D422 MATHEMATICAL BOLD SMALL I
1D423 MATHEMATICAL BOLD SMALL J
1D424 MATHEMATICAL BOLD SMALL K
1D425 MATHEMATICAL BOLD SMALL L
1D426 MATHEMATICAL BOLD SMALL M
1D427 MATHEMATICAL BOLD SMALL N
1D428 MATHEMATICAL BOLD SMALL O
1D429 MATHEMATICAL BOLD SMALL P
1D42A MATHEMATICAL BOLD SMALL Q
1D42B MATHEMATICAL BOLD SMALL R
1D42C MATHEMATICAL BOLD SMALL S
1D42D MATHEMATICAL BOLD SMALL T
1D42E MATHEMATICAL BOLD SMALL U
1D42F MATHEMATICAL BOLD SMALL V
1D430 MATHEMATICAL BOLD SMALL W
1D431 MATHEMATICAL BOLD SMALL X
1D432 MATHEMATICAL BOLD SMALL Y
1D433 MATHEMATICAL BOLD SMALL Z
1D434 MATHEMATICAL ITALIC CAPITAL A
1D435 MATHEMATICAL ITALIC CAPITAL B
1D436 MATHEMATICAL ITALIC CAPITAL C
1D437 MATHEMATICAL ITALIC CAPITAL D
1D438 MATHEMATICAL ITALIC CAPITAL E
1D439 MATHEMATICAL ITALIC CAPITAL F
1D43A MATHEMATICAL ITALIC CAPITAL G
1D43B MATHEMATICAL ITALIC CAPITAL H
1D43C MATHEMATICAL ITALIC CAPITAL I
1D43D MATHEMATICAL ITALIC CAPITAL J
1D43E MATHEMATICAL ITALIC CAPITAL K
1D43F MATHEMATICAL ITALIC CAPITAL L
1D440 MATHEMATICAL ITALIC CAPITAL M
1D441 MATHEMATICAL ITALIC CAPITAL N
1D442 MATHEMATICAL ITALIC CAPITAL O
1D443 MATHEMATICAL ITALIC CAPITAL P
1D444 MATHEMATICAL ITALIC CAPITAL Q
1D445 MATHEMATICAL ITALIC CAPITAL R
1D446 MATHEMATICAL ITALIC CAPITAL S
1D447 MATHEMATICAL ITALIC CAPITAL T
1D448 MATHEMATICAL ITALIC CAPITAL U
1D449 MATHEMATICAL ITALIC CAPITAL V
1D44A MATHEMATICAL ITALIC CAPITAL W
1D44B MATHEMATICAL ITALIC CAPITAL X
1D44C MATHEMATICAL ITALIC CAPITAL Y
1D44D MATHEMATICAL ITALIC CAPITAL Z
1D44E MATHEMATICAL ITALIC SMALL A
1D44F MATHEMATICAL ITALIC SMALL B
1D450 MATHEMATICAL ITALIC SMALL C
1D451 MATHEMATICAL ITALIC SMALL D
1D452 MATHEMATICAL ITALIC SMALL E
1D453 MATHEMATICAL ITALIC SMALL F
1D454 MATHEMATICAL ITALIC SMALL G
1D456 MATHEMATICAL ITALIC SMALL I
1D457 MATHEMATICAL ITALIC SMALL J
1D458 MATHEMATICAL ITALIC SMALL K
1D459 MATHEMATICAL ITALIC SMALL L
1D45A MATHEMATICAL ITALIC SMALL M
1D45B MATHEMATICAL ITALIC SMALL N
1D45C MATHEMATICAL ITALIC SMALL O
1D45D MATHEMATICAL ITALIC SMALL P
1D45E MATHEMATICAL ITALIC SMALL Q
1D45F MATHEMATICAL ITALIC SMALL R
1D460 MATHEMATICAL ITALIC SMALL S
1D461 MATHEMATICAL ITALIC SMALL T
1D462 MATHEMATICAL ITALIC SMALL U
1D463 MATHEMATICAL ITALIC SMALL V
1D464 MATHEMATICAL ITALIC SMALL W
1D465 MATHEMATICAL ITALIC SMALL X
1D466 MATHEMATICAL ITALIC SMALL Y
1D467 MATHEMATICAL ITALIC SMALL Z
1D468 MATHEMATICAL BOLD ITALIC CAPITAL A
1D469 MATHEMATICAL BOLD ITALIC CAPITAL B
1D46A MATHEMATICAL BOLD ITALIC CAPITAL C
1D46B MATHEMATICAL BOLD ITALIC CAPITAL D
1D46C MATHEMATICAL BOLD ITALIC CAPITAL E
1D46D MATHEMATICAL BOLD ITALIC CAPITAL F
1D46E MATHEMATICAL BOLD ITALIC CAPITAL G
1D46F MATHEMATICAL BOLD ITALIC CAPITAL H
1D470 MATHEMATICAL BOLD ITALIC CAPITAL I
1D471 MATHEMATICAL BOLD ITALIC CAPITAL J
1D472 MATHEMATICAL BOLD ITALIC CAPITAL K
1D473 MATHEMATICAL BOLD ITALIC CAPITAL L
1D474 MATHEMATICAL BOLD ITALIC CAPITAL M
1D475 MATHEMATICAL BOLD ITALIC CAPITAL N
1D476 MATHEMATICAL BOLD ITALIC CAPITAL O
1D477 MATHEMATICAL BOLD ITALIC CAPITAL P
1D478 MATHEMATICAL BOLD ITALIC CAPITAL Q
1D479 MATHEMATICAL BOLD ITALIC CAPITAL R
1D47A MATHEMATICAL BOLD ITALIC CAPITAL S
1D47B MATHEMATICAL BOLD ITALIC CAPITAL T
1D47C MATHEMATICAL BOLD ITALIC CAPITAL U
1D47D MATHEMATICAL BOLD ITALIC CAPITAL V
1D47E MATHEMATICAL BOLD ITALIC CAPITAL W
1D47F MATHEMATICAL BOLD ITALIC CAPITAL X
1D480 MATHEMATICAL BOLD ITALIC CAPITAL Y
1D481 MATHEMATICAL BOLD ITALIC CAPITAL Z
1D482 MATHEMATICAL BOLD ITALIC SMALL A
1D483 MATHEMATICAL BOLD ITALIC SMALL B
1D484 MATHEMATICAL BOLD ITALIC SMALL C
1D485 MATHEMATICAL BOLD ITALIC SMALL D
1D486 MATHEMATICAL BOLD ITALIC SMALL E
1D487 MATHEMATICAL BOLD ITALIC SMALL F
1D488 MATHEMATICAL BOLD ITALIC SMALL G
1D489 MATHEMATICAL BOLD ITALIC SMALL H
1D48A MATHEMATICAL BOLD ITALIC SMALL I
1D48B MATHEMATICAL BOLD ITALIC SMALL J
1D48C MATHEMATICAL BOLD ITALIC SMALL K
1D48D MATHEMATICAL BOLD ITALIC SMALL L
1D48E MATHEMATICAL BOLD ITALIC SMALL M
1D48F MATHEMATICAL BOLD ITALIC SMALL N
1D490 MATHEMATICAL BOLD ITALIC SMALL O
1D491 MATHEMATICAL BOLD ITALIC SMALL P
1D492 MATHEMATICAL BOLD ITALIC SMALL Q
1D493 MATHEMATICAL BOLD ITALIC SMALL R
1D494 MATHEMATICAL BOLD ITALIC SMALL S
1D495 MATHEMATICAL BOLD ITALIC SMALL T
1D496 MATHEMATICAL BOLD ITALIC SMALL U
1D497 MATHEMATICAL BOLD ITALIC SMALL V
1D498 MATHEMATICAL BOLD ITALIC SMALL W
1D499 MATHEMATICAL BOLD ITALIC SMALL X
1D49A MATHEMATICAL BOLD ITALIC SMALL Y
1D49B MATHEMATICAL BOLD ITALIC SMALL Z
1D49C MATHEMATICAL SCRIPT CAPITAL A
1D49E MATHEMATICAL SCRIPT CAPITAL C
1D49F MATHEMATICAL SCRIPT CAPITAL D
1D4A2 MATHEMATICAL SCRIPT CAPITAL G
1D4A5 MATHEMATICAL SCRIPT CAPITAL J
1D4A6 MATHEMATICAL SCRIPT CAPITAL K
1D4A9 MATHEMATICAL SCRIPT CAPITAL N
1D4AA MATHEMATICAL SCRIPT CAPITAL O
1D4AB MATHEMATICAL SCRIPT CAPITAL P
1D4AC MATHEMATICAL SCRIPT CAPITAL Q
1D4AE MATHEMATICAL SCRIPT CAPITAL S
1D4AF MATHEMATICAL SCRIPT CAPITAL T
1D4B0 MATHEMATICAL SCRIPT CAPITAL U
1D4B1 MATHEMATICAL SCRIPT CAPITAL V
1D4B2 MATHEMATICAL SCRIPT CAPITAL W
1D4B3 MATHEMATICAL SCRIPT CAPITAL X
1D4B4 MATHEMATICAL SCRIPT CAPITAL Y
1D4B5 MATHEMATICAL SCRIPT CAPITAL Z
1D4B6 MATHEMATICAL SCRIPT SMALL A
1D4B7 MATHEMATICAL SCRIPT SMALL B
1D4B8 MATHEMATICAL SCRIPT SMALL C
1D4B9 MATHEMATICAL SCRIPT SMALL D
1D4BB MATHEMATICAL SCRIPT SMALL F
1D4BD MATHEMATICAL SCRIPT SMALL H
1D4BE MATHEMATICAL SCRIPT SMALL I
1D4BF MATHEMATICAL SCRIPT SMALL J
1D4C0 MATHEMATICAL SCRIPT SMALL K
1D4C1 MATHEMATICAL SCRIPT SMALL L
1D4C2 MATHEMATICAL SCRIPT SMALL M
1D4C3 MATHEMATICAL SCRIPT SMALL N
1D4C5 MATHEMATICAL SCRIPT SMALL P
1D4C6 MATHEMATICAL SCRIPT SMALL Q
1D4C7 MATHEMATICAL SCRIPT SMALL R
1D4C8 MATHEMATICAL SCRIPT SMALL S
1D4C9 MATHEMATICAL SCRIPT SMALL T
1D4CA MATHEMATICAL SCRIPT SMALL U
1D4CB MATHEMATICAL SCRIPT SMALL V
1D4CC MATHEMATICAL SCRIPT SMALL W
1D4CD MATHEMATICAL SCRIPT SMALL X
1D4CE MATHEMATICAL SCRIPT SMALL Y
1D4CF MATHEMATICAL SCRIPT SMALL Z
1D4D0 MATHEMATICAL BOLD SCRIPT CAPITAL A
1D4D1 MATHEMATICAL BOLD SCRIPT CAPITAL B
1D4D2 MATHEMATICAL BOLD SCRIPT CAPITAL C
1D4D3 MATHEMATICAL BOLD SCRIPT CAPITAL D
1D4D4 MATHEMATICAL BOLD SCRIPT CAPITAL E
1D4D5 MATHEMATICAL BOLD SCRIPT CAPITAL F
1D4D6 MATHEMATICAL BOLD SCRIPT CAPITAL G
1D4D7 MATHEMATICAL BOLD SCRIPT CAPITAL H
1D4D8 MATHEMATICAL BOLD SCRIPT CAPITAL I
1D4D9 MATHEMATICAL BOLD SCRIPT CAPITAL J
1D4DA MATHEMATICAL BOLD SCRIPT CAPITAL K
1D4DB MATHEMATICAL BOLD SCRIPT CAPITAL L
1D4DC MATHEMATICAL BOLD SCRIPT CAPITAL M
1D4DD MATHEMATICAL BOLD SCRIPT CAPITAL N
1D4DE MATHEMATICAL BOLD SCRIPT CAPITAL O
1D4DF MATHEMATICAL BOLD SCRIPT CAPITAL P
1D4E0 MATHEMATICAL BOLD SCRIPT CAPITAL Q
1D4E1 MATHEMATICAL BOLD SCRIPT CAPITAL R
1D4E2 MATHEMATICAL BOLD SCRIPT CAPITAL S
1D4E3 MATHEMATICAL BOLD SCRIPT CAPITAL T
1D4E4 MATHEMATICAL BOLD SCRIPT CAPITAL U
1D4E5 MATHEMATICAL BOLD SCRIPT CAPITAL V
1D4E6 MATHEMATICAL BOLD SCRIPT CAPITAL W
1D4E7 MATHEMATICAL BOLD SCRIPT CAPITAL X
1D4E8 MATHEMATICAL BOLD SCRIPT CAPITAL Y
1D4E9 MATHEMATICAL BOLD SCRIPT CAPITAL Z
1D4EA MATHEMATICAL BOLD SCRIPT SMALL A
1D4EB MATHEMATICAL BOLD SCRIPT SMALL B
1D4EC MATHEMATICAL BOLD SCRIPT SMALL C
1D4ED MATHEMATICAL BOLD SCRIPT SMALL D
1D4EE MATHEMATICAL BOLD SCRIPT SMALL E
1D4EF MATHEMATICAL BOLD SCRIPT SMALL F
1D4F0 MATHEMATICAL BOLD SCRIPT SMALL G
1D4F1 MATHEMATICAL BOLD SCRIPT SMALL H
1D4F2 MATHEMATICAL BOLD SCRIPT SMALL I
1D4F3 MATHEMATICAL BOLD SCRIPT SMALL J
1D4F4 MATHEMATICAL BOLD SCRIPT SMALL K
1D4F5 MATHEMATICAL BOLD SCRIPT SMALL L
1D4F6 MATHEMATICAL BOLD SCRIPT SMALL M
1D4F7 MATHEMATICAL BOLD SCRIPT SMALL N
1D4F8 MATHEMATICAL BOLD SCRIPT SMALL O
1D4F9 MATHEMATICAL BOLD SCRIPT SMALL P
1D4FA MATHEMATICAL BOLD SCRIPT SMALL Q
1D4FB MATHEMATICAL BOLD SCRIPT SMALL R
1D4FC MATHEMATICAL BOLD SCRIPT SMALL S
1D4FD MATHEMATICAL BOLD SCRIPT SMALL T
1D4FE MATHEMATICAL BOLD SCRIPT SMALL U
1D4FF MATHEMATICAL BOLD SCRIPT SMALL V
1D500 MATHEMATICAL BOLD SCRIPT SMALL W
1D501 MATHEMATICAL BOLD SCRIPT SMALL X
1D502 MATHEMATICAL BOLD SCRIPT SMALL Y
1D503 MATHEMATICAL BOLD SCRIPT SMALL Z
1D504 MATHEMATICAL FRAKTUR CAPITAL A
1D505 MATHEMATICAL FRAKTUR CAPITAL B
1D507 MATHEMATICAL FRAKTUR CAPITAL D
1D508 MATHEMATICAL FRAKTUR CAPITAL E
1D509 MATHEMATICAL FRAKTUR CAPITAL F
1D50A MATHEMATICAL FRAKTUR CAPITAL G
1D50D MATHEMATICAL FRAKTUR CAPITAL J
1D50E MATHEMATICAL FRAKTUR CAPITAL K
1D50F MATHEMATICAL FRAKTUR CAPITAL L
1D510 MATHEMATICAL FRAKTUR CAPITAL M
1D511 MATHEMATICAL FRAKTUR CAPITAL N
1D512 MATHEMATICAL FRAKTUR CAPITAL O
1D513 MATHEMATICAL FRAKTUR CAPITAL P
1D514 MATHEMATICAL FRAKTUR CAPITAL Q
1D516 MATHEMATICAL FRAKTUR CAPITAL S
1D517 MATHEMATICAL FRAKTUR CAPITAL T
1D518 MATHEMATICAL FRAKTUR CAPITAL U
1D519 MATHEMATICAL | |
def glAccum(op, value):
    '''Operate on the accumulation buffer.

    :param op: The accumulation buffer operation.
    :type op: Enumerated constant
    :param value: A value used in the accumulation buffer operation.
    :type value: float
    '''
    pass  # documentation stub: no executable behavior
def glAlphaFunc(func, ref):
    '''Specify the alpha test function.

    :param func: Specifies the alpha comparison function.
    :type func: Enumerated constant
    :param ref: The reference value that incoming alpha values are compared to. Clamped between 0 and 1.
    :type ref: float
    '''
    pass  # documentation stub: no executable behavior
def glAreTexturesResident(n, textures, residences):
    '''Determine if textures are loaded in texture memory.

    :param n: Specifies the number of textures to be queried.
    :type n: int
    :param textures: Specifies an array containing the names of the textures to be queried.
    :type textures: bgl.Buffer object I{type GL_INT}
    :param residences: An array in which the texture residence status is returned. The residence
        status of a texture named by an element of textures is returned in the corresponding
        element of residences.
    :type residences: bgl.Buffer object I{type GL_INT}(boolean)
    '''
    pass  # documentation stub: no executable behavior
def glBegin(mode):
    '''Delimit the vertices of a primitive or a group of like primitives.

    :param mode: Specifies the primitive that will be created from vertices between glBegin and glEnd.
    :type mode: Enumerated constant
    '''
    pass  # documentation stub: no executable behavior
def glBindTexture(target, texture):
    '''Bind a named texture to a texturing target.

    :param target: Specifies the target to which the texture is bound.
    :type target: Enumerated constant
    :param texture: Specifies the name of a texture.
    :type texture: unsigned int
    '''
    pass  # documentation stub: no executable behavior
def glBitmap(width, height, xorig, yorig, xmove, ymove, bitmap):
    '''Draw a bitmap.

    :param width: Pixel width of the bitmap image.
    :type width: int
    :param height: Pixel height of the bitmap image.
    :type height: int
    :param xorig: X location of the origin in the bitmap image, measured from the lower
        left corner of the bitmap, with right and up being the positive axes.
    :type xorig: float
    :param yorig: Y location of the origin in the bitmap image.
    :type yorig: float
    :param xmove: X offset added to the current raster position after the bitmap is drawn.
    :type xmove: float
    :param ymove: Y offset added to the current raster position after the bitmap is drawn.
    :type ymove: float
    :param bitmap: Specifies the address of the bitmap image.
    :type bitmap: bgl.Buffer object I{type GL_BYTE}
    '''
    pass  # documentation stub: no executable behavior
def glBlendFunc(sfactor, dfactor):
    '''Specify pixel arithmetic.

    :param sfactor: Specifies how the red, green, blue, and alpha source blending factors are computed.
    :type sfactor: Enumerated constant
    :param dfactor: Specifies how the red, green, blue, and alpha destination blending factors are computed.
    :type dfactor: Enumerated constant
    '''
    pass  # documentation stub: no executable behavior
def glCallList(list):
    '''Execute a display list.

    :param list: Specifies the integer name of the display list to be executed.
    :type list: unsigned int
    '''
    # NOTE: the parameter shadows the builtin `list`; kept as-is because it is
    # part of the documented public signature.
    pass  # documentation stub: no executable behavior
def glCallLists(n, type, lists):
    '''Execute a list of display lists.

    :param n: Specifies the number of display lists to be executed.
    :type n: int
    :param type: Specifies the type of values in lists.
    :type type: Enumerated constant
    :param lists: Specifies the address of an array of name offsets in the display list.
        The pointer type is void because the offsets can be bytes, shorts, ints, or floats,
        depending on the value of type.
    :type lists: bgl.Buffer object
    '''
    pass  # documentation stub: no executable behavior
def glClear(mask):
    '''Clear buffers to preset values.

    :param mask: Bitwise OR of masks that indicate the buffers to be cleared.
    :type mask: Enumerated constant(s)
    '''
    pass  # documentation stub: no executable behavior
def glClearAccum(red, green, blue, alpha):
    '''Specify clear values for the accumulation buffer.

    :param red: Red value used when the accumulation buffer is cleared. The initial value is 0.
    :type red: float
    :param green: Green value used when the accumulation buffer is cleared. The initial value is 0.
    :type green: float
    :param blue: Blue value used when the accumulation buffer is cleared. The initial value is 0.
    :type blue: float
    :param alpha: Alpha value used when the accumulation buffer is cleared. The initial value is 0.
    :type alpha: float
    '''
    pass  # documentation stub: no executable behavior
def glClearColor(red, green, blue, alpha):
    '''Specify clear values for the color buffers.

    :param red: Red value used when the color buffers are cleared. The initial value is 0.
    :type red: float
    :param green: Green value used when the color buffers are cleared. The initial value is 0.
    :type green: float
    :param blue: Blue value used when the color buffers are cleared. The initial value is 0.
    :type blue: float
    :param alpha: Alpha value used when the color buffers are cleared. The initial value is 0.
    :type alpha: float
    '''
    pass  # documentation stub: no executable behavior
def glClearDepth(depth):
    '''Specify the clear value for the depth buffer.

    :param depth: Specifies the depth value used when the depth buffer is cleared.
        Clamped to the range [0, 1]. The initial value is 1.
    :type depth: float
    '''
    # NOTE(review): the original docstring said ``int``, but glClearDepth takes a
    # clamped double per the OpenGL specification.
    pass  # documentation stub: no executable behavior
def glClearIndex(c):
    '''Specify the clear value for the color index buffers.

    :param c: Specifies the index used when the color index buffers are cleared. The initial value is 0.
    :type c: float
    '''
    pass  # documentation stub: no executable behavior
def glClearStencil(s):
    '''Specify the clear value for the stencil buffer.

    :param s: Specifies the index used when the stencil buffer is cleared. The initial value is 0.
    :type s: int
    '''
    pass  # documentation stub: no executable behavior
def glClipPlane(plane, equation):
    '''Specify a plane against which all geometry is clipped.

    :param plane: Specifies which clipping plane is being positioned.
    :type plane: Enumerated constant
    :param equation: Specifies the address of an array of four double-precision floating-point
        values. These values are interpreted as a plane equation.
    :type equation: bgl.Buffer object I{type GL_FLOAT}(double)
    '''
    pass  # documentation stub: no executable behavior
def glColor(red, green, blue, alpha):
    '''Set a new color.

    :param red: New red value for the current color.
    :type red: float
    :param green: New green value for the current color.
    :type green: float
    :param blue: New blue value for the current color.
    :type blue: float
    :param alpha: Specifies a new alpha value for the current color. Included only in the
        four-argument glColor4 commands. (With '4' colors only)
    :type alpha: float
    '''
    pass  # documentation stub: no executable behavior
def glColorMask(red, green, blue, alpha):
    '''Enable and disable writing of frame buffer color components.

    :param red: Whether red can be written into the frame buffer. Initially GL_TRUE.
    :type red: int (boolean)
    :param green: Whether green can be written into the frame buffer. Initially GL_TRUE.
    :type green: int (boolean)
    :param blue: Whether blue can be written into the frame buffer. Initially GL_TRUE.
    :type blue: int (boolean)
    :param alpha: Whether alpha can be written into the frame buffer. Initially GL_TRUE.
    :type alpha: int (boolean)
    '''
    pass  # documentation stub: no executable behavior
def glColorMaterial(face, mode):
    '''Cause a material color to track the current color.

    :param face: Specifies whether front, back, or both front and back material parameters
        should track the current color.
    :type face: Enumerated constant
    :param mode: Specifies which of several material parameters track the current color.
    :type mode: Enumerated constant
    '''
    pass  # documentation stub: no executable behavior
def glCopyPixels(x, y, width, height, type):
    '''Copy pixels in the frame buffer.

    :param x: Window x coordinate of the lower left corner of the rectangular
        region of pixels to be copied.
    :type x: int
    :param y: Window y coordinate of the lower left corner of the rectangular
        region of pixels to be copied.
    :type y: int
    :param width: Width of the rectangular region of pixels to be copied. Must be non-negative.
    :type width: int
    :param height: Height of the rectangular region of pixels to be copied. Must be non-negative.
    :type height: int
    :param type: Specifies whether color values, depth values, or stencil values are to be copied.
    :type type: Enumerated constant
    '''
    # NOTE(review): the original docstring documented glCopyTexImage2D's parameters
    # (target/level/internalformat/border), which do not exist in this signature;
    # replaced with the glCopyPixels parameters from the OpenGL reference.
    pass  # documentation stub: no executable behavior
def glCullFace(mode):
    '''Specify whether front- or back-facing facets can be culled.

    :param mode: Specifies whether front- or back-facing facets are candidates for culling.
    :type mode: Enumerated constant
    '''
    pass  # documentation stub: no executable behavior
def glDeleteLists(list, range):
    '''Delete a contiguous group of display lists.

    :param list: Specifies the integer name of the first display list to delete.
    :type list: unsigned int
    :param range: Specifies the number of display lists to delete.
    :type range: int
    '''
    # NOTE: parameters shadow the builtins `list` and `range`; kept because the
    # names are part of the documented public signature.
    pass  # documentation stub: no executable behavior
def glDeleteTextures(n, textures):
    '''Delete named textures.

    :param n: Specifies the number of textures to be deleted.
    :type n: int
    :param textures: Specifies an array of textures to be deleted.
    :type textures: bgl.Buffer I{GL_INT}
    '''
    pass  # documentation stub: no executable behavior
def glDepthFunc(func):
    '''Specify the value used for depth buffer comparisons.

    :param func: Specifies the depth comparison function.
    :type func: Enumerated constant
    '''
    pass  # documentation stub: no executable behavior
def glDepthMask(flag):
'''Enable or disable writing into the depth buffer
:param flag: Specifies whether the depth buffer is enabled for writing. If flag is GL_FALSE, depth buffer writing | |
s.qerror[2]
err = numpy.sqrt(x * x + y * y + z * z)
if err > 20.0:
pass
# del spectra[i]
elif numpy.add.reduce(s.qachieved) == 0.0:
pass
# del spectra[i]
elif alt < 0.0 or alt > 120000.0:
pass
# odin.Warn("deleting altitude: %10.3f" % (alt/1000.0))
# del spectra[i]
n2 = len(spectra)
def freqMode(self, s):
    """Classify spectrum *s* into an Odin frequency mode.

    Matches the Doppler-corrected LO frequency of *s* against tables of known
    (backend, frontend) configurations and sets ``self.freqmode``,
    ``self.topic``, ``self.split`` and ``self.source`` accordingly.

    :param s: spectrum object providing ``lofreq``, ``vgeo``, ``backend``
        and ``frontend`` attributes.
    :returns: the frequency-mode number (0 when the configuration is unknown).
    """
    df = 30.0e6  # NOTE(review): overwritten with a list below when a config table matches
    self.freqmode = 0
    self.split = 0
    # First-order Doppler correction of the LO frequency using the
    # geocentric velocity (c = 2.99792456e8 m/s).
    LO = s.lofreq * (1.0 + s.vgeo / 2.99792456e8)
    # Human-readable names for the science topics referenced in the tables below.
    modes = {
        'STRAT': "Stratospheric",
        'ODD_H': "Odd hydrogen",
        'ODD_N': "Odd nitrogen",
        'WATER': "Water isotope",
        'SUMMER': "Summer mesosphere",
        'DYNAM': "Transport"
    }
    # Each table row: [nominal LO frequency (Hz), frequency mode, topic, split flag].
    config = None
    if s.backend == 'AC1':
        if s.frontend == '495':
            config = [[492.750e9, 23, "DYNAM", 0],
                      [495.402e9, 29, "DYNAM", 0],
                      [499.698e9, 25, "DYNAM", 0]]
        elif s.frontend == '549':
            config = [[548.502e9, 2, "STRAT", 0],
                      [553.050e9, 19, "WATER", 0],
                      [547.752e9, 21, "WATER", 1],
                      [553.302e9, 23, "DYNAM", 0],
                      [553.302e9, 29, "DYNAM", 0]]
        elif s.frontend == '555':
            config = [[553.298e9, 13, "SUMMER", 0]]
        elif s.frontend == '572':
            config = [[572.762e9, 24, "DYNAM", 0]]
    elif s.backend == 'AC2':
        if s.frontend == '495':
            config = [[497.880e9, 1, "STRAT", 1],
                      [492.750e9, 8, "WATER", 1],
                      [494.250e9, 17, "WATER", 0],
                      [499.698e9, 25, "DYNAM", 0]]
        elif s.frontend == '572':
            config = [[572.964e9, 22, "DYNAM", 1],
                      [572.762e9, 14, "SUMMER", 0]]
    if config:
        # Pick the table row whose nominal LO is closest to the observed LO.
        df = [0.0] * len(config)
        for i in range(len(df)):
            df[i] = abs(LO - config[i][0])
        i = df.index(min(df))
        # print "configuration", i, config[i]
        self.freqmode = config[i][1]
        self.topic = config[i][2]
        self.split = config[i][3]
        # print "configuration %s:%s:%10.1f" % \
        #     (s.backend, s.frontend, LO/1.0e6),
        # print " %d, %s" % (self.freqmode, self.topic)
    else:
        odin.Warn("unknown configuration %s:%s:%10.1f" %
                  (s.backend, s.frontend, LO / 1.0e6))
    if self.freqmode:
        self.source = "%s FM=%d" % (modes[self.topic], self.freqmode)
    else:
        self.source = "unknown"
        self.topic = "N/A"
    return self.freqmode
def planck(T, f):
    """Return the Planck brightness temperature (K) at frequency *f* (Hz)
    for a physical temperature *T* (K).

    Implements Tb = (hf/k) / (exp(hf/kT) - 1); non-positive temperatures
    map to 0.0 rather than raising.
    """
    PLANCK_CONST = 6.626176e-34     # Planck constant (Js)
    BOLTZMANN_CONST = 1.380662e-23  # Boltzmann constant (J/K)
    quantum_temp = PLANCK_CONST * f / BOLTZMANN_CONST
    if not (T > 0.0):
        return 0.0
    return quantum_temp / (numpy.exp(quantum_temp / T) - 1.0)
class Spectra:
    """A class that perform frequency calibration of ac level 1a spectra"""

    def __init__(self, con, data, ref):
        # Build a Spectra object from one row (dict-like) of level 1a data.
        # `con` (database connection) is accepted but not stored here;
        # `ref` marks whether this spectrum is a reference measurement.
        self.ref = ref
        self.start = data['start']
        # 8 bands x 112 channels of spectrum data, viewed over the raw
        # buffer as a flat float64 array.
        self.data = numpy.ndarray(shape=(112 * 8,),
                                  dtype='float64', buffer=data['spectra'])
        self.stw = data['stw']            # satellite time word
        self.LO = data['ssb_fq']
        self.backend = data['backend']
        self.frontend = data['frontend']
        self.vgeo = data['vgeo']          # geocentric velocity, used for Doppler correction
        self.mode = data['mode']
        # Hot (calibration) load temperature: fall back to sensor B when
        # sensor A reads zero.
        self.tcal = data['hotloada']
        if self.tcal == 0:
            self.tcal = data['hotloadb']
        self.freqres = 1e6
        if data['sig_type'] == 'SIG':
            # Pointing-quality fields only exist for signal spectra.
            # NOTE(review): indentation of the following assignments was
            # ambiguous in the source; qerror/qachieved are assumed to be
            # SIG-only — confirm against the original file.
            self.qerror = data['qerror']
            self.qachieved = data['qachieved']
        self.inttime = data['inttime']
        self.intmode = 511  # NOTE(review): hard-coded bitmask; ADC_SPLIT/ADC_UPPER bits tested in tuning()
        self.skyfreq = 0
        self.lofreq = data['lo']
        self.ssb = data['ssb']
        # PLL (image load) temperature, with B/A sensor fallback.
        self.Tpll = data['imageloadb']
        if self.Tpll == 0:
            self.Tpll = data['imageloada']
        self.current = data['mixc']       # mixer current
        self.type = data['sig_type']
        self.source = []
        self.topic = []
        self.restfreq = []
        self.latitude = data['latitude']
        self.longitude = data['longitude']
        self.altitude = data['altitude']
        self.tsys = 0
        self.efftime = 0
        self.sbpath = 0
        # Correlator coefficients: 8 bands x 96 lags.
        self.cc = numpy.ndarray(shape=(8, 96), dtype='float64',
                                buffer=data['cc'])
        # Zero-lag power of each of the 8 bands (first lag per band).
        self.zerolag = numpy.array(self.cc[0:8, 0])
        self.skybeamhit = data['skybeamhit']
        self.ssb_att = data['ssb_att']

    def tuning(self):
        """Determine the IF frequency for this spectrum's receiver and
        apply frequency calibration via fcalibrate()."""
        if self.frontend == '119':
            # The 119 GHz receiver uses a fixed LO and IF.
            IFfreq = 3900.0e6
            self.lofreq = 114.8498600000000e+9
            self.fcalibrate(self.lofreq, IFfreq)
            return
        rxs = {'495': 1, '549': 2, '555': 3, '572': 4}
        if self.frontend not in rxs.keys():
            return
        (IFfreq, sbpath) = getSideBand(self.frontend, self.lofreq, self.ssb)
        self.sbpath = sbpath / 1.0e6
        (ADC_SPLIT, ADC_UPPER) = (0x0200, 0x0400)
        if self.intmode & ADC_SPLIT:
            # In split mode the effective IF differs between the upper and
            # lower ADC halves, with opposite sense for AC1 and AC2.
            if self.backend == 'AC1':
                if self.intmode & ADC_UPPER:
                    IFfreq = IFfreq * 3.6 / 3.9
                else:
                    IFfreq = IFfreq * 4.2 / 3.9
            elif self.backend == 'AC2':
                if self.intmode & ADC_UPPER:
                    IFfreq = IFfreq * 4.2 / 3.9
                else:
                    IFfreq = IFfreq * 3.6 / 3.9
        if self.current < 0.25:
            # odin.Warn("low mixer current %5.2f" % (current))
            # A low mixer current suggests the LO was not locked; invalidate
            # the LO frequency (the 572 GHz receiver is exempt).
            if self.frontend != '572':
                self.lofreq = 0.0
        # else:
        #     IFfreq = 0.0
        #     odin.Warn("LO frequency lookup failed")
        self.fcalibrate(self.lofreq, IFfreq)

    def fcalibrate(self, LOfreq, IFfreq):
        """Perform frequency calibration."""
        if LOfreq == 0.0:
            return
        # Temperature-dependent drift of the PLL reference; coefficients
        # differ between the 495/549 and 555/572 receiver pairs.
        if self.frontend == '495' or self.frontend == '549':
            drift = 1.0 + (29.23 - 0.138 * self.Tpll) * 1.0e-6
        else:
            drift = 1.0 + (24.69 - 0.109 * self.Tpll) * 1.0e-6
        LOfreq = LOfreq * drift
        self.lofreq = LOfreq
        self.skyfreq = LOfreq + IFfreq
        self.maxsup = LOfreq - IFfreq  # image (maximally suppressed) sideband frequency
        # self.restfreq = self.skyfreq
        # apply Doppler correction
        self.restfreq = self.skyfreq / (1.0 - self.vgeo / 2.99792456e8)
        # self.quality = quality
def getSideBand(rx, LO, ssb):
    """Return ``(IFfreq, sbpath)`` for receiver *rx* at LO frequency *LO* (Hz)
    and SSB tuning value *ssb*.

    ``IFfreq`` is +/-3900 MHz depending on which sideband the tuning selects;
    ``sbpath`` is the sideband path length derived from the SSB setting.
    """
    # Per-receiver tuning constants: (C1, C2, scale factor, half path length).
    SSBparams = {
        '495': (61600.36, 104188.89, 0.0002977862, 313.0),
        '549': (57901.86, 109682.58, 0.0003117128, 313.0),
        '555': (60475.43, 116543.50, 0.0003021341, 308.0),
        '572': (58120.92, 115256.73, 0.0003128605, 314.0)}
    C1, C2, sf, half_path = SSBparams[rx]
    sbpath = (-half_path + 2.0 * half_path * ssb / 4095.0) * 2.0
    # Scan harmonics -2..2 and keep the candidate whose magnitude is
    # closest to the nominal 3.9 GHz IF.
    best = 0.0
    for harmonic in range(-2, 3):
        candidate = 299.79 / (ssb + C1) * (C2 + harmonic / sf) - LO / 1.0e9
        if abs(abs(candidate) - 3.9) < abs(abs(best) - 3.9):
            best = candidate
    # The sign of the winning candidate selects the lower or upper sideband.
    IFfreq = -3900.0e6 if best < 0.0 else 3900.0e6
    return (IFfreq, sbpath)
# if __name__ == "__main__":
# def level1b_importer_window():
class Newer(Level1b_cal):
    """Level1b_cal variant that carries a Ref_fit helper.

    NOTE(review): Level1b_cal and Ref_fit are defined elsewhere in this
    module; presumably Ref_fit provides reference-spectrum interpolation
    (see the commented-out ``interpolate`` below) — confirm against the
    full file.
    """

    def __init__(self, spectra, calstw, con):
        # Delegate all calibration setup to the base class, then attach
        # the reference-fit helper.
        Level1b_cal.__init__(self, spectra, calstw, con)
        self.ref_fit = Ref_fit()

    # def interpolate(self, mstw, m, stw, inttime, start):
    #     return self.ref_fit.interp(mstw, m, stw, inttime, start)
def report_result(con, acfile, info):
    """Record the processing outcome of *acfile* in the database.

    Removes the file's entry from the ``in_process`` table and, unless the
    failure was a database ("pg") problem, replaces any previous row for
    this (file, version) in the ``processed`` table.

    :param con: database connection exposing ``query`` and ``insert``.
    :param acfile: name of the processed AC level 0 file.
    :param info: dict with ``info``, ``total_scans``, ``success_scans``
        and ``version`` keys.
    """
    key = [acfile, info['version']]
    con.query('''delete from in_process
        where file='{0}' and version={1} '''.format(*key))
    if info['info'] == 'pg problem':
        # Database trouble: leave no 'processed' record so the file can be retried.
        return
    outcome_row = {'file': acfile,
                   'info': info['info'],
                   'total_scans': info['total_scans'],
                   'success_scans': info['success_scans'],
                   'version': info['version']}
    con.query('''delete from processed
        where file='{0}' and version={1} '''.format(*key))
    con.insert('processed', outcome_row)
def level1b_importer():
if len(argv) != 5:
print 'error in function call, example usage: bin/ipython level1b_importer acfile AC2(backend) filter'
exit(0)
acfile = argv[1]
backend = argv[2]
ss = int(argv[3])
version = int(argv[4])
con = db()
set_odin_logging()
logger = logging.getLogger('level1b process')
logger.info('processing file {0}'.format(acfile))
print acfile
# find min and max stws from acfile
temp = [acfile, version]
query = con.query('''select min(stw),max(stw)
from ac_level0 where file='{0}' '''.format(*temp))
result1 = query.dictresult()
if result1[0]['max'] is None:
# no data from file imported in ac_level0 table
info = {'info': 'no ac data',
'total_scans': 0,
'success_scans': 0,
'version': version}
report_result(con, acfile, info)
logger.warning(
'no imported level0 ac data found for processing file {0}'.format(acfile))
return
sodakeys = [result1[0]['min'], result1[0]['max']]
sodaquery = con.query('''select soda
from attitude_level0 where stw>{0} and stw<{1}
group by soda'''.format(*sodakeys))
sodaresult = sodaquery.dictresult()
if sodaresult == []:
info = {'info': 'no attitude data',
'total_scans': 0,
'success_scans': 0,
'version': version}
report_result(con, acfile, info)
logger.warning(
'no imported level0 attitude data found for processing file {0}'.format(acfile))
return
soda = sodaresult[0]['soda']
tdiff = 45 * 60 * 16
if ss == 1:
# attitude import
print 'attitude'
print datetime.datetime.now()
error = att_level1_importer(result1[0]['min'] - tdiff,
result1[0]['max'] + tdiff,
sodaresult[0]['soda'], backend)
print datetime.datetime.now()
if error == 1:
info = {'info': 'pg problem',
'total_scans': 0,
'success_scans': 0,
'version': version}
report_result(con, acfile, info)
return
# shk import
print 'shk'
print datetime.datetime.now()
shk_level1_importer(
result1[0]['min'] - tdiff,
result1[0]['max'] + tdiff,
backend)
print datetime.datetime.now()
# ac level1a import
print 'ac'
print datetime.datetime.now()
error = ac_level1a_importer(
result1[0]['min'] - tdiff,
result1[0]['max'] + tdiff,
backend)
print datetime.datetime.now()
if error == 1:
info = {'info': 'pg problem',
'total_scans': 0,
'success_scans': 0,
'version': version}
report_result(con, acfile, info)
return
# find out which scans that starts in the file
if backend == 'AC1':
stwoff = 1
else:
stwoff = 0
temp = [result1[0]['min'], result1[0]['max'], backend, stwoff]
if backend == 'AC1':
query = con.query('''select start,ssb_att from ac_level0
natural join getscansac1({0},{1})
join shk_level1 using(stw,backend)
where start>={0} and start<={1}
and backend='AC1' group by start,ssb_att
order by start'''.format(*temp))
if backend == 'AC2':
query = con.query('''select start,ssb_att from ac_level0
natural join getscansac2({0},{1})
join shk_level1 using(stw,backend)
where start>={0} and start<={1}
and backend='AC2' group by start,ssb_att
order by start'''.format(*temp))
result2 = query.dictresult()
if result2 == []:
info = {'info': 'no scans found in file',
'total_scans': 0,
'success_scans': 0,
'version': version}
report_result(con, acfile, info)
# logger.warning('no scans found for processing file {0}'.format(acfile))
return
success_scans = 0
total_scans = len(result2)
firstscan = result2[0]['start']
lastscan = result2[len(result2) - 1]['start']
temp = [
firstscan - tdiff,
lastscan + tdiff,
backend,
stwoff,
sodaresult[0]['soda']]
# extract all necessary data for processing
if backend == 'AC1':
query = con.query('''(
select ac_level0.stw,start,ssb_att,skybeamhit,cc,
ac_level0.backend,
frontend,sig_type,
| |
#! /usr/bin/env python
"""
Test source_finder.py
"""
__author__ = '<NAME>'
from astropy.io import fits
from AegeanTools import source_finder as sf
from AegeanTools.wcs_helpers import Beam, WCSHelper
from AegeanTools import models, flags
from AegeanTools.models import classify_catalog
from copy import deepcopy
import numpy as np
import logging
import os
logging.basicConfig(format="%(module)s:%(levelname)s %(message)s")
log = logging.getLogger("Aegean")
log.setLevel(logging.INFO)
def test_psf_with_nans():
    """Test that a psf map with nans doesn't create a crash"""
    log = logging.getLogger("Aegean")
    sfinder = sf.SourceFinder(log=log)
    filename = "tests/test_files/synthetic_test.fits"
    psf = "tests/test_files/synthetic_test_psf.fits"
    # create a test psf map with all major axis being nans
    hdu = fits.open(psf)
    print(hdu[0].data.shape)
    hdu[0].data[0, :, :] = np.nan
    hdu.writeto('dlme_psf.fits')
    try:
        found = sfinder.find_sources_in_image(filename,
                                              cores=1, rms=0.5, bkg=0,
                                              imgpsf='dlme_psf.fits')
    except AssertionError as e:
        # Clean up the temporary psf map before deciding how to fail.
        os.remove('dlme_psf.fits')
        if 'major' in e.args[0]:
            # An assertion about the major axis means the nan psf broke the finder.
            raise AssertionError("Broken on psf maps with nans")
        else:
            # Unrelated assertion failure: propagate unchanged.
            raise
    else:
        os.remove('dlme_psf.fits')
    return
def test_misc():
    """Test some random things"""
    # Smoke tests: constructing these objects must not raise.
    sf.IslandFittingData()
    sf.DummyLM()
    # Unknown keyword arguments (ignored=None) should be tolerated.
    sf.SourceFinder(ignored=None, log=log)
def test_helpers():
    """Test the helper functions: fix_shape, pa_limit, theta_limit, get_aux_files."""
    # fix_shape: should swap a/b and rotate pa by 90 degrees, leaving the input alone
    src = sf.ComponentSource()
    src.a = 1
    src.b = 2
    src.pa = 90
    src2 = deepcopy(src)
    sf.fix_shape(src2)
    if not (src.a == src2.b): raise AssertionError()
    if not (src.b == src2.a): raise AssertionError()
    if not (src.pa == src2.pa - 90): raise AssertionError()
    # pa limit
    if not (sf.pa_limit(-180.) == 0.): raise AssertionError()
    if not (sf.pa_limit(95.) == -85.): raise AssertionError()
    # theta limit
    if not (sf.theta_limit(0.) == 0.): raise AssertionError()
    if not (sf.theta_limit(np.pi) == 0.): raise AssertionError()
    if not (sf.theta_limit(-3*np.pi/2) == np.pi/2): raise AssertionError()
    # get_aux: a nonsense filename should yield no auxiliary files.
    # BUG FIX: np.all(<generator>) treats the generator object as a 0-d truthy
    # array and always returns True, so the original check could never fail.
    # Use the builtin all() over the dict's *values* (iterating the dict itself
    # yields its keys, which are never None).
    if not (all(v is None for v in sf.get_aux_files('_$_fkjfjl').values())): raise AssertionError()
    aux_files = sf.get_aux_files('tests/test_files/1904-66_SIN.fits')
    if not (aux_files['rms'] == 'tests/test_files/1904-66_SIN_rms.fits'): raise AssertionError()
    if not (aux_files['bkg'] == 'tests/test_files/1904-66_SIN_bkg.fits'): raise AssertionError()
    if not (aux_files['mask'] == 'tests/test_files/1904-66_SIN.mim'): raise AssertionError()
def dont_test_load_globals():
    """Test load_globals"""
    # NOTE: the dont_ prefix keeps this test out of pytest collection (disabled).
    log = logging.getLogger("Aegean")
    sfinder = sf.SourceFinder(log=log)
    filename = 'tests/test_files/1904-66_SIN.fits'
    aux_files = sf.get_aux_files('tests/test_files/1904-66_SIN.fits')
    sfinder.load_globals(filename)
    if sfinder.global_data.img is None: raise AssertionError()
    del sfinder
    # rms given explicitly, background and mask from aux files
    sfinder = sf.SourceFinder(log=log)
    sfinder.load_globals(filename, bkgin=aux_files['bkg'], rms=1, mask=aux_files['mask'])
    # region isn't available due to healpy not being installed/required
    if sfinder.global_data.region is None: raise AssertionError()
    del sfinder
    # bkg given explicitly
    sfinder = sf.SourceFinder(log=log)
    sfinder.load_globals(filename, bkgin=aux_files['bkg'], bkg=0, mask=aux_files['mask'])
    # region isn't available due to healpy not being installed/required
    if sfinder.global_data.region is None: raise AssertionError()
    del sfinder
    # both rms and bkg given explicitly
    sfinder = sf.SourceFinder(log=log)
    sfinder.load_globals(filename, bkgin=aux_files['bkg'], rms=1, bkg=0, mask=aux_files['mask'])
    # region isn't available due to healpy not being installed/required
    if sfinder.global_data.region is None: raise AssertionError()
    del sfinder
    # an invalid mask ('derp') should leave region unset
    sfinder = sf.SourceFinder(log=log)
    sfinder.load_globals(filename, rmsin=aux_files['rms'], do_curve=False, mask='derp')
    if sfinder.global_data.region is not None: raise AssertionError()
    img = sfinder._load_aux_image(sfinder.global_data.img, filename)
    if img is None: raise AssertionError()
    del sfinder
    # a Region instance may be passed directly as the mask
    sfinder = sf.SourceFinder(log=log)
    aux_files = sf.get_aux_files('tests/test_files/1904-66_SIN.fits')
    from AegeanTools.regions import Region
    sfinder.load_globals(filename, rms=1, mask=Region())
    if sfinder.global_data.region is None: raise AssertionError()
def test_find_and_prior_sources():
    """Test find sources and prior sources"""
    log = logging.getLogger("Aegean")
    sfinder = sf.SourceFinder(log=log)
    filename = 'tests/test_files/synthetic_test.fits'
    # expected counts for this synthetic image
    nsrc = 98
    nisl = 97
    ntot = nsrc + nisl
    # vanilla source finding
    found = sfinder.find_sources_in_image(filename, cores=1, rms=0.5, bkg=0)
    if not (len(found) == nsrc):
        raise AssertionError("Found the wrong number of sources {0}".format(len(found)))
    # source finding but not fitting (max_summits=0)
    found = sfinder.find_sources_in_image(filename, cores=1, max_summits=0, rms=0.5, bkg=0)
    if not (len(found) == nsrc):
        raise AssertionError("Found the wrong number of sources {0}".format(len(found)))
    # now with some options: island fluxes, file output, aux bkg/rms/mask
    aux_files = sf.get_aux_files(filename)
    found2 = sfinder.find_sources_in_image(filename, doislandflux=True, outfile=open('dlme', 'w'), nonegative=False,
                                           rmsin=aux_files['rms'], bkgin=aux_files['bkg'],
                                           mask=aux_files['mask'], cores=1, docov=False)
    if not (len(found2) == ntot):
        raise AssertionError("Found the wrong number of sources {0}".format(len(found2)))
    isle1 = found2[1]
    if not (isle1.int_flux > 0):
        raise AssertionError()
    if not (isle1.max_angular_size > 0):
        raise AssertionError()
    # we should have written some output file
    if not (os.path.exists('dlme')):
        raise AssertionError()
    os.remove('dlme')
    # some more tests, now using multiple cores
    cores = 2
    priorized = sfinder.priorized_fit_islands(filename, catalogue=found, doregroup=False, ratio=1.2, cores=cores,
                                              rmsin=aux_files['rms'], bkgin=aux_files['bkg'], docov=False)
    if not (len(priorized) == nsrc): raise AssertionError("Found the wrong number of sources {0}".format(len(priorized)))
    priorized = sfinder.priorized_fit_islands(filename, catalogue=found, doregroup=True, cores=1,
                                              rmsin=aux_files['rms'], bkgin=aux_files['bkg'], outfile=open('dlme', 'w'), stage=1)
    if not (len(priorized) == nsrc): raise AssertionError("Found the wrong number of sources {0}".format(len(priorized)))
    # an empty catalogue should produce no fits
    if not (len(sfinder.priorized_fit_islands(filename, catalogue=[])) == 0): raise AssertionError()
    # we should have written some output file
    if not (os.path.exists('dlme')): raise AssertionError("Failed to creat outputfile")
    os.remove('dlme')
def dont_test_find_and_prior_parallel():
    """Test find/piroirze with parallel operation"""
    # NOTE: the dont_ prefix keeps this test out of pytest collection (disabled).
    log = logging.getLogger("Aegean")
    cores = 1
    filename = 'tests/test_files/synthetic_test.fits'
    # vanilla source finding
    log.info("basic fitting (no bkg/rms")
    sfinder = sf.SourceFinder(log=log)
    found = sfinder.find_sources_in_image(filename, cores=cores,
                                          bkg=0, rms=0.5)
    if not (len(found) == 98): raise AssertionError('found {0} sources'.format(len(found)))
    # now with some options
    aux_files = sf.get_aux_files(filename)
    del sfinder
    log.info("fitting with supplied bkg/rms and 2 cores")
    cores = 2
    sfinder = sf.SourceFinder(log=log)
    _ = sfinder.find_sources_in_image(filename, doislandflux=True, outfile=open('dlme', 'w'), nonegative=False,
                                      rmsin=aux_files['rms'], bkgin=aux_files['bkg'],
                                      mask=aux_files['mask'], cores=cores)
    log.info('now priorised fitting')
    # `found` from the single-core run above is reused as the prior catalogue
    _ = sfinder.priorized_fit_islands(filename, catalogue=found, doregroup=True, cores=cores, outfile=open('dlme', 'w'))
    os.remove('dlme')
    del sfinder
    log.info('fitting negative sources')
    sfinder = sf.SourceFinder(log=log)
    sfinder.find_sources_in_image('tests/test_files/1904-66_SIN_neg.fits', doislandflux=True, nonegative=False, cores=cores)
def test_save_files():
    """Test that we can save files"""
    log = logging.getLogger("Aegean")
    sfinder = sf.SourceFinder(log=log)
    filename = 'tests/test_files/small.fits'
    sfinder.save_background_files(image_filename=filename, outbase='dlme')
    # background, rms, signal-to-noise and curvature maps should all exist
    for ext in ['bkg', 'rms', 'snr', 'crv']:
        if not (os.path.exists("dlme_{0}.fits".format(ext))): raise AssertionError()
        os.remove("dlme_{0}.fits".format(ext))
def test_save_image():
    """Test save_image"""
    log = logging.getLogger("Aegean")
    sfinder = sf.SourceFinder(log=log)
    filename = 'tests/test_files/small.fits'
    # blank=True blanks fitted sources so the saved image differs from the input
    _ = sfinder.find_sources_in_image(filename, cores=1, max_summits=0, blank=True)
    bfile = 'dlme_blanked.fits'
    sfinder.save_image(bfile)
    if not (os.path.exists(bfile)): raise AssertionError()
    os.remove(bfile)
def test_esimate_lmfit_parinfo():
    """Test estimate_lmfit_parinfo"""
    # NOTE(review): function name has a typo ("esimate") but renaming would
    # change the collected test id, so it is left as-is.
    log = logging.getLogger("Aegean")
    # log.setLevel(logging.DEBUG)
    sfinder = sf.SourceFinder(log=log)
    data = np.zeros(shape=(3, 3))
    rmsimg = np.ones(shape=(3, 3))
    beam = Beam(1, 1, 0)
    # should hit isnegative
    data[1, 1] = -6
    # should hit outerclip is None
    outerclip = None
    # should run error because curve is the wrong shape
    curve = np.zeros((3, 4))
    try:
        sfinder.estimate_lmfit_parinfo(data=data, rmsimg=rmsimg, curve=curve,
                                       beam=beam, innerclip=5, outerclip=outerclip)
    except AssertionError as e:
        # expected path: the shape mismatch must raise
        e.message = 'Passed'
    else:
        raise AssertionError("estimate_lmfit_parinfo should err when curve.shape != data.shape")
    return
def test_island_contours():
    """Test that island contours are correct"""
    log = logging.getLogger("Aegean")
    sfinder = sf.SourceFinder(log=log)
    filename = 'tests/test_files/synthetic_test.fits'
    # expected counts for this synthetic image (sources + islands)
    nsrc = 98
    nisl = 97
    ntot = nsrc + nisl
    # vanilla source finding with island fluxes so contours are computed
    found = sfinder.find_sources_in_image(filename, cores=1, rms=0.5, bkg=0, doislandflux=True)
    components, islands, simples = classify_catalog(found)
    # hand-verified contour (pixel coordinates) for the first island
    isle_0_contour = np.array([(41, 405), (41, 406), (41, 407), (42, 407), (42, 408), (42, 409), (43, 409), (43, 410),
                               (44, 410), (45, 410), (46, 410), (47, 410), (47, 409), (48, 409), (48, 408), (49, 408),
                               (49, 407), (49, 406), (49, 405), (48, 405), (48, 404), (48, 403), (47, 403), (46, 403),
                               (45, 403), (44, 403), (43, 403), (43, 404), (42, 404), (42, 405)])
    if not np.all(np.array(islands[0].contour) == isle_0_contour):
        raise AssertionError("Island contour for island 0 is incoorect")
    return
# for 3.0 functionality
def test_find_islands():
    """Test sf.find_islands island detection, including masked/nan pixels."""
    im = np.ones((10, 12), dtype=np.float32)
    bkg = np.zeros_like(im)
    rms = np.ones_like(im)
    # test with no islands and no logger
    islands = sf.find_islands(im, bkg, rms)
    if len(islands) != 0:
        # BUG FIX: this previously *returned* the AssertionError instead of
        # raising it, so a failure here was silently ignored by pytest.
        raise AssertionError("Found islands where none existed")
    # now set just one island
    im[3:6, 4:7] *= 10
    # and have some pixels masked or below the clipping threshold
    im[6, 5] = np.nan
    im[4, 4] = 0
    # make the border nans
    im[0:3, :] = im[-1:, :] = np.nan
    im[:, 0] = im[:, -1] = np.nan
    islands = sf.find_islands(im, bkg, rms, log=log)
    if len(islands) != 1:
        raise AssertionError("Incorrect number of islands found {0}, expecting 1".format(len(islands)))
    if not isinstance(islands[0], models.PixelIsland):
        raise AssertionError("Islands[0] is not a PixelIsland but instead a {0}".format(type(islands[0])))
    correct_box = [[3, 6], [4, 7]]
    if not np.all(islands[0].bounding_box == correct_box):
        raise AssertionError("Bounding box incorrect, should be {0}, but is {1}".format(correct_box, islands[0].bounding_box))
    # add another island that is between the seed/flood thresholds:
    # it should not be detected on its own
    im[7:9, 2:5] = 4.5
    islands = sf.find_islands(im, bkg, rms, log=log)
    if len(islands) != 1:
        raise AssertionError("Incorrect number of islands found {0}, expecting 1".format(len(islands)))
    return
def test_estimate_parinfo_image():
"""Test"""
log = logging.getLogger("Aegean")
#log.setLevel(logging.DEBUG)
wcshelper = WCSHelper.from_file(filename='tests/test_files/1904-66_SIN.fits')
im = np.zeros(shape=(10, 10), dtype=np.float32) * np.nan
bkg = np.zeros_like(im)
rms = np.ones_like(im)
im[2:5, 2:5] = 6.
im[3,3] = 8.
islands = sf.find_islands(im, bkg, rms, log=log)
sources = sf.estimate_parinfo_image(islands, im=im, rms=rms, wcshelper=wcshelper, log=log)
if len(sources) != 1:
raise AssertionError("Incorrect number of sources found {0}, expecting 1".format(len(sources)))
if not sources[0]['components'].value == 1:
raise AssertionError("Found {0} components, expecting 1".format(sources[0]['components'].value))
if not sources[0]['c0_amp'].value == 8.0:
raise AssertionError("c0_amp is not 8.0 (is {0})".format(sources[0]['c0_amp'].value))
# test on a negative island
im *= -1.
islands = sf.find_islands(im, bkg, rms, log=log)
sources = sf.estimate_parinfo_image(islands, im=im, rms=rms, wcshelper=wcshelper, log=log)
if len(sources) != 1:
raise AssertionError("Incorrect number of sources found {0}, expecting 1".format(len(sources)))
if not sources[0]['components'].value == 1:
raise AssertionError("Found {0} components, expecting 1".format(sources[0]['components'].value))
if not sources[0]['c0_amp'].value == -8.0:
raise AssertionError("c0_amp is not -8.0 (is {0})".format(sources[0]['c0_amp'].value))
# test on a small island
im[:,:] = np.nan
im[2:4,2:4] = 6.
im[3,3] = 8.
islands = sf.find_islands(im, bkg, rms, | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""File for performing Operation Window related operations on given Commcell entity.
OperationWindow and OperationWindowDetails are 2 classes defined in this class.
OperationWindow: Class for performing Operation Window related operations on given Commcell entity.
OperationWindowDetails: Class for modifying an existing operation window
OperationWindow:
===============
__init__() -- Initialize instance of the OperationWindow class
create_operation_window() -- Creates a Operation rule on the given commcell entity
delete_operation_window() -- Deletes a Operation rule on the commcell entity(Using rule_id/name)
list_operation_window() -- Lists all the operation rule associated with given commcell entity
get() -- Returns instance of OperationWindowDetails class(Using rule_id/name)
OperationWindowDetails:
======================
__init__() -- Initialize instance of OperationWindowDetails class
modify_operation_window() -- Modifies a Operation window
_refresh() -- Refreshes the properties of a rule
_get_rule_properties() -- Assigns the properties of an operation by getting the rule using rule id
OperationWindowDetails Instance Attributes:
==========================================
**name** -- Returns/Modifies the name of the operation window
**start_date** -- Returns/Modifies the start date of the operation window
**end_date** -- Returns/Modifies the end date of the operation window
**operations** -- Returns/Modifies the operations of the operation window
**day_of_week** -- Returns/Modifies the day of week of the operation window
**start_time** -- Returns/Modifies the start time of the operation window
**end_time** -- Returns/Modifies the end time of the operation window
**rule_id** -- Returns rule id of the operation window
**commcell_id** -- Returns commcell id of the entity object
**clientgroup_id** -- Returns client group id of the entity object
**client_id** -- Returns client id of the entity object
**agent_id** -- Returns agent id of the entity object
**instance_id** -- Returns instance id of the entity object
**backupset_id** -- Returns backupset id of the entity object
**subclient_id** -- Returns subclient id of the entity object
**entity_level** -- Returns entity level of the entity object
Example with client entity:
from cvpysdk.commcell import Commcell
commcell = Commcell(<CS>, username, password)
client = commcell.clients.get(<client Name>)
from cvpysdk.operation_window import OperationWindow
client_operation_window = OperationWindow(client)
client_operation_window.list_operation_window()
client_operation_window_details = client_operation_window.create_operation_window(name="operation
window example on clientLevel")
client_operation_window.delete_operation_window(rule_id=client_operation_window_details.rule_id)
client_operation_window_details = client_operation_window.get(rule_id=client_operation_window_details.rule_id)
client_operation_window_details.modify_operation_window(name="Modified operation window example on clientLevel")
Example for modifying a rule:
client_operation_window = OperationWindow(client)
rules = client_operation_window.list_operation_window()
ruleId = rules[0]['ruleId']
client_operation_window_details = OperationWindowDetails(client, ruleId, client_operation_window.entity_details)
# You can use get(OperationWindow) method to modify a rule too.
client_operation_window_details.modify_operation_window(name="Modified operation window example on clientLevel")
"""
from __future__ import absolute_import
import time
from datetime import timedelta
from .exception import SDKException
from .clientgroup import ClientGroup
from .client import Client
from .agent import Agent
from .instance import Instance
from .backupset import Backupset
from .subclient import Subclient
# Day-of-week names indexed by the numeric value the Operation Window
# REST API expects (0 == sunday ... 6 == saturday).
DAY_OF_WEEK_MAPPING = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
# Operation-type name -> bitmask value (powers of two) used in the
# Operation Window payload; several operations may be combined in one rule.
OPERATION_MAPPING = {"FULL_DATA_MANAGEMENT": 1,
                     "NON_FULL_DATA_MANAGEMENT": 2,
                     "SYNTHETIC_FULL": 4,
                     "DATA_RECOVERY": 8,
                     "AUX_COPY": 16,
                     "DR_BACKUP": 32,
                     "DATA_VERIFICATION": 64,
                     "ERASE_SPARE_MEDIA": 128,
                     "SHELF_MANAGEMENT": 256,
                     "DELETE_DATA_BY_BROWSING": 512,
                     "DELETE_ARCHIVED_DATA": 1024,
                     "OFFLINE_CONTENT_INDEXING": 2048,
                     "ONLINE_CONTENT_INDEXING": 4096,
                     "SRM": 8192,
                     "INFORMATION_MANAGEMENT": 16384,
                     "MEDIA_REFRESHING": 32768,
                     "DATA_ANALYTICS": 65536,
                     "DATA_PRUNING": 131072,
                     "BACKUP_COPY": 262144,
                     "CLEANUP_OPERATION": 524288}
class OperationWindow:
"""Class for representing all operation window related operations"""
def __init__(self, generic_entity_obj):
    """Initialize the OperationWindow class instance for
    performing Operation Window related operations.

    Args:
        generic_entity_obj (object) -- Commcell entity object
            Expected value : commcell/Client/Agent/Instance/BackupSet/Subclient/Clientgroup Instance

    Returns:
        object - instance of the OperationWindow class

    Raises:
        Exception:
            If invalid instance is passed
    """
    # imports inside the __init__ method definition to avoid cyclic imports
    from .commcell import Commcell
    # A Commcell is its own commcell reference; every other entity object
    # carries one in _commcell_object.
    if isinstance(generic_entity_obj, Commcell):
        self._commcell_object = generic_entity_obj
    else:
        self._commcell_object = generic_entity_obj._commcell_object
    self._commcell_services = self._commcell_object._services
    self._operation_window = self._commcell_services['OPERATION_WINDOW']
    self._list_operation_window = self._commcell_services['LIST_OPERATION_WINDOW']
    self._cvpysdk_object = self._commcell_object._cvpysdk_object
    self._update_response = self._commcell_object._update_response_
    # All ids default to 0 ("not applicable"); the isinstance chain below
    # fills in only the ids that exist at the supplied entity's level.
    self.clientgroup_id = 0
    self.client_id = 0
    self.agent_id = 0
    self.instance_id = 0
    self.backupset_id = 0
    self.subclient_id = 0
    self.entity_type = ''
    self.entity_id = ''
    self.entity_details = dict()
    self.generic_entity_obj = generic_entity_obj
    # we will derive all the entity id's based on the input entity type
    if isinstance(generic_entity_obj, Commcell):
        self.entity_details["entity_level"] = "commserv"
    elif isinstance(generic_entity_obj, ClientGroup):
        self.clientgroup_id = generic_entity_obj.clientgroup_id
        self.entity_type = "clientgroupId"
        self.entity_id = self.clientgroup_id
        # [:-2] strips the trailing "Id" -> entity level name ("clientgroup")
        self.entity_details["entity_level"] = self.entity_type[:-2]
    elif isinstance(generic_entity_obj, Client):
        self.client_id = generic_entity_obj.client_id
        self.entity_type = "clientId"
        self.entity_id = self.client_id
        self.entity_details["entity_level"] = self.entity_type[:-2]
    elif isinstance(generic_entity_obj, Agent):
        # Agents also record their parent client id for the payload below.
        self.client_id = generic_entity_obj._client_object.client_id
        self.agent_id = generic_entity_obj.agent_id
        self.entity_type = "applicationId"
        self.entity_id = self.agent_id
        self.entity_details["entity_level"] = "agent"
    elif isinstance(generic_entity_obj, Instance):
        self.client_id = generic_entity_obj._agent_object._client_object.client_id
        self.agent_id = generic_entity_obj._agent_object.agent_id
        self.instance_id = generic_entity_obj.instance_id
        self.entity_type = "instanceId"
        self.entity_id = self.instance_id
        self.entity_details["entity_level"] = self.entity_type[:-2]
    elif isinstance(generic_entity_obj, Backupset):
        # Walk up the object chain to collect every ancestor id.
        self.client_id = generic_entity_obj._instance_object._agent_object. \
            _client_object.client_id
        self.agent_id = generic_entity_obj._instance_object._agent_object.agent_id
        self.instance_id = generic_entity_obj._instance_object.instance_id
        self.backupset_id = generic_entity_obj.backupset_id
        self.entity_type = "backupsetId"
        self.entity_id = self.backupset_id
        self.entity_details["entity_level"] = self.entity_type[:-2]
    elif isinstance(generic_entity_obj, Subclient):
        self.client_id = generic_entity_obj._backupset_object._instance_object. \
            _agent_object._client_object.client_id
        self.agent_id = generic_entity_obj._backupset_object. \
            _instance_object._agent_object.agent_id
        self.instance_id = generic_entity_obj._backupset_object._instance_object.instance_id
        self.backupset_id = generic_entity_obj._backupset_object.backupset_id
        self.subclient_id = generic_entity_obj.subclient_id
        self.entity_type = "subclientId"
        self.entity_id = self.subclient_id
        self.entity_details["entity_level"] = self.entity_type[:-2]
    else:
        raise SDKException('Response', '101', "Invalid instance passed")
    self.entity_details.update({"clientGroupId": self.clientgroup_id,
                                "clientId": self.client_id,
                                "applicationId": self.agent_id,
                                "instanceId": self.instance_id,
                                "backupsetId": self.backupset_id,
                                "subclientId": self.subclient_id})
    # append the entity type and entity id to end of list operation window REST API.
    # For commcell it will empty string
    # NOTE(review): for a Commcell this produces a trailing "?=" in the URL —
    # presumably accepted by the service; confirm against the REST endpoint.
    self.connect_string = self._list_operation_window.split('?')[0] + '?' + self.entity_type + "=" + self.entity_id
def create_operation_window(
        self,
        name,
        start_date=None,
        end_date=None,
        operations=None,
        day_of_week=None,
        start_time=None,
        end_time=None):
    """ Creates operation rule on the initialized commcell entity

    Args:
        name        (str)  -- Name of the Operation rule

        start_date  (int)  -- The start date for the operation rule.
            Valid values are UNIX-style timestamps (seconds since January 1, 1970).
            default - current date

        end_date    (int)  -- The end date for the operation rule.
            Valid values are UNIX-style timestamps (seconds since January 1, 1970).
            default - 365 days

        operations  (list) -- List of operations for which the operation
            window is created.  Case-insensitive.
            Acceptable Values:
                FULL_DATA_MANAGEMENT/NON_FULL_DATA_MANAGEMENT/SYNTHETIC_FULL/
                DATA_RECOVERY/AUX_COPY/DR_BACKUP/DATA_VERIFICATION/ERASE_SPARE_MEDIA/
                SHELF_MANAGEMENT/DELETE_DATA_BY_BROWSING/DELETE_ARCHIVED_DATA/
                OFFLINE_CONTENT_INDEXING/ONLINE_CONTENT_INDEXING/SRM/INFORMATION_MANAGEMENT/
                MEDIA_REFRESHING/DATA_ANALYTICS/DATA_PRUNING/BACKUP_COPY/CLEANUP_OPERATION

        day_of_week (list) -- List of days of the week on which the operation rule applies to
            Acceptable Values:
                sunday/ monday/ tuesday/ wednesday/ thursday/ friday/ saturday
            default - Weekdays (monday through friday)

        start_time  (int)  -- The start time for the "do not run" interval,
            in seconds since midnight.
            default - 28800 (8 AM)

        end_time    (int)  -- The end time for the "do not run" interval,
            in seconds since midnight.
            default - 64800 (6 PM)

    Returns:
        Returns the instance of created Operation window details

    Raises:
        SDKException:
            if the Operation window could not be created

            if response is empty

            if response is not success
    """
    if start_date is None:
        start_date = int(time.time())
    if end_date is None:
        end_date = int(time.time()) + int(timedelta(days=365).total_seconds())
    if start_time is None:
        start_time = int(timedelta(hours=8).total_seconds())
    if end_time is None:
        # 18:00 expressed as seconds since midnight (64800)
        end_time = int(timedelta(hours=18).total_seconds())
    operations_list = []
    if operations is None:
        operations_list = [OPERATION_MAPPING["FULL_DATA_MANAGEMENT"]]
    else:
        for operation in operations:
            # BUG FIX: validate the upper-cased name, matching the lookup
            # below; previously a lower-case operation name was rejected
            # even though the mapping lookup would have accepted it.
            if operation.upper() not in OPERATION_MAPPING:
                response_string = "Invalid input %s for operation is passed" % operation
                raise SDKException('OperationWindow', '102', response_string)
            operations_list.append(OPERATION_MAPPING[operation.upper()])
    day_of_week_list = []
    if day_of_week is None:
        day_of_week_list = [1, 2, 3, 4, 5]  # defaults to weekdays
    else:
        for day in day_of_week:
            if day.lower() not in DAY_OF_WEEK_MAPPING:
                response_string = "Invalid input value %s for day_of_week" % day
                raise SDKException('OperationWindow', '102', response_string)
            day_of_week_list.append(DAY_OF_WEEK_MAPPING.index(day.lower()))
    payload = {
        "operationWindow": {
            "ruleEnabled": True,
            "startDate": start_date,
            "endDate": end_date,
            "name": name,
            "operations": operations_list,
            "dayTime": [{
                "startTime": start_time,
                "endTime": end_time,
                "dayOfWeek": day_of_week_list
            }]
        },
        "entity": {
            "clientGroupId": int(self.clientgroup_id),
            "clientId": int(self.client_id),
            "applicationId": int(self.agent_id),
            "instanceId": int(self.instance_id),
            "backupsetId": int(self.backupset_id),
            "subclientId": int(self.subclient_id)
        }
    }
    flag, response = self._cvpysdk_object.make_request(
        'POST', self._operation_window, payload=payload)
    if flag:
        response_json = response.json()
        if response_json:
            error_code = response_json.get("error", {}).get('errorCode')
            # BUG FIX: guard against a missing errorCode — int(None) used to
            # raise TypeError instead of a meaningful SDKException.
            if error_code is not None and int(error_code) == 0:
                return self.get(rule_id=int(response_json.get('operationWindow', {}).get('ruleId')))
            raise SDKException('OperationWindow', '101')
        raise SDKException('Response', '102')
    response_string = self._update_response(response.text)
    raise SDKException('Response', '102', response_string)
def delete_operation_window(self, rule_id=None, name=None):
"""Deletes the operation rule associated with given rule Id/Name.
Args:
rule_id (int) -- Rule Id of the operation window
name (str) -- Name of the operation window
Raises:
SDKException:
if the Operation window could not be deleted
if response is empty
if response is not success
"""
| |
__builtin__.property(_get_wait)
hardware_interrupt = __builtin__.property(_get_hardware_interrupt)
software_interrupt = __builtin__.property(_get_software_interrupt)
_pyangbind_elements = OrderedDict([('index', index), ('total', total), ('user', user), ('kernel', kernel), ('nice', nice), ('idle', idle), ('wait', wait), ('hardware_interrupt', hardware_interrupt), ('software_interrupt', software_interrupt), ])
from . import total
from . import user
from . import kernel
from . import nice
from . import idle
from . import wait
from . import hardware_interrupt
from . import software_interrupt
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-system - based on the path /system/cpus/cpu/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state data for the system CPU(s)
"""
__slots__ = ('_path_helper', '_extmethods', '__index','__total','__user','__kernel','__nice','__idle','__wait','__hardware_interrupt','__software_interrupt',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
    """Build the generated /system/cpus/cpu/state container.

    Accepts at most one positional argument: an object carrying the same
    pyangbind elements, whose changed values are copied into this instance.
    """
    # Resolve the XPath helper: explicit kwarg wins, then the parent's
    # helper, otherwise path registration is disabled.
    helper = kwargs.pop("path_helper", None)
    if helper is False:
        self._path_helper = False
    elif helper is not None and isinstance(helper, xpathhelper.YANGPathHelper):
        self._path_helper = helper
    elif hasattr(self, "_parent"):
        helper = getattr(self._parent, "_path_helper", False)
        self._path_helper = helper
    else:
        self._path_helper = False
    self._extmethods = False
    # Instantiate each leaf/container with its schema-derived default wrapper.
    self.__index = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ALL': {}},),RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32),], is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='union', is_config=False)
    self.__total = YANGDynClass(base=total.total, is_container='container', yang_name="total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    self.__user = YANGDynClass(base=user.user, is_container='container', yang_name="user", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    self.__kernel = YANGDynClass(base=kernel.kernel, is_container='container', yang_name="kernel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    self.__nice = YANGDynClass(base=nice.nice, is_container='container', yang_name="nice", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    self.__idle = YANGDynClass(base=idle.idle, is_container='container', yang_name="idle", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    self.__wait = YANGDynClass(base=wait.wait, is_container='container', yang_name="wait", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    self.__hardware_interrupt = YANGDynClass(base=hardware_interrupt.hardware_interrupt, is_container='container', yang_name="hardware-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    self.__software_interrupt = YANGDynClass(base=software_interrupt.software_interrupt, is_container='container', yang_name="software-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    load = kwargs.pop("load", None)
    if args:
        if len(args) > 1:
            raise TypeError("cannot create a YANG container with >1 argument")
        # The source object must expose every element this container defines.
        all_attr = True
        for e in self._pyangbind_elements:
            if not hasattr(args[0], e):
                all_attr = False
                break
        if not all_attr:
            raise ValueError("Supplied object did not have the correct attributes")
        # Copy only elements that were actually changed on the source object.
        for e in self._pyangbind_elements:
            nobj = getattr(args[0], e)
            if nobj._changed() is False:
                continue
            setmethod = getattr(self, "_set_%s" % e)
            if load is None:
                setmethod(getattr(args[0], e))
            else:
                setmethod(getattr(args[0], e), load=load)
def _path(self):
    """Return this node's schema path as a list of element names."""
    # Detached instances fall back to the fixed openconfig-system location;
    # attached ones extend their parent's path.
    if not hasattr(self, "_parent"):
        return ['system', 'cpus', 'cpu', 'state']
    return self._parent._path() + [self._yang_name]
def _get_index(self):
    """
    Getter method for index, mapped from YANG variable /system/cpus/cpu/state/index (union)
    YANG Description: The CPU index for each processor core on the system. On a
    single-core system, the index should be zero. The ALL
    index signifies an aggregation of the CPU utilization
    statistics over all cores in the system.
    """
    index_leaf = self.__index
    return index_leaf
def _set_index(self, v, load=False):
    """
    Setter method for index, mapped from YANG variable /system/cpus/cpu/state/index (union)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_index is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_index() directly.
    YANG Description: The CPU index for each processor core on the system. On a
    single-core system, the index should be zero. The ALL
    index signifies an aggregation of the CPU utilization
    statistics over all cores in the system.
    """
    # Values handed over from another generated binding carry a _utype hook
    # that first converts them to their native union representation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=[RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ALL': {}},),RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32),], is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='union', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """index must be of a type compatible with union""",
            'defined-type': "openconfig-system:union",
            'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ALL': {}},),RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32),], is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='union', is_config=False)""",
        })
    self.__index = t
    # Trigger the parent's change hook when one is registered.
    if hasattr(self, '_set'):
        self._set()
def _unset_index(self):
    # Reset the index leaf to a fresh, unset schema-default wrapper.
    self.__index = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ALL': {}},),RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32),], is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='union', is_config=False)
def _get_total(self):
    """
    Getter method for total, mapped from YANG variable /system/cpus/cpu/state/total (container)
    YANG Description: Total CPU utilization.
    """
    container = self.__total
    return container
def _set_total(self, v, load=False):
    """
    Setter method for total, mapped from YANG variable /system/cpus/cpu/state/total (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_total is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_total() directly.
    YANG Description: Total CPU utilization.
    """
    # Unwrap values handed over from another generated binding.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=total.total, is_container='container', yang_name="total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """total must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=total.total, is_container='container', yang_name="total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
        })
    self.__total = t
    # Trigger the parent's change hook when one is registered.
    if hasattr(self, '_set'):
        self._set()
def _unset_total(self):
    # Reset the total container to a fresh, unset schema-default wrapper.
    self.__total = YANGDynClass(base=total.total, is_container='container', yang_name="total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_user(self):
    """
    Getter method for user, mapped from YANG variable /system/cpus/cpu/state/user (container)
    YANG Description: Percentage of CPU time spent running in user space.
    """
    container = self.__user
    return container
def _set_user(self, v, load=False):
    """
    Setter method for user, mapped from YANG variable /system/cpus/cpu/state/user (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_user is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_user() directly.
    YANG Description: Percentage of CPU time spent running in user space.
    """
    # Unwrap values handed over from another generated binding.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=user.user, is_container='container', yang_name="user", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """user must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=user.user, is_container='container', yang_name="user", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
        })
    self.__user = t
    # Trigger the parent's change hook when one is registered.
    if hasattr(self, '_set'):
        self._set()
def _unset_user(self):
    # Reset the user container to a fresh, unset schema-default wrapper.
    self.__user = YANGDynClass(base=user.user, is_container='container', yang_name="user", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_kernel(self):
    """
    Getter method for kernel, mapped from YANG variable /system/cpus/cpu/state/kernel (container)
    YANG Description: Percentage of CPU time spent running in kernel space.
    """
    container = self.__kernel
    return container
def _set_kernel(self, v, load=False):
    """
    Setter method for kernel, mapped from YANG variable /system/cpus/cpu/state/kernel (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_kernel is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_kernel() directly.
    YANG Description: Percentage of CPU time spent running in kernel space.
    """
    # Unwrap values handed over from another generated binding.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=kernel.kernel, is_container='container', yang_name="kernel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """kernel must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=kernel.kernel, is_container='container', yang_name="kernel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
        })
    self.__kernel = t
    # Trigger the parent's change hook when one is registered.
    if hasattr(self, '_set'):
        self._set()
def _unset_kernel(self):
    # Reset the kernel container to a fresh, unset schema-default wrapper.
    self.__kernel = YANGDynClass(base=kernel.kernel, is_container='container', yang_name="kernel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_nice(self):
    """
    Getter method for nice, mapped from YANG variable /system/cpus/cpu/state/nice (container)
    YANG Description: Percentage of CPU time spent running low-priority (niced)
    user processes.
    """
    container = self.__nice
    return container
def _set_nice(self, v, load=False):
    """
    Setter method for nice, mapped from YANG variable /system/cpus/cpu/state/nice (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_nice is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_nice() directly.
    YANG Description: Percentage of CPU time spent running low-priority (niced)
    user processes.
    """
    # Unwrap values handed over from another generated binding.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=nice.nice, is_container='container', yang_name="nice", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """nice must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=nice.nice, is_container='container', yang_name="nice", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
        })
    self.__nice = t
    # Trigger the parent's change hook when one is registered.
    if hasattr(self, '_set'):
        self._set()
def _unset_nice(self):
    # Reset the nice container to a fresh, unset schema-default wrapper.
    self.__nice = YANGDynClass(base=nice.nice, is_container='container', yang_name="nice", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_idle(self):
    """
    Getter method for idle, mapped from YANG variable /system/cpus/cpu/state/idle (container)
    YANG Description: Percentage of CPU time spent idle.
    """
    container = self.__idle
    return container
def _set_idle(self, v, load=False):
"""
Setter method for idle, mapped from YANG variable | |
# -*- coding: utf-8 -*-
import asyncio
from .api import (
default_api_dict, API, KINDS, KIND_VALS, KIND_KEYS,
OFFIC, CHI, STAR, OFFICS, UNOFFICS,
)
from .api_toolkit import rearrange_params, _rearrange_args
from .base_classes import AsyncInitObject, AsyncWith, SyncWith
from .cache_utils import iscorofunc
from .sessions import AsyncSession, SyncSession
from configparser import ConfigParser
from functools import update_wrapper
from types import TracebackType
from typing import (
Any,
Callable,
Coroutine,
Dict,
Generator,
Iterable,
Generic,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from .typedefs import (STRS, JSONSEQ, JSONS, HANDLER,
NUMBER, INTSTR, BOOLS, STRDICT, AKW)
import time
# Public names exported by this module.
__all__ = (
    "AsyncClient",
    "SyncClient",
    "offic_gets_handler",
    "star_gets_handler",
    "gets_handler")
# Session modes: "collect" queues requests, "release" fires the queued
# batch, "default" sends each request immediately.
COLLECT = "collect"
RELEASE = "release"
DEFAULT = "default"
def offic_gets_handler(data_list: JSONSEQ) -> JSONSEQ:
    """Unwrap official-API envelopes.

    Each response whose "items" key holds a list is replaced by that list;
    every other response is kept as-is.
    """
    unwrapped = []
    for response in data_list:
        items = response.get("items")
        has_item_list = items is not None and isinstance(items, list)
        unwrapped.append(items if has_item_list else response)
    return unwrapped
def star_gets_handler(data_list: JSONSEQ) -> JSONSEQ:
    """Normalise StarList-API responses.

    Drops each response's "status" key (mutating the caller's dicts), then
    unwraps responses left with exactly one value; others pass through.
    """
    handled = []
    for payload in data_list:
        payload.pop("status", None)  # NOTE: mutates the input mapping
        if len(payload) == 1:
            handled.extend(payload.values())
        else:
            handled.append(payload)
    return handled
def gets_handler(self, data_list: JSONSEQ) -> JSONSEQ:
    """Dispatch raw responses to the post-processor for the current API,
    then unwrap a lone result when the client is configured to return units.
    """
    api_name = self._current_api
    if api_name in OFFICS:
        processed = offic_gets_handler(data_list)
    else:
        processed = star_gets_handler(data_list) if api_name == STAR else data_list
    if self._return_unit and len(processed) == 1:
        return processed[0]
    return processed
def _find_save(self, kind: str, match: INTSTR,
               parameter: str = None) -> Optional[JSONS]:
    """Look up a cached record in ``self._saves[kind]``.

    An int *match* is treated as a (possibly negative) list index; a str
    *match* is upper-cased and searched among each record's values, or
    compared against ``record[parameter]`` when *parameter* is given.
    Returns None when nothing matches.
    """
    records = self._saves[kind]
    size = len(records)
    if isinstance(match, int):
        if -size <= match < size:
            return records[match]
    elif isinstance(match, str):
        needle = match.upper()
        for record in records:
            if parameter is None:
                if needle in record.values():
                    return record
            elif record.get(parameter) == needle:
                return record
    return None  # returns explicitly
def _rankings(self, kind: str, api: str,
              key: Optional[INTSTR] = None,
              code: str = "global",
              limit: INTSTR = 200) -> JSONS:
    # NOTE(review): despite the JSONS annotation this returns an
    # (args, kwargs) pair for _fetchs — the annotation looks stale.
    # Normalise a short kind key (e.g. "b", "ps") to its canonical name.
    if kind in KIND_KEYS:
        kind = KINDS[kind]
    if kind == KINDS["b"]:
        # Brawler rankings need a concrete brawler id: translate the cached
        # brawler record matching *key* into its numeric id.
        if key is None:
            raise ValueError(
                "If the kind is b or brawlers, the key must be entered")
        brawler = self.find_save("b", key)
        if brawler is not None:
            key = brawler["id"]
    elif kind == KINDS["ps"]:
        # Power-play seasons default to the most recent cached season (-1).
        if key is None:
            key = -1
        powerplay = self.find_save("ps", key)
        if powerplay is not None:
            key = powerplay["id"]
    # Remaining kinds take an empty id segment when no key was supplied.
    if key is None:
        key = ""
    return ("rankings",), {"code": code, "kind": kind,
                           "id": key, "limit": limit}
def get_and_apply_api_keys(filename: str, section: str,
                           api_dict: Dict[str, API]) -> None:
    """Read ``<name>_api_key`` entries from *section* of an INI-style config
    file and install each key on its API object via ``set_api_key``.

    All official-API variants share the single key stored under the OFFIC
    name. ``.env`` files are rejected.
    """
    if filename.endswith(".env"):
        raise ValueError("this file extension is not accepted")
    parser = ConfigParser()
    parser.read(filename)
    section_cfg = parser[section]
    for name, api in api_dict.items():
        key_name = OFFIC if name in OFFICS else name
        api.set_api_key(section_cfg.get(key_name + "_api_key"))
class AsyncClient(AsyncInitObject, AsyncWith):
_gets_handler = gets_handler
async def __init__(
self, # api_keys: Union[str, STRDICT],
config_file_name: str = "config.ini",
section: str = "DEFAULT",
api_dict: Dict[str, API] = {},
default_api: str = OFFIC,
return_unit: bool = True,
min_update_time: NUMBER = 60 * 10,
data_handler: HANDLER = gets_handler,
trust_env: bool = True,
cache_ttl: NUMBER = 60,
cache_limit: int = 1024,
use_cache: bool = True,
timeout: NUMBER = 30,
repeat_failed: int = 3) -> None:
self.session = await AsyncSession(
trust_env=trust_env, cache_ttl=cache_ttl,
cache_limit=cache_limit, use_cache=use_cache,
timeout=timeout, repeat_failed=repeat_failed)
self.api_dict = {**default_api_dict, **api_dict}
get_and_apply_api_keys(config_file_name, section, self.api_dict)
self._current_api = self._default_api = default_api
self._return_unit = return_unit
self._gets_handler = data_handler
self._requests = []
self._mode = DEFAULT
self._saves = {}
self._min_update_time = min_update_time
await self.update_saves(True)
async def close(self) -> None:
"""Close session"""
await self.session.close()
@property
def closed(self) -> bool:
"""Is client session closed.
A readonly property.
"""
return self.session.closed
async def _gets(self, *args) -> JSONSEQ:
# not_collect =
resps = await self.session.gets(*args)
if self.session.mode != COLLECT:
return self._gets_handler(resps)
if self.session.mode == RELEASE:
return resps # None
def _get_api(self, api: str):
return self.api_dict[api]
async def _fetchs(self, paths: STRS, api_names: str,
from_json: BOOLS = True, rearrange: bool = True,
**kwargs) -> JSONS:
if rearrange:
urls = []
headers = []
pars = rearrange_params(api_names, paths, **kwargs)
for (api_name, *a), kw in pars:
api = self._get_api(api_name)
urls.append(api.make_url(*a, **kw))
headers.append(
(api.headers)) # self.session.headers_handler
else:
api = self._get_api(api_names)
urls = api.make_url(paths, **kwargs)
headers = self.session.headers_handler(api.headers)
return await self._gets(urls, from_json, headers)
def collect(self):
self.session.collect()
async def release(self):
return self._gets_handler(await self.session.release())
# @add_api_name(None)
async def test_fetch(self, *args, **kwargs):
return await self._fetchs(*args, **kwargs)
async def players(self, tag: str, api: str = OFFIC) -> JSONS:
return await self._fetchs("players", api, tag=tag)
async def battlelog(self, tag: str, api: str = OFFIC) -> JSONS:
return await self._fetchs("battlelog", api, tag=tag)
async def clubs(self, tag: str, api: str = OFFIC) -> JSONS:
return await self._fetchs("clubs", api, tag=tag)
    async def members(self, tag: str, limit: INTSTR = 100,
                      api: str = OFFIC) -> JSONS:
        """Fetch up to *limit* entries from the "members" endpoint for *tag*."""
        return await self._fetchs("members", api, tag=tag, limit=limit)
    async def rankings(self, kind: str,
                       key: Optional[INTSTR] = None,
                       code: str = "global",
                       limit: INTSTR = 200,
                       api: str = OFFIC) -> JSONS:
        """Fetch ranking lists, one request per parameter combination.

        Requests are queued in collect mode and fired together on release.
        """
        pars = rearrange_params(kind, api, key=key, code=code, limit=limit)
        self.collect()
        for args, kwargs in pars:
            a, kw = _rankings(self, *args, **kwargs)
            await self._fetchs(*a, rearrange=False, **kw)
        return await self.release()
    async def brawlers(self, id: INTSTR = "", limit: Optional[INTSTR] = None,
                       api: str = OFFIC) -> JSONS:
        """Fetch the "brawlers" endpoint (all brawlers, or one by *id*)."""
        return await self._fetchs("brawlers", api, id=id, limit=limit)
    async def powerplay(self, code: str = "global", limit: int = 200,
                        api: str = OFFIC) -> JSONS:
        """Fetch power-play rankings (the "rankings" endpoint, kind "ps")."""
        return await self._fetchs("rankings", api, code=code, limit=limit,
                                  kind=KINDS["ps"])
    async def events(self, api: str = STAR) -> JSONS:
        """Fetch the "events" endpoint (defaults to the STAR api)."""
        return await self._fetchs("events", api)
    async def icons(self, api: str = STAR) -> JSONS:
        """Fetch the "icons" endpoint (defaults to the STAR api)."""
        return await self._fetchs("icons", api)
    async def maps(self, id: INTSTR = "", api: str = STAR) -> JSONS:
        """Fetch the "maps" endpoint, optionally for a single map *id*."""
        return await self._fetchs("maps", api, id=id)
    async def gamemodes(self, api: str = STAR) -> JSONS:
        """Fetch the "gamemodes" endpoint (defaults to the STAR api)."""
        return await self._fetchs("gamemodes", api)
    async def clublog(self, tag: str, api: str = STAR) -> JSONS:
        """Fetch the "clublog" endpoint for *tag*."""
        return await self._fetchs("clublog", api, tag=tag)
    async def translations(self, code: str = "", api: str = STAR) -> JSONS:
        """Fetch the "translations" endpoint for language *code*."""
        return await self._fetchs("translations", api, code=code)
# TODO: api rearrange
    async def update_saves(self, now: bool = False, api: str = OFFIC) -> None:
        """Refresh the cached "b" (brawlers) and "ps" (power play) data.

        Runs only when *now* is true or at least ``_min_update_time``
        seconds passed since the previous refresh; the two requests are
        batched via collect()/release().
        """
        # NOTE: on the very first call (now=True) the ``or`` short-circuits
        # before self._last_update is read.
        if now or time.time() - self._last_update >= self._min_update_time:
            self.collect()
            await self.brawlers(api=api)
            await self.powerplay(api=api)
            b, ps = await self.release()
            self._saves.update({"b": b, "ps": ps})
            self._last_update = time.time()
find_save = _find_save
class SyncClient(SyncWith):
def __init__(
self, api_keys: Union[str, STRDICT],
api_dict: Dict[str, API] = {},
# default_api: str = OFFIC,
return_unit: bool = True,
min_update_time: NUMBER = 60 * 10,
data_handler: HANDLER = gets_handler,
trust_env: bool = True,
cache_ttl: NUMBER = 60,
cache_limit: int = 1024,
use_cache: bool = True,
timeout: NUMBER = 30,
repeat_failed: int = 3) -> None:
self.session = SyncSession(
trust_env=trust_env, cache_ttl=cache_ttl,
cache_limit=cache_limit, use_cache=use_cache,
timeout=timeout, repeat_failed=repeat_failed
)
self.api_dict = {**default_api_dict, **api_dict}
# self._current_api = self._default_api = default_api
if isinstance(api_keys, str):
self.api_dict[default_api].set_api_key(api_keys)
else:
for name, api_key in api_keys.items():
self.api_dict[name].set_api_key(api_key)
self._return_unit = return_unit
self._gets_handler = data_handler
self._saves = {}
self._min_update_time = min_update_time
self.update_saves(True)
    def close(self) -> None:
        """Close the underlying HTTP session."""
        self.session.close()
    @property
    def closed(self) -> bool:
        """bool: whether the underlying session is closed (read-only)."""
        return self.session.closed
    def _gets(self, *args: Any, **kwargs: Any) -> JSONSEQ:
        """Perform requests via the session and post-process the responses.

        NOTE(review): the handler is invoked as ``handler(self, resps)``
        here, while the async client calls ``handler(resps)`` — confirm
        which signature ``gets_handler`` actually expects.
        """
        resps = self.session.gets(*args, **kwargs)
        return self._gets_handler(self, resps)
    def _get_api(self):
        """Return the currently selected API object, falling back to default.

        NOTE(review): as written, ``__init__`` never assigns
        ``self._current_api``/``self._default_api`` (the assignment is
        commented out), so this raises AttributeError — confirm the
        intended initialization.
        """
        if self._current_api is None:
            self._current_api = self._default_api
        return self.api_dict[self._current_api]
    def _fetch(self, path: str, from_json: bool = True,
               **kwargs: Any) -> JSONS:
        """Fetch a single *path* from the current API."""
        api = self._get_api()
        return self._gets(
            api.get(path, **kwargs), headers=api.headers, from_json=from_json)
    def _fetchs(self, paths: Union[STRS, AKW], from_json: BOOLS = True,
                rearrange: bool = True, **kwargs: Any) -> JSONS:
        """Fetch one URL per parameter combination from the current API.

        With ``rearrange`` the arguments are expanded by
        ``rearrange_params``; otherwise *paths* must already be a sequence
        of (args, kwargs) pairs.
        """
        api = self._get_api()
        if rearrange:
            pars = rearrange_params(paths, **kwargs)
        else:
            pars = paths
        urls = [api.get(*a, **kw) for a, kw in pars]
        return self._gets(urls, headers=api.headers, from_json=from_json)
# @add_api_name(None)
    def test_fetch(self, *args, **kwargs):
        """Thin public wrapper around :meth:`_fetchs` (testing helper)."""
        return self._fetchs(*args, **kwargs)
# @add_api_name(OFFIC)
    def players(self, tag: str) -> JSONS:
        """Fetch the "players" endpoint for *tag*."""
        return self._fetchs("players", tag=tag)
# @add_api_name(OFFIC)
    def battlelog(self, tag: str) -> JSONS:
        """Fetch the "battlelog" endpoint for *tag*."""
        return self._fetchs("battlelog", tag=tag)
# @add_api_name(OFFIC)
    def clubs(self, tag: str) -> JSONS:
        """Fetch the "clubs" endpoint for *tag*."""
        return self._fetchs("clubs", tag=tag)
# @add_api_name(OFFIC)
    def members(self, tag: str, limit: INTSTR = 100) -> JSONS:
        """Fetch up to *limit* entries from the "members" endpoint for *tag*."""
        return self._fetchs("members", tag=tag, limit=limit)
# @add_api_name(OFFIC)
    def rankings(self, kind: str,
                 key: Optional[INTSTR] = None,
                 code: str = "global",
                 limit: INTSTR = 200) -> JSONS:
        """Fetch ranking lists, one request per parameter combination."""
        pars = rearrange_params(
            kind, key=key, code=code, limit=limit)
        return self._fetchs(
            [_rankings(self, *a, **kw) for a, kw in pars], rearrange=False)
# @add_api_name(OFFIC)
    def brawlers(self, id: INTSTR = "",
                 limit: INTSTR = "") -> JSONS:
        """Fetch the "brawlers" endpoint (all brawlers, or one by *id*).

        NOTE(review): the async client defaults ``limit`` to ``None``, not
        ``""`` — confirm which the API expects.
        """
        return self._fetchs("brawlers", id=id, limit=limit)
# @add_api_name(OFFIC)
def powerplay(self, code: str = "global", limit: int = 200) -> JSONS:
return self._fetchs("rankings", | |
guest_obj = cpc_obj.lpars.find(name=guest_name.upper())
except zhmcclient.NotFound:
raise ValueError(
'LPAR <{}> does not exist or is not accessible by '
'this HMC user'.format(guest_name.upper())) from None
return guest_obj
# _get_guest()
    def _get_os_channel(self, guest_obj):
        """
        Retrieve OS channel topic from guest object

        Retries for up to MSG_POST_LOAD_DURATION seconds while the HMC
        reports the messages interface as not yet available.

        Args:
            guest_obj (Union[LPAR, Partition]): LPAR or DPM Partition

        Returns:
            str: os messages channel name (STOMP topic)

        Raises:
            RuntimeError: OS messages channel could not be found
            zhmcclient.HTTPError: other HMC errors
        """
        self._logger.debug("Opening OS messages channel ")
        timeout = time.monotonic() + MSG_POST_LOAD_DURATION
        while timeout - time.monotonic() > 0:
            try:
                return guest_obj.open_os_message_channel(
                    include_refresh_messages=True)
            except zhmcclient.HTTPError as exc:
                # 409/332: messages interface for the partition
                # is not available yet, wait and try again
                if exc.http_status == 409 and exc.reason == 332:
                    time.sleep(2)
                elif exc.http_status == 409 and exc.reason == 331:
                    # Topic already exists for this partition
                    # for the current API session
                    # Which means, we need to get the list of existing topics
                    # and pick one from there
                    all_topics = self._conn[0].session.get_notification_topics(
                    )
                    # From all the topics returned we only need those that are
                    # related to os-message-notification AND have the desired
                    # LPAR object specified
                    # LPAR object has its unique ID, and we search for it
                    # in the 'object-uri' field (comparing this directly is
                    # not robust enough)
                    os_topics = [
                        topic['topic-name'] for topic in all_topics if (
                            topic['topic-type'] == 'os-message-notification'
                            and topic['object-uri'].split(
                                '/')[-1] == guest_obj.uri.split('/')[-1]
                        )]
                    if not os_topics:
                        # none found - that is very much an error
                        self._logger.debug(
                            'No matching topic found for LPAR object %s: %s',
                            guest_obj.uri, all_topics)
                        raise RuntimeError(
                            'Could not determine OS messages topic') from None
                    if len(os_topics) > 1:
                        # make a note, but can probably work
                        self._logger.debug(
                            'Multiple topics found for LPAR object %s: %s',
                            guest_obj.uri, all_topics)
                    return os_topics[0]
                else:
                    raise
        raise RuntimeError("Timed out waiting for OS messages channel")
    def _get_svol_uri(self, part_obj, boot_params):
        """
        Find the uri of a storage volume

        Args:
            part_obj (Partition): DPM partition whose attached storage
                groups are searched
            boot_params (dict): 'scsi' boot method is matched by volume
                'uuid'; any other method by 'device-number' ('devicenr')

        Returns:
            str: element-uri of the matching volume; its 'usage' property
                is switched to 'boot' first when necessary

        Raises:
            ValueError: if no matching attached volume is found
        """
        if boot_params['boot_method'] == 'scsi':
            sg_type = 'fcp'
            prop_key = 'uuid'
            prop_value = boot_params['uuid']
        else:
            sg_type = 'fc'
            prop_key = 'device-number'
            prop_value = boot_params['devicenr']
        self._logger.debug("Looking for storage volume object with %s='%s'",
                           prop_key, prop_value)
        # search the corresponding volume in the storage groups attached to the
        # partition
        for sg_uri in part_obj.get_property('storage-group-uris'):
            sg_obj = (part_obj.manager.cpc.manager.console.storage_groups
                      .resource_object(sg_uri))
            if sg_obj.get_property('type').lower() != sg_type:
                self._logger.debug(
                    "Skipping storage group %s, type '%s' (actual) != '%s' "
                    "(expected)", sg_obj.get_property('name'),
                    sg_obj.get_property('type').lower(), sg_type)
                continue
            # find the matching volume
            for sg_vol in sg_obj.storage_volumes.list():
                sg_vol.pull_full_properties()
                # volumes without the property we match on are skipped
                try:
                    sg_vol_value = sg_vol.properties[prop_key]
                except KeyError:
                    continue
                if sg_vol_value.lower() != prop_value.lower():
                    continue
                # mark the matched volume as the boot device
                if sg_vol.properties['usage'] != 'boot':
                    sg_vol.update_properties({'usage': 'boot'})
                return sg_vol.get_property('element-uri')
        raise ValueError(
            'Storage volume <{}:{}> not found or not attached to partition'
            .format(prop_key, prop_value))
    # _get_svol_uri()
    def _load(self, guest_obj, boot_params):
        """
        Perform the load operation on a profile according to the specified
        method.

        Args:
            guest_obj (Lpar or Partition): zhmcclient object
            boot_params (dict): options as specified in json schema

        Raises:
            NotImplementedError: if cpc in dpm mode has no storage management
                firmware feature
            ValueError: if an unsupported boot method is specified
        """
        if boot_params['boot_method'] == 'none':
            self._logger.debug("No boot should be performed")
            return
        self._logger.debug("Performing boot")
        # dpm mode: boot is configured through partition properties
        if isinstance(guest_obj, zhmcclient.Partition):
            # perform load of a storage volume
            update_props = self._get_dpm_boot_props(guest_obj, boot_params)
            if not update_props:
                self._logger.warning("Unsupported boot parameters, "
                                     "no boot performed")
                return
            # boot properties can only be changed on a stopped partition
            if guest_obj.get_property('status') != 'stopped':
                self._logger.debug("Stopping partition")
                guest_obj.stop(wait_for_completion=True)
                guest_obj.wait_for_status('stopped')
            guest_obj.update_properties(update_props)
            self._logger.debug("Starting partition")
            guest_obj.start(wait_for_completion=True)
            return
        # classic mode: perform load of a SCSI disk
        if boot_params['boot_method'] == 'scsi':
            guest_obj.scsi_load(
                load_address=self._normalize_address(boot_params['devicenr']),
                wwpn=boot_params['wwpn'],
                lun=boot_params['lun'],
                wait_for_completion=True,
                force=True
            )
        # perform load of a DASD disk
        elif boot_params['boot_method'] == 'dasd':
            guest_obj.load(
                load_address=self._normalize_address(boot_params['devicenr']),
                wait_for_completion=True,
                force=True)
        # sanity check
        elif boot_params['boot_method'] in ('ftp', 'ftps', 'sftp'):
            raise ValueError('{} boot is only available in DPM mode'.format(
                boot_params['boot_method']))
    # _load()
@staticmethod
def _normalize_address(address):
"""
Convert the load address to the format expected by the HMC API.
Args:
address (str): string in the format 0.0.1500 or 1500
Returns:
str: normalized load address
"""
return address.replace('.', '')[-5:]
# _normalize_address()
    def _prepare_setup_network_cmds(self, net_setup):
        """
        Auxiliary method. Setup the network on the loaded operating system.

        Builds the shell command list that activates the network card (PCI
        or OSA), optionally configures a VLAN, and sets IP/gateway/DNS.

        Args:
            net_setup (dict): dictionary with network parameters

        Returns:
            List[str]: network setup commands
        """
        self._logger.debug(
            "Setting up network: args='%s'",
            # log arguments without the password
            {k: v for k, v in net_setup.items() if k != 'password'}
        )
        net_cmds = []
        ip_addr = net_setup['ip']
        dns_servers = net_setup.get('dns')
        subnet_mask = net_setup['mask']
        gw_addr = net_setup['gateway']
        vlan_id = net_setup.get('vlan')
        # PCI card: find out interface name
        if net_setup.get('type') == 'pci':
            # export interface name
            net_cmds.extend([
                "DEV_PATH=$(dirname $(grep -m1 -r '0x0*{}' --include "
                "function_id /sys/bus/pci/devices/*))".format(
                    net_setup['device'].lower()),
                "export IFACE_NAME=$(ls -1 ${DEV_PATH}/net | head -1)",
            ])
        # OSA card: define additional options
        else:
            mac_addr = net_setup['mac']
            channel = net_setup['device']
            options = deepcopy(net_setup.get('options', {}))
            try:
                layer2 = options.pop('layer2')
                layer2 = {
                    'true': 1, 'false': 0, '1': 1, '0': 0
                }[str(layer2).lower()]
            # option not specified or unknown value used: defaults to off
            except (KeyError, ValueError):
                layer2 = 0
            if channel.find(',') != -1:
                ch_list = channel.split(',')
            else:
                # single base channel given: derive the read/write/data triplet
                ch_list = [channel]
                # NOTE(review): lstrip("0x") strips *characters*, not a
                # prefix; safe for hex() output (no leading zeros after
                # '0x') except a literal value 0, which would become '' —
                # confirm channel numbers are never 0.
                ch_list.append(hex(int(channel, 16)+1).lstrip("0x"))
                ch_list.append(hex(int(channel, 16)+2).lstrip("0x"))
            # build the options string, layer2 should always come first
            str_option = '-o layer2={}'.format(layer2)
            for key, value in options.items():
                str_option += ' -o {}={}'.format(key, value)
            net_cmds.extend([
                # make the osa channels available
                "cio_ignore -r {}".format(','.join(ch_list)),
                # activate the osa card
                "znetconf -a {} {}".format(
                    ch_list[0].replace("0.0.", ""), str_option),
            ])
            # set interface name
            full_ccw = ch_list[0]
            if '.' not in full_ccw:
                full_ccw = '0.0.{}'.format(full_ccw)
            net_cmds.append('export IFACE_NAME=$(ls -1 /sys/devices/qeth/{}/'
                            'net | head -1)'.format(full_ccw))
            # layer2 active: set mac address for network interface
            if layer2 == 1 and mac_addr:
                net_cmds.append(
                    "ip link set $IFACE_NAME address {}".format(mac_addr))
        if vlan_id:
            net_cmds.extend([
                'ip link add link ${{IFACE_NAME}} name ${{IFACE_NAME}}.{vlan} '
                'type vlan id {vlan}'.format(vlan=vlan_id),
                'ip link set $IFACE_NAME up',
                'export IFACE_NAME=${{IFACE_NAME}}.{}'.format(vlan_id)
            ])
        net_cmds.extend([
            # set ip address and network mask
            "ip addr add {}/{} dev $IFACE_NAME".format(ip_addr, subnet_mask),
            "ip link set $IFACE_NAME up",
            # set default gateway
            "ip route add default via {}".format(gw_addr),
        ])
        if dns_servers:
            net_cmds.append("echo > /etc/resolv.conf")
            for dns_entry in dns_servers:
                net_cmds.append(
                    "echo 'nameserver {}' >> /etc/resolv.conf"
                    .format(dns_entry))
        # sometimes the LPAR is unreachable from the network until a ping
        # is performed (likely because of arp cache)
        net_cmds.append("ping -c 1 {}".format(gw_addr))
        return net_cmds
    # _prepare_setup_network_cmds()
    def _send_commands(self, guest_obj, commands, msg_channel):
        """
        Send command list to a system via HMC

        Each command is sent only after its wait-pattern has appeared on
        the OS messages channel (a plain string command waits for
        MESSAGES_DEFAULT_PATTERN).

        Args:
            guest_obj (Lpar or Partition): zhmcclient object
            commands (List): list of commands to send.
                             Command may be a tuple (pattern, command) to wait
                             for a pattern to appear on msg_channel
            msg_channel (zhmcclient.NotificationReceiver): message channel

        Raises:
            TimeoutError: operation took too long
        """
        if not commands:
            return
        # command generator: normalize plain strings to (pattern, command)
        def _get_cmd(commands):
            for cmd in commands:
                if isinstance(cmd, str):
                    yield (MESSAGES_DEFAULT_PATTERN, cmd)
                else:
                    yield cmd
        # the api has a limit of 200 chars per call so we need
        # to split the commands in smaller pieces
        def _string_to_chunks(string, size=200):
            if len(string) < size:
                yield string
                return
            # save command to a temporary file - 'tr' reads stdin as is
            yield "tr -d '\\n' > /tmp/command"
            for start in range(0, len(string), size):
                yield string[start:start+size]
            # stop reading stdin
            yield '^D'
            # run command from temporary file
            yield '. /tmp/command'
        timeout = time.monotonic() + OS_MESSAGES_TIMEOUT
        command_gen = _get_cmd(commands)
        pattern, cmd = next(command_gen)
        try:
            # allow first command to skip waiting for messages
            # by specifying an empty wait pattern
            if not pattern:
                for chunk in _string_to_chunks(cmd):
                    guest_obj.send_os_command(chunk)
                pattern, cmd = next(command_gen)
            while time.monotonic() < timeout:
                # wait for pattern to appear
                messages = msg_channel.get_messages(60.0)
                if not messages:
                    self._logger.debug("Still waiting for '%s' prompt",
                                       pattern)
                    continue
                # log messages
                for msg in messages:
                    self._logger.debug("%s: %s", msg['type'], msg['text'])
                # react to pattern with one command only
                for text in [msg['text'] for msg in messages
                             if msg['type'] == 'OS message']:
                    if re.search(pattern, text):
                        for chunk in _string_to_chunks(cmd):
                            guest_obj.send_os_command(chunk)
                        pattern, cmd = next(command_gen)
                        break
        except StopIteration:
            # all commands have been consumed - normal exit
            pass
        if time.monotonic() > timeout:
            raise TimeoutError(
                "Timed out communicating with OS Messages interface")
    # _send_commands()
def _update_resources_lpar(self, args, guest_obj):
"""
Auxiliary method. If resources are different, we need to change | |
V34_7 = "V34.7"
V34_9 = "V34.9"
V35 = "V35"
V35_0 = "V35.0"
V35_1 = "V35.1"
V35_2 = "V35.2"
V35_3 = "V35.3"
V35_4 = "V35.4"
V35_5 = "V35.5"
V35_6 = "V35.6"
V35_7 = "V35.7"
V35_9 = "V35.9"
V36 = "V36"
V36_0 = "V36.0"
V36_1 = "V36.1"
V36_2 = "V36.2"
V36_3 = "V36.3"
V36_4 = "V36.4"
V36_5 = "V36.5"
V36_6 = "V36.6"
V36_7 = "V36.7"
V36_9 = "V36.9"
V37 = "V37"
V37_0 = "V37.0"
V37_1 = "V37.1"
V37_2 = "V37.2"
V37_3 = "V37.3"
V37_4 = "V37.4"
V37_5 = "V37.5"
V37_6 = "V37.6"
V37_7 = "V37.7"
V37_9 = "V37.9"
V38 = "V38"
V38_0 = "V38.0"
V38_1 = "V38.1"
V38_2 = "V38.2"
V38_3 = "V38.3"
V38_4 = "V38.4"
V38_5 = "V38.5"
V38_6 = "V38.6"
V38_7 = "V38.7"
V38_9 = "V38.9"
V39 = "V39"
V39_0 = "V39.0"
V39_1 = "V39.1"
V39_2 = "V39.2"
V39_3 = "V39.3"
V39_4 = "V39.4"
V39_5 = "V39.5"
V39_6 = "V39.6"
V39_8 = "V39.8"
V39_9 = "V39.9"
V40_V49 = "V40-V49"
V40 = "V40"
V40_0 = "V40.0"
V40_1 = "V40.1"
V40_2 = "V40.2"
V40_3 = "V40.3"
V40_4 = "V40.4"
V40_5 = "V40.5"
V40_6 = "V40.6"
V40_7 = "V40.7"
V40_9 = "V40.9"
V41 = "V41"
V41_0 = "V41.0"
V41_1 = "V41.1"
V41_2 = "V41.2"
V41_3 = "V41.3"
V41_4 = "V41.4"
V41_5 = "V41.5"
V41_6 = "V41.6"
V41_7 = "V41.7"
V41_9 = "V41.9"
V42 = "V42"
V42_0 = "V42.0"
V42_1 = "V42.1"
V42_2 = "V42.2"
V42_3 = "V42.3"
V42_4 = "V42.4"
V42_5 = "V42.5"
V42_6 = "V42.6"
V42_7 = "V42.7"
V42_9 = "V42.9"
V43 = "V43"
V43_0 = "V43.0"
V43_1 = "V43.1"
V43_2 = "V43.2"
V43_3 = "V43.3"
V43_4 = "V43.4"
V43_5 = "V43.5"
V43_6 = "V43.6"
V43_7 = "V43.7"
V43_9 = "V43.9"
V44 = "V44"
V44_0 = "V44.0"
V44_1 = "V44.1"
V44_2 = "V44.2"
V44_3 = "V44.3"
V44_4 = "V44.4"
V44_5 = "V44.5"
V44_6 = "V44.6"
V44_7 = "V44.7"
V44_9 = "V44.9"
V45 = "V45"
V45_0 = "V45.0"
V45_1 = "V45.1"
V45_2 = "V45.2"
V45_3 = "V45.3"
V45_4 = "V45.4"
V45_5 = "V45.5"
V45_6 = "V45.6"
V45_7 = "V45.7"
V45_9 = "V45.9"
V46 = "V46"
V46_0 = "V46.0"
V46_1 = "V46.1"
V46_2 = "V46.2"
V46_3 = "V46.3"
V46_4 = "V46.4"
V46_5 = "V46.5"
V46_6 = "V46.6"
V46_7 = "V46.7"
V46_9 = "V46.9"
V47 = "V47"
V47_0 = "V47.0"
V47_1 = "V47.1"
V47_2 = "V47.2"
V47_3 = "V47.3"
V47_4 = "V47.4"
V47_5 = "V47.5"
V47_6 = "V47.6"
V47_7 = "V47.7"
V47_9 = "V47.9"
V48 = "V48"
V48_0 = "V48.0"
V48_1 = "V48.1"
V48_2 = "V48.2"
V48_3 = "V48.3"
V48_4 = "V48.4"
V48_5 = "V48.5"
V48_6 = "V48.6"
V48_7 = "V48.7"
V48_9 = "V48.9"
V49 = "V49"
V49_0 = "V49.0"
V49_1 = "V49.1"
V49_2 = "V49.2"
V49_3 = "V49.3"
V49_4 = "V49.4"
V49_5 = "V49.5"
V49_6 = "V49.6"
V49_8 = "V49.8"
V49_9 = "V49.9"
V50_V59 = "V50-V59"
V50 = "V50"
V50_0 = "V50.0"
V50_1 = "V50.1"
V50_2 = "V50.2"
V50_3 = "V50.3"
V50_4 = "V50.4"
V50_5 = "V50.5"
V50_6 = "V50.6"
V50_7 = "V50.7"
V50_9 = "V50.9"
V51 = "V51"
V51_0 = "V51.0"
V51_1 = "V51.1"
V51_2 = "V51.2"
V51_3 = "V51.3"
V51_4 = "V51.4"
V51_5 = "V51.5"
V51_6 = "V51.6"
V51_7 = "V51.7"
V51_9 = "V51.9"
V52 = "V52"
V52_0 = "V52.0"
V52_1 = "V52.1"
V52_2 = "V52.2"
V52_3 = "V52.3"
V52_4 = "V52.4"
V52_5 = "V52.5"
V52_6 = "V52.6"
V52_7 = "V52.7"
V52_9 = "V52.9"
V53 = "V53"
V53_0 = "V53.0"
V53_1 = "V53.1"
V53_2 = "V53.2"
V53_3 = "V53.3"
V53_4 = "V53.4"
V53_5 = "V53.5"
V53_6 = "V53.6"
V53_7 = "V53.7"
V53_9 = "V53.9"
V54 = "V54"
V54_0 = "V54.0"
V54_1 = "V54.1"
V54_2 = "V54.2"
V54_3 = "V54.3"
V54_4 = "V54.4"
V54_5 = "V54.5"
V54_6 = "V54.6"
V54_7 = "V54.7"
V54_9 = "V54.9"
V55 = "V55"
V55_0 = "V55.0"
V55_1 = "V55.1"
V55_2 = "V55.2"
V55_3 = "V55.3"
V55_4 = "V55.4"
V55_5 = "V55.5"
V55_6 = "V55.6"
V55_7 = "V55.7"
V55_9 = "V55.9"
V56 = "V56"
V56_0 = "V56.0"
V56_1 = "V56.1"
V56_2 = "V56.2"
V56_3 = "V56.3"
V56_4 = "V56.4"
V56_5 = "V56.5"
V56_6 = "V56.6"
V56_7 = "V56.7"
V56_9 = "V56.9"
V57 = "V57"
V57_0 = "V57.0"
V57_1 = "V57.1"
V57_2 = "V57.2"
V57_3 = "V57.3"
V57_4 = "V57.4"
V57_5 = "V57.5"
V57_6 = "V57.6"
V57_7 = "V57.7"
V57_9 = "V57.9"
V58 = "V58"
V58_0 = "V58.0"
V58_1 = "V58.1"
V58_2 = "V58.2"
V58_3 = "V58.3"
V58_4 = "V58.4"
V58_5 = "V58.5"
V58_6 = "V58.6"
V58_7 = "V58.7"
V58_9 = "V58.9"
V59 = "V59"
V59_0 = "V59.0"
V59_1 = "V59.1"
V59_2 = "V59.2"
V59_3 = "V59.3"
V59_4 = "V59.4"
V59_5 = "V59.5"
V59_6 = "V59.6"
V59_8 = "V59.8"
V59_9 = "V59.9"
V60_V69 = "V60-V69"
V60 = "V60"
V60_0 = "V60.0"
V60_1 = "V60.1"
V60_2 = "V60.2"
V60_3 = "V60.3"
V60_4 = "V60.4"
V60_5 = "V60.5"
V60_6 = "V60.6"
V60_7 = "V60.7"
V60_9 = "V60.9"
V61 = "V61"
V61_0 = "V61.0"
V61_1 = "V61.1"
V61_2 = "V61.2"
V61_3 = "V61.3"
V61_4 = "V61.4"
V61_5 = "V61.5"
V61_6 = "V61.6"
V61_7 = "V61.7"
V61_9 = "V61.9"
V62 = "V62"
V62_0 = "V62.0"
V62_1 = "V62.1"
V62_2 = "V62.2"
V62_3 = "V62.3"
V62_4 = "V62.4"
V62_5 = "V62.5"
V62_6 = "V62.6"
V62_7 = "V62.7"
V62_9 = "V62.9"
V63 = "V63"
V63_0 = "V63.0"
V63_1 = "V63.1"
V63_2 = "V63.2"
V63_3 = "V63.3"
V63_4 = "V63.4"
V63_5 = "V63.5"
V63_6 = "V63.6"
V63_7 = "V63.7"
V63_9 = "V63.9"
V64 = "V64"
V64_0 = "V64.0"
V64_1 = "V64.1"
V64_2 = "V64.2"
V64_3 = "V64.3"
V64_4 = "V64.4"
V64_5 = "V64.5"
V64_6 = "V64.6"
V64_7 = "V64.7"
V64_9 = "V64.9"
V65 = "V65"
V65_0 = "V65.0"
V65_1 = "V65.1"
V65_2 = "V65.2"
V65_3 = "V65.3"
V65_4 = "V65.4"
V65_5 = "V65.5"
V65_6 = "V65.6"
V65_7 = "V65.7"
V65_9 = "V65.9"
V66 = "V66"
V66_0 = "V66.0"
V66_1 = "V66.1"
V66_2 = "V66.2"
V66_3 = "V66.3"
V66_4 = "V66.4"
V66_5 = "V66.5"
V66_6 = "V66.6"
V66_7 = "V66.7"
V66_9 = "V66.9"
V67 = "V67"
V67_0 = "V67.0"
V67_1 = "V67.1"
V67_2 = "V67.2"
V67_3 = "V67.3"
V67_4 = "V67.4"
V67_5 = "V67.5"
V67_6 = "V67.6"
V67_7 = "V67.7"
V67_9 = "V67.9"
V68 = "V68"
V68_0 = "V68.0"
V68_1 = "V68.1"
V68_2 = "V68.2"
V68_3 = "V68.3"
V68_4 = "V68.4"
V68_5 = "V68.5"
V68_6 = "V68.6"
V68_7 = "V68.7"
V68_9 = "V68.9"
V69 = "V69"
V69_0 = "V69.0"
V69_1 = "V69.1"
V69_2 = "V69.2"
V69_3 = "V69.3"
V69_4 = "V69.4"
V69_5 = "V69.5"
V69_6 = "V69.6"
V69_8 = "V69.8"
V69_9 = "V69.9"
V70_V79 = "V70-V79"
V70 = "V70"
V70_0 = "V70.0"
V70_1 = "V70.1"
V70_2 = "V70.2"
V70_3 = "V70.3"
V70_4 = "V70.4"
V70_5 = "V70.5"
V70_6 = "V70.6"
V70_7 = "V70.7"
V70_9 = "V70.9"
V71 = "V71"
V71_0 = "V71.0"
V71_1 = "V71.1"
V71_2 = "V71.2"
V71_3 = "V71.3"
V71_4 = "V71.4"
V71_5 = "V71.5"
V71_6 = "V71.6"
V71_7 = "V71.7"
V71_9 = "V71.9"
V72 = "V72"
V72_0 = "V72.0"
V72_1 = "V72.1"
V72_2 = "V72.2"
V72_3 = "V72.3"
V72_4 = "V72.4"
V72_5 = "V72.5"
V72_6 = "V72.6"
V72_7 = "V72.7"
V72_9 = "V72.9"
V73 = "V73"
V73_0 = "V73.0"
V73_1 = "V73.1"
V73_2 = "V73.2"
V73_3 = "V73.3"
V73_4 = "V73.4"
V73_5 = "V73.5"
V73_6 = "V73.6"
V73_7 = "V73.7"
V73_9 = "V73.9"
V74 = "V74"
V74_0 = "V74.0"
V74_1 = "V74.1"
V74_2 = "V74.2"
V74_3 = "V74.3"
V74_4 = "V74.4"
V74_5 = "V74.5"
V74_6 = "V74.6"
V74_7 = "V74.7"
V74_9 = "V74.9"
V75 = "V75"
V75_0 = "V75.0"
V75_1 = "V75.1"
V75_2 = "V75.2"
V75_3 = "V75.3"
V75_4 = "V75.4"
V75_5 = "V75.5"
V75_6 = "V75.6"
V75_7 = "V75.7"
V75_9 = "V75.9"
V76 = "V76"
V76_0 = "V76.0"
V76_1 = "V76.1"
V76_2 = "V76.2"
V76_3 = "V76.3"
V76_4 = "V76.4"
V76_5 = "V76.5"
V76_6 = "V76.6"
V76_7 = "V76.7"
V76_9 = "V76.9"
V77 = | |
card_to_player = self.card_to_player
player_to_card = self.player_to_card
cards = r.h[player].cards
# this is never called on a player's own hand
assert self == r.PlayerRecord[cluer]
assert player != cluer
x = self.standard_action(cluer, player, dont_play, progress, card_to_player, player_to_card, r.discardpile, r)
if not MODIFIEDACTION:
return x
# If you were instructed to play, and you can play a 5 which will help
# a future person to clue, do that.
if x[0] == 'play':
if (self.min_futurehints < 0 or self.futurehints == 0) and cards[x[1]]['name'][0] != '5':
playablefives = [card for card in get_plays(cards, progress)
if card['name'][0] == '5']
if playablefives:
if DEBUG:
r.debug['play 5 instead'] += 1
return 'play', cards.index(playablefives[0])
else:
if DEBUG:
r.debug["someone cannot clue, but I have a play"] += 1
if self.endgame <= 0:
action = self.safe_discard(cards, progress)
if action[0] == 'discard': return action
action = self.modified_safe_discard(cluer, player, cards, dont_play, r)
action = self.modified_discard(action, cards, progress, r)
if action[0] == 'discard': return action
return x
# If you are at 8 hints, hint
if self.modified_hints >= 8:
return 'hint', 0
# The modified player can look a bit harder for a safe discard
if x[0] == 'hint':
x = self.modified_safe_discard(cluer, player, cards, dont_play, r)
# If we are out of hints, you must discard
if self.min_futurehints <= 0 or self.futurehints <= 1:
action = self.modified_discard(x, cards, progress, r)
if action[0] == 'hint' and self.modified_hints <= 0:
return self.critical_discard(cards, r) # probably I should try to discard instead?
return action
# Probably hint if everyone else is playing and you can instruct two players to do something
if self.futureplays == r.nPlayers - 2 and self.actions_before_modified and self.futurehints >= 2:
return 'hint', 0
# Can you get a new card to play?
i = next(player, r)
for atype, _ in self.next_player_actions:
if atype != 'play' and [card for card in r.h[i].cards if self.want_to_play(cluer, i, [], self.modified_progress, self.modified_dic, card['name'], r)]:
return 'hint', 0
i = next(i, r)
# todo: in the late endgame, if player can play a card in dont_play, strikes is 1 or lower, just let him play it
# If you can safely discard, discard in the following cases:
# - we are not in the endgame
# - nobody can play
# - in some situations in the late game
if x[0] == 'discard':
if self.endgame <= 0:
return x
if not self.futureplays:
return x
# early in the endgame, discard a bit more
if self.late_game_discard(r):
return x
# Also discard if there are few plays not in the endgame
if self.futureplays <= 1 and self.endgame <= 0:
return self.modified_discard(x, cards, progress, r)
# we are in the endgame, and there is no emergency, so we stall
return 'hint', 0
    def late_game_discard(self, r):
        """Should the modified player discard in the late game?

        True only when exactly one future play remains (endgame == 1) and
        the modified player's whole hand is already played; otherwise the
        decision depends on whether stalling with hints still lets the
        team reach the next player holding an unplayed useful card.
        """
        cluer = r.whoseTurn
        cards = r.h[self.modified_player].cards
        if not (self.futureplays == 1 and self.endgame == 1 and not [card for card in cards if not has_been_played(card, r.progress)]):
            return False
        # discard unless you see all useful cards and have enough hints
        all_cards_visible = is_subset(get_all_useful_cardnames(r), names(get_all_visible_cards(cluer, r)))
        if not all_cards_visible:
            if self.modified_hints < r.nPlayers - 2:
                #r.debug['stop'] = 0
                return True
            else:
                return False
        # this should capture: without discarding we cannot reach the first player with a useful card that is not going to play this round
        useful_players = [pl for pl in other_players(self.modified_player, r) if pl != cluer and\
            len([card for card in r.h[pl].cards if not has_been_played(card, r.progress)]) > int(pl in self.player_to_card_current)]
        if not useful_players: # we win this round
            # if sum(r.progress.values()) != len(r.suits) * 5 - 1: r.debug['stop'] = 0
            return True
        # we need one hint for every player between self.modified_player and useful_player[0]
        needed_hints = ((useful_players[0] - self.modified_player) % r.nPlayers)
        playing_player, = self.player_to_card_current.keys()
        #print("we need",needed_hints,"hints to reach",useful_players,"from",self.modified_player,"and",playing_player,"is playing. We have",self.modified_hints)
        # we need 1 fewer hint if someone inbetween plays
        if is_between(playing_player, self.modified_player, useful_players[0]):
            needed_hints -= 1
            if DEBUG:
                r.debug['we can use the clue from a 5 to reach another player in endgame'] += 1
            #this seems to rarely happen
            #r.debug['stop'] = 0
            #print("one fewer")
        # we need even 1 fewer hint if that player plays a 5 whose hint can be used.
        if self.player_to_card_current[playing_player][1][0] == '5' and next(playing_player, r) != useful_players[0]:
            needed_hints -= 1
            #print("no, two fewer")
        if self.modified_hints < needed_hints:
            #if sum(r.progress.values()) != len(r.suits) * 5 - 1: r.debug['stop'] = 0
            return True
        return False
def modified_safe_discard(self, cluer, player, cards, dont_play, r):
"""Discards which don't hurt for the modified player"""
# this code is only called when safe_discard returns 'hint'
# discard a card which will be played by this clue
discardCards = [card for card in cards if card['name'] in dont_play]
if discardCards:
return 'discard', cards.index(discardCards[0])
# discard a card visible in someone else's hand
visibleCards = [card['name'] for i in range(r.nPlayers) if i != cluer and i != player for card in r.h[i].cards]
discardCards = [card for card in cards if card['name'] in visibleCards]
if discardCards:
discardCard = discardCards[0] # do we want to find the lowest or highest such card?
return 'discard', cards.index(discardCard)
return 'hint', 0
def modified_discard(self, action, cards, progress, r):
"""Find the least bad non-critical card to discard"""
# discard a card which is not yet in the discard pile, and will not be discarded between the cluer and the player
if action[0] == 'discard': return action
discardCards = get_nonvisible_cards(cards, r.discardpile)
discardCards = [card for card in discardCards if card['name'][0] != '5' and not is_playable(card, progress)]
assert all([card['name'][0] != '1' for card in discardCards])
if discardCards:
discardCard = find_highest(discardCards)
return 'discard', cards.index(discardCard)
return 'hint', 0
def critical_discard(self, cards, r):
"""Find the card with the highest rank card to discard.
This function is only called when all cards are critical."""
if DEBUG:
r.debug['instructing to discard critical card'] += 1
return 'discard', cards.index(find_highest(cards))
### The main function which is called every turn
def play(self, r):
me = r.whoseTurn
n = r.nPlayers
# Some first turn initialization
if r.turnNumber < n:
for p in r.PlayerRecord:
if p.__class__.__name__ != 'HatPlayer':
raise NameError('Hat AI must only play with other hatters')
if r.turnNumber == 0:
if r.nPlayers <= 3:
raise NameError('This AI works only with at least 4 players.')
if DEBUG:
for s in DEBUGVALUES:
if s not in r.debug:
r.debug[s] = 0
# for i in [0, 1]:
# for j in range(4):
# s = 'yolo: played ' + str(i) + ', correct was ' + str(j)
# if s not in r.debug:
# r.debug[s] = 0
for i in range(n):
# initialize variables which contain the memory of this player.
# These are updated after every move of any player
r.PlayerRecord[i].initialize_memory(r)
else:
# everyone takes some time to think about the meaning of previously
# given clues
for i in range(n):
r.PlayerRecord[i].think_at_turn_start(i, r)
# Is there a clue aimed at me?
myaction = 'hint', 0
if self.given_clues:
myaction = self.resolve_given_clues(me, r)
if myaction[0] == 'play':
return self.execute_action(myaction, r)
# in the endgame, yolo a card if you don't see all playable
if not r.deck and r.lightning < 2 and not is_subset(get_all_playable_cardnames(r), names(get_all_visible_cards(me, r)) + r.discardpile):
slot = 0
# if I have a known useless card in slot 0, play slot 1
if self.useless_card is not None and self.useless_card in r.h[me].cards and not r.h[me].cards.index(self.useless_card):
slot = 1
if DEBUG:
r.debug['yolo'] += 1
# peek at my hand to test if my yolo is successful for debugging (I cannot check it next turn if this is the last turn of the game)
if is_playable(r.h[me].cards[slot], r.progress):
r.debug['successful yolo'] += 1
else:
r.debug['unsuccessful yolo'] += 1
# s = 'yolo: played ' + str(slot) + ', correct was ' + str([is_playable(card, r.progress) for card in r.h[me].cards].index(True))
# r.debug[s] += 1
return self.execute_action(('play', slot), r)
if myaction[0] == 'discard' and (not r.hints or (me == self.modified_player and MODIFIEDACTION)):
if r.hints != 8:
return self.execute_action(myaction, r)
elif | |
return cell_grid
def cells_to_relspan_grid(cells):
    """Encode each grid position covered by a cell as a relative span.

    Every covered position holds [min_col - col, min_row - row,
    max_col_excl - col, max_row_excl - row], i.e. the owning cell's
    bounding box expressed relative to that position; uncovered positions
    keep the zero fill. Returns [[]] for an empty cell list.
    """
    if not cells:
        return [[]]
    n_rows = 1 + max(max(cell['row_nums']) for cell in cells)
    n_cols = 1 + max(max(cell['column_nums']) for cell in cells)
    grid = np.zeros((n_rows, n_cols)).tolist()
    for cell in cells:
        rows, cols = cell['row_nums'], cell['column_nums']
        top, left = min(rows), min(cols)
        bottom, right = max(rows) + 1, max(cols) + 1
        for r_idx in rows:
            for c_idx in cols:
                grid[r_idx][c_idx] = [left - c_idx, top - r_idx,
                                      right - c_idx, bottom - r_idx]
    return grid
def align_cells_outer(true_cells, pred_cells, reward_function):
    '''
    Dynamic programming sequence alignment between two sequences
    Traceback convention: -1 = up, 1 = left, 0 = diag up-left

    Aligns rows (or columns) of the true and predicted grids, scoring each
    candidate pair with align_1d using the supplied reward_function.

    Returns (aligned_true_indices, aligned_pred_indices, fscore): the matched
    index pairs from the traceback and the harmonic mean of the alignment
    score normalized by each sequence's length.
    '''
    scores = np.zeros((len(true_cells) + 1, len(pred_cells) + 1))
    pointers = np.zeros((len(true_cells) + 1, len(pred_cells) + 1))
    # Initialize first column
    for row_idx in range(1, len(true_cells) + 1):
        pointers[row_idx, 0] = -1
    # Initialize first row
    for col_idx in range(1, len(pred_cells) + 1):
        pointers[0, col_idx] = 1
    for row_idx in range(1, len(true_cells) + 1):
        for col_idx in range(1, len(pred_cells) + 1):
            reward = align_1d(true_cells[row_idx-1], pred_cells[col_idx-1], reward_function)
            diag_score = scores[row_idx - 1, col_idx - 1] + reward
            same_row_score = scores[row_idx, col_idx - 1]
            same_col_score = scores[row_idx - 1, col_idx]
            max_score = max(diag_score, same_col_score, same_row_score)
            scores[row_idx, col_idx] = max_score
            # On ties prefer a diagonal (match) move, then skipping a true row
            if diag_score == max_score:
                pointers[row_idx, col_idx] = 0
            elif same_col_score == max_score:
                pointers[row_idx, col_idx] = -1
            else:
                pointers[row_idx, col_idx] = 1
    score = scores[len(true_cells), len(pred_cells)]
    if len(pred_cells) > 0:
        precision = score / len(pred_cells)
    else:
        precision = 1
    if len(true_cells) > 0:
        recall = score / len(true_cells)
    else:
        recall = 1
    # BUGFIX: when the alignment score is exactly 0 for non-empty sequences,
    # precision == recall == 0 and the harmonic mean below was 0/0
    # (ZeroDivisionError); define the F-score as 0 in that case.
    if precision + recall > 0:
        score = 2 * precision * recall / (precision + recall)
    else:
        score = 0
    #score = 2 * score / (len(true_cells) + len(pred_cells))
    # Trace back from the bottom-right corner; each diagonal move records one
    # aligned (true, pred) index pair.
    cur_row = len(true_cells)
    cur_col = len(pred_cells)
    aligned_true_indices = []
    aligned_pred_indices = []
    while not (cur_row == 0 and cur_col == 0):
        if pointers[cur_row, cur_col] == -1:
            cur_row -= 1
        elif pointers[cur_row, cur_col] == 1:
            cur_col -= 1
        else:
            cur_row -= 1
            cur_col -= 1
            aligned_pred_indices.append(cur_col)
            aligned_true_indices.append(cur_row)
    aligned_true_indices = aligned_true_indices[::-1]
    aligned_pred_indices = aligned_pred_indices[::-1]
    return aligned_true_indices, aligned_pred_indices, score
def factored_2dlcs(true_cell_grid, pred_cell_grid, reward_function):
    """Factored 2D-LCS similarity (GriTS core) between two cell grids.

    Rows and columns are aligned independently with ``align_cells_outer``;
    the total score is ``reward_function`` summed over the Cartesian product
    of the aligned row and column index pairs, normalized by each grid's
    area to give precision and recall.

    Both grids are expected to be 2D numpy arrays (``.shape`` is used below).
    Returns ``(fscore, precision, recall, row_score, column_score)``.
    """
    # Align rows directly, and columns by aligning the transposed grids
    true_row_nums, pred_row_nums, row_score = align_cells_outer(true_cell_grid,
                                                                pred_cell_grid,
                                                                reward_function)
    true_column_nums, pred_column_nums, column_score = align_cells_outer(transpose(true_cell_grid),
                                                                         transpose(pred_cell_grid),
                                                                         reward_function)
    # Sum the reward at every (aligned row, aligned column) intersection
    score = 0
    for true_row_num, pred_row_num in zip(true_row_nums, pred_row_nums):
        for true_column_num, pred_column_num in zip(true_column_nums, pred_column_nums):
            score += reward_function(true_cell_grid[true_row_num][true_column_num],
                                     pred_cell_grid[pred_row_num][pred_column_num])
    # Normalize by grid area; empty grids count as perfect by convention
    if true_cell_grid.shape[0] > 0 and true_cell_grid.shape[1] > 0:
        recall = score / (true_cell_grid.shape[0]*true_cell_grid.shape[1])
    else:
        recall = 1
    if pred_cell_grid.shape[0] > 0 and pred_cell_grid.shape[1] > 0:
        precision = score / (pred_cell_grid.shape[0]*pred_cell_grid.shape[1])
    else:
        precision = 1
    if precision > 0 and recall > 0:
        fscore = 2 * precision * recall / (precision + recall)
    else:
        fscore = 0
    return fscore, precision, recall, row_score, column_score
def lcs_similarity(string1, string2):
    """Normalized longest-common-subsequence similarity of two strings.

    Returns 2*|LCS| / (|a| + |b|), and 1 when both strings are empty.
    """
    if not string1 and not string2:
        return 1
    matcher = SequenceMatcher(None, string1, string2)
    # Total length of the matching blocks equals the LCS length found by difflib
    common_length = sum(block.size for block in matcher.get_matching_blocks())
    return 2 * common_length / (len(string1) + len(string2))
def output_to_dilatedbbox_grid(bboxes, labels, scores):
    """Build a grid of cell boxes from detected rows, columns and spanning cells.

    Label convention used here: 1 = table column, 2 = table row,
    4/5 = spanning cells.  Each grid position starts as the intersection of
    its row and column boxes; positions covered by a spanning cell are then
    overwritten with that cell's box.  ``scores`` is accepted for interface
    symmetry but is not read.
    """
    rows = [{'bbox': bbox} for bbox, label in zip(bboxes, labels) if label == 2]
    columns = [{'bbox': bbox} for bbox, label in zip(bboxes, labels) if label == 1]
    # NOTE(review): every supercell gets score 1, so the sort by -score below
    # currently preserves input order - confirm whether real detection scores
    # were meant to be used here.
    supercells = [{'bbox': bbox, 'score': 1} for bbox, label in zip(bboxes, labels) if label in [4, 5]]
    # Order rows top-to-bottom and columns left-to-right by box center (y0+y1 / x0+x1)
    rows.sort(key=lambda x: x['bbox'][1]+x['bbox'][3])
    columns.sort(key=lambda x: x['bbox'][0]+x['bbox'][2])
    supercells.sort(key=lambda x: -x['score'])
    cell_grid = []
    for row_num, row in enumerate(rows):
        column_grid = []
        for column_num, column in enumerate(columns):
            # Rect is provided by an import outside this view (presumably fitz.Rect);
            # the cell box is the row/column intersection
            bbox = Rect(row['bbox']).intersect(column['bbox'])
            column_grid.append(list(bbox))
        cell_grid.append(column_grid)
    matches_by_supercell = get_supercell_rows_and_columns(supercells, rows, columns)
    for matches, supercell in zip(matches_by_supercell, supercells):
        for match in matches:
            # match is a (row index, column index) pair; the whole span shares
            # the supercell's box
            cell_grid[match[0]][match[1]] = supercell['bbox']
    return cell_grid
def compute_metrics(true_bboxes, true_labels, true_scores, true_cells,
                    pred_bboxes, pred_labels, pred_scores, pred_cells):
    """Compute all GriTS variants plus adjacency metrics for one table.

    Builds four grid representations for both ground truth and prediction
    (dilated row/column intersection boxes, relative-span topology vectors,
    cell bounding boxes, cell text) and scores each true/pred pair with
    ``factored_2dlcs`` using IoU for the geometric grids and normalized LCS
    for the text grid.  Returns a flat ``{metric_name: value}`` dict.
    """
    # Compute grids/matrices for comparison
    true_cell_dilatedbbox_grid = np.array(output_to_dilatedbbox_grid(true_bboxes, true_labels, true_scores))
    true_relspan_grid = np.array(cells_to_relspan_grid(true_cells))
    true_bbox_grid = np.array(cells_to_grid(true_cells, key='bbox'))
    true_text_grid = np.array(cells_to_grid(true_cells, key='cell_text'), dtype=object)
    pred_cell_dilatedbbox_grid = np.array(output_to_dilatedbbox_grid(pred_bboxes, pred_labels, pred_scores))
    pred_relspan_grid = np.array(cells_to_relspan_grid(pred_cells))
    pred_bbox_grid = np.array(cells_to_grid(pred_cells, key='bbox'))
    pred_text_grid = np.array(cells_to_grid(pred_cells, key='cell_text'), dtype=object)
    #---Compute each of the metrics
    metrics = {}
    # GriTS-RawLoc: IoU over the dilated row/column intersection boxes
    (metrics['grits_rawloc'], metrics['grits_precision_rawloc'],
     metrics['grits_recall_rawloc'], metrics['grits_rawloc_rowbased'],
     metrics['grits_rawloc_columnbased']) = factored_2dlcs(true_cell_dilatedbbox_grid,
                                                           pred_cell_dilatedbbox_grid,
                                                           reward_function=eval_utils.iou)
    # GriTS-Top: IoU over the relative-span (topology) vectors
    (metrics['grits_top'], metrics['grits_precision_top'],
     metrics['grits_recall_top'], metrics['grits_top_rowbased'],
     metrics['grits_top_columnbased']) = factored_2dlcs(true_relspan_grid,
                                                        pred_relspan_grid,
                                                        reward_function=eval_utils.iou)
    # GriTS-Loc: IoU over the final cell bounding boxes
    (metrics['grits_loc'], metrics['grits_precision_loc'],
     metrics['grits_recall_loc'], metrics['grits_loc_rowbased'],
     metrics['grits_loc_columnbased']) = factored_2dlcs(true_bbox_grid,
                                                        pred_bbox_grid,
                                                        reward_function=eval_utils.iou)
    # GriTS-Cont: normalized LCS over the extracted cell text
    (metrics['grits_cont'], metrics['grits_precision_cont'],
     metrics['grits_recall_cont'], metrics['grits_cont_rowbased'],
     metrics['grits_cont_columnbased']) = factored_2dlcs(true_text_grid,
                                                         pred_text_grid,
                                                         reward_function=lcs_similarity)
    # Classic adjacency-based metrics, with and without blank cells
    (metrics['adjacency_nonblank_recall'], metrics['adjacency_nonblank_precision'],
     metrics['adjacency_nonblank_fscore']) = adjacency_metric(true_cells, pred_cells)
    (metrics['adjacency_withblank_recall'], metrics['adjacency_withblank_precision'],
     metrics['adjacency_withblank_fscore']) = adjacency_with_blanks_metric(true_cells, pred_cells)
    return metrics
def compute_statistics(structures, cells):
    """Collect descriptive statistics for one table.

    Returns a dict with counts of rows, columns, cells, spanning cells and
    header rows, plus the coefficient of variation of the row heights and
    the column widths (0 when fewer than two samples are available).
    """
    rows = structures['rows']
    columns = structures['columns']

    # Rows that contain at least one header cell
    header_rows = set()
    for cell in cells:
        if cell['header']:
            header_rows.update(cell['row_nums'])

    def coefficient_of_variation(sizes):
        # stdev needs at least two samples; treat degenerate cases as 0
        if len(sizes) < 2:
            return 0
        return stat.stdev(sizes) / stat.mean(sizes)

    row_heights = [float(row['bbox'][3] - row['bbox'][1]) for row in rows]
    column_widths = [float(column['bbox'][2] - column['bbox'][0]) for column in columns]

    statistics = {}
    statistics['num_rows'] = len(rows)
    statistics['num_columns'] = len(columns)
    statistics['num_cells'] = len(cells)
    statistics['num_spanning_cells'] = sum(
        1 for cell in cells
        if len(cell['row_nums']) > 1 or len(cell['column_nums']) > 1)
    statistics['num_header_rows'] = len(header_rows)
    statistics['row_height_coefficient_of_variation'] = coefficient_of_variation(row_heights)
    statistics['column_width_coefficient_of_variation'] = coefficient_of_variation(column_widths)
    return statistics
# for output bounding box post-processing
def box_cxcywh_to_xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x0, y0, x1, y1) corner format.

    ``x`` is an (N, 4) tensor; returns an (N, 4) tensor.
    """
    center_x, center_y, width, height = x.unbind(1)
    half_w = 0.5 * width
    half_h = 0.5 * height
    corners = [center_x - half_w, center_y - half_h,
               center_x + half_w, center_y + half_h]
    return torch.stack(corners, dim=1)
def rescale_bboxes(out_bbox, size):
    """Scale normalized (cx, cy, w, h) boxes to absolute (x0, y0, x1, y1).

    ``size`` is the target image's (width, height) in pixels.
    """
    width, height = size
    corners = box_cxcywh_to_xyxy(out_bbox)
    scale = torch.tensor([width, height, width, height], dtype=torch.float32)
    return corners * scale
def get_bbox_decorations(label, score):
    """Return (color, alpha, linewidth) for drawing a detection of class ``label``.

    ``score`` modulates the fill alpha for the scored classes; classes 0 and 8
    are drawn as outlines only (alpha 0).
    """
    palette = [
        'brown', 'red', 'blue', 'magenta', 'cyan', 'green', 'orange', 'green',
        'orange', 'yellow', 'brown', 'red', 'blue', 'magenta', 'cyan', 'green',
        'orange', 'green', 'orange', 'yellow'
    ]
    if label in (0, 8):
        alpha, linewidth = 0, 3
    elif label == 3:
        alpha, linewidth = score / 3, 3
    elif label in (4, 5):
        # Spanning cells get a slightly thicker outline
        alpha, linewidth = score / 3, 4
    else:
        alpha, linewidth = score / 9, 2
    return palette[label], alpha, linewidth
def plot_graph(metric_1, metric_2, metric_1_name, metric_2_name):
    """Scatter-plot one metric against another with a y=x reference line.

    Axes are restricted to [0.5, 1] and the figure is shown interactively.
    """
    plt.scatter(metric_1, metric_2, s=40, c='red', marker='o')
    plt.plot([0, 1], [0, 1])
    plt.title(metric_1_name + " vs. " + metric_2_name)
    plt.xlabel(metric_1_name)
    plt.ylabel(metric_2_name)
    plt.xlim([0.5, 1])
    plt.ylim([0.5, 1])
    plt.gcf().set_size_inches((8, 8))
    plt.show()
def grits(args, model, dataset_test, device, evaluate_on_gt = True):
"""
This function runs the GriTS proposed in the paper. We also have a debug
mode which let's you see the outputs of a model on the pdf pages.
"""
structure_class_names = [
'table', 'table column', 'table row', 'table column header',
'table projected row header', 'table spanning cell', 'no object'
]
structure_class_map = {k: v for v, k in enumerate(structure_class_names)}
structure_class_thresholds = {
"table": 0.5,
"table column": 0.5,
"table row": 0.5,
"table column header": 0.5,
"table projected row header": 0.5,
"table spanning cell": 0.5,
"no object": 10
}
if args.debug:
max_samples = min(50, len(dataset_test))
else:
max_samples = len(dataset_test)
print(max_samples)
normalize = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
model.eval()
all_metrics = []
st_time = datetime.now()
for idx in range(0, max_samples):
print(idx, end='\r')
#---Read source data: image, objects, and word bounding boxes
img, gt, orig_img, img_path = dataset_test[idx]
img_filename = img_path.split("/")[-1]
img_words_filepath = os.path.join(args.table_words_dir, img_filename.replace(".jpg", "_words.json"))
if evaluate_on_gt:
with open(img_words_filepath, 'r') as f:
page_tokens = json.load(f)
img_test = img
scale = 1000 / max(orig_img.size)
img = normalize(img)
if evaluate_on_gt:
for word in page_tokens:
word['bbox'] = [elem * scale for elem in word['bbox']]
#---Compute ground truth features
if evaluate_on_gt:
true_bboxes = [list(elem) for elem in gt['boxes'].cpu().numpy()]
true_labels = gt['labels'].cpu().numpy()
true_scores = [1 for elem in true_bboxes]
true_table_structures, true_cells, true_confidence_score = objects_to_cells(true_bboxes, true_labels, true_scores,
page_tokens, structure_class_names,
structure_class_thresholds, structure_class_map)
#---Compute predicted features
# Propagate through the model
with torch.no_grad():
outputs = model([img.to(device)])
boxes = outputs['pred_boxes']
m = outputs['pred_logits'].softmax(-1).max(-1)
scores = m.values
labels = m.indices
#rescaled_bboxes = rescale_bboxes(torch.tensor(boxes[0], dtype=torch.float32), img_test.size)
rescaled_bboxes = rescale_bboxes(boxes[0].cpu(), img_test.size)
pred_bboxes = [bbox.tolist() for bbox in rescaled_bboxes]
pred_labels = labels[0].tolist()
pred_scores = scores[0].tolist()
if evaluate_on_gt:
pred_table_structures, pred_cells, pred_confidence_score = objects_to_cells(pred_bboxes, pred_labels,
pred_scores,
page_tokens,
structure_class_names,
structure_class_thresholds,
structure_class_map)
metrics = compute_metrics(true_bboxes, true_labels, true_scores, true_cells,
pred_bboxes, pred_labels, pred_scores, pred_cells)
statistics = compute_statistics(true_table_structures, true_cells)
metrics.update(statistics)
metrics['id'] = img_path.split('/')[-1].split('.')[0]
all_metrics.append(metrics)
if idx%1000==0:
with open(args.metrics_save_filepath, 'w') as outfile:
json.dump(all_metrics, outfile)
print("Total time taken for {} samples: {}".format(idx, datetime.now() - st_time))
#---Display output for debugging
if args.debug:
fig,ax = plt.subplots(1)
ax.imshow(img_test, interpolation='lanczos')
fig.set_size_inches((15, 18))
if args.images_output_path:
plt.savefig(os.path.join(args.images_output_path, img_filename.split('.')[0] + '.jpg'), dpi=300, bbox_inches='tight')
else:
plt.show()
fig,ax = plt.subplots(1)
ax.imshow(img_test, interpolation='lanczos')
rescaled_bboxes = rescale_bboxes(torch.tensor(boxes[0], dtype=torch.float32), img_test.size)
for bbox, label, score in zip(rescaled_bboxes, | |
<filename>net.py
# -*- coding: utf-8 -*-
import sys
sys.path.append('./lib')
import theano
theano.config.on_unused_input = 'warn'
import theano.tensor as T
import numpy as np
from layers import Weight, DataLayer, ConvPoolLayer, DropoutLayer, FCLayer, MaxoutLayer
def cosine(x, y, epsilon=np.array(1e-6).astype(np.float32)):
    """Row-wise cosine similarity between two symbolic batches of vectors.

    epsilon is added to each norm to avoid division by zero; the float32
    default is evaluated once at definition time, matching Theano's dtype.
    """
    dot_product = T.sum(x * y, 1)
    x_norm = T.sqrt(T.sum(x ** 2, 1)) + epsilon
    y_norm = T.sqrt(T.sum(y ** 2, 1)) + epsilon
    return dot_product / (x_norm * y_norm)
class AlexNet(object):
def __init__(self, config):
self.config = config
batch_size = config['batch_size']
n_images = config['n_images']
# ##################### BUILD NETWORK ##########################
# allocate symbolic variables for the data
# 'rand' is a random array used for random cropping/mirroring of data
xquery = T.ftensor4('xquery') # Trying to find colour variant of this image
xp = T.ftensor4('xp') # Correct colour variant image
xns = [] # Non-variant images
for i in xrange(n_images-1):
xns.append( T.ftensor4('xn'+str(i+1)) )
rands = []
for i in xrange(n_images+1):
rands.append( T.fvector('rand'+str(i)) )
layers, params, weight_types = [], [], []
print '... building the model'
# Get the representations of all input images
query_repr, query_layers, query_params, query_weight_types = \
self.image_repr(xquery, rands[0], config)
layers += query_layers
params += query_params
weight_types += query_weight_types
p_repr, p_layers, p_params, p_weight_types = \
self.image_repr(xp, rands[1], config)
layers += p_layers
params += p_params
weight_types += p_weight_types
n_reprs = []
for i in xrange(n_images-1):
n_repr, n_layers, n_params, n_weight_types = \
self.image_repr(xns[i], rands[i+2], config)
n_reprs.append( n_repr )
layers += n_layers
params += n_params
weight_types += n_weight_types
# Compute cosine distance from query image to target images
sims_ = []
sims_.append( cosine(query_repr.output,
p_repr.output ).dimshuffle(0,'x') )
for i in xrange(n_images-1):
sims_.append( cosine(query_repr.output,
n_reprs[i].output ).dimshuffle(0,'x') )
sims = T.concatenate(sims_, axis=1)
#sims = T.concatenate([ sims[:,1].dimshuffle(0,'x'), sims[:,0].dimshuffle(0,'x') ], axis=1)
# Temp: Permute location of correct colour variant, to check that improvements are real
#rng = T.shared_randomstreams.RandomStreams(12345)
#perm = rng.permutation(size=(sims.shape[0],), n=2)
#sims2 = T.concatenate([ sims[T.arange(sims.shape[0]),perm[:,0]].dimshuffle(0,'x'),
# sims[T.arange(sims.shape[0]),perm[:,1]].dimshuffle(0,'x') ], axis=1)
#index_of_variant = T.argmin(perm, axis=1)
# Compute probabilities
p_y_given_x = T.nnet.softmax(sims)
cost = -T.mean(T.log(p_y_given_x[0, :]))
y_pred = T.argmax(p_y_given_x, axis=1)
errors = T.neq(y_pred, 0) # index_of_variant) # 0)
# #################### NETWORK BUILT #######################
self.testfunc = query_repr.output.shape # sims # errors # T.extra_ops.bincount(y_pred)
self.cost = cost
self.errors = T.mean(errors)
self.errors_top_5 = None
self.xquery = xquery
self.xp = xp
self.xns = xns
self.rands = rands
self.layers = layers
self.params = params
self.weight_types = weight_types
self.batch_size = batch_size
self.n_images = n_images
def image_repr(self, x, rand, config):
batch_size = config['batch_size']
flag_datalayer = config['use_data_layer']
lib_conv = config['lib_conv']
layers = []
params = []
weight_types = []
if flag_datalayer:
data_layer = DataLayer(input=x, image_shape=(3, 256, 256,
batch_size),
cropsize=227, rand=rand, mirror=True,
flag_rand=config['rand_crop'])
layer1_input = data_layer.output
else:
layer1_input = x
convpool_layer1 = ConvPoolLayer(input=layer1_input,
image_shape=(3, 227, 227, batch_size),
filter_shape=(3, 11, 11, 96),
convstride=4, padsize=0, group=1,
poolsize=3, poolstride=2,
bias_init=0.0, lrn=True,
lib_conv=lib_conv,
)
layers.append(convpool_layer1)
params += convpool_layer1.params
weight_types += convpool_layer1.weight_type
convpool_layer2 = ConvPoolLayer(input=convpool_layer1.output,
image_shape=(96, 27, 27, batch_size),
filter_shape=(96, 5, 5, 256),
convstride=1, padsize=2, group=2,
poolsize=3, poolstride=2,
bias_init=0.1, lrn=True,
lib_conv=lib_conv,
)
layers.append(convpool_layer2)
params += convpool_layer2.params
weight_types += convpool_layer2.weight_type
convpool_layer3 = ConvPoolLayer(input=convpool_layer2.output,
image_shape=(256, 13, 13, batch_size),
filter_shape=(256, 3, 3, 384),
convstride=1, padsize=1, group=1,
poolsize=1, poolstride=0,
bias_init=0.0, lrn=False,
lib_conv=lib_conv,
)
layers.append(convpool_layer3)
params += convpool_layer3.params
weight_types += convpool_layer3.weight_type
convpool_layer4 = ConvPoolLayer(input=convpool_layer3.output,
image_shape=(384, 13, 13, batch_size),
filter_shape=(384, 3, 3, 384),
convstride=1, padsize=1, group=2,
poolsize=1, poolstride=0,
bias_init=0.1, lrn=False,
lib_conv=lib_conv,
)
layers.append(convpool_layer4)
params += convpool_layer4.params
weight_types += convpool_layer4.weight_type
convpool_layer5 = ConvPoolLayer(input=convpool_layer4.output,
image_shape=(384, 13, 13, batch_size),
filter_shape=(384, 3, 3, 256),
convstride=1, padsize=1, group=2,
poolsize=3, poolstride=2,
bias_init=0.0, lrn=False,
lib_conv=lib_conv,
)
layers.append(convpool_layer5)
params += convpool_layer5.params
weight_types += convpool_layer5.weight_type
fc_layer6_input = T.flatten(
convpool_layer5.output.dimshuffle(3, 0, 1, 2), 2)
fc_layer6 = MaxoutLayer(input=fc_layer6_input, n_in=9216, n_out=4096)
layers.append(fc_layer6)
params += fc_layer6.params
weight_types += fc_layer6.weight_type
dropout_layer6 = DropoutLayer(fc_layer6.output, n_in=4096, n_out=4096)
fc_layer7 = MaxoutLayer(input=dropout_layer6.output, n_in=4096, n_out=4096)
layers.append(fc_layer7)
params += fc_layer7.params
weight_types += fc_layer7.weight_type
#dropout_layer7 = DropoutLayer(fc_layer7.output, n_in=4096, n_out=4096)
# Rename weight types so that weights can be shared
new_weight_types = []
counter_W = 0
counter_b = 0
for w in weight_types:
if w == 'W':
new_weight_types.append('W'+str(counter_W))
counter_W += 1
elif w == 'b':
new_weight_types.append('b'+str(counter_b))
counter_b += 1
weight_types = new_weight_types
return fc_layer7, layers, params, weight_types
def compile_models(model, config, flag_top_5=False):
xquery = model.xquery
xp = model.xp
xns = model.xns
rands = model.rands
weight_types = model.weight_types
cost = model.cost
params = model.params
errors = model.errors
#errors_top_5 = model.errors_top_5
batch_size = model.batch_size
n_images = model.n_images
mu = config['momentum']
eta = config['weight_decay']
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
updates = []
learning_rate = theano.shared(np.float32(config['learning_rate']))
lr = T.scalar('lr') # symbolic learning rate
if config['use_data_layer']:
raw_size = 256
else:
raw_size = 227
shared_xquery = theano.shared(np.zeros((3, raw_size, raw_size,
batch_size),
dtype=theano.config.floatX),
borrow=True)
shared_xp = theano.shared(np.zeros((3, raw_size, raw_size,
batch_size),
dtype=theano.config.floatX),
borrow=True)
shared_xns = []
for i in xrange(len(xns)):
shared_xn = theano.shared(np.zeros((3, raw_size, raw_size,
batch_size),
dtype=theano.config.floatX),
borrow=True)
shared_xns.append( shared_xn )
rand_arrs = []
for i in xrange(n_images+1):
rand_arr = theano.shared(np.zeros(3, dtype=theano.config.floatX),
borrow=True)
rand_arrs.append( rand_arr )
vels = [theano.shared(param_i.get_value() * 0.)
for param_i in params]
assert len(weight_types) == len(params)
# Shared weights between all image networks
iter_indexes = []
for i in xrange(20):
W_indexes = []
b_indexes = []
for j in xrange(len(weight_types)):
weight_type = weight_types[j]
if weight_type == 'W'+str(i):
W_indexes.append(j)
elif weight_type == 'b'+str(i):
b_indexes.append(j)
if len(W_indexes)>0:
iter_indexes.append(W_indexes)
if len(b_indexes)>0:
iter_indexes.append(b_indexes)
if len(W_indexes)==0 and len(b_indexes)==0:
break
for indexes in iter_indexes:
index_i = indexes[0]
weight_type = weight_types[index_i][0]
param_i = params[index_i]
grad_i = grads[index_i]
vel_i = vels[index_i]
change_i = 0
if config['use_momentum']:
if weight_type == 'W':
real_grad = grad_i + eta * param_i
real_lr = lr
elif weight_type == 'b':
real_grad = grad_i
real_lr = 2. * lr
else:
raise TypeError("Weight Type Error")
if config['use_nesterov_momentum']:
change_i = mu ** 2 * vel_i - (1 + mu) * real_lr * real_grad
else:
change_i = mu * vel_i - real_lr * real_grad
else:
if weight_type == 'W':
change_i = - lr * grad_i - eta * lr * param_i
elif weight_type == 'b':
change_i = - 2 * lr * grad_i
else:
raise TypeError("Weight Type Error")
newval = param_i + change_i
for index in indexes:
param = params[index]
updates.append((param, newval))
if config['use_momentum']:
vel = vels[index]
updates.append((vel, change_i))
#if config['use_momentum']:
# for param_i, grad_i, vel_i, weight_type in \
# zip(params, grads, vels, weight_types):
# if weight_type == 'W':
# real_grad = grad_i + eta * param_i
# real_lr = lr
# elif weight_type == 'b':
# real_grad = grad_i
# real_lr = 2. * lr
# else:
# raise TypeError("Weight Type Error")
# if config['use_nesterov_momentum']:
# vel_i_next = mu ** 2 * vel_i - (1 + mu) * real_lr * real_grad
# else:
# vel_i_next = mu * vel_i - real_lr * real_grad
# updates.append((vel_i, vel_i_next))
# updates.append((param_i, param_i + vel_i_next))
#else:
# for param_i, grad_i, weight_type in zip(params, grads, weight_types):
# #weight_type = weight_type[0]
# if weight_type == 'W':
# updates.append((param_i,
# param_i - lr * grad_i - eta * lr * param_i))
# elif weight_type == 'b':
# updates.append((param_i, param_i - 2 * lr * grad_i))
# else:
# continue
# #raise TypeError("Weight Type Error")
# Define Theano Functions
givens = []
givens.append((lr, learning_rate))
givens.append((xquery, shared_xquery))
givens.append((xp, shared_xp))
for i in xrange(len(xns)):
givens.append((xns[i], shared_xns[i]))
for i in xrange(len(rands)):
givens.append((rands[i], rand_arrs[i]))
train_model = theano.function([], cost, updates=updates,
givens=givens)
validate_outputs = [cost, errors]
#if flag_top_5:
# validate_outputs.append(errors_top_5)
validate_model = theano.function([], validate_outputs, givens=givens)
train_error = theano.function([], errors, givens=givens[1:])
if model.testfunc is not None:
testfunc = theano.function([], model.testfunc, givens=givens)
else:
testfunc = None
#
# Metrics that can be logged to understand cnn better:
#
# Variance & mean of weight matrices at each layer
# Norm of weight matrices along each of their dimensions
# Mean & variance of intermediate representations after each layer
# - Also, mean & variance per class label
# Mean, variance, norm of gradient
# - norm of gradient should not exceed 5 or 15
# Ratio between the update norm and weight norm -> should be around 0.001
#
#
return (train_model, validate_model, train_error, learning_rate,
shared_xquery, shared_xp, shared_xns, rand_arrs, vels, | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 26/06/18
@author: <NAME>
"""
import numpy as np
import scipy.sparse as sps
import time, sys, copy
from enum import Enum
from Utils.seconds_to_biggest_unit import seconds_to_biggest_unit
from Base.Evaluation.metrics import roc_auc, precision, precision_recall_min_denominator, recall, MAP, MRR, ndcg, arhr, rmse, \
Novelty, Coverage_Item, Metrics_Object, Coverage_User, Gini_Diversity, Shannon_Entropy, Diversity_MeanInterList, Diversity_Herfindahl, AveragePopularity
class EvaluatorMetrics(Enum):
    """Names of every metric the evaluators can compute.

    The string values are used as keys of the per-cutoff results dictionary
    built by ``create_empty_metrics_dict``.
    """
    ROC_AUC = "ROC_AUC"
    PRECISION = "PRECISION"
    PRECISION_RECALL_MIN_DEN = "PRECISION_RECALL_MIN_DEN"
    RECALL = "RECALL"
    MAP = "MAP"
    MRR = "MRR"
    NDCG = "NDCG"
    F1 = "F1"
    HIT_RATE = "HIT_RATE"
    ARHR = "ARHR"
    RMSE = "RMSE"
    NOVELTY = "NOVELTY"
    AVERAGE_POPULARITY = "AVERAGE_POPULARITY"
    DIVERSITY_SIMILARITY = "DIVERSITY_SIMILARITY"
    DIVERSITY_MEAN_INTER_LIST = "DIVERSITY_MEAN_INTER_LIST"
    DIVERSITY_HERFINDAHL = "DIVERSITY_HERFINDAHL"
    COVERAGE_ITEM = "COVERAGE_ITEM"
    COVERAGE_USER = "COVERAGE_USER"
    DIVERSITY_GINI = "DIVERSITY_GINI"
    SHANNON_ENTROPY = "SHANNON_ENTROPY"
def create_empty_metrics_dict(n_items, n_users, URM_train, ignore_items, ignore_users, cutoff, diversity_similarity_object):
    """Build a fresh ``{metric_name: accumulator}`` dict for one cutoff.

    Object-based metrics receive a stateful metric instance; every remaining
    (simple, averaged) metric starts at 0.0 so the evaluator can accumulate
    it with ``+=``.  DIVERSITY_SIMILARITY is only present when a similarity
    object is supplied.
    """
    empty_dict = {}
    # from Base.Evaluation.ResultMetric import ResultMetric
    # empty_dict = ResultMetric()
    for metric in EvaluatorMetrics:
        if metric == EvaluatorMetrics.COVERAGE_ITEM:
            empty_dict[metric.value] = Coverage_Item(n_items, ignore_items)
        elif metric == EvaluatorMetrics.DIVERSITY_GINI:
            empty_dict[metric.value] = Gini_Diversity(n_items, ignore_items)
        elif metric == EvaluatorMetrics.SHANNON_ENTROPY:
            empty_dict[metric.value] = Shannon_Entropy(n_items, ignore_items)
        elif metric == EvaluatorMetrics.COVERAGE_USER:
            empty_dict[metric.value] = Coverage_User(n_users, ignore_users)
        elif metric == EvaluatorMetrics.DIVERSITY_MEAN_INTER_LIST:
            empty_dict[metric.value] = Diversity_MeanInterList(n_items, cutoff)
        elif metric == EvaluatorMetrics.DIVERSITY_HERFINDAHL:
            empty_dict[metric.value] = Diversity_Herfindahl(n_items, ignore_items)
        elif metric == EvaluatorMetrics.NOVELTY:
            empty_dict[metric.value] = Novelty(URM_train)
        elif metric == EvaluatorMetrics.AVERAGE_POPULARITY:
            empty_dict[metric.value] = AveragePopularity(URM_train)
        elif metric == EvaluatorMetrics.MAP:
            empty_dict[metric.value] = MAP()
        elif metric == EvaluatorMetrics.MRR:
            empty_dict[metric.value] = MRR()
        elif metric == EvaluatorMetrics.DIVERSITY_SIMILARITY:
            if diversity_similarity_object is not None:
                empty_dict[metric.value] = copy.deepcopy(diversity_similarity_object)
        else:
            # BUGFIX: this default belongs to the outer elif-chain, not to the
            # DIVERSITY_SIMILARITY branch. With it nested inside, scalar
            # metrics (PRECISION, RECALL, NDCG, ...) were never initialized
            # and the evaluator's `+=` raised KeyError.
            empty_dict[metric.value] = 0.0
    return empty_dict
def get_result_string(results_run, n_decimals=7):
    """Format nested {cutoff: {metric: value}} results as a readable string.

    Each cutoff contributes one newline-terminated line of the form
    ``CUTOFF: <c> - <metric>: <value>, ...``.
    """
    pieces = []
    for cutoff, metrics_dict in results_run.items():
        pieces.append("CUTOFF: {} - ".format(cutoff))
        for metric_name, metric_value in metrics_dict.items():
            pieces.append("{}: {:.{n_decimals}f}, ".format(
                metric_name, metric_value, n_decimals=n_decimals))
        pieces.append("\n")
    return "".join(pieces)
class Evaluator(object):
    """Abstract Evaluator

    Holds the test URM, the cutoff list and the set of users eligible for
    evaluation; subclasses implement ``evaluateRecommender``.
    """
    EVALUATOR_NAME = "Evaluator_Base_Class"
    def __init__(self, URM_test_list, cutoff_list, minRatingsPerUser=1, exclude_seen=True,
                 diversity_object = None,
                 ignore_items = None,
                 ignore_users = None):
        """
        :param URM_test_list: a single sparse user x item test URM (passing an
            actual list is rejected below)
        :param cutoff_list: recommendation-list lengths to evaluate at
        :param minRatingsPerUser: users with fewer test interactions are skipped
        :param exclude_seen: whether train items are removed from recommendations
        :param ignore_items: item IDs excluded from evaluation, or None
        :param ignore_users: user IDs excluded from evaluation, or None
        """
        super(Evaluator, self).__init__()
        if ignore_items is None:
            self.ignore_items_flag = False
            self.ignore_items_ID = np.array([])
        else:
            print("Ignoring {} Items".format(len(ignore_items)))
            self.ignore_items_flag = True
            self.ignore_items_ID = np.array(ignore_items)
        self.cutoff_list = cutoff_list.copy()
        self.max_cutoff = max(self.cutoff_list)
        self.minRatingsPerUser = minRatingsPerUser
        self.exclude_seen = exclude_seen
        if not isinstance(URM_test_list, list):
            self.URM_test = URM_test_list.copy()
            URM_test_list = [URM_test_list]
        else:
            raise ValueError("List of URM_test not supported")
        self.diversity_object = diversity_object
        self.n_users, self.n_items = URM_test_list[0].shape
        # Prune users with an insufficient number of ratings
        # During testing CSR is faster
        self.URM_test_list = []
        # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the supported equivalent dtype.
        usersToEvaluate_mask = np.zeros(self.n_users, dtype=bool)
        for URM_test in URM_test_list:
            URM_test = sps.csr_matrix(URM_test)
            self.URM_test_list.append(URM_test)
            # Number of test interactions per user from the CSR row pointer
            rows = URM_test.indptr
            numRatings = np.ediff1d(rows)
            new_mask = numRatings >= minRatingsPerUser
            usersToEvaluate_mask = np.logical_or(usersToEvaluate_mask, new_mask)
        self.usersToEvaluate = np.arange(self.n_users)[usersToEvaluate_mask]
        if ignore_users is not None:
            print("Ignoring {} Users".format(len(ignore_users)))
            self.ignore_users_ID = np.array(ignore_users)
            self.usersToEvaluate = set(self.usersToEvaluate) - set(ignore_users)
        else:
            self.ignore_users_ID = np.array([])
        self.usersToEvaluate = list(self.usersToEvaluate)
    def evaluateRecommender(self, recommender_object):
        """
        :param recommender_object: the trained recommender object, a BaseRecommender subclass
        :param URM_test_list: list of URMs to test the recommender against, or a single URM object
        :param cutoff_list: list of cutoffs to be use to report the scores, or a single cutoff
        """
        raise NotImplementedError("The method evaluateRecommender not implemented for this evaluator class")
    def get_user_relevant_items(self, user_id):
        """Return the item indices of user_id's test interactions (one CSR row)."""
        assert self.URM_test.getformat() == "csr", "Evaluator_Base_Class: URM_test is not CSR, this will cause errors in getting relevant items"
        return self.URM_test.indices[self.URM_test.indptr[user_id]:self.URM_test.indptr[user_id+1]]
    def get_user_test_ratings(self, user_id):
        """Return the rating values of user_id's test interactions (one CSR row)."""
        assert self.URM_test.getformat() == "csr", "Evaluator_Base_Class: URM_test is not CSR, this will cause errors in relevant items ratings"
        return self.URM_test.data[self.URM_test.indptr[user_id]:self.URM_test.indptr[user_id+1]]
class EvaluatorHoldout(Evaluator):
    """EvaluatorHoldout

    Evaluates a recommender against a holdout test URM: users are processed
    in batches, the recommended top-K list of each user is compared with the
    user's held-out items, and every metric declared in EvaluatorMetrics is
    accumulated per cutoff and finally averaged over the evaluated users.
    """
    EVALUATOR_NAME = "EvaluatorHoldout"
    def __init__(self, URM_test_list, cutoff_list, minRatingsPerUser=1, exclude_seen=True,
                 diversity_object = None,
                 ignore_items = None,
                 ignore_users = None):
        # All state handling is performed by the Evaluator base class
        super(EvaluatorHoldout, self).__init__(URM_test_list, cutoff_list,
                                          diversity_object = diversity_object,
                                          minRatingsPerUser=minRatingsPerUser, exclude_seen=exclude_seen,
                                          ignore_items = ignore_items, ignore_users = ignore_users)
    def _run_evaluation_on_selected_users(self, recommender_object, usersToEvaluate, block_size = None):
        """Evaluate ``recommender_object`` on ``usersToEvaluate`` in batches.

        Returns (results_dict, n_users_evaluated): results_dict maps each
        cutoff to its dict of raw metric accumulators (not yet averaged).
        """
        if block_size is None:
            # Cap the batch so the dense scores matrix stays near 1e8 entries
            block_size = min(1000, int(1e8/self.n_items))
        start_time = time.time()
        start_time_print = time.time()
        results_dict = {}
        for cutoff in self.cutoff_list:
            results_dict[cutoff] = create_empty_metrics_dict(self.n_items, self.n_users,
                                                             recommender_object.get_URM_train(),
                                                             self.ignore_items_ID,
                                                             self.ignore_users_ID,
                                                             cutoff,
                                                             self.diversity_object)
        n_users_evaluated = 0
        # Walk the user list in blocks of block_size.
        # NOTE(review): the loop condition reads self.usersToEvaluate while the
        # slicing reads the usersToEvaluate parameter - confirm both always
        # refer to the same list (they do for the current caller).
        user_batch_start = 0
        user_batch_end = 0
        while user_batch_start < len(self.usersToEvaluate):
            user_batch_end = user_batch_start + block_size
            user_batch_end = min(user_batch_end, len(usersToEvaluate))
            test_user_batch_array = np.array(usersToEvaluate[user_batch_start:user_batch_end])
            user_batch_start = user_batch_end
            # Compute predictions for a batch of users using vectorization, much more efficient than computing it one at a time
            recommended_items_batch_list, scores_batch = recommender_object.recommend(test_user_batch_array,
                                                                      remove_seen_flag=self.exclude_seen,
                                                                      cutoff = self.max_cutoff,
                                                                      remove_top_pop_flag=False,
                                                                      remove_CustomItems_flag=self.ignore_items_flag,
                                                                      return_scores = True
                                                                     )
            assert len(recommended_items_batch_list) == len(test_user_batch_array), "{}: recommended_items_batch_list contained recommendations for {} users, expected was {}".format(
                self.EVALUATOR_NAME, len(recommended_items_batch_list), len(test_user_batch_array))
            assert scores_batch.shape[0] == len(test_user_batch_array), "{}: scores_batch contained scores for {} users, expected was {}".format(
                self.EVALUATOR_NAME, scores_batch.shape[0], len(test_user_batch_array))
            assert scores_batch.shape[1] == self.n_items, "{}: scores_batch contained scores for {} items, expected was {}".format(
                self.EVALUATOR_NAME, scores_batch.shape[1], self.n_items)
            # Compute recommendation quality for each user in batch
            for batch_user_index in range(len(recommended_items_batch_list)):
                test_user = test_user_batch_array[batch_user_index]
                relevant_items = self.get_user_relevant_items(test_user)
                relevant_items_rating = self.get_user_test_ratings(test_user)
                all_items_predicted_ratings = scores_batch[batch_user_index]
                # rmse is imported from Base.Evaluation.metrics (signature not
                # visible here); it scores predicted ratings vs test ratings
                user_rmse = rmse(all_items_predicted_ratings, relevant_items, relevant_items_rating)
                # Being the URM CSR, the indices are the non-zero column indexes
                recommended_items = recommended_items_batch_list[batch_user_index]
                # Boolean hit mask over the recommended list, in ranking order
                is_relevant = np.in1d(recommended_items, relevant_items, assume_unique=True)
                n_users_evaluated += 1
                # Accumulate each metric at every cutoff using the top-`cutoff`
                # prefix of the recommendation list
                for cutoff in self.cutoff_list:
                    results_current_cutoff = results_dict[cutoff]
                    is_relevant_current_cutoff = is_relevant[0:cutoff]
                    recommended_items_current_cutoff = recommended_items[0:cutoff]
                    results_current_cutoff[EvaluatorMetrics.ROC_AUC.value] += roc_auc(is_relevant_current_cutoff)
                    results_current_cutoff[EvaluatorMetrics.PRECISION.value] += precision(is_relevant_current_cutoff)
                    results_current_cutoff[EvaluatorMetrics.PRECISION_RECALL_MIN_DEN.value] += precision_recall_min_denominator(is_relevant_current_cutoff, len(relevant_items))
                    results_current_cutoff[EvaluatorMetrics.RECALL.value] += recall(is_relevant_current_cutoff, relevant_items)
                    results_current_cutoff[EvaluatorMetrics.NDCG.value] += ndcg(recommended_items_current_cutoff, relevant_items, relevance=self.get_user_test_ratings(test_user), at=cutoff)
                    results_current_cutoff[EvaluatorMetrics.HIT_RATE.value] += is_relevant_current_cutoff.sum()
                    results_current_cutoff[EvaluatorMetrics.ARHR.value] += arhr(is_relevant_current_cutoff)
                    results_current_cutoff[EvaluatorMetrics.RMSE.value] += user_rmse
                    # Object-based metrics keep their own internal state
                    results_current_cutoff[EvaluatorMetrics.MRR.value].add_recommendations(is_relevant_current_cutoff)
                    results_current_cutoff[EvaluatorMetrics.MAP.value].add_recommendations(is_relevant_current_cutoff, relevant_items)
                    results_current_cutoff[EvaluatorMetrics.NOVELTY.value].add_recommendations(recommended_items_current_cutoff)
                    results_current_cutoff[EvaluatorMetrics.AVERAGE_POPULARITY.value].add_recommendations(recommended_items_current_cutoff)
                    results_current_cutoff[EvaluatorMetrics.DIVERSITY_GINI.value].add_recommendations(recommended_items_current_cutoff)
                    results_current_cutoff[EvaluatorMetrics.SHANNON_ENTROPY.value].add_recommendations(recommended_items_current_cutoff)
                    results_current_cutoff[EvaluatorMetrics.COVERAGE_ITEM.value].add_recommendations(recommended_items_current_cutoff)
                    results_current_cutoff[EvaluatorMetrics.COVERAGE_USER.value].add_recommendations(recommended_items_current_cutoff, test_user)
                    results_current_cutoff[EvaluatorMetrics.DIVERSITY_MEAN_INTER_LIST.value].add_recommendations(recommended_items_current_cutoff)
                    results_current_cutoff[EvaluatorMetrics.DIVERSITY_HERFINDAHL.value].add_recommendations(recommended_items_current_cutoff)
                    # DIVERSITY_SIMILARITY is only present when a similarity
                    # object was provided at construction time
                    if EvaluatorMetrics.DIVERSITY_SIMILARITY.value in results_current_cutoff:
                        results_current_cutoff[EvaluatorMetrics.DIVERSITY_SIMILARITY.value].add_recommendations(recommended_items_current_cutoff)
                # Progress report at most every 30 seconds, plus once at the end
                if time.time() - start_time_print > 30 or n_users_evaluated==len(self.usersToEvaluate):
                    elapsed_time = time.time()-start_time
                    new_time_value, new_time_unit = seconds_to_biggest_unit(elapsed_time)
                    print("{}: Processed {} ( {:.2f}% ) in {:.2f} {}. Users per second: {:.0f}".format(
                              self.EVALUATOR_NAME,
                              n_users_evaluated,
                              100.0* float(n_users_evaluated)/len(self.usersToEvaluate),
                              new_time_value, new_time_unit,
                              float(n_users_evaluated)/elapsed_time))
                    sys.stdout.flush()
                    sys.stderr.flush()
                    start_time_print = time.time()
        return results_dict, n_users_evaluated
    def evaluateRecommender(self, recommender_object):
        """
        :param recommender_object: the trained recommender object, a BaseRecommender subclass
        :param URM_test_list: list of URMs to test the recommender against, or a single URM object
        :param cutoff_list: list of cutoffs to be use to report the scores, or a single cutoff
        """
        if self.ignore_items_flag:
            recommender_object.set_items_to_ignore(self.ignore_items_ID)
        results_dict, n_users_evaluated = self._run_evaluation_on_selected_users(recommender_object, self.usersToEvaluate)
        if (n_users_evaluated > 0):
            # Finalize: object metrics yield their value, scalar accumulators
            # are averaged over the number of evaluated users
            for cutoff in self.cutoff_list:
                results_current_cutoff = results_dict[cutoff]
                for key in results_current_cutoff.keys():
                    value = results_current_cutoff[key]
                    if isinstance(value, Metrics_Object):
                        results_current_cutoff[key] = value.get_metric_value()
                    else:
                        results_current_cutoff[key] = value/n_users_evaluated
                precision_ = results_current_cutoff[EvaluatorMetrics.PRECISION.value]
                recall_ = results_current_cutoff[EvaluatorMetrics.RECALL.value]
                if precision_ + recall_ != 0:
                    # F1 micro averaged: http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.104.8244&rep=rep1&type=pdf
                    results_current_cutoff[EvaluatorMetrics.F1.value] = 2 * (precision_ * recall_) / (precision_ + recall_)
        else:
            print("WARNING: No users had a sufficient number of relevant items")
        results_run_string = get_result_string(results_dict)
        if self.ignore_items_flag:
            recommender_object.reset_items_to_ignore()
        return (results_dict, results_run_string)
class EvaluatorNegativeItemSample(Evaluator):
"""EvaluatorNegativeItemSample"""
EVALUATOR_NAME = "EvaluatorNegativeItemSample"
def __init__(self, URM_test_list, URM_test_negative, cutoff_list, minRatingsPerUser=1, exclude_seen=True,
diversity_object = None,
ignore_items = None,
ignore_users = None):
"""
The EvaluatorNegativeItemSample computes the recommendations by sorting the test items as well as the test_negative items
It ensures that each item appears only once even if it is listed in both matrices
:param URM_test_list:
:param URM_test_negative: Items to rank together with the test items
:param cutoff_list:
:param minRatingsPerUser:
:param exclude_seen:
:param diversity_object:
:param ignore_items:
:param ignore_users:
"""
super(EvaluatorNegativeItemSample, self).__init__(URM_test_list, cutoff_list,
diversity_object = diversity_object,
minRatingsPerUser=minRatingsPerUser, exclude_seen=exclude_seen,
ignore_items = ignore_items, ignore_users = ignore_users)
self.URM_items_to_rank = sps.csr_matrix(self.URM_test.copy().astype(np.bool)) + sps.csr_matrix(URM_test_negative.copy().astype(np.bool))
self.URM_items_to_rank.eliminate_zeros()
self.URM_items_to_rank.data = np.ones_like(self.URM_items_to_rank.data)
def _get_user_specific_items_to_compute(self, user_id):
start_pos = self.URM_items_to_rank.indptr[user_id]
end_pos = self.URM_items_to_rank.indptr[user_id+1]
items_to_compute = self.URM_items_to_rank.indices[start_pos:end_pos]
return items_to_compute
def evaluateRecommender(self, recommender_object):
"""
:param recommender_object: the trained recommender object, a BaseRecommender subclass
:param URM_test_list: list of URMs to test the recommender against, or a single URM object
:param cutoff_list: list of cutoffs to be use to report the scores, or a single cutoff
"""
results_dict = {}
for cutoff in self.cutoff_list:
results_dict[cutoff] = create_empty_metrics_dict(self.n_items, self.n_users,
recommender_object.URM_train,
self.ignore_items_ID,
self.ignore_users_ID,
cutoff,
self.diversity_object)
start_time = time.time()
start_time_print = time.time()
n_users_evaluated = 0
if self.ignore_items_flag:
recommender_object.set_items_to_ignore(self.ignore_items_ID)
for test_user in self.usersToEvaluate:
# Being the URM CSR, the indices are the non-zero column indexes
relevant_items = self.get_user_relevant_items(test_user)
relevant_items_rating = self.get_user_test_ratings(test_user)
n_users_evaluated += 1
items_to_compute = self._get_user_specific_items_to_compute(test_user)
recommended_items, all_items_predicted_ratings = recommender_object.recommend(np.atleast_1d(test_user),
remove_seen_flag=self.exclude_seen,
cutoff = self.max_cutoff,
remove_top_pop_flag=False,
items_to_compute = items_to_compute,
remove_CustomItems_flag=self.ignore_items_flag,
return_scores = True
)
assert len(recommended_items) == 1, "{}: recommended_items contained recommendations for {} users, expected was {}".format(
self.EVALUATOR_NAME, len(recommended_items), 1)
assert all_items_predicted_ratings.shape[0] == 1, "{}: all_items_predicted_ratings contained scores for {} users, expected was {}".format(
self.EVALUATOR_NAME, all_items_predicted_ratings.shape[0], 1)
assert all_items_predicted_ratings.shape[1] == self.n_items, "{}: all_items_predicted_ratings contained scores for {} items, expected was {}".format(
self.EVALUATOR_NAME, all_items_predicted_ratings.shape[1], self.n_items)
recommended_items = np.array(recommended_items[0])
user_rmse = | |
# -*- coding: utf-8 -*-
"""F
Created on Wed Feb 26 10:24:21 2020
@author: <NAME>
"""
import sys
import math
import random
import numpy as np
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import cbook
from matplotlib import cm
from matplotlib.colors import LightSource
from matplotlib.colors import Normalize
from scipy import signal
from scipy import stats
from sklearn.cross_decomposition import PLSRegression
#from pls import PLSRegression #own SIMPLS based alternative to sklearn
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
import warnings
from fssreg import FSSRegression
from ipls import IntervalPLSRegression
from class_mcw_pls import mcw_pls_sklearn
from osc import OSC
warnings.filterwarnings('ignore')
class InputError(Exception):
    """Raised when the caller supplies an invalid configuration value.

    The offending value is kept on ``self.value`` and rendered via ``repr``.
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
class NIRData:
def __init__(self, df, y_name="value",date_name="refdate",
cval="MD",cval_param=None):
# The class takes input dataframe in the following format:
# -it needs to be a pandas dataframe
# -it can only have the following columns: spectra variables,
# measurement date, single dependent variable
# -the measurement date and dpeendent variable column's name needs to be specified
# -the CV method needs to be defined, it supports MD and kfold, for kfold
# the number of folds needs to be defined with cval_param
self.df0=df.copy()
self.df=df.copy()
#Column with dependent variable
self.y_name=y_name
#Date column
self.date_name=date_name
#Columns with predictors
self.freqs = [col for col in df.columns if col not in [date_name, y_name]]
#If frequency columns are not all numeric, convert them
if len([x for x in self.freqs if isinstance(x, float)])<len(self.freqs):
self.freqs=[float(freq) for freq in self.freqs]
self.df0.columns=[float(col) if col not in [date_name, y_name] else col for col in df.columns]
self.df.columns=[float(col) if col not in [date_name, y_name] else col for col in df.columns]
self.cval=cval
if cval!="MD":
if cval_param==None:
raise InputError("Missing cross validation parameter!")
self.cval_param=cval_param
#Changing the cross validation method without reinstantiating the class
def set_cval(self,cval_new):
self.cval=cval_new
################# Preprocessing techniques
# Resetting the pre-processing to the raw spectra
def reset(self):
self.df=self.df0.copy()
# Preprocessing methods (detrending, SG filter, SNV, MSC)
def to_percent(self):
f = lambda x: x/100
a=np.vectorize(f)(self.df[self.freqs].to_numpy())
self.df.loc[:, self.freqs]=a
# Convert transmittance/reflectance to absorbance
def to_absorb(self,mode="R",percent=False):
# If source is transmittance, use mode="T", if reflectance mode="R"
# Functions only valid if data is between 0-1 (percent=True)
# otherwise convert the T/R values to percent
if not percent:
self.to_percent()
if mode=="T":
f = lambda x: math.log10(1/x)
elif mode=="R":
f = lambda x: ((1-x)**2)/x
else:
raise Exception("Invalid mode, has to be either T or R")
a=np.vectorize(f)(self.df[self.freqs].to_numpy())
self.df.loc[:, self.freqs]=a
# Detrending
def detrend(self, degree=1):
# Calculates a linear trend or a constant (the mean) for every
# spectral line and subtracts it
# Result is slightly different from manually implementing it!!!
x=np.array([self.freqs]).reshape(-1,)
Y=self.df[self.freqs].to_numpy()
for i in range(Y.shape[0]):
y=Y[i,:]
fit = np.polyfit(x, y, degree)
trend=np.polyval(fit, x)
y=y-trend
Y[i,:]=y
self.df.loc[:, self.freqs]=Y
# Savitzky-Golay filter
def sgfilter(self,window_length=13,polyorder=2,deriv=1):
a=signal.savgol_filter(self.df[self.freqs]
,window_length, polyorder, deriv, delta=1.0,axis=-1, mode='interp', cval=0.0)
self.df[self.freqs]=a
# SNV
def snv(self):
scaler = StandardScaler(with_mean=True, with_std=True)
scaler.fit(self.df[self.freqs].T)
self.df.loc[:, self.freqs]=scaler.transform(
self.df[self.freqs].T).T
# MSC
def msc(self):
ref=np.mean(self.df[self.freqs],axis=0)
X=np.matrix(self.df[self.freqs],dtype='float')
for i in range(self.df.shape[0]):
A=np.vstack([np.matrix(ref,dtype='float'),
np.ones(X.shape[1])]).T
coef, resids, rank, s = np.linalg.lstsq(
A,X[i,:].T)
X[i,:]=(X[i,:]-coef[1])/coef[0]
self.df[self.freqs]=X
# OSC is supervised preprocessing, so it needs CV, for which a joint modeling step is needed
# this method only crossvalidates using PLS, for other models use the built in osc_params
    # OSC is supervised preprocessing, so it needs CV, for which a joint modeling step is needed
    # this method only crossvalidates using PLS, for other models use the built in osc_params
    def osc_cv(self,nicomp_range=range(10,130,10),ncomp_range=range(1,5),epsilon = 10e-6,
               max_iters = 20,model="pls",model_parameter_range=range(1,11)):
        """Cross-validate OSC preprocessing jointly with a PLS model.

        For every (nicomp, ncomp) OSC setting the OSC filter is fitted on each
        training fold, applied to the validation fold, and a PLS model is
        evaluated for every candidate component count. A 3D RPD surface is
        plotted over the OSC grid and the best combination is printed.

        :param nicomp_range: candidate numbers of internal OSC components
        :param ncomp_range: candidate numbers of removed OSC components
        :param epsilon: OSC convergence tolerance
        :param max_iters: maximum number of OSC iterations
        :param model: NOTE(review): accepted but never used -- PLS is hard-coded
        :param model_parameter_range: candidate PLS component counts
        :return: 2D array of CV RMSE values indexed by (nicomp, ncomp)
        """
        # Separating X from Y for PLS
        # Needs to be converted to numpy array from pandas df
        X=self.df[self.freqs].to_numpy()
        # Y needs to be converted to numpy array from pandas series and reshaped to (N,1) from (N,)
        Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
        # CV folds grouped by measurement day
        if self.cval=="MD":
            cv = LeaveOneGroupOut()
            folds=list(cv.split(X=X,y=Y,groups=self.df[self.date_name]))
        # plain kfold CV
        elif self.cval=="kfold":
            cv = KFold(n_splits=self.cval_param)
            folds=list(cv.split(X))
        else:
            raise InputError("Invalid CV type!")
        # 4D tensor of CV errors indexed [fold, pls parameter, nicomp, ncomp]
        cv_RMSE_all=np.zeros([len(folds),len(model_parameter_range),len(nicomp_range),len(ncomp_range)])
        i=0
        # possible internal component values for osc
        for nicomp in nicomp_range:
            j=0
            # possible removed component values for osc
            for ncomp in ncomp_range:
                k=0
                for train, val in folds:
                    # train osc on the training fold only (OSC is supervised)
                    osc_obj=OSC("SWosc",nicomp,ncomp,epsilon, max_iters)
                    X_osc_train, W,P,mu_x=osc_obj.fit(X[train],Y[train])
                    # apply osc on validation set
                    # mean center data, alternatively the training set's mean can be used
                    # if you think it is a better estimate by mean="training"
                    X_osc_val=osc_obj.transform(X[val],mean="estimate")
                    l=0
                    # possible model parameter values for pls
                    for param in model_parameter_range:
                        # setup pls model
                        pls = PLSRegression(param,scale=False)
                        # train pls
                        pls.fit(X_osc_train, Y[train])
                        # predict with pls and record this fold's RMSE
                        cv_RMSE_all[k,l,i,j]=metrics.mean_squared_error(
                            Y[val], pls.predict(X_osc_val))**0.5
                        l=l+1
                    k=k+1
                j=j+1
            i=i+1
        # Calculate mean performance across the folds
        cv_RMSE_mean=np.mean(cv_RMSE_all,axis=0)
        # Worst-case (max) RMSE across PLS component counts, per OSC setting.
        # NOTE(review): np.amax keeps the WORST RMSE for each (nicomp, ncomp);
        # if the intent was the best achievable RMSE this should be np.amin -- confirm.
        cv_RMSE=np.amax(cv_RMSE_mean, axis=0)
        # RPD = sample std / RMSE, shown as a shaded 3D surface over the OSC grid
        cv_RPD=np.std(self.df[self.y_name])/cv_RMSE
        fig = plt.figure(figsize=(10,5))
        ax = plt.axes(projection="3d")
        # Cartesian indexing (x,y) transposes matrix indexing (i,j)
        x, y = np.meshgrid(list(ncomp_range),list(nicomp_range))
        z=cv_RPD
        ls = LightSource(200, 45)
        rgb = ls.shade(z, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')
        surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, facecolors=rgb,
                               linewidth=0, antialiased=False, shade=False)
        plt.show()
        # Report the OSC parameter combination with the lowest RMSE
        print("Best RMSE: ",np.amin(cv_RMSE))
        print("Best RPD: ",np.std(self.df[self.y_name])/np.amin(cv_RMSE))
        print("Number of internal components: ",nicomp_range[np.where(
            cv_RMSE==np.amin(cv_RMSE))[0][0]])
        print("Number of removed components: ",ncomp_range[np.where(
            cv_RMSE==np.amin(cv_RMSE))[1][0]])
        return cv_RMSE
############### Plotting methods
# Plotting the current processed version of the spectra
    def plot_spectra(self, processed=True, savefig=False, *args):
        """Plot all spectra; extra positional args draw vertical marker lines.

        :param processed: True plots the current working (preprocessed) spectra
            in ``self.df``; False plots the raw spectra kept in ``self.df0``
        :param savefig: save the figure to 'plot_spectra.pdf' when True
        :param args: x positions for vertical reference lines (note: being
            after the defaults, these can only be supplied positionally)
        """
        fig,ax = plt.subplots(figsize=(12, 8))
        if processed:
            # self.df holds the current (preprocessed) spectra
            ax.plot(self.df[self.freqs].T)
        else:
            # self.df0 holds the untouched raw spectra
            ax.plot(self.df0[self.freqs].T)
        # Vertical reference lines at the requested frequencies
        for arg in args:
            ax.axvline(x=arg)
        if savefig:
            plt.savefig('plot_spectra.pdf')
# Plotting the fitted PLS model's regression weights on the spectra
def plot_pls(self):
#r=self.pls_obj.x_rotations_
r=self.pls_obj.coef_
fig, ax = plt.subplots(figsize=(12, 8))
ax.plot(self.df[self.freqs].T,c="grey",alpha=1)
ax.pcolorfast((np.min(self.freqs),np.max(self.freqs)), ax.get_ylim(),
r.T,cmap='seismic',vmin=-1,vmax=1, alpha=1)
norm = Normalize(vmin=-1, vmax=1)
scalarmappaple = cm.ScalarMappable(norm=norm,cmap='seismic')
scalarmappaple.set_array(r.T)
fig.colorbar(scalarmappaple)
# Plotting the fitted MCW-PLS model's sample weights for the individual spectra
def plot_mcw_pls(self):
a=np.diagonal(self.mcw_pls_obj.sample_weights)
cmap = plt.cm.get_cmap('seismic')
fig, ax = plt.subplots(figsize=(6, 4))
for i in range(self.df[self.freqs].shape[0]):
row=self.df[self.freqs].iloc[i]
ax.plot(row,c=cmap(a[i]),alpha=1)
scalarmappaple = cm.ScalarMappable(cmap=cmap)
scalarmappaple.set_array(a)
plt.colorbar(scalarmappaple)
r=self.mcw_pls_obj.BPLS
fig, ax = plt.subplots(figsize=(6, 4))
ax.plot(self.df[self.freqs].T,c="grey",alpha=1)
ax.pcolorfast((np.min(self.freqs),np.max(self.freqs)), ax.get_ylim(),
r.T,cmap='seismic',vmin=-1,vmax=1, alpha=1)
norm = Normalize(vmin=-1, vmax=1)
scalarmappaple = cm.ScalarMappable(norm=norm,cmap='seismic')
scalarmappaple.set_array(r.T)
fig.colorbar(scalarmappaple)
######################### Modeling methods
# Support vector regression
# For fitting a model with given parameters
def svr_pipe(self,gam,c,eps):
X=self.df[self.freqs].to_numpy()
Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
self.svr_pipe_obj = Pipeline([('scaler', StandardScaler()),
('support vector regression',
SVR(kernel="rbf",gamma=gam,C=c,epsilon=eps))])
self.svr_pipe_obj.fit(X, Y)
# For evaluating a model with given parameters
def svr_eval(self, gam,c,eps):
X=self.df[self.freqs].to_numpy()
Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
pipe = Pipeline([('scaler', StandardScaler()),
('support vector regression',
SVR(kernel="rbf",gamma=gam,C=c,epsilon=eps))])
self.eval_df=pd.DataFrame(columns = ["estimated","true"])
if self.cval=="MD":
cv = LeaveOneGroupOut()
folds=list(cv.split(X=X,y=Y,groups=self.df[self.date_name]))
cv_RMSE=np.zeros(len(folds))
i=0
for train, val in folds:
pipe.fit(X[train], Y[train])
cv_RMSE[i]=metrics.mean_squared_error(
Y[val], pipe.predict(X[val]))**0.5
eval_new=pd.DataFrame({'estimated': pipe.predict(X[val]).reshape((-1,)),
'true': Y[val].reshape((-1,))})
self.eval_df=self.eval_df.append(eval_new, ignore_index = True)
i=i+1
y_true=self.eval_df["true"]
y_est=self.eval_df["estimated"]
print(np.std(y_true)/metrics.mean_squared_error(y_true,y_est)**0.5)
print(np.std(y_true)/np.mean(cv_RMSE))
residuals=y_true-y_est
linreg = stats.linregress(y_true, y_est)
blue='#1f77b4'
# Observed vs predicted
fig,ax = plt.subplots(figsize=(5, 5))
ax.scatter(x=y_true,y=y_est)
# Perfect prediction
ax.plot([np.min(Y), np.max(Y)], [np.min(Y), np.max(Y)], 'k--', color = 'r',label='Perfect fit')
# Model fit
ax.plot(y_true, linreg.intercept + linreg.slope*y_true, blue,label='Predicted fit')
# Text location needs to be picked manually
#ax.text(48, 56, 'R$^2$ = %0.002f' % linreg.rvalue,color=blue)
ax.text(93, 95, 'R$^2$ = %0.002f' % linreg.rvalue,color=blue)
ax.set(xlabel="Observed (%)",ylabel="Predicted (%)")
ax.legend()
# Predicted vs residuals
fig,ax = plt.subplots(figsize=(5, 5))
ax.scatter(x=y_est,y=residuals)
ax.axhline(y=np.mean(residuals), color='r', linestyle='--',label='Mean = %0.6f' % np.mean(residuals))
ax.set(xlabel="Predicted (%)",ylabel="Residuals (%)")
ax.legend()
# QQ plot
fig,ax = plt.subplots(figsize=(5, 5))
stats.probplot(residuals,plot=ax)
ax.get_lines()[0].set_markerfacecolor(blue)
ax.get_lines()[0].set_markeredgecolor(blue)
ax.get_figure().gca().set_title("")
ax.get_figure().gca().set_ylabel("Residuals (%)")
# Residual density plot with normal density
normx = np.linspace(-8,8,1000)
normy = stats.norm.pdf(normx, loc=np.mean(residuals), scale=np.std(residuals))
fig,ax = plt.subplots(figsize=(5, 5))
sns.distplot(residuals,norm_hist=True,ax=ax,color=blue)
ax.plot(normx,normy,color='r')
sns.set_style("white")
# Sorted alphas plot
# Get alphas
alphas=self.svr_pipe_obj['support vector regression'].dual_coef_
# Take abs value and sort
alphas=abs(alphas)
alphas=np.sort(alphas)
# Add zero alphas
alphas=np.vstack((np.zeros((X.shape[0]-len(alphas.T),1)),alphas.T))
fig,ax = plt.subplots(figsize=(5, 5))
ax.plot(alphas)
ax.set(xlabel="Sample ranking",ylabel="SV absolute α value")
# Method for tuning an SVM regression's free parameters based on CV
# OSC built in option, as this preprocessing is supervised so needs to be validated at the same time
def svr_cv(self,gam_start=0.001,
c_start=100,
eps_start=0.1,
optimization="grid",gridscale=5,non_improve_lim=10,verbose=False,
osc_params=None):
# Separating X from Y for PLS
X=self.df[self.freqs].to_numpy()
Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
sample_std=np.std(self.df[self.y_name])
# CV based on measurement day
if self.cval=="MD":
cv = LeaveOneGroupOut()
folds=list(cv.split(X=X,y=Y,groups=self.df[self.date_name]))
# kfold CV
elif self.cval=="kfold":
cv = KFold(n_splits=self.cval_param)
folds=list(cv.split(X))
else:
raise InputError("Invalid CV type!")
if optimization=="none":
cv_RMSE=np.zeros(len(folds))
# Only use RBF kernels, also standardize data
pipe = Pipeline([('scaler', StandardScaler()),
('support vector regression',
SVR(kernel="rbf",gamma=gam_start,C=c_start,epsilon=eps_start))])
l=0
for | |
= api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501
return api_client.call_api(
"/settings/ldap/user_schema",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def post_test_ldap_user_schema_settings(
    api_client,
    user_base=None,
    user_id_attribute=None,
    user_list_filter=None,
    limit=None,
    **kwargs
):
    """Test LDAP user schema settings # noqa: E501

    Test LDAP user schema settings # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> response = await api.post_test_ldap_user_schema_settings(token_id, async_req=True)

    :param VNS3Client api_client: (required)
    :param str user_base: Base DN from which to search for Users (required)
    :param str user_id_attribute: Attribute type for the Users (required)
    :param str user_list_filter: Search filter for Users
    :param str limit: Number of records to return. Default = 100
    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: APIResponse or awaitable if async
    """
    local_var_params = locals()
    # Bug fix: locals() stores the extra options under the single key
    # "kwargs", so lookups such as local_var_params.get("async_req") always
    # returned None and request options were silently ignored. Flatten the
    # kwargs into the parameter map so they are honored.
    local_var_params.update(local_var_params.pop("kwargs", {}))

    request_params = ["user_base", "user_id_attribute", "user_list_filter", "limit"]

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}

    # Only forward explicitly-provided request fields in the JSON body.
    body_params = {}
    for param in [p for p in request_params if local_var_params.get(p) is not None]:
        body_params[param] = local_var_params[param]

    # HTTP header `Accept`
    header_params["Accept"] = api_client.select_header_accept(
        ["application/json"]
    )  # noqa: E501

    # HTTP header `Content-Type`
    header_params["Content-Type"] = api_client.select_header_content_type(  # noqa: E501
        ["application/json"]
    )  # noqa: E501

    # Authentication setting
    auth_settings = ["ApiTokenAuth", "basicAuth"]  # noqa: E501

    return api_client.call_api(
        "/settings/ldap/user_schema",
        "POST",
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type="object",  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get("async_req"),
        _return_http_data_only=local_var_params.get(
            "_return_http_data_only"
        ),  # noqa: E501
        _preload_content=local_var_params.get("_preload_content", True),
        _request_timeout=local_var_params.get("_request_timeout"),
        collection_formats=collection_formats,
    )
def get_ldap_user_schema_settings(api_client, **kwargs):  # noqa: E501
    """Get LDAP user schema settings # noqa: E501

    Get LDAP user schema settings # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> response = await api.get_ldap_user_schema_settings(async_req=True)

    :param VNS3Client api_client: (required)
    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: APIResponse or awaitable if async
    """
    local_var_params = locals()
    # Bug fix: locals() keeps the options under the single key "kwargs", so
    # async_req/_request_timeout/... were silently ignored. Flatten them.
    local_var_params.update(local_var_params.pop("kwargs", {}))

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params["Accept"] = api_client.select_header_accept(
        ["application/json"]
    )  # noqa: E501

    # Authentication setting
    auth_settings = ["ApiTokenAuth", "basicAuth"]  # noqa: E501

    return api_client.call_api(
        "/settings/ldap/user_schema",
        "GET",
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type="object",  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get("async_req"),
        _return_http_data_only=local_var_params.get(
            "_return_http_data_only"
        ),  # noqa: E501
        _preload_content=local_var_params.get("_preload_content", True),
        _request_timeout=local_var_params.get("_request_timeout"),
        collection_formats=collection_formats,
    )
def put_ldap_vpn_schema_settings(
    api_client,
    vpn_auth_enabled=None,
    vpn_group_base=None,
    vpn_group_id_attribute=None,
    vpn_group_list_filter=None,
    vpn_group_member_attribute=None,
    vpn_group_member_attr_format=None,
    vpn_group_search_scope=None,
    vpn_group_otp=None,
    **kwargs
):
    """Put LDAP VPN schema settings # noqa: E501

    Put LDAP VPN schema settings # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> response = await api.put_ldap_vpn_schema_settings(token_id, async_req=True)

    :param VNS3Client api_client: (required)
    :param bool vpn_auth_enabled: Enable VPN LDAP auth (required)
    :param str vpn_group_base: Base DN from which to search for VPN Group (required)
    :param str vpn_group_id_attribute: Attribute type for the VPN Group (required)
    :param str vpn_group_list_filter: Search filter for VPN Groups
    :param str vpn_group_member_attribute: Attribute used to search for a user within the VPN Group
    :param str vpn_group_member_attr_format: Format of the Group VPN Member attribute
    :param str vpn_group_search_scope: Search scope for filter
    :param bool vpn_group_otp: Use Google authenticator OTP (default false). New with 4.11.3
    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: APIResponse or awaitable if async
    """
    local_var_params = locals()
    # Bug fix: locals() keeps the options under the single key "kwargs", so
    # async_req/_request_timeout/... were silently ignored. Flatten them.
    local_var_params.update(local_var_params.pop("kwargs", {}))

    request_params = [
        "vpn_auth_enabled",
        "vpn_group_base",
        "vpn_group_id_attribute",
        "vpn_group_list_filter",
        "vpn_group_member_attribute",
        "vpn_group_member_attr_format",
        "vpn_group_search_scope",
        "vpn_group_otp",
    ]

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}

    # Only forward explicitly-provided request fields in the JSON body.
    body_params = {}
    for param in [p for p in request_params if local_var_params.get(p) is not None]:
        body_params[param] = local_var_params[param]

    # HTTP header `Accept`
    header_params["Accept"] = api_client.select_header_accept(
        ["application/json"]
    )  # noqa: E501

    # HTTP header `Content-Type`
    header_params["Content-Type"] = api_client.select_header_content_type(  # noqa: E501
        ["application/json"]
    )  # noqa: E501

    # Authentication setting
    auth_settings = ["ApiTokenAuth", "basicAuth"]  # noqa: E501

    return api_client.call_api(
        "/settings/ldap/vpn_schema",
        "PUT",
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type="object",  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get("async_req"),
        _return_http_data_only=local_var_params.get(
            "_return_http_data_only"
        ),  # noqa: E501
        _preload_content=local_var_params.get("_preload_content", True),
        _request_timeout=local_var_params.get("_request_timeout"),
        collection_formats=collection_formats,
    )
def get_ldap_vpn_schema_settings(api_client, **kwargs):  # noqa: E501
    """Get LDAP VPN schema settings # noqa: E501

    Get LDAP VPN schema settings # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> response = await api.get_ldap_vpn_schema_settings(async_req=True)

    :param VNS3Client api_client: (required)
    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: APIResponse or awaitable if async
    """
    local_var_params = locals()
    # Bug fix: locals() keeps the options under the single key "kwargs", so
    # async_req/_request_timeout/... were silently ignored. Flatten them.
    local_var_params.update(local_var_params.pop("kwargs", {}))

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params["Accept"] = api_client.select_header_accept(
        ["application/json"]
    )  # noqa: E501

    # Authentication setting
    auth_settings = ["ApiTokenAuth", "basicAuth"]  # noqa: E501

    return api_client.call_api(
        "/settings/ldap/vpn_schema",
        "GET",
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type="object",  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get("async_req"),
        _return_http_data_only=local_var_params.get(
            "_return_http_data_only"
        ),  # noqa: E501
        _preload_content=local_var_params.get("_preload_content", True),
        _request_timeout=local_var_params.get("_request_timeout"),
        collection_formats=collection_formats,
    )
def post_test_ldap_vpn_schema_settings(
api_client,
vpn_group_base=None,
vpn_group_id_attribute=None,
vpn_group_list_filter=None,
vpn_group_member_attribute=None,
vpn_group_member_attr_format=None,
vpn_group_search_scope=None,
limit=None,
**kwargs
):
"""Test LDAP VPN schema settings # noqa: E501
Test LDAP VPN schema settings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> response = await api.post_test_ldap_vpn_schema_settings(token_id, async_req=True)
:param VNS3Client api_client: (required)
:param str vpn_group_base: Base DN from which to search for VPN Group (required)
:param str vpn_group_id_attribute: Attribute type for the VPN Group (required)
:param str vpn_group_list_filter: Search filter for VPN Groups
:param str vpn_group_member_attribute: Attribute used to search for a user within the VPN Group
:param str vpn_group_member_attr_format: Format of the Group VPN Member attribute
:param str vpn_group_search_scope: Search scope for filter
:param str limit: Number of records to return. Default = 100
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: APIResponse or awaitable if async
"""
local_var_params = locals()
request_params = [
"vpn_group_base",
"vpn_group_id_attribute",
"vpn_group_list_filter",
"vpn_group_member_attribute",
"vpn_group_member_attr_format",
"vpn_group_search_scope",
"limit",
]
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = {}
for param in [p for p in request_params if local_var_params.get(p) is not None]:
body_params[param] = local_var_params[param]
# HTTP header `Accept`
header_params["Accept"] = api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params["Content-Type"] = api_client.select_header_content_type( # | |
"compositions",
"afterthe",
"volkoff",
"chinks",
"non-violent",
"eleni",
"kristine",
"croupier",
"chiseled",
"35th",
"baskerville",
"soundproof",
"serrated",
"shallows",
"tompkins",
"nye",
"criminology",
"girlies",
"horsing",
"temperate",
"disarming",
"drachmas",
"awesomeness",
"dingle",
"governs",
"dottore",
"bohemia",
"gunk",
"steinberg",
"appetizing",
"mccallister",
"baum",
"odell",
"realy",
"zoltan",
"circe",
"contraire",
"torturer",
"jaeger",
"insurmountable",
"hon.",
"wee-wee",
"excavator",
"nobler",
"centennial",
"purging",
"mainwaring",
"analysing",
"torrente",
"taeko",
"ferrets",
"softy",
"clea",
"sarita",
"frazzled",
"haim",
"sagging",
"merch",
"beefy",
"hillbillies",
"spiraling",
"capes",
"ayodhya",
"huggy",
"pms",
"uncontrollably",
"gona",
"servicemen",
"eveybody",
"dredging",
"organist",
"amina",
"footnote",
"jacoby",
"monotony",
"vastness",
"hocus-pocus",
"dietz",
"milkshakes",
"bandstand",
"palladium",
"anecdotes",
"shins",
"jk",
"preyed",
"shalu",
"chiquita",
"shia",
"commited",
"whinny",
"g-string",
"spousal",
"lettering",
"propriety",
"cirie",
"hook-up",
"slovakia",
"maldives",
"excitable",
"bonn",
"kidnaps",
"eriksson",
"bachman",
"xiong",
"corrosive",
"absorption",
"curley",
"pendejo",
"contradicts",
"foreclosed",
"'about",
"hauptmann",
"litt",
"exasperated",
"exhume",
"inebriated",
"newsman",
"avinash",
"awoken",
"himalayan",
"sian",
"breeches",
"amitabh",
"slattery",
"side-by-side",
"adjournment",
"aretha",
"bellini",
"ballads",
"paraded",
"appleton",
"skewer",
"marchand",
"isotopes",
"alfalfa",
"pickups",
"newsweek",
"speculated",
"mage",
"guerillas",
"whopping",
"ici",
"skier",
"mckee",
"packers",
"old-timers",
"hiked",
"swarms",
"sidecar",
"mazda",
"affirmation",
"roomy",
"contradicting",
"kiln",
"discoloration",
"raff",
"plummeting",
"saz",
"steamboat",
"marchioness",
"hopelessness",
"on-stage",
"darin",
"habla",
"polygamy",
"pieced",
"defrost",
"implicitly",
"morwenna",
"vauxhall",
"resonate",
"marni",
"laredo",
"allegra",
"cammy",
"shibata",
"redone",
"whassup",
"10pm",
"gothenburg",
"occurrences",
"tangier",
"gaspar",
"dol",
"blaire",
"más",
"vos",
"layoffs",
"corbyn",
"anniversaries",
"exhibitions",
"depicts",
"evander",
"ambiance",
"droll",
"speedo",
"ploughed",
"estrella",
"lilacs",
"mind-boggling",
"greenpeace",
"outposts",
"hijo",
"macduff",
"tenuous",
"dignify",
"cautionary",
"prejudicial",
"veeru",
"evaded",
"pioneered",
"boarders",
"chisholm",
"subdural",
"flamethrower",
"evemhing",
"ginnie",
"vinaigrette",
"mecha",
"undisciplined",
"prance",
"sanctified",
"caymans",
"mauritius",
"forthright",
"redacted",
"tawny",
"boer",
"geary",
"fertilize",
"distal",
"noche",
"willfully",
"anil",
"vivienne",
"m.i.t.",
"poppet",
"misbehaving",
"lightsaber",
"lz",
"ill-fated",
"kingman",
"20-minute",
"but--but",
"georgy",
"cumin",
"establishments",
"amped",
"merv",
"synthesis",
"nazarene",
"poul",
"mikayla",
"tana",
"nether",
"pasolini",
"volodya",
"shanks",
"faithfulness",
"atavus",
"redfern",
"feasts",
"asli",
"father-daughter",
"ralf",
"laudanum",
"ettore",
"understaffed",
"cholo",
"pulsar",
"amitabha",
"exhumed",
"a.s.a.p.",
"'al",
"scrounge",
"shota",
"stunted",
"sobering",
"iben",
"rickety",
"kazan",
"ghana",
"nanking",
"four-year",
"unwashed",
"scuttle",
"wiggly",
"astonishingly",
"eeny",
"brenna",
"hookfang",
"effortlessly",
"boaz",
"timeout",
"cordero",
"taxicab",
"scusi",
"ragging",
"peta",
"salinas",
"houseguest",
"deveraux",
"ganging",
"loathing",
"squelching",
"boswell",
"teaser",
"gehrig",
"high-quality",
"scurrying",
"super-duper",
"ten-minute",
"mountie",
"divorcee",
"celestine",
"synthesizer",
"forsaking",
"parlors",
"koga",
"dukat",
"vite",
"ismael",
"lfl",
"huber",
"nourished",
"n.y.p.d.",
"mojitos",
"cranked",
"bhau",
"campground",
"kitano",
"kass",
"mutagen",
"scalps",
"wyler",
"willy-nilly",
"shrubs",
"damme",
"governance",
"johansen",
"overthinking",
"menthol",
"letdown",
"limey",
"supposition",
"gantry",
"beacons",
"saboteurs",
"outgunned",
"savino",
"lackeys",
"sectionals",
"rager",
"lite",
"radu",
"adulterer",
"disguising",
"jenni",
"timings",
"chandramukhi",
"carsten",
"expectant",
"poxy",
"lela",
"we`ve",
"onslow",
"sympathise",
"somewheres",
"effendi",
"pg",
"n-not",
"muss",
"gru",
"samara",
"halloran",
"rekindle",
"airbag",
"mclntyre",
"foosball",
"swooped",
"stateroom",
"easygoing",
"rochefort",
"transmits",
"job-",
"knievel",
"fielder",
"talk-",
"gussie",
"ruffles",
"horseradish",
"orchards",
"garak",
"liston",
"manna",
"fras",
"plummeted",
"streamlined",
"chalky",
"shipmates",
"evel",
"curvature",
"predetermined",
"boatload",
"coed",
"choco",
"secretarial",
"colouring",
"dragan",
"jaggi",
"tweety",
"squeezes",
"bolster",
"pissed-off",
"uncomplicated",
"contemplated",
"josette",
"bovary",
"overland",
"fevers",
"porgy",
"scavenging",
"balmy",
"incidental",
"madoff",
"minos",
"pongo",
"not-not",
"jung-hwa",
"igt",
"klicks",
"palme",
"implode",
"girth",
"overriding",
"goop",
"capitaine",
"avatars",
"beholden",
"savour",
"newsflash",
"mastering",
"immeasurable",
"dinero",
"retrograde",
"chiara",
"matsu",
"cleverest",
"huntsman",
"insinuate",
"progressively",
"tiana",
"patris",
"hallam",
"rediscover",
"glancing",
"remodeled",
"g-man",
"alka",
"b.o.b.",
"srt",
"jérôme",
"bequeathed",
"ricans",
"rutger",
"chihiro",
"perspiration",
"tributes",
"manav",
"geo",
"paramilitary",
"psychoanalyst",
"fergie",
"flunking",
"clauses",
"32nd",
"deej",
"artur",
"halil",
"slobbering",
"jiu",
"ascot",
"nerys",
"geezers",
"wasnt",
"spearhead",
"scooters",
"slabs",
"decomp",
"toppled",
"blackest",
"itto",
"duplicated",
"hypnotism",
"degeneration",
"fallujah",
"wriggling",
"write-off",
"cantrell",
"baseless",
"humpback",
"deader",
"ncaa",
"tatters",
"antagonize",
"rowed",
"after-hours",
"discontinue",
"did.",
"cask",
"mobius",
"eval",
"burg",
"erie",
"ufc",
"suckered",
"adderall",
"whos",
"shriveled",
"cheetahs",
"kk",
"couldnt",
"generalissimo",
"eco",
"toiled",
"rance",
"liliana",
"piggies",
"forgo",
"macleish",
"squatter",
"telex",
"eko",
"make-out",
"haνe",
"khartoum",
"diagonal",
"anti-matter",
"petrus",
"recheck",
"heart-shaped",
"sustains",
"multiverse",
"ramesses",
"rewrote",
"a.p.",
"senegal",
"yugoslav",
"seep",
"heisenberg",
"tp",
"vinod",
"else-",
"mosey",
"capoeira",
"descendents",
"sepsis",
"bolin",
"corrie",
"don.t",
"thickens",
"higher-ups",
"aslan",
"miseries",
"contrition",
"kamini",
"romp",
"polack",
"applicable",
"in-in",
"leonor",
"additions",
"rickman",
"arbiter",
"unclaimed",
"smock",
"sizzles",
"mcintyre",
"dejected",
"retention",
"juke",
"dilation",
"manya",
"tycho",
"molar",
"sacking",
"savant",
"f-",
"heyday",
"loosening",
"paperweight",
"keenly",
"grossed",
"cervantes",
"zan",
"shut-eye",
"barstow",
"greenie",
"mlle",
"strong-willed",
"westbound",
"moreland",
"crawfish",
"batiatus",
"juveniles",
"lawyered",
"partnerships",
"catch-up",
"k-9",
"wife-",
"authorizing",
"mementos",
"haggling",
"impressionist",
"stillson",
"minoru",
"abla",
"felled",
"harken",
"bernhardt",
"pipsqueak",
"kline",
"directives",
"collard",
"janko",
"unexplainable",
"lapdog",
"do-gooder",
"rectangle",
"seducer",
"deets",
"slavic",
"ipek",
"undergraduate",
"crawler",
"freemasons",
"co-ed",
"mountainous",
"asthmatic",
"hüseyin",
"renown",
"feel-",
"playmates",
"friendlier",
"mandible",
"handprint",
"brogan",
"cranberries",
"long-standing",
"wino",
"rabbis",
"burnout",
"janaki",
"itt",
"spalding",
"kiku",
"paterson",
"fireballs",
"ebba",
"scaly",
"notifying",
"deliberations",
"bunt",
"issa",
"subsidy",
"susy",
"fagin",
"bama",
"drafty",
"enchantress",
"kismet",
"metallica",
"delacroix",
"binford",
"unintentional",
"tomahawk",
"estep",
"gael",
"queensland",
"stoy",
"avocados",
"vir",
"conlon",
"amis",
"acrylic",
"car-",
"supremely",
"repented",
"sirloin",
"caulfield",
"retains",
"gorgon",
"ine",
"huston",
"beatboxing",
"anata",
"globalization",
"wor",
"florins",
"cates",
"americana",
"mitsuru",
"fathoms",
"tangiers",
"liddy",
"powwow",
"rescuers",
"hier",
"dogging",
"one-bedroom",
"chil",
"unbiased",
"yukimura",
"corwin",
"comparisons",
"wolsey",
"ako",
"promissory",
"whoopi",
"ji-won",
"24-year-old",
"airliner",
"ridgeway",
"stratford",
"nonsensical",
"osvaldo",
"agustin",
"finito",
"commended",
"monocle",
"xanadu",
"ara",
"villainy",
"rumple",
"doi",
"boor",
"euphoric",
"angina",
"boyo",
"bayonne",
"haute",
"disloyalty",
"payoffs",
"verbatim",
"boisterous",
"ddr",
"budgie",
"by-",
"roop",
"caregiver",
"synaptic",
"keira",
"cojones",
"bicarbonate",
"jeet",
"18th-century",
"gordie",
"incinerate",
"intervening",
"junta",
"diggs",
"strategize",
"spiky",
"applebee",
"spurned",
"copping",
"prescribing",
"orly",
"daydreams",
"heroically",
"jaipur",
"identifiable",
"barbary",
"after-party",
"decompose",
"non-profit",
"inverse",
"misused",
"swayze",
"weevil",
"excusez-moi",
"ishmael",
"doo-dah",
"dooku",
"siena",
"screamin",
"cussing",
"manoj",
"tamales",
"dyes",
"'rejust",
"rumsfeld",
"yore",
"langer",
"gush",
"jalopy",
"perrier",
"stiletto",
"coyle",
"drunkenly",
"asako",
"tthe",
"booyah",
"gated",
"blowfish",
"buzzkill",
"crockery",
"basilica",
"bloodstained",
"coburn",
"lapsed",
"spitz",
"sparse",
"fitzroy",
"'re-we",
"rescind",
"first-name",
"kimmel",
"roche",
"outbid",
"provenance",
"swifty",
"underdogs",
"tearfully",
"fritter",
"ste",
"clogging",
"converging",
"dec",
"kuen",
"capra",
"pavlov",
"suleyman",
"gov",
"groped",
"3pm",
"slits",
"bellman",
"fledgling",
"erectile",
"proportional",
"linoleum",
"operatic",
"vel",
"asakura",
"sardinia",
"xerxes",
"puddings",
"mildew",
"glaucoma",
"anaphylactic",
"them.",
"lobbyists",
"beluga",
"unconsciousness",
"sunnyvale",
"vying",
"catechism",
"mysticism",
"meeny",
"mustaches",
"mcbain",
"pbht",
"rovers",
"hassled",
"eda",
"addictions",
"azure",
"emphatically",
"tereza",
"wean",
"lovey-dovey",
"piedmont",
"disorientation",
"fosters",
"candied",
"skupin",
"engages",
"tentacle",
"shouidn",
"legality",
"winging",
"telepath",
"thorndyke",
"growers",
"unfeeling",
"touker",
"narnia",
"kuroda",
"mayko",
"roadhouse",
"moonstone",
"hails",
"kourtney",
"harkonnen",
"choe",
"a-list",
"humphries",
"pettigrew",
"enright",
"phrasing",
"doruk",
"peepers",
"facedown",
"ashford",
"fortify",
"kike",
"seashells",
"pharma",
"léo",
"commendatore",
"nuance",
"machado",
"krampus",
"flurry",
"ticktock",
"powerfully",
"refresher",
"tigh",
"fervor",
"compensating",
"bandana",
"fortresses",
"decommissioned",
"immorality",
"reactivate",
"commencement",
"tripathi",
"synchronization",
"gangbanger",
"squished",
"innocuous",
"authenticate",
"apologetic",
"yutaka",
"wiggins",
"typewriters",
"jailbreak",
"cherub",
"grime",
"jyoti",
"enchilada",
"run-ins",
"interception",
"tablecloths",
"millimetre",
"bolus",
"blow-up",
"sena",
"fook",
"maeve",
"shrivel",
"chloé",
"-it",
"replicated",
"slow-motion",
"ariana",
"hieroglyphs",
"empower",
"girardi",
"eugenics",
"τhe",
"sapporo",
"taunts",
"flesh-eating",
"rationalize",
"hak",
"one-day",
"stasi",
"mooch",
"deductible",
"stepsister",
"reproduced",
"wisteria",
"aster",
"comte",
"harbinger",
"dennison",
"stoves",
"skilful",
"vlado",
"didrt",
"wounding",
"zoran",
"thistle",
"fornell",
"conducive",
"flexing",
"fourteenth",
"togo",
"fedora",
"rin-kun",
"delightfully",
"jettison",
"dissatisfaction",
"-that",
"mulch",
"buyout",
"moxie",
"giordano",
"culpepper",
"henny",
"trieste",
"imperfection",
"gees",
"insincere",
"co-star",
"overpaid",
"takuya",
"mete",
"'out",
"catnip",
"dunlop",
"propped",
"vindicated",
"eggshells",
"tune-up",
"panisse",
"forearms",
"redevelopment",
"gamers",
"javert",
"kublai",
"cowering",
"rashes",
"jassi",
"nudie",
"asta",
"vena",
"earphones",
"kurds",
"greenfield",
"raghavan",
"muffy",
"hostesses",
"fenced",
"snitching",
"seizes",
"huntress",
"evaporates",
"divvy",
"castration",
"kickbacks",
"conny",
"earnestly",
"fiberglass",
"invents",
"stopper",
"foetus",
"desecration",
"ryota",
"ith",
"disapproved",
"truant",
"weaklings",
"skimpy",
"freeways",
"ex-girlfriends",
"carry-on",
"fuming",
"ambien",
"whitelighter",
"humboldt",
"v.a.",
"hales",
"00pm",
"harvester",
"fedya",
"requisitioned",
"wheelie",
"galan",
"wrongfully",
"mose",
"fatale",
"snarl",
"giorno",
"dit",
"ascendant",
"braithwaite",
"siya",
"flirtatious",
"merchandising",
"driftwood",
"stauffenberg",
"worsen",
"glimpsed",
"shuffled",
"bailout",
"overt",
"blech",
"affidavits",
"ks",
"kilmer",
"yuh",
"arent",
"emy",
"inlet",
"trojans",
"comas",
"bef",
"bombard",
"greenway",
"deus",
"astonishment",
"chandigarh",
"mino",
"weekdays",
"tunis",
"escapade",
"speakeasy",
"kami",
"griswold",
"ackroyd",
"dawdling",
"idea-",
"fairytales",
"mothering",
"napier",
"assuredly",
"vaccinations",
"budging",
"infraction",
"blockers",
"softest",
"engrossed",
"propellers",
"pakistanis",
"vaporize",
"broussard",
"weirdoes",
"ock",
"gyp",
"achieves",
"hoppy",
"wilmington",
"fuckface",
"waterways",
"petar",
"wolfblood",
"treadwell",
"ind",
"minute-",
"biscotti",
"modus",
"asterix",
"slaw",
"gacy",
"mmh",
"nestled",
"pitted",
"lusty",
"embalmed",
"-the",
"wouldnt",
"fok",
"coffeehouse",
"starks",
"slinky",
"woodman",
"streamers",
"admittance",
"kenshin",
"i-l",
"zooey",
"stillborn",
"truffaut",
"macaroons",
"sandbags",
"constructions",
"axelrod",
"butchie",
"unduly",
"ecosystems",
"aly",
"oshima",
"letitia",
"scudder",
"cul-de-sac",
"anti-semitism",
"crevice",
"drumsticks",
"anomalous",
"lita",
"misstep",
"hairdressers",
"21-year-old",
"ouyang",
"slava",
"colada",
"ravan",
"tought",
"headlong",
"sameera",
"indignant",
"neckline",
"aaaaaah",
"kosuke",
"aaahh",
"yim",
"shukla",
"stallions",
"cordially",
"hammy",
"beauchamp",
"deference",
"toph",
"terraces",
"hovercraft",
"fables",
"adric",
"kaan",
"duffer",
"walther",
"hygienist",
"rearing",
"bunyan",
"beasley",
"nimrod",
"tilting",
"yakov",
"deficient",
"hatun",
"loxley",
"ignites",
"schrader",
"neumann",
"ploughing",
"vassals",
"waylon",
"fantasia",
"6pm",
"usman",
"trager",
"bec",
"pampering",
"'ha",
"home-",
"breakdowns",
"wal",
"denser",
"coldly",
"mandolin",
"glacial",
"hjalmar",
"'twant",
"bekir",
"masonry",
"comandante",
"half-time",
"wejust",
"subpoenas",
"electroshock",
"vespa",
"temptress",
"arvid",
"keisuke",
"lovett",
"codename",
"balconies",
"clouseau",
"breadcrumbs",
"preys",
"tonga",
"hokage",
"garrick",
"brassiere",
"lemmy",
"hang-ups",
"f1nc0",
"switchblade",
"madhav",
"mie",
"pasteur",
"cafés",
"zionist",
"rodin",
"voldemort",
"instantaneously",
"norrie",
"unfolded",
| |
# sqlite.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009 <NAME> <EMAIL>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the SQLite database.
Driver
------
When using Python 2.5 and above, the built in ``sqlite3`` driver is
already installed and no additional installation is needed. Otherwise,
the ``pysqlite2`` driver needs to be present. This is the same driver as
``sqlite3``, just with a different name.
The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
is loaded. This allows an explicitly installed pysqlite driver to take
precedence over the built in one. As with all dialects, a specific
DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
this explicitly::
from sqlite3 import dbapi2 as sqlite
e = create_engine('sqlite:///file.db', module=sqlite)
Full documentation on pysqlite is available at:
`<http://www.initd.org/pub/software/pysqlite/doc/usage-guide.html>`_
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database" portion of
the URL. Note that the format of a url is::
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to the
**right** of the third slash. So connecting to a relative filepath looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
An absolute path, which is denoted by starting with a slash, means you need **four**
slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
To use a Windows path, regular drive specifications and backslashes can be used.
Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db')
The sqlite ``:memory:`` identifier is the default if no filepath is present. Specify
``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
Threading Behavior
------------------
Pysqlite connections do not support being moved between threads, unless
the ``check_same_thread`` Pysqlite flag is set to ``False``. In addition,
when using an in-memory SQLite database, the full database exists only within
the scope of a single connection. It is reported that an in-memory
database does not support being shared between threads regardless of the
``check_same_thread`` flag - which means that a multithreaded
application **cannot** share data from a ``:memory:`` database across threads
unless access to the connection is limited to a single worker thread which communicates
through a queueing mechanism to concurrent threads.
To provide a default which accommodates SQLite's default threading capabilities
somewhat reasonably, the SQLite dialect will specify that the :class:`~sqlalchemy.pool.SingletonThreadPool`
be used by default. This pool maintains a single SQLite connection per thread
that is held open up to a count of five concurrent threads. When more than five threads
are used, a cleanup mechanism will dispose of excess unused connections.
Two optional pool implementations that may be appropriate for particular SQLite usage scenarios:
* the :class:`sqlalchemy.pool.StaticPool` might be appropriate for a multithreaded
application using an in-memory database, assuming the threading issues inherent in
pysqlite are somehow accommodated for. This pool holds persistently onto a single connection
which is never closed, and is returned for all requests.
* the :class:`sqlalchemy.pool.NullPool` might be appropriate for an application that
makes use of a file-based sqlite database. This pool disables any actual "pooling"
behavior, and simply opens and closes real connections corresponding to the :func:`connect()`
and :func:`close()` methods. SQLite can "connect" to a particular file with very high
efficiency, so this option may actually perform better without the extra overhead
of :class:`SingletonThreadPool`. NullPool will of course render a ``:memory:`` connection
useless since the database would be lost as soon as the connection is "returned" to the pool.
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide
out of the box functionality for translating values between Python `datetime` objects
and a SQLite-supported format. SQLAlchemy's own :class:`~sqlalchemy.types.DateTime`
and related types provide date formatting and parsing functionality when SQLite is used.
The implementation classes are :class:`SLDateTime`, :class:`SLDate` and :class:`SLTime`.
These types represent dates and times as ISO formatted strings, which also nicely
support ordering. There's no reliance on typical "libc" internals for these functions
so historical dates are fully supported.
Unicode
-------
In contrast to SQLAlchemy's active handling of date and time types for pysqlite, pysqlite's
default behavior regarding Unicode is that all strings are returned as Python unicode objects
in all cases. So even if the :class:`~sqlalchemy.types.Unicode` type is
*not* used, you will still always receive unicode data back from a result set. It is
**strongly** recommended that you do use the :class:`~sqlalchemy.types.Unicode` type
to represent strings, since it will raise a warning if a non-unicode Python string is
passed from the user application. Mixing the usage of non-unicode objects with returned unicode objects can
quickly create confusion, particularly when using the ORM as internal data is not
always represented by an actual database result string.
"""
import datetime, re, time
from sqlalchemy import sql, schema, exc, pool, DefaultClause
from sqlalchemy.engine import default
import sqlalchemy.types as sqltypes
import sqlalchemy.util as util
from sqlalchemy.sql import compiler, functions as sql_functions
from types import NoneType
class SLNumeric(sqltypes.Numeric):
    """SQLite NUMERIC type, rendered as NUMERIC or NUMERIC(precision, scale)."""

    def bind_processor(self, dialect):
        # Decimal-mode values are bound as strings; otherwise coerce to float.
        coerce = str if self.asdecimal else float

        def process(value):
            if value is None:
                return value
            return coerce(value)

        return process

    def get_col_spec(self):
        if self.precision is None:
            return "NUMERIC"
        return "NUMERIC(%(precision)s, %(scale)s)" % {
            'precision': self.precision,
            'scale': self.scale,
        }
class SLFloat(sqltypes.Float):
    """SQLite FLOAT type."""

    def bind_processor(self, dialect):
        # Decimal-mode values are bound as strings; otherwise coerce to float.
        coerce = str if self.asdecimal else float

        def process(value):
            return value if value is None else coerce(value)

        return process

    def get_col_spec(self):
        return "FLOAT"
class SLInteger(sqltypes.Integer):
    """SQLite INTEGER type."""

    def get_col_spec(self):
        return "INTEGER"
class SLSmallInteger(sqltypes.Smallinteger):
    """SQLite small-integer type, rendered as SMALLINT."""

    def get_col_spec(self):
        return "SMALLINT"
class DateTimeMixin(object):
def _bind_processor(self, format, elements):
def process(value):
if not isinstance(value, (NoneType, datetime.date, datetime.datetime, datetime.time)):
raise TypeError("SQLite Date, Time, and DateTime types only accept Python datetime objects as input.")
elif value is not None:
return format % tuple([getattr(value, attr, 0) for attr in elements])
else:
return None
return process
def _result_processor(self, fn, regexp):
def process(value):
if value is not None:
return fn(*[int(x or 0) for x in regexp.match(value).groups()])
else:
return None
return process
class SLDateTime(DateTimeMixin, sqltypes.DateTime):
    """SQLite DATETIME stored as an ISO-style string ("YYYY-MM-DD HH:MM:SS.ffffff")."""

    # When True, microseconds are rendered with the old unpadded "%s"
    # formatting instead of the zero-padded "%06d" form.
    __legacy_microseconds__ = False

    def get_col_spec(self):
        return "TIMESTAMP"

    def bind_processor(self, dialect):
        if self.__legacy_microseconds__:
            return self._bind_processor(
                "%4.4d-%2.2d-%2.2d %2.2d:%2.2d:%2.2d.%s",
                ("year", "month", "day", "hour", "minute", "second", "microsecond")
            )
        else:
            return self._bind_processor(
                "%4.4d-%2.2d-%2.2d %2.2d:%2.2d:%2.2d.%06d",
                ("year", "month", "day", "hour", "minute", "second", "microsecond")
            )

    # The time portion, and the fractional seconds within it, are optional
    # when parsing values back out of the database.
    _reg = re.compile(r"(\d+)-(\d+)-(\d+)(?: (\d+):(\d+):(\d+)(?:\.(\d+))?)?")

    def result_processor(self, dialect):
        return self._result_processor(datetime.datetime, self._reg)
class SLDate(DateTimeMixin, sqltypes.Date):
    """SQLite DATE stored as an ISO "YYYY-MM-DD" string."""

    _reg = re.compile(r"(\d+)-(\d+)-(\d+)")

    def get_col_spec(self):
        return "DATE"

    def bind_processor(self, dialect):
        fmt = "%4.4d-%2.2d-%2.2d"
        fields = ("year", "month", "day")
        return self._bind_processor(fmt, fields)

    def result_processor(self, dialect):
        return self._result_processor(datetime.date, self._reg)
class SLTime(DateTimeMixin, sqltypes.Time):
    """SQLite TIME stored as an ISO-style "HH:MM:SS.ffffff" string."""

    # When True, microseconds are rendered with the old unpadded "%s"
    # formatting instead of the zero-padded "%06d" form.
    __legacy_microseconds__ = False

    def get_col_spec(self):
        return "TIME"

    def bind_processor(self, dialect):
        if self.__legacy_microseconds__:
            return self._bind_processor(
                "%2.2d:%2.2d:%2.2d.%s",
                ("hour", "minute", "second", "microsecond")
            )
        else:
            return self._bind_processor(
                "%2.2d:%2.2d:%2.2d.%06d",
                ("hour", "minute", "second", "microsecond")
            )

    # Fractional seconds are optional when parsing values back out.
    _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d+))?")

    def result_processor(self, dialect):
        return self._result_processor(datetime.time, self._reg)
class SLUnicodeMixin(object):
    """Mixin providing unicode bind-parameter checking for SQLite string types.

    pysqlite returns unicode for all result strings, so only the bind side
    needs handling: optionally warn or raise when a non-unicode value is bound.
    """

    def bind_processor(self, dialect):
        # Only install a processor when unicode conversion is requested on
        # either the type itself or the dialect.
        if self.convert_unicode or dialect.convert_unicode:
            # Type-level assert_unicode overrides the dialect-level setting.
            if self.assert_unicode is None:
                assert_unicode = dialect.assert_unicode
            else:
                assert_unicode = self.assert_unicode
            if not assert_unicode:
                return None
            def process(value):
                # NOTE: `unicode` is the Python 2 text type; this module
                # predates Python 3.
                if not isinstance(value, (unicode, NoneType)):
                    if assert_unicode == 'warn':
                        util.warn("Unicode type received non-unicode bind "
                                  "param value %r" % value)
                        return value
                    else:
                        raise exc.InvalidRequestError("Unicode type received non-unicode bind param value %r" % value)
                else:
                    return value
            return process
        else:
            return None

    def result_processor(self, dialect):
        # pysqlite already returns unicode; no result conversion needed.
        return None
class SLText(SLUnicodeMixin, sqltypes.Text):
    """SQLite TEXT type."""

    def get_col_spec(self):
        return "TEXT"
class SLString(SLUnicodeMixin, sqltypes.String):
    """SQLite VARCHAR, with an optional length."""

    def get_col_spec(self):
        if self.length:
            return "VARCHAR(%d)" % self.length
        return "VARCHAR"
class SLChar(SLUnicodeMixin, sqltypes.CHAR):
    """SQLite CHAR, with an optional length."""

    def get_col_spec(self):
        if self.length:
            return "CHAR(%d)" % self.length
        return "CHAR"
class SLBinary(sqltypes.Binary):
    """SQLite binary type, rendered as BLOB."""

    def get_col_spec(self):
        return "BLOB"
class SLBoolean(sqltypes.Boolean):
    """SQLite BOOLEAN stored as integer 0/1."""

    def get_col_spec(self):
        return "BOOLEAN"

    def bind_processor(self, dialect):
        def process(value):
            # None passes through; anything else collapses to 0 or 1.
            if value is None:
                return None
            return 1 if value else 0
        return process

    def result_processor(self, dialect):
        def process(value):
            return None if value is None else value == 1
        return process
# Maps generic SQLAlchemy column types to their SQLite-specific
# implementation classes defined above.
colspecs = {
    sqltypes.Binary: SLBinary,
    sqltypes.Boolean: SLBoolean,
    sqltypes.CHAR: SLChar,
    sqltypes.Date: SLDate,
    sqltypes.DateTime: SLDateTime,
    sqltypes.Float: SLFloat,
    sqltypes.Integer: SLInteger,
    sqltypes.NCHAR: SLChar,
    sqltypes.Numeric: SLNumeric,
    sqltypes.Smallinteger: SLSmallInteger,
    sqltypes.String: SLString,
    sqltypes.Text: SLText,
    sqltypes.Time: SLTime,
}
# Maps type names as they appear in SQLite schema metadata to the
# implementation classes above (used when reflecting existing tables).
ischema_names = {
    'BLOB': SLBinary,
    'BOOL': SLBoolean,
    'BOOLEAN': SLBoolean,
    'CHAR': SLChar,
    'DATE': SLDate,
    'DATETIME': SLDateTime,
    'DECIMAL': SLNumeric,
    'FLOAT': SLFloat,
    'INT': SLInteger,
    'INTEGER': SLInteger,
    'NUMERIC': SLNumeric,
    'REAL': SLNumeric,
    'SMALLINT': SLSmallInteger,
    'TEXT': SLText,
    'TIME': SLTime,
    'TIMESTAMP': SLDateTime,
    'VARCHAR': SLString,
}
class SQLiteExecutionContext(default.DefaultExecutionContext):
    """Execution context that backfills the last-inserted primary key."""

    def post_exec(self):
        # For a single-row INSERT where no primary key value was produced,
        # fill in the first id slot from cursor.lastrowid (SQLite's rowid).
        if self.compiled.isinsert and not self.executemany:
            if not len(self._last_inserted_ids) or self._last_inserted_ids[0] is None:
                self._last_inserted_ids = [self.cursor.lastrowid] + self._last_inserted_ids[1:]
class SQLiteDialect(default.DefaultDialect):
name = 'sqlite'
supports_alter = | |
from __future__ import annotations
import datetime
import json
import typing
import urllib.parse
import attr
from cattr import Converter
from bloom.ll._compat import Literal
from bloom.ll.models.application import Application
from bloom.ll.models.application_commands import (
ApplicationCommand,
ApplicationCommandOption,
ApplicationCommandPermissions,
CommandTypes,
GuildApplicationCommandPermissions,
InteractionResponse,
)
from bloom.ll.models.audit_log import AuditLog, AuditLogEvents
from bloom.ll.models.base import UNKNOWN, UNKNOWN_TYPE, Snowflake, Unknownish
from bloom.ll.models.channel import (
AllowedMentions,
Attachment,
Channel,
ChannelTypes,
Embed,
FollowedChannel,
MessageFlags,
MessageReference,
Overwrite,
ThreadMember,
VideoQualityModes,
)
from bloom.ll.models.emoji import Emoji
from bloom.ll.models.gateway import DetailedGatewayResponse, GatewayResponse
from bloom.ll.models.guild import (
Ban,
DefaultMessageNotificationLevel,
ExplicitContentFilterLevel,
Guild,
GuildFeatures,
GuildMember,
GuildPreview,
GuildScheduledEventUser,
GuildWidget,
Integration,
ModifyGuildChannelPositionsParameters,
ModifyGuildRolePositionsParameters,
PruneCount,
SystemChannelFlags,
UserConnection,
VerificationLevel,
WelcomeScreen,
WelcomeScreenChannel,
WidgetStyleOptions,
)
from bloom.ll.models.guild_scheduled_events import (
EventStatus,
GuildScheduledEvent,
GuildScheduledEventEntityMetadata,
GuildScheduledEventEntityType,
GuildScheduledEventPrivacyLevel,
)
from bloom.ll.models.guild_template import GuildTemplate
from bloom.ll.models.invite import Invite, InviteMetadata, InviteTargetTypes
from bloom.ll.models.message import Message
from bloom.ll.models.message_components import Component
from bloom.ll.models.oauth2 import AuthorizationInformation
from bloom.ll.models.permissions import BitwisePermissionFlags, Role
from bloom.ll.models.stage_instance import PrivacyLevel, StageInstance
from bloom.ll.models.sticker import NitroStickerPacks, Sticker
from bloom.ll.models.user import User
from bloom.ll.models.voice import VoiceRegion
from bloom.ll.models.webhook import Webhook
from bloom.ll.rest.models import Request
def prepare(rest: RawRest, input_dict: typing.Dict[str, object]) -> typing.Dict[str, object]:
    """Drop UNKNOWN entries from *input_dict* and unstructure the remainder."""
    known = {key: value for key, value in input_dict.items() if value is not UNKNOWN}
    result: typing.Dict[str, object] = rest.conv.unstructure(known)
    return result
T = typing.TypeVar('T')


def tuple_(
    it: Unknownish[typing.Optional[typing.Iterable[T]]],
) -> Unknownish[typing.Optional[typing.Tuple[T, ...]]]:
    """Materialize an optional iterable into a tuple, passing UNKNOWN and None through."""
    if isinstance(it, UNKNOWN_TYPE):
        return UNKNOWN
    return None if it is None else tuple(it)
@typing.overload
def parse_reason(reason: str) -> str:
    ...


@typing.overload
def parse_reason(reason: Unknownish[str]) -> Unknownish[str]:
    ...


def parse_reason(reason: Unknownish[str]) -> Unknownish[str]:
    """Percent-encode an audit-log reason for header transport; UNKNOWN passes through."""
    if isinstance(reason, UNKNOWN_TYPE):
        return reason
    return urllib.parse.quote(reason, safe=":/?#[]@!$&'()*+,;=")
@attr.define()
class RawRest:
# every single API method.
conv: Converter
def get_guild_audit_log(
    self,
    guild_id: Snowflake,
    *,
    user_id: Snowflake,
    action_type: AuditLogEvents,
    before: Snowflake,
    limit: int,
) -> Request[AuditLog]:
    """Fetch a guild's audit log, filtered by user, action type, and position."""
    query = prepare(
        self,
        {
            'user_id': user_id,
            'action_type': action_type,
            'before': before,
            'limit': limit,
        },
    )
    return Request[AuditLog](
        'GET',
        '/guilds/{guild_id}/audit-logs',
        {'guild_id': guild_id},
        params=query,
    )
def get_channel(self, channel_id: Snowflake) -> Request[Channel]:
    """Fetch a single channel by id."""
    path_params = {'channel_id': channel_id}
    return Request[Channel]('GET', '/channels/{channel_id}', path_params)
def modify_channel(
    self,
    channel_id: Snowflake,
    *,
    # TODO: mypy_extensions.Expand[TypedDict] might help.
    name: Unknownish[str] = UNKNOWN,
    # base64 encoded icon
    icon: Unknownish[str] = UNKNOWN,
    type: Unknownish[ChannelTypes] = UNKNOWN,
    position: Unknownish[typing.Optional[int]] = UNKNOWN,
    topic: Unknownish[typing.Optional[str]] = UNKNOWN,
    nsfw: Unknownish[typing.Optional[bool]] = UNKNOWN,
    rate_limit_per_user: Unknownish[typing.Optional[int]] = UNKNOWN,
    bitrate: Unknownish[typing.Optional[int]] = UNKNOWN,
    user_limit: Unknownish[typing.Optional[int]] = UNKNOWN,
    permission_overwrites: Unknownish[typing.Optional[typing.Iterable[Overwrite]]] = UNKNOWN,
    parent_id: Unknownish[typing.Optional[Snowflake]] = UNKNOWN,
    rtc_region: Unknownish[typing.Optional[str]] = UNKNOWN,
    video_quality_mode: Unknownish[typing.Optional[VideoQualityModes]] = UNKNOWN,
    default_auto_archive_duration: Unknownish[typing.Optional[int]] = UNKNOWN,
    # thread options (TODO: an ADT method?)
    archived: Unknownish[bool] = UNKNOWN,
    auto_archive_duration: Unknownish[int] = UNKNOWN,
    locked: Unknownish[bool] = UNKNOWN,
    # audit log
    reason: Unknownish[str] = UNKNOWN,
) -> Request[Channel]:
    """Update a channel's settings (PATCH /channels/{channel_id}).

    UNKNOWN fields are stripped by ``prepare`` and never sent; ``reason``
    travels in the X-Audit-Log-Reason header, not the JSON body.

    Fix: the JSON body previously listed 'rate_limit_per_user' twice; the
    duplicate dict key silently overwrote the first and is removed.
    """
    return Request[Channel](
        'PATCH',
        '/channels/{channel_id}',
        {'channel_id': channel_id},
        json=prepare(
            self,
            {
                'name': name,
                'icon': icon,
                'type': type,
                'position': position,
                'topic': topic,
                'nsfw': nsfw,
                'rate_limit_per_user': rate_limit_per_user,
                'bitrate': bitrate,
                'user_limit': user_limit,
                'permission_overwrites': tuple_(permission_overwrites),
                'parent_id': parent_id,
                'rtc_region': rtc_region,
                'video_quality_mode': video_quality_mode,
                'default_auto_archive_duration': default_auto_archive_duration,
                'archived': archived,
                'auto_archive_duration': auto_archive_duration,
                'locked': locked,
            },
        ),
        headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
    )
def delete_channel(
    self, channel_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[Channel]:
    """Delete a channel; the optional reason is sent as an audit-log header."""
    audit_headers = prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)})
    return Request[Channel](
        'DELETE',
        '/channels/{channel_id}',
        {'channel_id': channel_id},
        headers=audit_headers,
    )
def get_channel_messages(
    self,
    channel_id: Snowflake,
    *,
    around: Unknownish[Snowflake] = UNKNOWN,
    before: Unknownish[Snowflake] = UNKNOWN,
    after: Unknownish[Snowflake] = UNKNOWN,
    limit: Unknownish[int] = UNKNOWN,
) -> Request[typing.Tuple[Message, ...]]:
    """Fetch a page of messages from a channel.

    Fix: the return annotation was ``Tuple[Message]``, which denotes a
    fixed-length 1-tuple; a variable-length result is ``Tuple[Message, ...]``.
    """
    return Request[typing.Tuple[Message, ...]](
        'GET',
        '/channels/{channel_id}/messages',
        {'channel_id': channel_id},
        params=prepare(
            self,
            {
                'around': around,
                'before': before,
                'after': after,
                'limit': limit,
            },
        ),
    )
def get_channel_message(
    self, channel_id: Snowflake, message_id: Snowflake
) -> Request[Message]:
    """Fetch a specific message in a channel.

    Fix: the URL template used '{channel.id}'/'{message.id}' while the
    substitution dict supplies 'channel_id'/'message_id'; the placeholders
    now match the keys (and every other route in this class).
    """
    return Request[Message](
        'GET',
        '/channels/{channel_id}/messages/{message_id}',
        {'channel_id': channel_id, 'message_id': message_id},
    )
def create_message(
    self,
    channel_id: Snowflake,
    *,
    # one of these is required:
    content: Unknownish[str] = UNKNOWN,
    files: Unknownish[typing.Iterable[object]] = UNKNOWN,  # TODO: better file type?
    embeds: Unknownish[typing.Iterable[Embed]] = UNKNOWN,
    sticker_ids: Unknownish[typing.Iterable[Snowflake]] = UNKNOWN,
    # optional
    tts: Unknownish[bool] = UNKNOWN,
    allowed_mentions: Unknownish[AllowedMentions] = UNKNOWN,
    message_reference: Unknownish[MessageReference] = UNKNOWN,
    components: Unknownish[typing.Iterable[Component]] = UNKNOWN,
    # TODO: partial attachments
    attachments: Unknownish[typing.Iterable[typing.Dict[str, typing.Any]]] = UNKNOWN,
) -> Request[Message]:
    """Post a message to a channel (POST /channels/{channel_id}/messages)."""
    # Everything except the raw file objects is serialized into a single
    # multipart field named payload_json; UNKNOWN values are stripped.
    json_payload = prepare(
        self,
        {
            'content': content,
            'embeds': tuple_(embeds),
            'sticker_ids': tuple_(sticker_ids),
            'tts': tts,
            'allowed_mentions': allowed_mentions,
            'message_reference': message_reference,
            'components': tuple_(components),
            'attachments': tuple_(attachments),
        },
    )
    return Request[Message](
        'POST',
        '/channels/{channel_id}/messages',
        {'channel_id': channel_id},
        data={'payload_json': json.dumps(json_payload)} if json_payload else None,
        # files[0], files[1], ... multipart fields, only when files was given.
        files={f'files[{i}]': file for i, file in enumerate(files)}
        if not isinstance(files, UNKNOWN_TYPE)
        else None,
    )
def crosspost_message(self, channel_id: Snowflake, message_id: Snowflake) -> Request[Message]:
    """Crosspost a message (POST .../crosspost)."""
    route_args = {'channel_id': channel_id, 'message_id': message_id}
    return Request[Message](
        'POST',
        '/channels/{channel_id}/messages/{message_id}/crosspost',
        route_args,
    )
# TODO: better emoji type?
def create_reaction(
    self, channel_id: Snowflake, message_id: Snowflake, *, emoji: str
) -> Request[None]:
    """Add the current user's reaction to a message."""
    route_args = {'channel_id': channel_id, 'message_id': message_id, 'emoji': emoji}
    return Request[None](
        'PUT',
        '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me',
        route_args,
    )
def delete_own_reaction(
self, channel_id: Snowflake, message_id: Snowflake, *, emoji: str
) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me',
{'channel_id': channel_id, 'message_id': message_id, 'emoji': emoji},
)
def delete_user_reaction(
self, channel_id: Snowflake, message_id: Snowflake, *, emoji: str, user_id: Snowflake
) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/{user_id}',
{
'channel_id': channel_id,
'message_id': message_id,
'emoji': emoji,
'user_id': user_id,
},
)
def get_reactions(
self,
channel_id: Snowflake,
message_id: Snowflake,
*,
emoji: str,
after: Unknownish[Snowflake] = UNKNOWN,
limit: Unknownish[int] = UNKNOWN,
) -> Request[typing.Tuple[User]]:
return Request[typing.Tuple[User]](
'GET',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}',
{'channel_id': channel_id, 'message_id': message_id, 'emoji': emoji},
params=prepare(self, {'after': after, 'limit': limit}),
)
def delete_all_reactions(self, channel_id: Snowflake, message_id: Snowflake) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions',
{'channel_id': channel_id, 'message_id': message_id},
)
def delete_all_reactions_for_emoji(
self, channel_id: Snowflake, message_id: Snowflake, *, emoji: str
) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}',
{'channel_id': channel_id, 'message_id': message_id, 'emoji': emoji},
)
def edit_message(
self,
channel_id: Snowflake,
message_id: Snowflake,
*,
content: Unknownish[typing.Optional[str]] = UNKNOWN,
embeds: Unknownish[typing.Optional[typing.Iterable[Embed]]] = UNKNOWN,
flags: Unknownish[typing.Optional[MessageFlags]] = UNKNOWN,
# TODO: better file type
files: Unknownish[typing.Iterable[object]] = UNKNOWN,
allowed_mentions: Unknownish[typing.Optional[AllowedMentions]] = UNKNOWN,
# TODO: are partial attachments allowed?
attachments: Unknownish[typing.Optional[typing.Iterable[Attachment]]] = UNKNOWN,
components: Unknownish[typing.Optional[typing.Iterable[Component]]] = UNKNOWN,
) -> Request[Message]:
json_payload = prepare(
self,
{
'content': content,
'embeds': tuple_(embeds),
'flags': flags,
'allowed_mentions': allowed_mentions,
'attachments': tuple_(attachments),
'components': tuple_(components),
},
)
return Request[Message](
'POST',
'/channels/{channel_id}/messages',
{'channel_id': channel_id},
data={'payload_json': json.dumps(json_payload)} if json_payload else None,
files={f'files[{i}]': file for i, file in enumerate(files)}
if not isinstance(files, UNKNOWN_TYPE)
else None,
)
def delete_message(self, channel_id: Snowflake, message_id: Snowflake) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/messages/{message_id}',
{'channel_id': channel_id, 'message_id': message_id},
)
def bulk_delete_messages(
self,
channel_id: Snowflake,
*,
messages: typing.Iterable[Snowflake],
reason: Unknownish[str] = UNKNOWN,
) -> Request[None]:
return Request[None](
'POST',
'/channels/{channel_id}/messages/bulk-delete',
{'channel_id': channel_id},
json=prepare(self, {'messages': tuple_(messages)}),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def edit_channel_permissions(
self,
channel_id: Snowflake,
overwrite_id: Snowflake,
*,
allow: BitwisePermissionFlags,
deny: BitwisePermissionFlags,
type: Literal[0, 1],
reason: Unknownish[str] = UNKNOWN,
) -> Request[None]:
return Request[None](
'PUT',
'/channels/{channel_id}/permissions/{overwrite_id}',
{'channel_id': channel_id, 'overwrite_id': overwrite_id},
json=prepare(self, {'allow': allow, 'deny': deny, 'type': type}),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def get_channel_invites(self, channel_id: Snowflake) -> Request[typing.Tuple[InviteMetadata]]:
return Request[typing.Tuple[InviteMetadata]](
'GET', '/channels/{channel_id}/invites', {'channel_id': channel_id}
)
def create_channel_invite(
self,
channel_id: Snowflake,
*,
max_age: Unknownish[int] = UNKNOWN,
max_uses: Unknownish[int] = UNKNOWN,
temporary: Unknownish[bool] = UNKNOWN,
unique: Unknownish[bool] = UNKNOWN,
target_type: Unknownish[InviteTargetTypes] = UNKNOWN,
target_user_id: Unknownish[Snowflake] = UNKNOWN,
target_application_id: Unknownish[Snowflake] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[Invite]:
return Request[Invite](
'POST',
'/channels/{channel_id}/invites',
{'channel_id': channel_id},
json=prepare(
self,
{
'max_age': max_age,
'max_uses': max_uses,
'temporary': temporary,
'unique': unique,
'target_type': target_type,
'target_user_id': target_user_id,
'target_application_id': target_application_id,
},
),
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def delete_channel_permission(
self, channel_id: Snowflake, overwrite_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/permissions/{overwrite_id}',
{'channel_id': channel_id, 'overwrite_id': overwrite_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def follow_news_channel(
self, channel_id: Snowflake, *, webhook_channel_id: Snowflake
) -> Request[FollowedChannel]:
return Request[FollowedChannel](
'POST',
'/channels/{channel_id}/followers',
{'channel_id': channel_id},
json=prepare(self, {'webhook_channel_id': webhook_channel_id}),
)
def trigger_typing_indicator(self, channel_id: Snowflake) -> Request[None]:
return Request[None]('POST', '/channels/{channel_id}/typing', {'channel_id': channel_id})
def get_pinned_messages(self, channel_id: Snowflake) -> Request[typing.Tuple[Message]]:
return Request[typing.Tuple[Message]](
'GET', '/channels/{channel_id}/pins', {'channel_id': channel_id}
)
def pin_message(
self, channel_id: Snowflake, message_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[None]:
return Request[None](
'PUT',
'/channels/{channel_id}/pins/{message_id}',
{'channel_id': channel_id, 'message_id': message_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
def unpin_message(
self, channel_id: Snowflake, message_id: Snowflake, *, reason: Unknownish[str] = UNKNOWN
) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/pins/{message_id}',
{'channel_id': channel_id, 'message_id': message_id},
headers=prepare(self, {'X-Audit-Log-Reason': parse_reason(reason)}),
)
# TODO: what does this return?
def group_dm_add_recipient(
self,
channel_id: Snowflake,
user_id: Snowflake,
*,
access_token: str,
# ????????? I think this is optional (Unknownish)
# TODO: test.
nick: str,
) -> Request[None]:
return Request[None](
'PUT',
'/channels/{channel_id}/recipients/{user_id}',
{'channel_id': channel_id, 'user_id': user_id},
json=prepare(self, {'access_token': access_token, 'nick': nick}),
)
# TODO: what does this return?
def group_dm_remove_recipient(
self, channel_id: Snowflake, user_id: Snowflake
) -> Request[None]:
return Request[None](
'DELETE',
'/channels/{channel_id}/recipients/{user_id}',
{'channel_id': channel_id, 'user_id': user_id},
)
def start_thread_with_message(
self,
channel_id: Snowflake,
message_id: Snowflake,
*,
name: str,
auto_archive_duration: Unknownish[int] = UNKNOWN,
rate_limit_per_user: Unknownish[typing.Optional[int]] = UNKNOWN,
reason: Unknownish[str] = UNKNOWN,
) -> Request[Channel]:
return Request[Channel](
'POST',
'/channels/{channel_id}/messages/{message_id}/threads',
{'channel_id': channel_id, 'message_id': | |
# haul3/haul/platforms/psion/builder_psion.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Builder for PSION (CM/XP)
* translates to OPL (using Basic language)
* compiles in VM using PSION DevKit for DOS
* emulates using ORG2BETA for DOS
"""
from haul.utils import *
from haul.builder import *
from haul.langs.py.reader_py import *
from haul.langs.opl.writer_opl import *
class HAULBuilder_psion(HAULBuilder):
def __init__(self):
HAULBuilder.__init__(self, platform='psion', lang='opl')
self.set_translator(HAULTranslator(HAULReader_py, HAULWriter_opl, dialect=DIALECT_OPL3))
def build(self, project):
HAULBuilder.build(self, project=project)
name = self.project.name
data_libs_path = os.path.abspath(os.path.join(self.data_path, 'platforms', 'psion', 'libs'))
vm_path = os.path.abspath(os.path.join(self.data_path, 'platforms', 'psion', 'vm'))
qemu_path = self.get_path('QEMU_PATH', os.path.abspath(os.path.join(self.tools_path, 'qemu')))
#@FIXME: Bootable packs can be created using BLDPACK. But for some reason it then does not include all binaries!
#@FIXME: When disabling bootable, I use MAKEPACK which seems to handle multiple files easily, but can not handle BIN files needed for bootable.
bootable = not True # Make it auto-run by including a BOOT.BIN and renaming the main proc BOOT
lcd_lines = 2 # 2 for CM/XP, 4 for LZ etc.
name8 = self.name_to_8(name).upper()
opl_filename = name8 + '.OPL'
#ob3Filename = name8 + '.ob3'
opl_filename_full = os.path.abspath(os.path.join(self.staging_path, opl_filename))
opk_filename = name8 + '.OPK'
put('Preparing path names...')
for s in self.project.sources:
s.dest_filename = self.staging_path + '/' + self.name_to_8(s.name).upper() + '.OPL'
for s in self.project.libs:
s.dest_filename = self.staging_path + '/' + self.name_to_8(s.name).upper() + '.OPL'
put('Copying libraries...')
for s in self.project.libs:
self.copy(os.path.join(data_libs_path, s.name + '.opl'), os.path.join(self.staging_path, self.name_to_8(s.name).upper() + '.OPL'))
put('Translating sources to OPL3...')
#m = self.translate(name=name, source_filename=os.path.join(source_path, source_filename), SourceReaderClass=HAULReader_py, dest_filename=opl_filename_full, DestWriterClass=HAULWriter_opl, dialect=DIALECT_OPL3)
m = self.translate_project(output_path=self.staging_path)
### Split main module into separate files
func_libs = []
# OPL3 (XP/CM) does not support multiple procs in one file.
for f in m.funcs:
#put(str(f.id.name) + ': ' + str(f))
# Select module name
func_name = f.id.name
func_name8 = func_name[0:8].upper()
#@TODO: Ensure that this name is unique! Or just give random name (lame)
func_filename = func_name8 + '.OPL'
func_filename_full = os.path.abspath(os.path.join(self.staging_path, func_filename))
streamOut = StringWriter()
writer = HAULWriter_opl(streamOut, dialect=DIALECT_OPL3)
m = writer.write_function(f) # That's where the magic happens!
put('Writing function "%s" to "%s"...' % (f.id.name, func_filename_full))
write_file(func_filename_full, streamOut.r)
self.copy(func_filename_full, func_filename_full+'.bak') # Backup (compiler deletes it?!)
# Add to compile files
func_libs.append(func_filename)
"""
### For testing: Create a (valid) dummy OPL file
NL = chr(0) #'\r\n'
if bootable:
oplDummySource = 'BOOT:' + NL # Must be called "BOOT" to be called by BOOT.BIN on start-up
else:
oplDummySource = name8 + ':' + NL
oplDummySource += 'PRINT"Hello from HAUL"' + NL
oplDummySource += 'BEEP 250,440' + NL
oplDummySource += 'PAUSE 40' + NL
self.touch(opl_filename_full, oplDummySource)
"""
# Check if translation worked
if not os.path.isfile(opl_filename_full):
raise HAULBuildError('Main OPL file "%s" was not created!'.format(opl_filename_full))
return False
self.copy(opl_filename_full, opl_filename_full + '.bak') # Backup main file for testing (source file gets deleted somehow...)
put('Preparing VM automation...')
disk_sys = os.path.join(vm_path, 'sys_msdos622.disk')
disk_compiler = os.path.join(vm_path, 'app_devkit.disk')
disk_empty = os.path.join(vm_path, 'empty.disk')
disk_temp = os.path.abspath(os.path.join(self.staging_path, 'tmp.disk'))
# Create/clear temp scratch disk
self.copy(disk_empty, disk_temp)
build_log_file = os.path.abspath(os.path.join(self.staging_path, 'build.log'))
#self.touch(build_log_file, '# Build log')
self.rm_if_exists(build_log_file)
self.rm_if_exists(os.path.abspath(os.path.join(self.staging_path, opk_filename)))
DOS_SYS_DIR = 'C:'
DOS_COMPILER_DRIVE = 'D'
DOS_COMPILER_DIR = DOS_COMPILER_DRIVE + ':'
DOS_STAGING_DIR = 'F:'
DOS_TEMP_DRIVE = 'E'
DOS_TEMP_DIR = DOS_TEMP_DRIVE + ':'
DOS_LOG_FILE = DOS_TEMP_DIR + '\\build.log' #DOS_LOG_FILE = DOS_STAGING_DIR + '\\build.log'
DEVKIT_PATH = DOS_COMPILER_DIR + '\\DEVKIT'
CRLF = '\r\n'
# Startup prolog...
autoexec = 'ECHO.' + CRLF
autoexec += 'SMARTDRV /C /X' + CRLF
autoexec = 'CLS' + CRLF
autoexec += 'SET TEMP=E:' + CRLF
autoexec += 'ECHO haulBuilder_psion' + CRLF
autoexec += 'ECHO.' + CRLF
# Compile...
autoexec += ':COMPILE' + CRLF
autoexec += 'ECHO ----------------------------------------' + CRLF
#autoexec += 'ECHO Staging dir:' + CRLF
#autoexec += 'DIR ' + DOS_STAGING_DIR + ' /B' + CRLF
autoexec += 'ECHO Staging...' + CRLF
#autoexec += 'COPY ' + DOS_STAGING_DIR + '\*.opl ' + DOS_TEMP_DIR + CRLF
DOS_IN_FILE = DOS_TEMP_DIR + '\\' + opl_filename
DOS_OUT_FILE = DOS_TEMP_DIR + '\\' + opk_filename
autoexec += 'ECHO Build log >' + DOS_LOG_FILE + CRLF
autoexec += DOS_COMPILER_DRIVE + ':' + CRLF
autoexec += 'CD ' + DEVKIT_PATH + CRLF
autoexec += DOS_TEMP_DRIVE + ':' + CRLF
autoexec += 'CD ' + DOS_TEMP_DIR + CRLF
### List all source files
#oplFiles = []
source_list_filename = DOS_TEMP_DIR + '\\' + name8 + '.lst'
"""
autoexec += 'COPY ' + DOS_STAGING_DIR + '\\' + opl_filename + ' ' + DOS_TEMP_DIR + CRLF
#oplFiles.append(DOS_TEMP_DIR + '\\' + opl_filename)
autoexec += 'ECHO ' + DOS_TEMP_DIR + '\\' + opl_filename + '>' + source_list_filename + CRLF
for l in libs:
autoexec += 'COPY ' + DOS_STAGING_DIR + '\\' + l + '.OPL ' + DOS_TEMP_DIR + CRLF
#oplFiles.append(DOS_TEMP_DIR + '\\' + l + '.OPL')
autoexec += 'ECHO ' + DOS_TEMP_DIR + '\\' + l + '.OPL>>' + source_list_filename + CRLF
"""
autoexec += 'COPY NUL ' + source_list_filename + CRLF
for s in self.project.sources:
n = self.name_to_8(s.name).upper() + '.OPL'
autoexec += 'COPY ' + DOS_STAGING_DIR + '\\' + n + ' ' + DOS_TEMP_DIR + CRLF
#oplFiles.append(DOS_TEMP_DIR + '\\' + n)
autoexec += 'ECHO ' + DOS_TEMP_DIR + '\\' + n + '>>' + source_list_filename + CRLF
for s in self.project.libs:
n = self.name_to_8(s.name).upper() + '.OPL'
autoexec += 'COPY ' + DOS_STAGING_DIR + '\\' + n + ' ' + DOS_TEMP_DIR + CRLF
#oplFiles.append(DOS_TEMP_DIR + '\\' + n)
autoexec += 'ECHO ' + DOS_TEMP_DIR + '\\' + n + '>>' + source_list_filename + CRLF
for n in func_libs:
autoexec += 'COPY ' + DOS_STAGING_DIR + '\\' + n + ' ' + DOS_TEMP_DIR + CRLF
#oplFiles.append(DOS_TEMP_DIR + '\\' + n)
autoexec += 'ECHO ' + DOS_TEMP_DIR + '\\' + n + '>>' + source_list_filename + CRLF
if bootable:
# Add BOOT procedure that calls the main file
autoexec += 'ECHO BOOT:>' + DOS_TEMP_DIR + '\\BOOT.OPL' + CRLF
autoexec += 'ECHO ' + name8 + ':>>' + DOS_TEMP_DIR + '\\BOOT.OPL' + CRLF
autoexec += 'ECHO GET>>' + DOS_TEMP_DIR + '\\BOOT.OPL' + CRLF
#oplFiles.append(DOS_TEMP_DIR + '\\BOOT.OPL')
autoexec += 'ECHO ' + DOS_TEMP_DIR + '\\BOOT.OPL>>' + source_list_filename + CRLF
#autoexec += 'ECHO ---------- All sources ----------' + CRLF
#autoexec += 'TYPE ' + source_list_filename + CRLF
#autoexec += 'ECHO --------------------' + CRLF
### Compile
OPLTRAN_CMD = DEVKIT_PATH + '\\OPLTRAN @' + source_list_filename
OPLTRAN_CMD += ' -t' # Include source and object
if lcd_lines == 2:
# Two-line LCD driver
OPLTRAN_CMD += ' -x'
autoexec += 'ECHO Executing "' + OPLTRAN_CMD + '"...' + CRLF
autoexec += 'ECHO ' + OPLTRAN_CMD + ' >>' + DOS_LOG_FILE + CRLF
autoexec += OPLTRAN_CMD + ' >>' + DOS_LOG_FILE + CRLF
autoexec += 'TYPE ' + DOS_LOG_FILE + CRLF
#@TODO: Check for compilation errors
autoexec += 'IF ERRORLEVEL 1 GOTO ERROR' + CRLF
### Create pack list (.bld file that tells which records to put in the OPK file)
bld_name = name8
bld_filename = bld_name+'.bld'
# Header line
pakSize = 16 # in kB, either 8 or 16
#l = '%s %d NOCOPY NOWRITE' % (bld_name, pakSize)
l = '%s %d' % (bld_name, pakSize)
autoexec += 'ECHO ' + l + '>' + bld_filename + CRLF
if bootable:
# Add boot binary to pak
autoexec += 'COPY ' + DEVKIT_PATH + '\\BOOT.BIN' + ' ' + DOS_TEMP_DIR + CRLF
l = 'BOOT BIN'
#l += ' !Boot file'
autoexec += 'ECHO ' + l + '>>' + bld_filename + CRLF
# Lib files preceed the main file
for s in self.project.libs:
l = self.name_to_8(s.name).upper()
#while len(l) < 8: l += ' '
l += ' OB3'
#l = l + ' OB3 ' + l
#@FIXME: They must be renamed according to their returnType, e.g. FOO%
#l += ' ' + func_name8 + typeIndicator
#l += ' ' + l.upper() + ' !Function/Lib'
#l += ' !Function/Lib'
autoexec += 'ECHO ' + l + '>>' + bld_filename + CRLF
# main OB3 file
l = name8
#while len(l) < 8: l += ' '
l += ' OB3'
#l += ' !Main module'
#if bootable: l += ' BOOT !Boot file' # Rename it "BOOT" to be called by BOOT.BIN
autoexec += 'ECHO ' + l + '>>' + bld_filename + CRLF
if bootable:
l = 'BOOT OB3'
#l += ' !Boot file'
#l = 'BOOT OPL !Boot file' #@FIXME: This lets BLDPACK crash/hang
autoexec += 'ECHO ' + l + '>>' + bld_filename + CRLF
# Empty line
#autoexec += 'ECHO>>' + bld_filename + CRLF
### Build pack
if bootable:
# Using BLDPACK
BLD_CMD = DEVKIT_PATH + '\\BLDPACK @' + bld_name + ' -map'
else:
# Using MAKEPACK (does not support BIN files, but is good)
BLD_CMD = DEVKIT_PATH + '\\MAKEPACK ' + bld_filename
autoexec += 'ECHO Executing "' + BLD_CMD + '"...' + CRLF
autoexec += 'ECHO ' + BLD_CMD + ' >>' + DOS_LOG_FILE + CRLF
autoexec += BLD_CMD + ' >>' + DOS_LOG_FILE + CRLF
autoexec += 'TYPE ' + DOS_LOG_FILE + CRLF
autoexec += 'ECHO ----------------------------------------' + CRLF
autoexec += 'ECHO ---------------------------------------- >>' + DOS_LOG_FILE + CRLF
autoexec += 'ECHO Publishing...' + CRLF
#autoexec += 'SMARTDRV /C /X' + CRLF
#autoexec += 'COPY /B ' + DOS_OUT_FILE + ' ' + DOS_STAGING_DIR + CRLF
#autoexec += 'COPY /B ' + DOS_LOG_FILE + ' ' + DOS_STAGING_DIR + CRLF
autoexec += 'COPY /B ' + DOS_TEMP_DIR + '\*.* ' + DOS_STAGING_DIR + CRLF
autoexec += 'ECHO ----------------------------------------' + CRLF
if (self.project.run_test == True):
# Test result
autoexec += 'CLS' + | |
= rest_urls['bgp_advertise_config'].format(vrf_name)
if not delete_rest(dut, rest_url=url):
st.banner('FAIL-OCYANG: BGP EVPN advertise-ipv6 in vrf:{} delete Failed'.format(vrf_name))
return False
else:
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += '{} advertise ipv6 {}\n'.format(config_cmd,advertise_ipv6)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'default_originate_ipv4_vrf':
# convert to REST as and when used
my_cmd = 'router bgp {} vrf {}\n'.format(kwargs['local_as'],vrf_name)
my_cmd += 'address-family l2vpn evpn\n'
my_cmd += '{} default-originate ipv4\n'.format(config_cmd)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'default_originate_ipv6_vrf':
# convert to REST as and when used
my_cmd = 'router bgp {} vrf {}\n'.format(kwargs['local_as'],vrf_name)
my_cmd += 'address-family l2vpn evpn\n'
my_cmd += '{} default-originate ipv6\n'.format(config_cmd)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'advertise_ipv4_vrf':
if cli_type in ["rest-put", "rest-patch"]:
if config.lower() == 'yes':
url = rest_urls['bgp_advertise_config'].format(vrf_name)
payload = {'openconfig-bgp-evpn-ext:advertise-list':['IPV4_UNICAST']}
if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN advertise-ipv4 in vrf:{} config Failed'.format(vrf_name))
return False
elif config.lower() == 'no':
url = rest_urls['bgp_advertise_config'].format(vrf_name)
if not delete_rest(dut, rest_url=url):
st.banner('FAIL-OCYANG: BGP EVPN advertise-ipv4 in vrf:{} delete Failed'.format(vrf_name))
return False
else:
my_cmd = 'router bgp {} vrf {}\n'.format(kwargs['local_as'],vrf_name)
my_cmd += 'address-family l2vpn evpn\n'
my_cmd += '{} advertise ipv4 {}\n'.format(config_cmd,advertise_ipv4)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'advertise_ipv6_vrf':
if cli_type in ["rest-put", "rest-patch"]:
if config.lower() == 'yes':
url = rest_urls['bgp_advertise_config'].format(vrf_name)
payload = {'openconfig-bgp-evpn-ext:advertise-list': ['IPV6_UNICAST']}
if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN advertise-ipv6 in vrf:{} config Failed'.format(vrf_name))
return False
elif config.lower() == 'no':
url = rest_urls['bgp_advertise_config'].format(vrf_name)
if not delete_rest(dut, rest_url=url):
st.banner('FAIL-OCYANG: BGP EVPN advertise-ipv6 in vrf:{} delete Failed'.format(vrf_name))
return False
else:
my_cmd = 'router bgp {} vrf {}\n'.format(kwargs['local_as'],vrf_name)
my_cmd += 'address-family l2vpn evpn\n'
my_cmd += '{} advertise ipv6 {}\n'.format(config_cmd,advertise_ipv6)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'vrf_rd_rt':
if cli_type in ["rest-put", "rest-patch"]:
if 'l3_rd' in kwargs:
url = rest_urls['bgp_route_distinguisher'].format(vrf_name)
if config.lower() == 'yes':
payload = {'openconfig-bgp-evpn-ext:route-distinguisher': kwargs['l3_rd']}
if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN route-distinguisher config Failed')
return False
elif config.lower() == 'no':
if not delete_rest(dut, rest_url=url):
st.banner('FAIL-OCYANG: BGP EVPN route-distinguisher delete Failed')
return False
if 'l3_both_rt' in kwargs:
url_i = rest_urls['bgp_import_rt'].format(vrf_name)
url_e = rest_urls['bgp_export_rt'].format(vrf_name)
if config.lower() == 'yes':
payload = {'openconfig-bgp-evpn-ext:import-rts': [kwargs['l3_both_rt']]}
if not config_rest(dut, http_method=cli_type, rest_url=url_i, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN import rt config Failed')
return False
payload = {'openconfig-bgp-evpn-ext:export-rts': [kwargs['l3_both_rt']]}
if not config_rest(dut, http_method=cli_type, rest_url=url_e, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN export rt config Failed')
return False
elif config.lower() == 'no':
if not delete_rest(dut, rest_url=url_i):
st.banner('FAIL-OCYANG: BGP EVPN import rt delete Failed')
return False
if not delete_rest(dut, rest_url=url_e):
st.banner('FAIL-OCYANG: BGP EVPN export rt delete Failed')
return False
if 'l3_import_rt' in kwargs:
url = rest_urls['bgp_import_rt'].format(vrf_name)
if config.lower() == 'yes':
payload = {'openconfig-bgp-evpn-ext:import-rts': [kwargs['l3_import_rt']]}
if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN import rt config Failed')
return False
elif config.lower() == 'no':
if not delete_rest(dut, rest_url=url):
st.banner('FAIL-OCYANG: BGP EVPN import rt delete Failed')
return False
if 'l3_export_rt' in kwargs:
url = rest_urls['bgp_export_rt'].format(vrf_name)
if config.lower() == 'yes':
payload = {'openconfig-bgp-evpn-ext:export-rts': [kwargs['l3_export_rt']]}
if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN export rt config Failed')
return False
elif config.lower() == 'no':
if not delete_rest(dut, rest_url=url):
st.banner('FAIL-OCYANG: BGP EVPN export rt delete Failed')
return False
else:
my_cmd = 'router bgp {} vrf {}\n'.format(kwargs['local_as'],vrf_name)
my_cmd += 'address-family l2vpn evpn\n'
if 'l3_rd' in kwargs:
my_cmd += '{} rd {}\n'.format(config_cmd,kwargs['l3_rd'])
if 'l3_both_rt' in kwargs:
my_cmd += '{} route-target both {}\n'.format(config_cmd,kwargs['l3_both_rt'])
if 'l3_import_rt' in kwargs:
my_cmd += '{} route-target import {}\n'.format(config_cmd,kwargs['l3_import_rt'])
if 'l3_export_rt' in kwargs:
my_cmd += '{} route-target export {}\n'.format(config_cmd,kwargs['l3_export_rt'])
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'advertise_all_vni':
if cli_type in ["rest-put", "rest-patch"]:
if config.lower() == 'yes':
url = rest_urls['bgp_advertise_all_vni'].format(vrf_name)
payload = { 'openconfig-bgp-evpn-ext:advertise-all-vni': True}
if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN advertise-all-vni config Failed')
return False
elif config.lower() == 'no':
url = rest_urls['bgp_advertise_all_vni'].format(vrf_name)
if not delete_rest(dut, rest_url=url):
st.banner('FAIL-OCYANG: BGP EVPN advertise-all-vni delete Failed')
return False
else:
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += '{} advertise-all-vni\n'.format(config_cmd)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'advertise_default_gw':
if cli_type in ["rest-put", "rest-patch"]:
if config.lower() == 'yes':
url = rest_urls['bgp_advertise_default_gw'].format(vrf_name)
payload = {'openconfig-bgp-evpn-ext:advertise-default-gw': True}
if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN advertise-default-gw config Failed')
return False
elif config.lower() == 'no':
url = rest_urls['bgp_advertise_default_gw'].format(vrf_name)
if not delete_rest(dut, rest_url=url):
st.banner('FAIL-OCYANG: BGP EVPN advertise-default-gw delete Failed')
return False
else:
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += '{} advertise-default-gw\n'.format(config_cmd)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'autort':
# convert to REST as and when used
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += '{} autort rfc8365-compatible\n'.format(config_cmd)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'default_originate_ipv4':
# convert to REST as and when used
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += '{} default-originate ipv4\n'.format(config_cmd)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'default_originate_ipv6':
# convert to REST as and when used
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += '{} default-originate ipv6\n'.format(config_cmd)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'dup_addr_detection':
# convert to REST as and when used
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += '{} dup-addr-detection {}\n'.format(config_cmd,dup_addr_detection)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'flooding_disable':
# convert to REST as and when used
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += '{} flooding disable\n'.format(config_cmd)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'flooding_head_end_replication':
# convert to REST as and when used
my_cmd += "address-family l2vpn {}\n".format(addr_family_modifier)
my_cmd += '{} flooding head-end-replication\n'.format(config_cmd)
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'network' and config_cmd == '':
# convert to REST as and when used
if cli_type not in ['klish']:
my_cmd += 'network {} rd {} ethtag {} label {} esi {} gwip {} routermac {}\n'.format(network,rd,ethtag,bgp_label,esi_id,gw_ip,router_mac)
else:
st.error("Support not added to config - 'network'")
elif type1 == 'network' and config_cmd == 'no':
# convert to REST as and when used
if cli_type not in ['klish']:
my_cmd += '{} network {} rd {} ethtag {} label {} esi {} gwip {}\n'.format(config_cmd,network,rd,ethtag,bgp_label,esi_id,gw_ip)
else:
st.error("Support not added to config - 'network'")
elif type1 == 'route_target':
# convert to REST as and when used
if 'both_rt' in kwargs:
my_cmd += '{} route-target both {}\n'.format(config_cmd,kwargs['both_rt'])
if 'import_rt' in kwargs:
my_cmd += '{} route-target import {}\n'.format(config_cmd,kwargs['import_rt'])
if 'export_rt' in kwargs:
my_cmd += '{} route-target export {}\n'.format(config_cmd,kwargs['export_rt'])
if cli_type == 'klish':
my_cmd += "exit\n"
elif type1 == 'vni':
if cli_type in ["rest-put", "rest-patch"]:
if config.lower() == 'yes':
url_vni = rest_urls['bgp_vni_config'].format(vrf_name)
payload = {'openconfig-bgp-evpn-ext:vni': [{
'vni-number': int(kwargs['vni']) ,
'config':{
'vni-number': int(kwargs['vni']) ,
'advertise-default-gw': True
}
}]
}
if not config_rest(dut, http_method=cli_type, rest_url=url_vni, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN vni config Failed')
return False
if vni_unconfig == 'no':
url_vni = rest_urls['bgp_vni_unconfig'].format(vrf_name,kwargs['vni'])
if not delete_rest(dut, rest_url=url_vni):
st.banner('FAIL-OCYANG: BGP EVPN vni delete Failed')
return False
if 'vni_rd' in kwargs and vni_unconfig == '':
url = rest_urls['bgp_vni_route_distinguisher'].format(vrf_name,kwargs['vni'])
if config.lower() == 'yes':
payload = {'openconfig-bgp-evpn-ext:route-distinguisher': kwargs['vni_rd']}
if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN vni route-distinguisher config Failed')
return False
elif config.lower() == 'no':
if not delete_rest(dut, rest_url=url):
st.banner('FAIL-OCYANG: BGP EVPN vni route-distinguisher delete Failed')
return False
if 'vni_both_rt' in kwargs and vni_unconfig == '':
url_i = rest_urls['bgp_vni_import_rt'].format(vrf_name,kwargs['vni'])
url_e = rest_urls['bgp_vni_export_rt'].format(vrf_name,kwargs['vni'])
if config.lower() == 'yes':
payload = {'openconfig-bgp-evpn-ext:import-rts': [kwargs['vni_both_rt']]}
if not config_rest(dut, http_method=cli_type, rest_url=url_i, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN vni import rt config Failed')
return False
payload = {'openconfig-bgp-evpn-ext:export-rts': [kwargs['vni_both_rt']]}
if not config_rest(dut, http_method=cli_type, rest_url=url_e, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN vni export rt config Failed')
return False
elif config.lower() == 'no':
if not delete_rest(dut, rest_url=url_i):
st.banner('FAIL-OCYANG: BGP EVPN vni import rt delete Failed')
return False
if not delete_rest(dut, rest_url=url_e):
st.banner('FAIL-OCYANG: BGP EVPN vni export rt delete Failed')
return False
if 'vni_import_rt' in kwargs and vni_unconfig == '':
url_i = rest_urls['bgp_vni_import_rt'].format(vrf_name, kwargs['vni'])
if config.lower() == 'yes':
payload = {'openconfig-bgp-evpn-ext:import-rts': [kwargs['vni_import_rt']]}
if not config_rest(dut, http_method=cli_type, rest_url=url_i, json_data=payload):
st.banner('FAIL-OCYANG: BGP EVPN vni import rt config Failed')
return False
elif config.lower() == 'no':
if not delete_rest(dut, rest_url=url_i):
st.banner('FAIL-OCYANG: BGP EVPN vni import rt delete Failed')
return False
if 'vni_export_rt' in kwargs and vni_unconfig | |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 26 07:55:52 2019
@author: buriona
"""
import sys
import logging
import json
import pandas as pd
import numpy as np
from pathlib import Path
import plotly
from os import makedirs, path
from calendar import monthrange
from datetime import datetime as dt
from dateutil.relativedelta import relativedelta
from logging.handlers import TimedRotatingFileHandler
from crmms_charts import get_comp_fig
from crmms_nav import create_nav
from crmms_maps import create_map
from crmms_subplots import make_all_subplots
from crmms_help import create_help_page
from crmms_utils import get_crmms_hdb_site_map, get_crmms_hdb_datatype_map
from crmms_utils import get_favicon, get_plotly_js, get_hdb_alias_map
from crmms_utils import serial_to_trace, is_url_safe
from crmms_utils import res_display_names, print_and_log
from hdb_api.hdb_utils import get_eng_config
from hdb_api.hdb_api import Hdb, HdbTables, HdbTimeSeries
def get_use_datetypes():
    """Return the HDB datatype common names used for charting, lower-cased."""
    names = (
        "Outflow",
        "Inflow",
        "Pool Elevation",
        "Storage",
        "Outflow_cfs",
        "Inflow_cfs",
    )
    return [name.lower() for name in names]
def get_plot_config(img_filename):
    """Return the shared plotly figure config, using *img_filename* as the
    default name for images exported from the mode bar."""
    export_options = {
        "filename": img_filename,
        "width": 1200,
        "height": 700,
    }
    return {
        "modeBarButtonsToRemove": ["sendDataToCloud", "lasso2d", "select2d"],
        "showAxisDragHandles": True,
        "showAxisRangeEntryBoxes": True,
        "displaylogo": False,
        "toImageButtonOptions": export_options,
    }
def create_log(path="crmms_viz_gen.log"):
    """Create the module's INFO-level logger, rotating *path* every 28 days."""
    logger = logging.getLogger("crmms_viz_gen rotating log")
    logger.setLevel(logging.INFO)
    rotating = TimedRotatingFileHandler(path, when="D", interval=28, backupCount=1)
    logger.addHandler(rotating)
    return logger
def make_comp_chart(
    df_slot,
    df_obs,
    site_meta,
    chart_filename,
    img_filename,
    date_str,
    watermark=False,
    no_esp=True,
    logger=None,
    plotly_js=None,
    units=None,
):
    """Render an observed-vs-modeled comparison chart to a standalone HTML file.

    :param df_slot: modeled (CRMMS) traces for one site/datatype.
    :param df_obs: observed time series for the same site/datatype.
    :param site_meta: metadata row providing site/datatype display names.
    :param chart_filename: output .html path.
    :param img_filename: default filename for the plotly image-download button.
    :param date_str: label describing the model run date.
    :param watermark: overlay a "provisional" disclaimer when True.
    :param no_esp: hide individual ESP ensemble traces when True.
    :param logger: optional logger for error reporting.
    :param plotly_js: plotly.js include mode/path; defaults to get_plotly_js().
    :param units: y-axis engineering-units label. Bug fix: the original read
        an implicit module-level ``units`` global set by the __main__ loop;
        it can now be passed explicitly, with a fallback to that global for
        backward compatibility.
    :return: the plotly figure, or None when chart creation failed.
    """
    if not plotly_js:
        plotly_js = get_plotly_js()
    if units is None:
        # Backward-compatible fallback to the module-level global the
        # original implementation depended on.
        units = globals().get("units")
    try:
        site_name = site_meta["site_metadata.site_name"].upper()
        common_name = "datatype_metadata.datatype_common_name"
        datatype_name = f"{site_meta[common_name].upper().replace('VOLUME', '')}"
        fig = get_comp_fig(
            df_slot=df_slot,
            df_obs=df_obs,
            site_name=site_name,
            datatype_name=datatype_name,
            units=units,
            date_str=date_str,
            watermark=watermark,
            no_esp=no_esp,
        )
        plotly.offline.plot(
            fig,
            include_plotlyjs=plotly_js,
            config=get_plot_config(img_filename),
            filename=chart_filename,
            auto_open=False,
        )
        # Inject a favicon link just before </head> in the generated HTML.
        flavicon = f'<link rel="shortcut icon" ' f'href="{get_favicon()}"></head>'
        with open(chart_filename, "r") as html_file:
            chart_file_str = html_file.read()
        with open(chart_filename, "w") as html_file:
            html_file.write(chart_file_str.replace(r"</head>", flavicon))
        return fig
    except Exception as err:
        err_str = (
            f" Error creating chart - "
            f'{chart_filename.split("flat_files")[-1]} - {err}'
        )
        print_and_log(err_str, logger)
def get_meta(sids, dids):
    """Fetch site/datatype metadata rows from the "uc" HDB instance.

    NOTE: falsy entries are filtered out of ``sids``/``dids`` *in place*
    (slice assignment), which the caller may rely on.
    """
    sids[:] = [sid for sid in sids if sid]
    dids[:] = [did for did in dids if did]
    frames = {}
    for alias in ["uc"]:
        hdb = Hdb(get_eng_config(db=alias))
        frames[alias] = HdbTables.sitedatatypes(hdb, sid_list=sids, did_list=dids)
    return frames["uc"]
def get_model_data(sdi, hdb_alias, model_id, datatype_str, t1, t2):
    """Pull a modeled time series (by model run id) from HDB.

    :param sdi: site_datatype_id to query.
    :param hdb_alias: HDB instance alias (e.g. "uc").
    :param model_id: model run id (mrid) of the CRMMS run.
    :param datatype_str: label applied to the returned series.
    :param t1: start of the query window.
    :param t2: end of the query window.
    """
    hdb = Hdb(get_eng_config(db=hdb_alias))
    return HdbTimeSeries.series(
        hdb, sdi=sdi, mrid=model_id, label_overide=datatype_str, t1=t1, t2=t2
    )
def get_real_data(sdi, hdb_alias, datatype_str, t1, t2):
    """Pull an observed (non-model) time series from HDB.

    Same parameters as ``get_model_data`` minus the model run id.
    """
    hdb = Hdb(get_eng_config(db=hdb_alias))
    return HdbTimeSeries.series(
        hdb, sdi=sdi, label_overide=datatype_str, t1=t1, t2=t2
    )
def make_csv(df, csv_filename, logger=None):
    """Write ``df`` (index included) to ``csv_filename``; log any failure."""
    try:
        df.to_csv(csv_filename, index=True)
    except Exception as err:
        print_and_log(f"Error saving {csv_filename} - {err}", logger)
def make_json(df, json_filename, logger=None):
    """Write ``df`` to ``json_filename`` in pandas "split" orientation.

    :param df: DataFrame to serialize.
    :param json_filename: output path.
    :param logger: optional logger passed to ``print_and_log`` on failure.
        Now defaults to None for consistency with ``make_csv``/``make_nav``
        (it previously was the only sibling requiring an explicit logger).
    """
    try:
        df.to_json(json_filename, orient="split", index=True)
    except Exception as err:
        json_err = f"Error saving {json_filename} - {err}"
        print_and_log(json_err, logger)
def make_nav(year_str, data_dir, logger=None):
    """Create the navigation page for ``data_dir``; log success or failure."""
    try:
        print_and_log(create_nav(year_str, data_dir), logger)
    except Exception as err:
        print_and_log(
            f"Error creating ff_nav.html file for {data_dir} - {err}", logger
        )
def make_sitemap(date_str, site_type, df_meta, data_dir, logger=None):
    """Create the leaflet map page for ``site_type``; log success or failure."""
    try:
        print_and_log(create_map(date_str, site_type, df_meta, data_dir), logger)
    except Exception as err:
        print_and_log(
            f"Error creating leaflet map file for {site_type} - {err}", logger
        )
def map_traces(trace):
    """Translate a CRMMS trace number into a display label.

    Traces 1-3 are the ESP summary traces; any trace ``n >= 4`` is mapped to
    the hydrology year ``1991 + (n - 4)``.
    """
    number = int(trace)
    special = {1: "ESP Max", 2: "ESP Min", 3: "ESP Most"}
    if number in special:
        return special[number]
    return str(number - 4 + 1991)
def af_to_cfs(df, date_col="datetime", val_col="value"):
    """Convert monthly acre-feet volumes to average flow in cfs.

    One cfs flowing for a day is ~1.983 acre-feet, so dividing each monthly
    volume by ``days_in_month * 1.983`` yields the mean flow rate.
    """
    days_in_month = df[date_col].apply(lambda d: monthrange(d.year, d.month)[1])
    return df[val_col] / (days_in_month * 1.983)
def convert_to_start_of_month(x):
    """Map a CRMMS timestep stamp to the first day of the month it reports.

    One minute is added first so stamps that land exactly on a month
    boundary (e.g. an end-of-month 24:00 timestep) roll into the following
    month before stepping back one calendar month.
    """
    nudged = x + np.timedelta64(1, "m")  # "m" = minutes (not months)
    if nudged.month == 1:
        return dt(nudged.year - 1, 12, 1)
    return dt(nudged.year, nudged.month - 1, 1)
def parse_model_ids(model_arg):
    """Parse the ``--models`` CLI value into a {"max", "most", "min"} dict.

    A single id means a MOST-only run; three comma-separated ids are
    MAX, MOST, MIN. Any other shape prints an error and exits.

    Bug fix: the error message previously referenced the global ``args``
    object instead of the ``model_arg`` parameter, which raises NameError
    when this function is called outside the __main__ block.
    """
    model_ids = model_arg.split(",")
    if len(model_ids) == 1:
        return {"max": None, "most": model_ids[0], "min": None}
    if len(model_ids) == 3:
        return {"max": model_ids[0], "most": model_ids[1], "min": model_ids[2]}
    print(
        f"--models {model_arg} is improperly formatted, must be a "
        f"single integer for a MOST only run or list of 3 integers "
        f"separated by commas for MAX, MOST, MIN runs."
    )
    sys.exit(1)
def get_config(config_name, config_path=None):
if not path.isfile(config_path):
print(f'"{config_path}" is not a valid filepath, try again.')
sys.exit(1)
with Path(config_path).open("r") as pub_config:
all_configs = json.load(pub_config)
config = all_configs.get(config_name, None)
if config:
pub_name = config.get("name", "UNKNOWN_PUB_DATE")
models = config.get("mrids", {"max": None, "most": None, "min": None})
data_filename = config.get("data", DEFAULT_DATA_PATH)
else:
print(f'"{args.config}" not found in {config_path}, try again.')
sys.exit(1)
return {"name": pub_name, "mrids": models, "data": data_filename}
def set_ouput_path(output_path):
    """Validate the requested output folder and return it as a posix path.

    Exits the program when the folder does not exist.

    Bug fix: the original ignored its ``output_path`` parameter and read
    the global ``args.output`` instead.
    """
    if not path.exists(output_path):
        print(
            f"--output {output_path} does not exist, "
            f"can not save files there, try again."
        )
        sys.exit(1)
    return Path(output_path).resolve().as_posix()
def check_suite_name(suite_name):
    """Ensure the suite/folder name is URL safe; exit the program if not.

    ``is_url_safe`` returns True for a safe name, otherwise a description
    of the offending character which is echoed to the user.

    Bug fix: the messages previously referenced the global ``args.name``
    instead of the ``suite_name`` parameter.
    """
    if is_url_safe(suite_name) is not True:
        print(
            f"The --name {suite_name} contains the unsafe url character - , "
            f"{is_url_safe(suite_name)}. Please try a different folder name."
        )
        sys.exit(1)
    return suite_name
def get_date_str(suite_name):
    """Render an "m_YYYY" suite name as e.g. "July 2020".

    Falls back to the raw name when it does not parse as month_year.
    """
    try:
        parts = [int(piece) for piece in suite_name.split("_")]
        return f"{dt(parts[1], parts[0], 1):%B %Y}"
    except Exception:
        return f"{suite_name}"
def get_year_str(suite_name):
    """Extract the four-digit year from an "m_YYYY" suite name.

    Falls back to the raw name when parsing fails.
    """
    try:
        parts = [int(piece) for piece in suite_name.split("_")]
        return f"{parts[1]}"
    except Exception:
        return f"{suite_name}"
def get_csv_path(csv_path):
    """Resolve the CRMMS output .csv path, falling back to ./data/<name>.

    Exits the program when no valid .csv can be found.

    Bug fix: the original ignored its ``csv_path`` parameter and read the
    global ``args.data`` instead.
    """
    data_path = str(csv_path)
    if not path.exists(data_path) or not data_path.endswith(".csv"):
        # Fall back to the script-local ./data folder (``this_dir`` is set
        # in the __main__ block).
        data_path = path.join(this_dir, "data", data_path)
    if not path.exists(data_path) or not data_path.endswith(".csv"):
        print(
            f"--data {data_path} does not exist, "
            f"please verify location on output .csv, try again."
        )
        sys.exit(1)
    return Path(data_path).resolve()
def get_args():
    """Build and parse the CLI arguments for the visualization generator.

    Bug fix: ``argparse`` was only imported inside the ``__main__`` guard,
    so this function raised NameError when imported from another module;
    it is now imported locally.
    """
    import argparse

    cli_desc = "Creates visualization suite for CRMMS results"
    parser = argparse.ArgumentParser(description=cli_desc)
    parser.add_argument(
        "-V", "--version", help="show program version", action="store_true"
    )
    parser.add_argument(
        "-P",
        "--provisional",
        help="watermarks charts with provisional disclaimer",
        action="store_true",
    )
    parser.add_argument(
        "-o", "--output", help="override default output folder", default="local"
    )
    parser.add_argument(
        "--config_path",
        help=f"path to crmms_viz.config, used to overide deafault local one ({DEFAULT_CONFIG_PATH})",
        default=DEFAULT_CONFIG_PATH,
    )
    parser.add_argument(
        "-c",
        "--config",
        help="key to be used in crmms_viz.config, overrides --name, --models, and --data",
    )
    parser.add_argument(
        "-n",
        "--name",
        help="override the default current date based folder name should take form m_YYYY, i.e. 7_2020",
    )
    parser.add_argument(
        "-m",
        "--models",
        help="override models.config, use form: MAX, MOST, MIN. If only one provided it is assumed to be most.",
    )
    parser.add_argument(
        "-d", "--data", help=f"override default data path ({DEFAULT_DATA_PATH})"
    )
    parser.add_argument(
        "--no_esp",
        help="use/show ensemble results in creation of suite",
        action="store_true",
    )
    return parser.parse_args()
if __name__ == "__main__":
import argparse
this_dir = Path().absolute()
DEFAULT_DATA_PATH = Path(this_dir, "data", "ESPcsvOutput.csv")
DEFAULT_CONFIG_PATH = Path(this_dir, "configs", "crmms_viz.config")
args = get_args()
if args.version:
print("crmms_viz_gen.py v1.0")
watermark = False
if args.provisional:
watermark = True
if args.config:
config_dict = get_config(args.config, args.config_path)
args.name = config_dict["name"]
args.models = config_dict["mrids"]
args.data = config_dict["data"]
if args.data:
data_path = get_csv_path(args.data)
else:
data_path = Path(DEFAULT_DATA_PATH).resolve()
if args.models:
if isinstance(args.models, dict):
models = args.models
else:
models = parse_model_ids(args.models)
else:
model_config_path = Path("models.config").resolve()
with model_config_path.open("r") as mrid_config:
models = json.load(mrid_config)
now_utc = dt.utcnow()
if not args.output == "local":
crmms_viz_dir = set_ouput_path(args.output)
print(crmms_viz_dir)
else:
crmms_viz_dir = Path(this_dir, "crmms_viz").as_posix()
makedirs(crmms_viz_dir, exist_ok=True)
if args.name:
curr_month_str = check_suite_name(args.name)
date_str = get_date_str(curr_month_str)
year_str = get_year_str(curr_month_str)
else:
curr_month_str = f"{now_utc.month}_{now_utc.year}"
date_str = f"{now_utc:%b %Y} CRMMS Modeling Results"
year_str = f"{now_utc:%Y} CRMMS Modeling Results"
curr_month_dir = Path(crmms_viz_dir, curr_month_str).as_posix()
makedirs(curr_month_dir, exist_ok=True)
meta_filepath = Path(curr_month_dir, "meta.csv").as_posix()
log_dir = Path(this_dir, "logs")
makedirs(log_dir, exist_ok=True)
logger = create_log(Path(log_dir, "crmms_viz_gen.log"))
col_names = ["run", "trace", "obj.slot", "datetime", "value", "unit"]
dtypes = {
"Run Number": np.int64,
"Trace Number": np.int64,
"Object.Slot": str,
"Timestep": str,
"Slot Value": float,
"Unit": str,
}
# this is crmms-esp csv data
df = pd.read_csv(
data_path, dtype=dtypes, parse_dates=["Timestep"], infer_datetime_format=True
)
header_row = list(dtypes.keys())
rename_map = {header_row[i]: v for i, v in enumerate(col_names)}
obj_slot = df["Object.Slot"].str.split(".", 1, expand=True)
df = df.rename(columns=rename_map)
df = df.drop(["run"], axis="columns")
df["obj"] = obj_slot[0]
df["slot"] = obj_slot[1]
sids_map = get_crmms_hdb_site_map()
dids_map = get_crmms_hdb_datatype_map()
sids = list(set([sids_map[x] for x in df["obj"].unique() if x in sids_map.keys()]))
dids = list(set([dids_map[x] for x in df["slot"].unique() if x in dids_map.keys()]))
df["trace"] = df["trace"].apply(lambda x: map_traces(x))
df_meta = get_meta(sids, dids)
make_csv(df_meta, meta_filepath, logger)
use_datatypes = get_use_datetypes()
hdb_alias_map = get_hdb_alias_map()
msg = f"Creating CRMMS charts here: {curr_month_dir}"
print_and_log(msg, logger)
figs = []
for slot in list(df["obj.slot"].unique()):
site_name, datatype_name = slot.split(".")
units = df[df["obj.slot"] == slot]["unit"].iloc[0]
if datatype_name.lower() not in use_datatypes:
continue
if site_name.lower() not in [x.lower() for x in hdb_alias_map.keys()]:
continue
sid = str(sids_map[site_name])
did = str(dids_map[datatype_name])
sdi = None
sdi_series = None
if sid and did:
sdi_series = df_meta[
(df_meta["datatype_id"] == did) & (df_meta["site_id"] == sid)
]["site_datatype_id"]
if not sdi_series.empty:
sdi = str(sdi_series.iloc[0])
chart_dir = Path(curr_month_dir, f"{sid}", "charts")
makedirs(chart_dir, exist_ok=True)
csv_dir = Path(curr_month_dir, f"{sid}", "csv")
makedirs(csv_dir, exist_ok=True)
json_dir = Path(curr_month_dir, f"{sid}", "json")
makedirs(json_dir, exist_ok=True)
df_slot = df[df["obj.slot"] == slot][["value", "datetime", "trace"]]
df_slot["datetime"] = df_slot["datetime"].apply(
lambda x: convert_to_start_of_month(x)
)
if "1000" in units:
df_slot["value"] = df_slot["value"] * 1000
units = units.replace("1000 ", "")
datatype_lower = datatype_name.lower()
img_filename = | |
#!/usr/bin/env python3
# coding: utf-8
################################################################################
# Displays values received over UDP from IoT sensor devices as bar charts.
# (Open http://127.0.0.1:8080 in a web browser to view the charts)
#
# Copyright (c) 2021 <NAME>
################################################################################
# Initial settings
UDP_PORT = 1024 # UDP listening port number (default 1024)
HTTP_PORT = 80 # HTTP listening port number (default 80 -> 8080 on failure)
SAVE_CSV = True # save received data to CSV files (True: save, False: do not save)
DEV_CHECK = False # unregistered devices (True: discard, False: save as UNKNOWN)
ELEVATION = 0 # elevation (m), used for sea-level pressure correction
HIST_BUF_N = 10 # number of history entries kept per sensor value
# Per-device correction table (applied to the display only; saved data is NOT corrected)
OFFSET_VALUE = {\
'temp._1':(1, 1.0, 0.0),\
'temp._2':(1, 1.0, 0.0),\
'temp._3':(1, 1.0, 0.0),\
}# device: (col, A, B); col = column number from 1; corrected = A * value + B
# Registered sensor devices (matched against the first 5 chars of the UDP payload)
# NOTE(review): 'press' appears twice in this list — harmless but likely unintended.
sensors = [\
'temp0','hall0','adcnv','btn_s','pir_s','illum',\
'temp.','humid','press','envir','accem','rd_sw',\
'press','e_co2','janke',\
'actap','awsin','count','esp32','ident','medal',\
'meter','ocean','river','tftpc','timer','voice',\
'xb_ac','xb_ct','xb_ev','xb_sw','xbbat','xbbel',\
'xbgas','xbhum','xblcd','xbled','xbprs','xbrad',\
'xbsen'\
]
# CSV data columns (numeric values) for each sensor device type:
# device prefix -> list of (label, unit) pairs, one per payload value
csvs = {\
'pir_s':[('Wake up Switch',''),('PIR Switch','')],\
'rd_sw':[('Wake up Switch',''),('Reed Switch','')],\
'temp0':[('Temperature','deg C')],\
'temp.':[('Temperature','deg C')],\
'ocean':[('Temperature','deg C'),('RSSI','dBm')],\
'humid':[('Temperature','deg C'),('Humidity','%')],\
'press':[('Temperature','deg C'),('Pressure','hPa')],\
'envir':[('Temperature','deg C'),('Humidity','%'),('Pressure','hPa')],\
'e_co2':[('Temperature','deg C'),('Humidity','%'),('Pressure','hPa'),('CO2','ppm'),('TVOC','ppb'),('Counter','')],\
'janke':[('Janken',''),('Fingers','')],\
#'accem':[('Accelerometer X','g'),('Accelerometer Y','g'),('Accelerometer Z','g')],\
'accem':[('Accelerometer X','m/s2'),('Accelerometer Y','m/s2'),('Accelerometer Z','m/s2')],\
'actap':[('Power','W'),('Cumulative','Wh'),('Time','Seconds')],\
'meter':[('Power','W'),('Cumulative','Wh'),('Time','Seconds')],\
'awsin':[('Participants',''),('Cumulative','')],\
'xb_ac':[('Usage Time','h'),('Consumption','kWh'),('Prev. Usage Time','h'),('Consumption','kWh')],\
'xb_ct':[('Power','W')],\
'xb_ev':[('Illuminance','lx'),('Temperature','deg C'),('Humidity','%')],\
'xb_sw':[('Reed Switch','')],\
'xbbel':[('Ringing','')],\
'xbgas':[('CO','ppm'),('CH4','ppm')],\
'xbhum':[('Illuminance','lx'),('Temperature','deg C'),('Humidity','%')],\
'xblcd':[('Illuminance','lx'),('Temperature','deg C')],\
'xbled':[('Illuminance','lx'),('Temperature','deg C')],\
'xbprs':[('Temperature','deg C'),('Pressure','hPa')],\
'xbrad':[('Radiation','uSievert'),('Temperature','deg C'),('Voltage','V')],\
'xbsen':[('Illuminance','lx'),('Temperature','deg C'),('Low Battery','')]\
}
# Expected (min, max) range per (label, unit) column, used to scale the bar charts
csvs_range = {\
('Wake up Switch',''): (0,1),\
('PIR Switch',''): (0,1),\
('Reed Switch',''): (0,1),\
('Temperature','deg C'): (0,40),\
('RSSI','dBm'): (-100,0),\
('Humidity','%'): (0,100),\
('Pressure','hPa'): (1013.25 - 33.25, 1013.25 + 33.25),\
('CO','ppm'): (0,2000),\
('CO2','ppm'): (0,2000),\
('CH4','ppm'): (0,2000),\
('TVOC','ppb'): (0,5000),\
('Counter',''): (0,10),\
('Fingers',''): (0,5),\
('Accelerometer X','m/s2'): (-9.8,9.8),\
('Accelerometer Y','m/s2'): (-9.8,9.8),\
('Accelerometer Z','m/s2'): (-9.8,9.8),\
('Accelerometer X','g'): (-1,1),\
('Accelerometer Y','g'): (-1,1),\
('Accelerometer Z','g'): (-1,1),\
('Power','W'): (0,3000),\
('Cumulative','Wh'): (0,3000),\
('Consumption','kWh'): (0,3),\
('Time','Seconds'): (0,3600),\
('Time','Hours'): (0,8760),\
('Usage Time','h'): (0,24),\
('Prev. Usage Time','h'): (0,24),\
('Participants',''): (0,100),\
('Cumulative',''): (0,100000),\
('Illuminance','lx'): (0,1000),\
('Ringing',''): (0,1),\
('Radiation','uSievert'): (0.04,0.23),\
('Voltage','V'): (0,5),\
('Low Battery',''): (0,1)\
}
# Registered non-sensor devices (their payloads contain text data)
notifyers = [\
'adash','atalk','cam_a','ir_in','sound',\
'xb_ir','xbidt'\
]
# Special literal payloads handled as-is
pingpongs = [
'Ping','Pong','Emergency','Reset'\
]
# Runtime state
devices = list() # device names seen so far
dev_vals = dict() # device name -> history of received value lists
dev_date = dict() # device name -> history of receive timestamps
http_port = HTTP_PORT # actual HTTP port in use (may fall back from 80)
import os
import sys
import socket
import datetime
from wsgiref.simple_server import make_server # WSGIサーバ
from getpass import getuser # ユーザ取得を組み込む
from time import time # 時間取得を組み込む
from time import sleep # スリープ機能を組み込む
import threading # スレッド管理を組み込む
def get_dev_name(s):
    """Return the device name encoded in UDP payload ``s``, or None.

    Special payloads ('Ping', 'Pong', ...) are returned as-is after
    stripping. Otherwise the payload must start with "xxxxx_N," where
    xxxxx is a registered 5-char device type and N is one digit; the
    7-char "xxxxx_N" device name is returned.
    """
    stripped = s.strip()
    if stripped in pingpongs:            # Ping / Pong / Emergency / Reset
        return stripped
    if not s[0:8].isprintable():         # reject binary junk up front
        return None
    if s[5] == '_' and s[6].isdecimal() and s[7] == ',':  # "xxxxx_N," shape
        if s[0:5] in sensors or s[0:5] in notifyers:      # registered type?
            return s[0:7]
    return None
def get_val(s):
    """Convert payload field ``s`` to a number, or None if not numeric.

    Whitespace is removed first; whole-number floats come back as int.
    """
    try:
        number = float(s.replace(' ', ''))
    except ValueError:
        return None
    if number == int(number):
        return int(number)
    return number
def calc_press_h0(temp, press):
    """Convert a pressure reading to its sea-level equivalent.

    Applies the barometric formula using the module-level ELEVATION (m).
    Prints the inputs and the result for debugging.
    """
    print('calc_press_h0', temp, press, end=' -> ')
    sea_temp = temp + 273.15 + 0.0065 * ELEVATION     # temperature (K) at sea level
    corrected = press / (1.0 - 0.0065 * ELEVATION / sea_temp) ** 5.257
    corrected = round(corrected, 1)
    print(corrected)
    return corrected
def save(filename, data):
    """Append ``data`` as one line to ``filename``; no-op unless SAVE_CSV.

    Bug fix: on a failed ``open()`` the original printed the error but then
    still executed ``fp.write``, raising a NameError. The file is now
    handled with a ``with`` block and all I/O errors are reported instead.
    """
    if SAVE_CSV == False:
        return
    try:
        with open(filename, mode='a') as fp:
            fp.write(data + '\n')
    except Exception as e:
        print(e)
def barChartHtml(colmun, range, val, color='lightgreen'):
    """Build the HTML table cells (value, min, bar, max) for one chart row.

    :param colmun: unit string for the value ('' for unitless); a few units
        are prettified ('deg C' -> ℃, 'uSievert' -> μSv, 'm/s2' -> m/s²).
    :param range: (min, max) tuple used to scale the bar. (The parameter
        name shadows the builtin but is kept for interface compatibility.)
    :param val: value to display.
    :param color: default bar color; overridden to lightblue (bottom 20% of
        range), lightpink (top 20%) or red (out of range).
    :return: HTML string of four <td> cells.
    """
    unit = ''
    if len(colmun) > 0:
        if colmun == 'deg C':
            unit = ' ℃'
        elif colmun == 'uSievert':
            unit = ' μSv'
        elif colmun == 'm/s2':
            unit = ' m/s²'
        else:
            unit = ' ' + colmun
    html = '<td align="right">' + str(val) + unit + '</td>\n'
    # Locals renamed from ``min``/``max`` to avoid shadowing the builtins.
    lo = range[0]
    hi = range[1]
    width = round(200 * (val - lo) / (hi - lo))  # bar length in pixels
    if val - lo <= (hi - lo) * 0.2:              # bottom 20% of the range
        color = 'lightblue'
    if val - lo >= (hi - lo) * 0.8:              # top 20% of the range
        color = 'lightpink'
    if val > hi or val < lo:                     # out of range
        color = 'red'
        width = 200                              # clamp bar to full length
    html += '<td align ="right" valign="bottom"><div style="font-size:xx-small;">' + str(lo) + '</div></td>\n'
    html += '<td width=200><div style="background-color: ' + color
    html += '; width: ' + str(width) + 'px"> </div></td>\n'
    html += '<td valign="bottom"><div style="font-size:xx-small;">' + str(hi) + '</div></td>\n'
    return html
def wsgi_app(environ, start_response): # HTTPアクセス受信時の処理
path = environ.get('PATH_INFO') # リクエスト先のパスを代入
if (path[1:5] == 'log_') and (path[5:10] in sensors) and (path[12:16] == '.csv'):
filename = 'log_' + path[5:12] + '.csv'
try:
fp = open(filename, 'rb')
start_response('200 OK', [('Content-type', 'application/force-download')])
return[fp.read()]
except Exception as e:
start_response('404 Not Found',[]) # 404エラー設定
return ['404 Not Found'.encode()] # 応答メッセージ(404)を返却
if path != '/': # パスがルート以外のとき
start_response('404 Not Found',[]) # 404エラー設定
return ['404 Not Found'.encode()] # 応答メッセージ(404)を返却
html = '<html>\n<head>\n' # HTMLコンテンツを作成
html += '<meta http-equiv="refresh" content="10;">\n' # 自動再読み込み
html += '</head>\n<body>\n' # 以下は本文
html += '<h1>UDPセンサ用モニタ ('\
+ str(len(devices)) + ' devices)</h1>\n'
queries = environ.get('QUERY_STRING').lower().split('&')
# print('debug queries:',queries) ##確認用
sort_col = 'devices'
filter_dev = list()
filter_item = list()
hist = 0
for query in queries:
if query == '' or query == 'devices':
sort_col = 'devices'
if query == 'items':
sort_col = 'items'
if query.startswith('device='):
filter_dev.append(query[7:12])
if query.startswith('item='):
filter_item.append(query[5:])
if query.startswith('hist='):
filter_dev.append(query[5:12])
hist = 1
html += 'Filter :'
if len(filter_dev) > 0:
html += ' <a href="/">device</a> = ' + str(filter_dev)
if len(filter_item) > 0:
html += ' <a href="/?items">item</a> = ' + str(filter_item)
if len(filter_dev) == 0 and len(filter_item) == 0:
html += ' None'
if sort_col == 'devices':
html += ', <a href="/?items">Order</a> : '
else:
html += ', <a href="/?devices">Order</a> : '
html += sort_col + '<br>\n'
html += '<table border=1>\n' # 作表を開始
html += '<tr><th><a href="?devices">デバイス名</a></th>'
html += '<th><a href="?items">項目</a></th><th>日 時:分:秒</th><th>値</th>'
html += '<th colspan = 3>グラフ</th></tr>\n' # 「グラフ」を表示
col_dict = dict()
for dev in sorted(devices):
if (len(filter_dev) > 0) and (dev[0:5] not in filter_dev) and (dev[0:7] not in filter_dev):
continue
if dev[0:5] in sensors:
colmuns = csvs.get(dev[0:5])
if colmuns is None:
print('[ERROR] founds no devices on csvs dictionary; dev =',dev[0:5])
continue
# i_max = min(len(colmuns), len(dev_vals[dev][-1]))
i_max = len(colmuns)
for j in range(len(dev_vals[dev])):
i_max = min(i_max, len(dev_vals[dev][j]))
if dev[0:5] == 'actap': # 数が多いので電力のみを表示する
i_max = 1
for i in range(i_max):
colmun = csvs[dev[0:5]][i]
minmax = csvs_range.get(colmun)
if minmax is None:
minmax = (0.0, 1.0)
val = dev_vals[dev][-1][i]
if val is None:
val = 0
if sort_col == 'devices':
i_max_hist = i_max
if hist > 0:
hist = len(dev_vals[dev])
i_max_hist *= hist
if i == 0:
html += '<tr><td rowspan = ' + str(i_max_hist) + '>'
if hist > 0:
filename = 'log_' + dev[0:7] + '.csv'
html += dev[0:7] + '<br>[<a href="' + filename + '">csv</a>]</td>'
elif dev[0:5] in filter_dev:
html += '<a href="?hist=' + dev[0:7] + '">' + dev[0:7] + '</td>'
else:
html += '<a href="?device=' + dev[0:5] + '">'\
+ dev[0:5] + '</a> <a href="?hist=' + dev[0:7] + '">' + dev[6] + '</td>'
else:
html += '<tr>'
if hist > 0:
html += '<td rowspan = ' + str(hist) + '><a href="?items&item=' + colmun[0] + '">'\
+ colmun[0] + '</a></td>\n'
for j in range(hist):
if j > 0:
html += '<tr>'
val = dev_vals[dev][j][i]
if val is None:
val = 0
html += '<td align ="center">' + dev_date[dev][j].strftime('%d %H:%M:%S') + '</td>'
html += barChartHtml(colmun[1], minmax, val) # 棒グラフ化
html += '</tr>\n'
else:
html += '<td><a href="?items&item=' + colmun[0] + '">'\
+ colmun[0] + '</a></td>\n'
html += '<td align ="center">' + dev_date[dev][-1].strftime('%d %H:%M:%S') + '</td>'
html += barChartHtml(colmun[1], minmax, val) # 棒グラフ化
elif sort_col == 'items':
if len(filter_item) == 0 or colmun[0].lower() in filter_item:
if colmun not in col_dict:
col_dict[colmun] = list()
col_dict[colmun].append(dev)
# print('debug col_dict:',col_dict) ##確認用
if len(col_dict) > 0:
for colmun in sorted(col_dict):
# print('debug colmun:',colmun) ##確認用
j = 0
for dev in col_dict[colmun]:
minmax = csvs_range.get(colmun)
if minmax is None:
minmax = (0.0, 1.0)
i = csvs[dev[0:5]].index(colmun)
val = dev_vals[dev][-1][i]
if val is None:
val = 0
html += '<tr><td><a href="?device=' + dev[0:5] + '">'\
+ dev[0:5] + '</a> <a href="?hist=' + dev[0:7] + '">' + dev[6] + '</td>'
if j == 0:
html += '<td rowspan = ' + str(len(col_dict[colmun])) + '>'\
+ '<a href="?items&item=' + colmun[0] + '">'\
+ colmun[0] + '</a></td>\n'
# print('debug barChartHtml:', minmax, val) ##確認用
html += '<td align ="center">' + dev_date[dev][-1].strftime('%d %H:%M:%S') + '</td>'
html += barChartHtml(colmun[1], minmax, val) # 棒グラフ化
j += 1
html += '<tr><td colspan=7 align=right>'
html += '<div><font size=2>Usage: http://127.0.0.1'
if http_port != 80:
html += ':' + str(http_port)
html | |
first element is the start of the temporal interval. The
specified instance in time is **included** in the interval. 2. The second element is the end of the
temporal interval. The specified instance in time is **excluded** from the interval. The specified
temporal strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). Although [RFC 3339
prohibits the hour to be '24'](https://www.rfc-editor.org/rfc/rfc3339.html#section-5.7), **this process
allows the value '24' for the hour** of an end time in order to make it possible that left-closed time
intervals can fully cover the day.
:param reducer: A reducer to be applied for the values contained in each interval. A reducer is a
single process such as ``mean()`` or a set of processes, which computes a single value for a list of
values, see the category 'reducer' for such processes. Intervals may not contain any values, which for
most reducers leads to no-data (`null`) values by default.
:param labels: Distinct labels for the intervals, which can contain dates and/or times. Is only
required to be specified if the values for the start of the temporal intervals are not distinct and
thus the default labels would not be unique. The number of labels and the number of groups need to be
equal.
:param dimension: The name of the temporal dimension for aggregation. All data along the dimension is
passed through the specified reducer. If the dimension is not set or set to `null`, the data cube is
expected to only have one temporal dimension. Fails with a `TooManyDimensions` exception if it has more
dimensions. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.
:param context: Additional data to be passed to the reducer.
:return: A new data cube with the same dimensions. The dimension properties (name, type, labels,
reference system and resolution) remain unchanged, except for the resolution and dimension labels of
the given temporal dimension.
"""
return aggregate_temporal(data=self, intervals=intervals, reducer=reducer, labels=labels, dimension=dimension, context=context)
def aggregate_temporal_period(self, period, reducer, dimension=UNSET, context=UNSET) -> 'ProcessBuilder':
    """
    Temporal aggregations based on calendar hierarchies

    :param self: A data cube.
    :param period: The time intervals to aggregate; one of the pre-defined
        values `hour`, `day`, `week`, `dekad`, `month`, `season`,
        `tropical-season`, `year`, `decade` or `decade-ad`.
    :param reducer: A single process (e.g. ``mean()``) or a set of processes
        computing one value per period; periods without values usually
        become no-data (`null`).
    :param dimension: Name of the temporal dimension to aggregate over. If
        unset, the cube must have exactly one temporal dimension (fails with
        `TooManyDimensions` otherwise); an unknown name fails with
        `DimensionNotAvailable`.
    :param context: Additional data to be passed to the reducer.
    :return: A new data cube with the same dimensions; only the resolution
        and dimension labels of the given temporal dimension change (e.g.
        `YYYY-01` .. `YYYY-12` for `month`, `YYYY-djf` etc. for `season`).
    """
    arguments = {
        'data': self,
        'period': period,
        'reducer': reducer,
        'dimension': dimension,
        'context': context,
    }
    return aggregate_temporal_period(**arguments)
def all(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
    """
    Are all of the values true?

    :param self: A set of boolean values.
    :param ignore_nodata: Whether no-data values are ignored; they are
        ignored by default.
    :return: Boolean result of the logical operation.
    """
    # Delegates to the module-level ``all`` process (shadows the builtin here).
    return all(data=self, ignore_nodata=ignore_nodata)
def and_(self, y) -> 'ProcessBuilder':
    """
    Logical AND

    :param self: A boolean value (the ``x`` operand).
    :param y: A boolean value.
    :return: Boolean result of the logical AND.
    """
    # Delegates to the module-level ``and_`` process with this builder as x.
    return and_(x=self, y=y)
def anomaly(self, normals, period) -> 'ProcessBuilder':
"""
Compute anomalies
:param self: A data cube with exactly one temporal dimension and the following dimension labels for the
given period (`YYYY` = four-digit year, `MM` = two-digit month, `DD` two-digit day of month): *
`hour`: `YYYY-MM-DD-00` - `YYYY-MM-DD-23` * `day`: `YYYY-001` - `YYYY-365` * `week`: `YYYY-01` -
`YYYY-52` * `dekad`: `YYYY-00` - `YYYY-36` * `month`: `YYYY-01` - `YYYY-12` * `season`: `YYYY-djf`
(December - February), `YYYY-mam` (March - May), `YYYY-jja` (June - August), `YYYY-son` (September -
November). * `tropical-season`: `YYYY-ndjfma` (November - April), `YYYY-mjjaso` (May - October). *
`year`: `YYYY` * `decade`: `YYY0` * `decade-ad`: `YYY1` * `single-period` / `climatology-period`: Any
``aggregate_temporal_period()`` can compute such a data cube.
:param normals: A data cube with normals, e.g. daily, monthly or yearly values computed from a process
such as ``climatological_normal()``. Must contain exactly one temporal dimension with the following
dimension labels for the given period: * `hour`: `00` - `23` * `day`: `001` - `365` * `week`: `01` -
`52` * `dekad`: `00` - `36` * `month`: `01` - `12` * `season`: `djf` (December - February), `mam`
(March - May), `jja` (June - August), `son` (September - November) * `tropical-season`: `ndjfma`
(November - April), `mjjaso` (May - October) * `year`: Four-digit year numbers * `decade`: Four-digit
year numbers, the last digit being a `0` * `decade-ad`: Four-digit year numbers, the last digit being a
`1` * `single-period` / `climatology-period`: A single dimension label with any name is expected.
:param period: Specifies the time intervals available in the normals data cube. The following options
are available: * `hour`: Hour of the day * `day`: Day of the year * `week`: Week of the year *
`dekad`: Ten day periods, counted per year with three periods per month (day 1 - 10, 11 - 20 and 21 -
end of month). The third dekad of the month can range from 8 to 11 days. For example, the fourth dekad
is Feb, 1 - Feb, 10 each year. * `month`: Month of the year * `season`: Three month periods of the
calendar seasons (December - February, March - May, June - August, September - November). * `tropical-
season`: Six month periods of the tropical seasons (November - April, May - October). * `year`:
Proleptic years * `decade`: Ten year periods ([0-to-9
decade](https://en.wikipedia.org/wiki/Decade#0-to-9_decade)), from a year ending in a 0 to the next
year ending in a 9. * `decade-ad`: Ten year periods ([1-to-0
decade](https://en.wikipedia.org/wiki/Decade#1-to-0_decade)) better aligned with the anno Domini (AD)
calendar era, from a year ending in a 1 to the next year ending | |
Set of waypoints
> *Returns*
`uuv_waypoins.WaypointSet`: Filtered set of waypoints
"""
wp_set = uuv_waypoints.WaypointSet(
inertial_frame_id=self.inertial_frame_id)
for i in range(waypoint_set.num_waypoints):
wp = waypoint_set.get_waypoint(i)
if wp.z > 0 and self.inertial_frame_id == 'world':
continue
if wp.z < 0 and self.inertial_frame_id == 'world_ned':
continue
wp_set.add_waypoint(wp)
return wp_set
# =========================================================================
def _publish_trajectory_info(self, event):
    """Publish the cached waypoint set, trajectory, interpolator markers and
    the station-keeping / automatic-control / tracking debug flags.

    ``event`` is the timer event that triggered this callback (unused).
    """
    if self._waypoints_msg is not None:
        self._waypoints_pub.publish(self._waypoints_msg)
    if self._trajectory_msg is not None:
        self._trajectory_pub.publish(self._trajectory_msg)
    # Publish an empty marker array when the interpolator has none, so any
    # stale markers are cleared on the subscriber side.
    markers = self._traj_interpolator.get_visual_markers()
    self._interp_visual_markers.publish(
        markers if markers is not None else MarkerArray()
    )
    self._station_keeping_pub.publish(Bool(data=self._station_keeping_on))
    self._automatic_control_pub.publish(Bool(data=self._is_automatic))
    self._traj_tracking_pub.publish(Bool(data=self._traj_running))
    return True
# =========================================================================
def _update_trajectory_info(self):
"""Update the trajectory message."""
self._waypoints_msg = WaypointSet()
if self._traj_interpolator.is_using_waypoints():
wps = self._traj_interpolator.get_waypoints()
if wps is not None:
wps.inertial_frame_id = self.inertial_frame_id
self._waypoints_msg = wps.to_message(self.node)
self._waypoints_msg.header.frame_id = self.inertial_frame_id
msg = self._traj_interpolator.get_trajectory_as_message()
if msg is not None:
msg.header.frame_id = self.inertial_frame_id
self._trajectory_msg = msg
self._logger.info('Updating the trajectory information')
else:
self._trajectory_msg = None
self._logger.error('Error generating trajectory message')
# =========================================================================
def _update_teleop(self, msg):
"""Callback to the twist teleop subscriber."""
# Test whether the vehicle is in automatic mode (following a given
# trajectory)
if self._is_automatic:
self._teleop_vel_ref = None
return
# If this is the first twist message since the last time automatic mode
# was turned off, then just update the teleop timestamp and wait for
# the next message to allow computing pose and velocity reference.
if self._last_teleop_update is None:
self._teleop_vel_ref = None
self._last_teleop_update = to_fsec(self.node.get_clock().now())
return
# Store twist reference message
self._teleop_vel_ref = msg
# Set the teleop mode is active only if any of the linear velocity components and
# yaw rate are non-zero
vel = np.array([self._teleop_vel_ref.linear.x, self._teleop_vel_ref.linear.y, self._teleop_vel_ref.linear.z, self._teleop_vel_ref.angular.z])
self._is_teleop_active = np.abs(vel).sum() > 0
# Store time stamp
self._last_teleop_update = to_fsec(self.node.get_clock().now())
# =========================================================================
def _calc_teleop_reference(self):
"""Compute pose and velocity reference using the
joystick linear and angular velocity input.
"""
# Check if there is already a timestamp for the last received reference
# message from the teleop node
if self._last_teleop_update is None:
self._is_teleop_active = False
# Compute time step
self._dt = to_fsec(self.node.get_clock().now()) - self._last_teleop_update
# Compute the pose and velocity reference if the computed time step is
# positive and the twist teleop message is valid
if self._dt > 0 and self._teleop_vel_ref is not None and self._dt < 0.1:
speed = np.sqrt(self._teleop_vel_ref.linear.x**2 + self._teleop_vel_ref.linear.y**2)
vel = np.array([self._teleop_vel_ref.linear.x, self._teleop_vel_ref.linear.y, self._teleop_vel_ref.linear.z])
# Cap the forward speed if needed
if speed > self._max_forward_speed:
vel[0] *= self._max_forward_speed / speed
vel[1] *= self._max_forward_speed / speed
vel = np.dot(self._vehicle_pose.rot_matrix, vel)
# Compute pose step
step = uuv_trajectory_generator.TrajectoryPoint()
step.pos = np.dot(self._vehicle_pose.rot_matrix, vel * self._dt)
step.rotq = quaternion_about_axis(self._teleop_vel_ref.angular.z * self._dt, [0, 0, 1])
# Compute new reference
ref_pnt = uuv_trajectory_generator.TrajectoryPoint()
ref_pnt.pos = self._vehicle_pose.pos + step.pos
ref_pnt.rotq = quaternion_multiply(self.get_vehicle_rot(), step.rotq)
# Cap the pose reference in Z to stay underwater
if ref_pnt.z > 0:
ref_pnt.z = 0.0
ref_pnt.vel = [vel[0], vel[1], 0, 0, 0, self._teleop_vel_ref.angular.z]
else:
ref_pnt.vel = [vel[0], vel[1], vel[2], 0, 0, self._teleop_vel_ref.angular.z]
ref_pnt.acc = np.zeros(6)
else:
self._is_teleop_active = False
ref_pnt = deepcopy(self._vehicle_pose)
return ref_pnt
# =========================================================================
    def _calc_smooth_approach(self):
        """Add the current vehicle position as waypoint
        to allow a smooth approach to the given trajectory.

        Prepends evenly spaced waypoints between the vehicle's current
        position and the first waypoint of the loaded set, then inserts
        the vehicle position itself at the very beginning. Requires a
        valid vehicle pose and the waypoint interpolation mode.
        """
        if self._vehicle_pose is None:
            self._logger.error('Simulation not properly initialized yet, ignoring approach...')
            return
        if not self._traj_interpolator.is_using_waypoints():
            self._logger.error('Not using the waypoint interpolation method')
            return
        # Vehicle yaw; only used by the commented-out look-ahead offsets below
        heading = euler_from_quaternion(self.get_vehicle_rot())[2]
        if self._thrusters_only:
            # Start waypoint exactly at the vehicle's current position
            init_wp = uuv_waypoints.Waypoint(
                x=self._vehicle_pose.pos[0],
                y=self._vehicle_pose.pos[1],
                z=self._vehicle_pose.pos[2],
                max_forward_speed=self._traj_interpolator.get_waypoints().get_waypoint(0).max_forward_speed,
                heading_offset=self._traj_interpolator.get_waypoints().get_waypoint(0).heading_offset)
        else:
            max_speed = self._traj_interpolator.get_waypoints().get_waypoint(0).max_forward_speed
            init_wp = uuv_waypoints.Waypoint(
                x=self._vehicle_pose.pos[0],# + max_speed / self._look_ahead_delay * np.cos(heading),
                y=self._vehicle_pose.pos[1],# + max_speed / self._look_ahead_delay * np.sin(heading),
                z=self._vehicle_pose.pos[2],
                max_forward_speed=max_speed,
                heading_offset=self._traj_interpolator.get_waypoints().get_waypoint(0).heading_offset)
        first_wp = self._traj_interpolator.get_waypoints().get_waypoint(0)
        # Offset from the vehicle position to the first waypoint of the set
        dx = first_wp.x - init_wp.x
        dy = first_wp.y - init_wp.y
        dz = first_wp.z - init_wp.z
        # NOTE(review): the original comment said "one new waypoint at each
        # meter", but the division by 10 yields roughly one waypoint every
        # 10 m — confirm which spacing is intended.
        self._logger.info('Adding waypoints to approach the first position in the given waypoint set')
        steps = int(np.floor(first_wp.dist(init_wp.pos)) / 10)
        if steps > 0 and self._traj_interpolator.get_interp_method() != 'dubins':
            # Insert intermediate waypoints closest-to-first-waypoint first;
            # since each is added to the beginning, they end up in approach order
            for i in range(1, steps):
                wp = uuv_waypoints.Waypoint(
                    x=first_wp.x - i * dx / steps,
                    y=first_wp.y - i * dy / steps,
                    z=first_wp.z - i * dz / steps,
                    max_forward_speed=self._traj_interpolator.get_waypoints().get_waypoint(0).max_forward_speed)
                self._traj_interpolator.add_waypoint(wp, add_to_beginning=True)
        # The vehicle's own position becomes the very first waypoint
        self._traj_interpolator.add_waypoint(init_wp, add_to_beginning=True)
        self._update_trajectory_info()
# =========================================================================
def is_station_keeping_on(self):
"""Return `True`, if vehicle is holding its position."""
return self._station_keeping_on
# =========================================================================
def is_automatic_on(self):
"""Return `True` if vehicle if following a trajectory in
automatic mode.
"""
return self._is_automatic
# =========================================================================
def set_station_keeping(self, is_on=True):
"""Set station keeping mode flag.
> *Input arguments*
* `is_on` (*type:* `bool`, *default:* `True`): Station keeping flag
"""
self._station_keeping_on = is_on
self._logger.info('STATION KEEPING MODE = ' + ('ON' if is_on else 'OFF'))
# =========================================================================
def set_automatic_mode(self, is_on=True):
"""Set automatic mode flag."""
self._is_automatic = is_on
self._logger.info('AUTOMATIC MODE = ' + ('ON' if is_on else 'OFF'))
# =========================================================================
def set_trajectory_running(self, is_on=True):
"""Set trajectory tracking flag."""
self._traj_running = is_on
self._logger.info('TRAJECTORY TRACKING = ' + ('ON' if is_on else 'OFF'))
# =========================================================================
def has_started(self):
"""Return if the trajectory interpolator has started generating
reference points.
"""
return self._traj_interpolator.has_started()
# =========================================================================
def has_finished(self):
"""Return `True` if the trajectory has finished."""
return self._traj_interpolator.has_finished()
# =========================================================================
def update_vehicle_pose(self, pos, quat):
"""Update the vehicle's pose information.
> *Input arguments*
* `pos` (*type:* `numpy.array`): Position vector
* `quat` (*type:* `numpy.array`): Quaternion as `(qx, qy, qz, qw)`
"""
if self._vehicle_pose is None:
self._vehicle_pose = uuv_trajectory_generator.TrajectoryPoint()
self._vehicle_pose.pos = pos
self._vehicle_pose.rotq = quat
self._vehicle_pose.t = to_fsec(self.node.get_clock().now())
self.init_odom_event.set()
# =========================================================================
def get_vehicle_rot(self):
"""Return the vehicle's rotation quaternion."""
self.init_odom_event.wait()
return self._vehicle_pose.rotq
# =========================================================================
def _update_trajectory_from_msg(self, msg):
self._stamp_trajectory_received = to_fsec(self.node.get_clock().now())
self._traj_interpolator.init_from_trajectory_message(msg)
self._logger.info('New trajectory received at ' + str(self._stamp_trajectory_received) + 's')
self._update_trajectory_info()
# =========================================================================
def start_station_keeping(self):
"""Start station keeping mode by setting the pose
set-point of the vehicle as the last pose before the
vehicle finished automatic mode.
"""
if self._vehicle_pose is not None:
self._this_ref_pnt = deepcopy(self._vehicle_pose)
self._this_ref_pnt.vel = np.zeros(6)
self._this_ref_pnt.acc = np.zeros(6)
self.set_station_keeping(True)
self.set_automatic_mode(False)
self._smooth_approach_on = False
# =========================================================================
def hold_vehicle(self, request, response):
"""Service callback function to hold the vehicle's
current position.
"""
if self._vehicle_pose is None:
self._logger.error('Current pose of the vehicle is invalid')
response.success = False
#return HoldResponse(False)
else:
self.start_station_keeping()
response.success = True
return response
# return HoldResponse(True)
# =========================================================================
    def start_waypoint_list(self, request, response):
        """Service callback function to follow a set of waypoints

        > *Input arguments*

        * `request` (*type:* `uuv_control_msgs.InitWaypointSet`)
        * `response`: service response whose `success` field is set
          accordingly before being returned
        """
        # An empty waypoint list cannot be followed
        if len(request.waypoints) == 0:
            self._logger.error('Waypoint list is empty')
            response.success = False
            return response
            #return InitWaypointSetResponse(False)
        # Requested trajectory start time
        # NOTE(review): rclpy.time.Time takes keyword-only `seconds` and
        # `nanoseconds` arguments, and builtin_interfaces/Time fields are
        # `sec`/`nanosec` — confirm this ROS 1-style call still works.
        t = rclpy.time.Time(request.start_time.data.secs, request.start_time.data.nsecs)
        # Reject start times in the past unless an immediate start was requested
        if to_fsec(t) < to_fsec(self.node.get_clock().now()) and not request.start_now:
            self._logger.error('The trajectory starts in the past, correct the starting time!')
            response.success = False
            return response
            #return InitWaypointSetResponse(False)
        else:
            self._logger.info('Start waypoint trajectory now!')
        # NOTE(review): the lock is only released on the two return paths
        # below — an exception in between would leave it held; consider
        # try/finally.
        self._lock.acquire()
        # Create a waypoint set
        wp_set = uuv_waypoints.WaypointSet(
            inertial_frame_id=self.inertial_frame_id)
        # Create a waypoint set message, to fill wp_set
        waypointset_msg = WaypointSet()
        waypointset_msg.header.stamp = self.node.get_clock().now().to_msg()
        waypointset_msg.header.frame_id = self.inertial_frame_id
        # Immediate start uses the current clock time as start time
        if request.start_now:
            waypointset_msg.start_time = self.node.get_clock().now().to_msg()
        else:
            waypointset_msg.start_time = t.to_msg()
        waypointset_msg.waypoints = request.waypoints
        wp_set.from_message(waypointset_msg)
        # Transform into the inertial frame and drop waypoints violating the
        # workspace (above/below water surface) constraints
        wp_set = self._transform_waypoint_set(wp_set)
        wp_set = self._apply_workspace_constraints(wp_set)
        if self._traj_interpolator.set_waypoints(wp_set, self.get_vehicle_rot()):
            self._station_keeping_center = None
            self._traj_interpolator.set_start_time((to_fsec(t) if not request.start_now else to_fsec(self.node.get_clock().now())))
            self._update_trajectory_info()
            # Switch from station keeping to automatic trajectory tracking
            self.set_station_keeping(False)
            self.set_automatic_mode(True)
            self.set_trajectory_running(True)
            self._idle_circle_center = None
            # Approach the first waypoint smoothly from the current pose
            self._smooth_approach_on = True
            self._logger.info('============================')
            self._logger.info('          WAYPOINT SET          ')
            self._logger.info('============================')
            self._logger.info('Interpolator = ' + request.interpolator.data)
            self._logger.info('# waypoints = %d' % self._traj_interpolator.get_waypoints().num_waypoints)
            self._logger.info('Starting time = %.2f' % (to_fsec(t) if not request.start_now else to_fsec(self.node.get_clock().now())))
            self._logger.info('Inertial frame ID = ' + self.inertial_frame_id)
            self._logger.info('============================')
            self._lock.release()
            response.success = True
            return response
            #return InitWaypointSetResponse(True)
        else:
            self._logger.error('Error occurred while parsing waypoints')
            self._lock.release()
            response.success = False
            return response
            #return InitWaypointSetResponse(False)
# =========================================================================
def start_circle(self, request, response):
"""Service callback function to initialize a parametrized
circular trajectory.
> *Input arguments*
* `request` (*type:* `uuv_control_msgs.InitCircularTrajectory`)
"""
if request.max_forward_speed <= 0 or request.radius <= 0 or \
request.n_points <= 0:
self._logger.error('Invalid parameters to generate a circular trajectory')
response.success = False
return response
#return InitCircularTrajectoryResponse(False)
t = rclpy.time.Time(request.start_time.data.secs, request.start_time.data.nsecs)
if to_fsec(t) < to_fsec(self.node.get_clock().now()) and not request.start_now:
self._logger.error('The trajectory starts in the past, correct the starting time!')
response.success = False
return response
#return InitCircularTrajectoryResponse(False)
try:
wp_set = uuv_waypoints.WaypointSet(
inertial_frame_id=self.inertial_frame_id)
success = wp_set.generate_circle(radius=request.radius,
center=request.center,
num_points=request.n_points,
max_forward_speed=request.max_forward_speed,
theta_offset=request.angle_offset,
heading_offset=request.heading_offset)
if not success:
self._logger.error('Error generating circular trajectory from waypoint set')
response.success = False
return response
#return InitCircularTrajectoryResponse(False)
| |
= bit_count(state[k])
# Round 1 - 10
for j in range(1, 11):
old_state = state
state = aes_funcs.subbytes(state)
state = aes_funcs.shiftrows(state)
if (j < 10):
state = aes_funcs.mixcolumns(state)
state = np.bitwise_xor(state, subkey[j])
for k in range(16):
if leakage_model == 'HAMMING_DISTANCE':
leakage[j][k][i] = bit_count(
np.bitwise_xor(state[k], old_state[k]))
else:
leakage[j][k][i] = bit_count(state[k])
return leakage
def parse_args():
    """Parse and return the command-line arguments.

    Returns:
        argparse.Namespace: Parsed options; see the individual ``help``
        texts below for the meaning of each flag.
    """
    parser = argparse.ArgumentParser(
        description="""A histogram-based TVLA described in "Fast Leakage Assessment" by <NAME>,
        <NAME> and <NAME> (https://eprint.iacr.org/2017/624.pdf)."""
    )
    parser.add_argument(
        "-p",
        "--project-file",
        default="projects/opentitan_simple_aes.cwp",
        help="""Name of the ChipWhisperer project file to use. Not required. If not provided,
        projects/opentitan_simple_aes.cwp is used.""",
    )
    parser.add_argument(
        "-t",
        "--trace-file",
        help="""Name of the trace file containing the numpy array with all traces in 16-bit integer
        format. Not required. If not provided, the data from the ChipWhisperer project file
        is used. Ignored for number-of-steps > 1.""",
    )
    parser.add_argument(
        "-s",
        "--trace-start",
        help="""Index of the first trace to use. Not required. If not provided, starts at the first
        trace.""",
    )
    parser.add_argument(
        "-e",
        "--trace-end",
        help="""Index of the last trace to use. Not required. If not provided, ends at the last
        trace.""",
    )
    parser.add_argument(
        "-l",
        "--leakage-file",
        help="""Name of the leakage file containing the numpy array with the leakage model for all
        rounds, all bytes, and all traces. Not required. If not provided, the leakage is computed
        from the data in the ChipWhisperer project file. Ignored for number-of-steps > 1.""",
    )
    parser.add_argument(
        "-d",
        "--save-to-disk",
        action="store_true",
        default=False,
        help="""Save trace, leakage and t-test files to disk. Ignored for trace and leakage files
        when number-of-steps > 1.""",
    )
    parser.add_argument(
        "-r",
        "--round-select",
        help="""Index of the AES round for which the histograms are to be computed: 0-10. Not
        required. If not provided, the histograms for all AES rounds are computed.""",
    )
    parser.add_argument(
        "-b",
        "--byte-select",
        help="""Index of the AES state byte for which the histograms are to be computed: 0-15. Not
        required. If not provided, the histograms for all AES state bytes are computed.""",
    )
    parser.add_argument(
        "-i",
        "--input-file",
        help="""Name of the input file containing the histograms. Not required. If both -i and -o
        are provided, the input file is appended with more data to produce the output file.""",
    )
    parser.add_argument(
        "-o",
        "--output-file",
        help="""Name of the output file to store generated histograms. Not required. If both -i and
        -o are provided, the input file is appended with more data to produce the output file.""",
    )
    parser.add_argument(
        "-n",
        "--number-of-steps",
        type=int,
        # Fixed: was the string "1", which only worked because argparse
        # implicitly applies `type` to string defaults.
        default=1,
        help="""Number of steps to breakdown the analysis into. For every step, traces are
        separately filtered and the leakage is computed. The histograms are appended to the
        ones of the previous step. This is useful when operating on very large trace sets and/or
        when analyzing how results change with the number of traces used.""",
    )
    parser.add_argument(
        "-a",
        "--ttest-step-file",
        help="""Name of the t-test step file containing one t-test analysis per step. Not
        required. If not provided, the data is recomputed.""",
    )
    parser.add_argument(
        "-f",
        "--plot-figures",
        action="store_true",
        default=False,
        help="""Plot figures and save them to disk. Not required.""",
    )
    parser.add_argument(
        "-g",
        "--general-test",
        action="store_true",
        default=False,
        help="""Perform general fixed-vs-random TVLA without leakage model. Odd traces are grouped
        in the fixed set while even traces are grouped in the random set. Not required.""",
    )
    parser.add_argument(
        "-m",
        "--mode",
        default="aes",
        help="""Select mode: can be either "aes" or "sha3". Not required. If not provided or if a
        another string is provided, "aes" is used.""",
    )
    return parser.parse_args()
def main():
Path("tmp").mkdir(exist_ok=True)
log_format = "%(asctime)s %(levelname)s: %(message)s"
log.basicConfig(format=log_format,
datefmt="%Y-%m-%d %I:%M:%S",
handlers=[
log.FileHandler("tmp/log.txt"),
log.StreamHandler()
],
level=log.INFO,
force=True,)
args = parse_args()
if args.mode != "sha3" and args.mode != "aes":
log.info("Unsupported mode {args.mode}, falling back to \"aes\"")
if args.mode == "sha3" or args.general_test is True:
general_test = True
else:
general_test = False
if args.mode == "sha3" or general_test is True:
# We don't care about the round select in this mode. Set it to 0 for code compatibility.
rnd_list = [0]
elif args.round_select is None:
rnd_list = list(range(11))
else:
rnd_list = [int(args.round_select)]
assert all(rnd >= 0 and rnd < 11 for rnd in rnd_list)
num_rnds = len(rnd_list)
if args.mode == "sha3" or general_test is True:
# We don't care about the byte select in this mode. Set it to 0 for code compatibility.
byte_list = [0]
elif args.byte_select is None:
byte_list = list(range(16))
else:
byte_list = [int(args.byte_select)]
assert all(byte >= 0 and byte < 16 for byte in byte_list)
num_bytes = len(byte_list)
num_steps = int(args.number_of_steps)
assert num_steps >= 1
save_to_disk_trace = args.save_to_disk
save_to_disk_leakage = args.save_to_disk
save_to_disk_ttest = args.save_to_disk
# Step-wise processing isn't compatible with a couple of other arguments.
if num_steps > 1:
args.trace_file = None
args.leakage_file = None
save_to_disk_trace = False
save_to_disk_leakage = False
if args.input_file is not None:
# Load previously generated histograms.
histograms_file = np.load(args.input_file)
histograms_in = histograms_file['histograms']
num_samples = histograms_in.shape[3]
trace_resolution = histograms_in.shape[4]
# If previously generated histograms are loaded, the rounds and bytes of interest must
# match. Otherwise, indices would get mixed up.
assert rnd_list == histograms_file['rnd_list']
assert byte_list == histograms_file['byte_list']
if (args.input_file is None or args.output_file is not None) and args.ttest_step_file is None:
# Either don't have previously generated histograms or we need to append previously
# generated histograms.
# Make sure the project file is compatible with the previously generated histograms.
project = cw.open_project(args.project_file)
if args.input_file is None:
num_samples = len(project.waves[0])
else:
assert num_samples == len(project.waves[0])
if args.input_file is None:
adc_bits = 12
trace_resolution = 2**adc_bits
# When doing general fixed-vs-random TVLA, the first trace is using the fixed key.
if general_test is True:
fixed_key = np.copy(project.keys[0])
# Amount of tolerable deviation from average during filtering.
num_sigmas = 3.5
# Overall number of traces, trace start and end indices.
num_traces_tot = len(project.waves)
if args.trace_start is None:
trace_start_tot = 0
else:
trace_start_tot = int(args.trace_start)
if args.trace_end is None:
trace_end_tot = num_traces_tot - 1
else:
trace_end_tot = int(args.trace_end)
assert trace_end_tot - trace_start_tot < num_traces_tot
num_traces_tot = trace_end_tot - trace_start_tot + 1
# Generate indices for step-wise processing.
num_traces_vec = []
trace_start_vec = []
trace_end_vec = []
num_traces_step = num_traces_tot // num_steps
num_traces_rem = num_traces_tot % num_steps
for i_step in range(num_steps):
trace_start_vec.append(trace_start_tot + i_step * num_traces_step)
if i_step < num_steps - 1 or num_traces_rem == 0:
num_traces_vec.append(num_traces_step)
trace_end_vec.append((i_step + 1) * num_traces_step - 1)
else:
num_traces_vec.append(num_traces_step + num_traces_rem)
trace_end_vec.append(trace_end_tot)
# The number of parallel jobs to use for the processing-heavy tasks.
num_jobs = multiprocessing.cpu_count()
# The number of traces/samples processed by each parallel job at a time.
trace_step_leakage = min(10000, num_traces_step // num_jobs)
sample_step_hist = 1
# Increase work per thread to amortize parallelization overhead.
if len(rnd_list) == 1 and len(byte_list) == 1:
if general_test is True:
sample_step_hist = min(10000, num_samples // num_jobs)
else:
sample_step_hist = 5
for i_step in range(num_steps):
num_traces = num_traces_vec[i_step]
trace_start = trace_start_vec[i_step]
trace_end = trace_end_vec[i_step]
log.info("Processing Step %i/%i: Trace %i - %i",
i_step+1, num_steps, trace_start, trace_end)
if args.trace_file is None:
# Make sure to re-open the project file as we close it during the operation to free
# up some memory.
if i_step > 0:
project = cw.open_project(args.project_file)
# Converting traces from floating point to integer and creating a dense copy.
log.info("Converting Traces")
if project.waves[0].dtype == 'uint16':
traces = np.empty((num_traces, num_samples), dtype=np.uint16)
for i_trace in range(num_traces):
traces[i_trace] = project.waves[i_trace + trace_start]
else:
traces = np.empty((num_traces, num_samples), dtype=np.double)
for i_trace in range(num_traces):
traces[i_trace] = (project.waves[i_trace +
trace_start] + 0.5) * trace_resolution
traces = traces.astype('uint16')
if general_test is False:
# Filter out noisy traces.
log.info("Filtering Traces")
# Get the mean and standard deviation.
mean = traces.mean(axis=0)
std = traces.std(axis=0)
# Define upper and lower limits.
max_trace = mean + num_sigmas * std
min_trace = mean - num_sigmas * std
# Filtering of converted traces (len = num_samples). traces_to_use itself can be
# used to index the entire project file (len >= num_samples).
traces_to_use = np.zeros(len(project.waves), dtype=bool)
traces_to_use[trace_start:trace_end + 1] = np.all((traces >= min_trace) &
(traces <= max_trace), axis=1)
traces = traces[traces_to_use[trace_start:trace_end + 1]]
else:
# For now, don't perform any filtering when doing general fixed-vs-random | |
<filename>utils/jornada_plot.py
# ===============================================================================
# Copyright 2016 gabe-parrish
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
import os
import pandas as pd
import numpy as np
import datetime
from matplotlib import pyplot as plt
from datetime import datetime
# ============= local library imports ===========================
# =================================== run() exclusive functions =============================================
def plot_depths(dates, d30, d60, d90, d110, d130, name, tdate, rzsm, taw, pixel):
    """Plot Jornada soil-water-content series at five depths against the
    normalized ETRM root-zone soil moisture and save the figure as a PDF.

    Args:
        dates: Observation dates for the depth series.
        d30, d60, d90, d110, d130: Soil water content at 30/60/90/110/130 cm.
        name: Transect location name (used in titles and the file name).
        tdate: Dates of the ETRM tracker series.
        rzsm: Normalized root-zone soil moisture from the ETRM tracker.
        taw: Total available water value of the model run.
        pixel: ETRM pixel ID corresponding to the location.

    Side effects:
        Writes "<figure_title>.pdf" to a hard-coded output directory and
        closes the figure.
    """
    figure_title = "rzsm_5_depths_{}_TAW_{}_pixel_{}".format(name, taw, pixel)
    fig = plt.figure()
    fig.suptitle("Soil moisture at different depths for {} at TAW = {} corr to pixel {}".format(name, taw, pixel), fontsize=12, fontweight='bold')
    plt.rcParams['axes.grid'] = True
    # One subplot per depth (5 rows, 1 column). The five previously
    # copy-pasted per-depth sections are folded into a single loop.
    depth_series = (("30cm depth", d30), ("60cm depth", d60),
                    ("90cm depth", d90), ("110cm depth", d110),
                    ("130cm depth", d130))
    for row, (title, series) in enumerate(depth_series, start=1):
        ax = fig.add_subplot(5, 1, row)
        ax.set_title(title, fontsize=10, fontweight='bold')
        ax.set_ylabel('swc')
        # Depth series and ETRM rzsm overlaid as lines, plus date markers
        ax.plot(dates, series, linewidth=1)
        ax.plot(tdate, rzsm, linewidth=1)
        ax.plot_date(dates, series, marker='o', markersize=2)
    plt.subplots_adjust(hspace=.5)
    plt.savefig("/Users/Gabe/Desktop/juliet_stuff/jornada_plot_output/{}.pdf".format(figure_title))
    plt.close(fig)
def make_set(df):
    """Return the set of transect location codes in *df*.

    Only entries of the 'location' column that start with "C"
    (actual measurement points) are kept; duplicates collapse
    because a set is returned.
    """
    return {loc for loc in df['location'] if loc.startswith("C")}
def build_jornada_etrm():
    """Map each Jornada transect measurement location to its ETRM pixel.

    Returns:
        dict: keys are location codes (e.g. "C01"), values are the
        three-digit ETRM pixel IDs (e.g. "000") they correspond to.
    """
    relate_dict = {"000": ["C01", "C02"], "001": ["C03", "C04", "C05", "C06", "C07", "C08", "C09"],
                   "002": ["C10", "C11"], "003": ["C12", "C13", "C14", "C15", "C16", "C17", "C18", "C19", "C20", "C21"],
                   "004": ["C22", "C23", "C24", "C25", "C26", "C27", "C28", "C29"],
                   "005": ["C31", "C32", "C33", "C34", "C35", "C36", "C37", "C38", "C39"],
                   "006": ["C40", "C41", "C42", "C43", "C44", "C45", "C46", "C47", "C48"],
                   "007": ["C51", "C52", "C53", "C54", "C55", "C56", "C57"],
                   "008": ["C58", "C59", "C60", "C61", "C62", "C63", "C64", "C65", "C66"],
                   "009": ["C67", "C68", "C69", "C70"], "010": ["C71", "C72", "C73", "C74", "C75"],
                   "011": ["C76", "C77", "C78", "C79", "C80", "C81", "C82", "C83", "C84"],
                   "012": ["C85", "C86", "C87", "C88", "C89"]}
    # Invert the pixel -> locations mapping into location -> pixel.
    # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
    # under Python 3; items() is used instead.
    jornada_etrm = {}
    for pixel, locations in relate_dict.items():
        for location in locations:
            jornada_etrm[location] = pixel
    return jornada_etrm
def nrml_rzsm(data, df_long):
    """Min-max normalize volumetric soil water content onto [0, 1].

    The minimum and maximum are taken from the longer record *df_long*,
    which spans a wider range than the plotted dataset, and *data* is
    rescaled relative to those bounds.

    :param data: A shorter dataset we need to normalize to min and max in order to plot.
    :param df_long: A longer dataset that contains a lower min and higher max than the dataset we end up plotting
    :return: normalized dataset (list of floats)
    """
    print('length of data', len(data))
    print('length of data long', len(df_long))
    # Convert both records from strings to floats
    values = [float(v) for v in data]
    bounds = [float(v) for v in df_long]
    # Bounds come from the longer record
    ma = max(bounds)
    print("ma", ma)
    mi = min(bounds)
    print("mi", mi)
    # End points of the normalized scale
    n0 = 0
    n1 = 1
    # Rescale every value onto [0, 1]
    nrml_data = [n0 + (v - mi) / (ma - mi) for v in values]
    print("lenght of normalized data", len(nrml_data))
    return nrml_data
def run():
    """
    Get the Jornada data and for each gauge, plot a subplot for a different depth,
    i.e for three depths its a three subplot plot.

    Pairs each Jornada transect probe with its ETRM pixel tracker CSV,
    min-max normalizes the observed soil water content at five depths,
    appends min/max statistics to a text file and calls plot_depths() for
    each probe/pixel pair.

    NOTE(review): relies on module-level `os`, `pd`, `np` and `plot_depths`,
    plus hard-coded local file paths -- confirm these before running.
    :return: None (side effects: writes min_max.txt, produces plots)
    """
    # TODO - Make an output text file that records the diff between min and max soil water content for each trans. loc.
    #====== Tracker =======
    # # path to a tracker file
    # tracker_path = "/Users/Gabe/Desktop/juliet_stuff/March_2018_model_runs/taw_295/etrm_tracker_000.csv"
    # # get the main dataframe
    # df_tracker = pd.read_csv(tracker_path)
    # # we need to get rzsm and dates
    # tdate = pd.to_datetime(df_tracker['Date'])
    # rzsm = df_tracker['rzsm']
    #====== Jornada =======
    # path to the jornada data
    path = "/Users/Gabe/Desktop/33_37_ETRM_aoi_project/Jornada_012002_transect_soil_water_content_data/" \
           "Jornada_012002_transect_soil_water_content_data.csv" # This version has data that goes up through 2015
    df = pd.read_csv(path, header=72) # I have no idea why it works on 72 but whatever.
    # filter out missing data "."
    df = df[df['swc_30cm'] != "."] #, 'swc_60cm', 'swc_110cm', 'swc_130cm'
    df = df[df['swc_60cm'] != "."]
    df = df[df['swc_90cm'] != "."]
    df = df[df['swc_110cm'] != "."]
    df = df[df['swc_130cm'] != "."]
    # # Cut off extraneous dates we don't need...
    df_long = df[df.index > 15000] # 32000 <- use for plotting
    df = df[df.index > 32000]
    # +=+=+=+=+=+= Automatic Plotter mode +=+=+=+=+=+=
    # set TAW
    taw = 115
    # set tracker path
    tracker_path = "/Users/Gabe/Desktop/juliet_stuff/March_2018_model_runs/taw_{}".format(taw)
    # map each tracker pixel name to its csv path by walking the taw directory
    tracker_path_dict = {}
    for path, directories, files in os.walk(tracker_path):
        for i in files:
            # the pixel name is embedded in the filename; two filename
            # lengths occur, hence the two slices
            if len(i) == 20:
                name = i[13:-4]
            else:
                name = i[13:-9]
            csv_path = os.path.join(path, i)
            tracker_path_dict[name] = csv_path
    print("tracker path dictionary \n", tracker_path_dict['001'])
    # Build the jornada ETRM dictionary relating every transect measurement point to a ETRM pixel.
    jornada_etrm = build_jornada_etrm() #location_set, tracker_path_dict
    # TODO - MIN MAX output function
    # create (truncate) the min/max report file
    min_max_path = "/Users/Gabe/Desktop/juliet_stuff/March_2018_model_runs/min_max.txt"
    with open(min_max_path, "w") as created_file:
        created_file.write("\n -------------- \n MIN and MAX volumetric soil moisture for Jornada neutron probe data"
                           " \n -------------- \n")
    # BUG FIX: iteritems() is Python 2 only; items() works here under Python 3
    for key, value in jornada_etrm.items():
        print("key -> ", key)
        print("value -> ", value)
        #===== TRACKER ======
        df_tracker = pd.read_csv(tracker_path_dict[value])
        # we need to get rzsm and dates
        tdate = pd.to_datetime(df_tracker['Date'])
        rzsm = df_tracker['rzsm']
        pixel = value
        # ===== Jornada ========
        jornada_var = df[df['location'] == key]
        # a long version of the jornada dataset to get a more accurate min and max from the whole dataset to perform
        # the normalization with
        jornada_var_long = df_long[df_long['location'] == key]
        # ===== Append min and max to min_max.txt ========
        # write out the key and value, key = probe, value = pixel
        with open(min_max_path, 'a') as append_file:
            append_file.write(" \n ====== \n probe {} / pixel {} \n====== \n".format(key, value))
        # deal with all the separate depths and report them separately
        list_of_codes = ['swc_30cm', 'swc_60cm', 'swc_90cm', 'swc_110cm', 'swc_130cm']
        for code in list_of_codes:
            jor_var_long = jornada_var_long[code]
            jor_var_long = [float(i) for i in jor_var_long]
            # Get min and max from a longer dataset
            ma = max(jor_var_long)
            mi = min(jor_var_long)
            diff = ma - mi
            # write the min and max underneath a code in the min_max.txt file
            with open(min_max_path, 'a') as append_file:
                append_file.write("\n ****** \n depth: {} \n min: {} \n max: {} "
                                  "\n diff btwn max and min: {} \n \n".format(code, ma, mi, diff))
        # ===== Depths ========
        # normalize each depth against the long dataset's range
        # 30 cm depth
        j_30 = np.array(nrml_rzsm(jornada_var['swc_30cm'], jornada_var_long['swc_30cm']))
        # 60cm
        j_60 = np.array(nrml_rzsm(jornada_var['swc_60cm'], jornada_var_long['swc_60cm']))
        # 90cm
        j_90 = np.array(nrml_rzsm(jornada_var['swc_90cm'], jornada_var_long['swc_90cm']))
        # 110cm
        j_110 = np.array(nrml_rzsm(jornada_var['swc_110cm'], jornada_var_long['swc_110cm']))
        # 130cm
        j_130 = np.array(nrml_rzsm(jornada_var['swc_130cm'], jornada_var_long['swc_130cm']))
        # get the date...
        j_date = pd.to_datetime(jornada_var['date'])
        j_name = key
        plot_depths(j_date, j_30, j_60, j_90, j_110, j_130, j_name, tdate, rzsm, taw, pixel)
    # todo - write a function that builds the jornada_etrm dict you are using for plotting
    # todo - give plot_depths some alternative MODES
einsum('ij,IJ->iIjJ',I,Ii)
I.merge_inds([0,1])
I.merge_inds([1,2])
# Contract identity with the bra
res = einsum('gkhl,Qq->gkQlhq',braten,I)
# Merge correct inds
res.merge_inds([0,1,2])
res.merge_inds([2,3])
# Add to bot_env
bot_env.append(res)
# Last site is the current right bound_mpo
res = einsum('fio,orx->irxf',right1,right2)
# Merge correct inds
res.merge_inds([0,1])
# Add to bot env
bot_env.append(res)
# Put result into an MPS -------------------------------------------
bot_env = MPS(bot_env)
# Reduce bond dimension
if truncate:
mpiprint(5,'Truncating Boundary MPS')
if DEBUG:
mpiprint(6,'Computing initial bmpo norm')
norm0 = bot_env.norm()
bot_env = bot_env.apply_svd(chi)
if DEBUG:
mpiprint(6,'Computing resulting bmpo norm')
norm1 = bot_env.norm()
mpiprint(0,'Init bot BMPO Canonicalization Norm Difference for chi={}: {} ({},{})'.format(chi,abs(norm0-norm1)/abs(norm0),norm0,norm1))
else:
# Add the bra layer --------------------------------------------------
"""
Doing the following contraction:
j bk l vm n x
| || | || | |
| || | || | |
+----+ |+------+----+ |+------+----+ +----+
| l1 |---g--^-------| b1 |-h ... ----^-------| b2 |-i-| r1 |
+----+ | +----+ | +----+ +----+
| | | | | |
a b c d e f
| | | | | |
+----+ +----+ +----+ +----+ +----+ +----+
z--| p1 |-y-| p2 |--x--| p3 |-w ... --| p4 |--v-| p5 |-u-| p6 |--t
+----+ +----+ +----+ +----+ +----+ +----+
"""
# Create the next bot environment
bot_env = []
# First, absorb left boundary mps
res = einsum('agj,zay->zjyg',left1,prev_env[0])
# Merge correct inds
res.merge_inds([2,3])
# Add to bottom env
bot_env.append(res)
# Loop through to add bras
for col in range(ncol):
braten = bra[col][row].copy()
# Add identity ---------------------
# TODO - Make sure signs are correct (will give error in symmetric case)
D1 = braten.shape[braten.legs[0][0]]
Z1 = braten.qn_sectors[braten.legs[0][0]]
I1 = eye(D1,
Z1,
is_symmetric=braten.is_symmetric,
backend=braten.backend)
if len(braten.legs[0]) > 1:
for legind in range(1,len(braten.legs[0])):
Dli = braten.shape[braten.legs[0][legind]]
Zli = braten.qn_sectors[braten.legs[0][legind]]
Ii = eye(Dli,
Zli,
is_symmetric=braten.is_symmetric,
backend=braten.backend)
I1 = einsum('ij,IJ->iIjJ',I1,Ii)
I1.merge_inds([0,1])
I1.merge_inds([1,2])
D2 = braten.shape[braten.legs[2][0]]
Z2 = braten.qn_sectors[braten.legs[2][0]]
I2 = eye(D2,
Z2,
is_symmetric=braten.is_symmetric,
backend=braten.backend)
if len(braten.legs[2]) > 1:
for legind in range(1,len(braten.legs[2])):
Dli = braten.shape[braten.legs[2][legind]]
Zli = braten.qn_sectors[braten.legs[2][legind]]
Ii = eye(Dli,
Zli,
is_symmetric=braten.is_symmetric,
backend=braten.backend)
I2 = einsum('ij,IJ->iIjJ',I2,Ii)
I2.merge_inds([0,1])
I2.merge_inds([1,2])
# Contract with previous environment
res = einsum('ybx,Gg->yGbxg',prev_env[2*col+1],I1)
res = einsum('yGbxg,Kk->yGbKxgk',res,I2)
# Merge correct indices
res.merge_inds([0,1])
res.merge_inds([1,2])
res.merge_inds([2,3,4])
# Add to bot_env
bot_env.append(res)
# Add ket --------------------------
# Contract with previous bot_env
res = einsum('gckhl,xcw->xgklwh',braten,prev_env[2*col+2])
# Merge correct indices
res.merge_inds([0,1,2])
res.merge_inds([2,3])
# Add to bot_env
bot_env.append(res)
# Last, absorb right boundary mpo
res = einsum('fix,uft->uixt',right1,prev_env[2*ncol+1])
# Merge needed inds
res.merge_inds([0,1])
# Add to bot_env
bot_env.append(res)
# Put result into an MPS ------------------
bot_env = MPS(bot_env)
# Reduce bond dimension
if truncate:
mpiprint(5,'Truncating Boundary MPS')
if DEBUG:
mpiprint(6,'Computing initial bmpo norm')
norm0 = bot_env.norm()
bot_env = bot_env.apply_svd(chi)
if DEBUG:
mpiprint(6,'Computing resulting bmpo norm')
norm1 = bot_env.norm()
mpiprint(0,'Add ket bot BMPO Canonicalization Norm Difference for chi={}: {} ({},{})'.format(chi,abs(norm0-norm1)/abs(norm0),norm0,norm1))
# Update prev_env
prev_env = bot_env
# Add the bra layer --------------------------------------------------
"""
Doing the following contraction:
s t v x
| | | | | |
| | | | | |
+----+ +----+ | +----+ | +----+
| l2 |-p-| k1 |----q---^---- ... --| k2 |---r---^-----| r2 |
+----+ +----+ | +----+ | +----+
| || | || | |
a bk c dm e f
| || | || | |
+----+ +----+ +----+ +----+ +----+ +----+
z--| p1 |-y-| p2 |--x--| p3 |-w ... --| p4 |--v-| p5 |-y-| p6 |--t
+----+ +----+ +----+ +----+ +----+ +----+
"""
# Create the next bottom environment
bot_env = []
# First, absorb left boundary mpo
res = einsum('zay,aps->zsyp',prev_env[0],left2)
# Merge correct inds
res.merge_inds([2,3])
# Add to bot_env
bot_env.append(res)
# Loop through and add ket tensors
for col in range(ncol):
# Get the ket tensor
ketten = ket[col][row].copy()
# Add ket --------------------------
envten = prev_env[2*col+1].copy()
# Unmerge physical index
if thermal:
envten.unmerge_ind(1)
envten.merge_inds([2,3])
else:
envten.unmerge_ind(1)
# Contract with ket
res = einsum('ybkx,pbkqt->yptxq',envten,ketten)
# Merge correct indices
res.merge_inds([0,1])
res.merge_inds([2,3])
# Add to bot_env
bot_env.append(res)
# Add identity ---------------------
# TODO - Make sure signs are correct (will give error in symmetric case)
D1 = ketten.shape[ketten.legs[3][0]]
Z1 = ketten.qn_sectors[ketten.legs[3][0]]
I1 = eye(D1,
Z1,
is_symmetric=ketten.is_symmetric,
backend=ketten.backend)
if len(ketten.legs[3]) > 1:
for legind in range(1,len(ketten.legs[3])):
Dli = ketten.shape[ketten.legs[3][legind]]
Zli = ketten.qn_sectors[ketten.legs[3][legind]]
Ii = eye(Dli,
Zli,
is_symmetric=braten.is_symmetric,
backend=braten.backend)
I1 = einsum('ij,IJ->iIjJ',I1,Ii)
I1.merge_inds([0,1])
I1.merge_inds([1,2])
# Contract with previous environment
res = einsum('xcw,Qq->xQcwq',prev_env[2*col+2],I1)
# Merge correct indices
res.merge_inds([0,1])
res.merge_inds([2,3])
# Add to bot_env
bot_env.append(res)
# Last, absorb right boundary mpo
res = einsum('yft,frx->yrxt',prev_env[2*ncol+1],right2)
# Merge needed inds
res.merge_inds([0,1])
# Add to bot_env
bot_env.append(res)
# Put result into an MPS ------------------
bot_env = MPS(bot_env)
# Reduce bond dimension
if truncate:
mpiprint(5,'Truncating Boundary MPS')
if DEBUG:
mpiprint(6,'Computing initial bmpo norm')
norm0 = bot_env.norm()
bot_env = bot_env.apply_svd(chi)
if DEBUG:
mpiprint(6,'Computing resulting bmpo norm')
norm1 = bot_env.norm()
mpiprint(0,'Add bra bot BMPO Canonicalization Norm Difference for chi={}: {} ({},{})'.format(chi,abs(norm0-norm1)/abs(norm0),norm0,norm1))
# return result
return bot_env
def calc_bot_envs_gen(bra,left_bmpo,right_bmpo,ket=None,chi=10):
    """Compute the bottom boundary environment for every row of a column.

    If no ket is supplied (or its first column is None), the bra tensors
    are copied and used as the ket. Environments are built bottom-up, each
    row reusing the previous row's result.
    Returns a list of length Ny with one boundary MPS per row.
    """
    Ny = len(bra[0])
    # Substitute a copied bra for a missing ket
    # TODO - Conjugate this ket col?
    needs_ket_copy = (ket is None) or (hasattr(ket, '__len__') and ket[0] is None)
    if needs_ket_copy:
        ket = [[tensor.copy() for tensor in column] for column in bra]
    # Sweep upward, threading each environment into the next row's update
    envs = []
    prev = None
    for row in range(Ny):
        prev = update_bot_env_gen(row,
                                  bra,
                                  ket,
                                  left_bmpo[2*row],
                                  left_bmpo[2*row+1],
                                  right_bmpo[2*row],
                                  right_bmpo[2*row+1],
                                  prev,
                                  chi=chi)
        envs.append(prev)
    return envs
def update_top_env2(row,bra,ket,left1,left2,right1,right2,prev_env,chi=10,truncate=True,contracted_env=False):
    """
    Update the top boundary environment for one row of a 2-column unit.

    When contracted_env is False, delegates to update_top_env_gen (boundary
    MPS form). Otherwise prev_env is a single fully contracted tensor and
    the whole row (left boundary tensors, two ket tensors, two bra tensors,
    right boundary tensors) is absorbed with explicit einsum contractions.

    Doing the following contraction (letters label einsum indices):

      +-----------------------------------------------+
      |                  prev_env                     |
      +-----------------------------------------------+
         |     |      |     |      |     |
         a     b      c     d      e     f
      l2 -g- k1 -h- ... k2 -i- r2        (ket row)
      l1 -r- b1 -s- ... b2 -t- r1        (bra row)
         u     k v    n     w      x     (open bottom indices)

    :param row: row index being absorbed
    :param bra,ket: 2-column lists of per-row tensor lists
    :param left1,left2,right1,right2: boundary MPO tensors for this row
    :param prev_env: environment from the row above (None for the top row)
    :param chi: boundary bond dimension (used in the general path)
    :param contracted_env: if True, prev_env is one contracted tensor
    :return: updated top environment
    """
    if not contracted_env:
        # Boundary-MPS representation: use the general routine
        top_env = update_top_env_gen(row,
                                     bra,
                                     ket,
                                     left1,
                                     left2,
                                     right1,
                                     right2,
                                     prev_env,
                                     chi=chi,
                                     truncate=truncate)
    else:
        bra1 = bra[0][row]
        bra2 = bra[1][row]
        ket1 = ket[0][row]
        ket2 = ket[1][row]
        if prev_env is None:
            # Create first top env (top row: dangling upper indices are
            # empty and removed as they appear)
            tmp = einsum('jga,gklhb->abjklh',left2,ket1).remove_empty_ind(0).remove_empty_ind(0)
            tmp = einsum('jklh,hnoid->djklnoi',tmp,ket2).remove_empty_ind(0)
            tmp = einsum('jklnoi,qif->fjklnoq',tmp,right2).remove_empty_ind(0)
            tmp = einsum('jklnoq,urj->urklnoq',tmp,left1)
            tmp = einsum('urklnoq,rvlsc->cukvsnoq',tmp,bra1).remove_empty_ind(0)
            tmp = einsum('ukvsnoq,swote->eukvnwtq',tmp,bra2).remove_empty_ind(0)
            top_env = einsum('ukvnwtq,xtq->ukvnwx',tmp,right1)
        else:
            # Absorb the previous environment tensor, then the ket layer,
            # right/left boundaries and finally the bra layer
            tmp = einsum('jga,abcdef->jgbcdef',left2,prev_env)
            tmp = einsum('jgbcdef,gklhb->jklhcdef',tmp,ket1)
            tmp = einsum('jklhcdef,hnoid->jklcnoief',tmp,ket2)
            tmp = einsum('jklcnoief,qif->jklcnoeq',tmp,right2)
            tmp = einsum('jklcnoeq,urj->urklcnoeq',tmp,left1)
            tmp = einsum('urklcnoeq,rvlsc->ukvnsoeq',tmp,bra1)
            tmp = einsum('ukvnsoeq,swote->ukvnwtq',tmp,bra2)
            top_env = einsum('ukvnwtq,xtq->ukvnwx',tmp,right1)
    return top_env
def calc_top_envs2(bra,left_bmpo,right_bmpo,ket=None,chi=10,truncate=True,contracted_env=False):
    """Compute the top boundary environment for every row of a 2-column unit.

    A missing ket (None, or first column None) is replaced by a copy of the
    bra. Environments are built top-down; each row consumes the environment
    computed for the row above it.
    Returns a list of length Ny indexed by row.
    """
    # Figure out height of peps column
    Ny = len(bra[0])
    # Substitute a copied bra for a missing ket
    # TODO - Conjugate this ket col?
    if ket is None or (hasattr(ket, '__len__') and ket[0] is None):
        ket = [[tensor.copy() for tensor in column] for column in bra]
    # Sweep downward from the top row
    top_env = [None]*Ny
    prev = None
    for row in range(Ny-1, -1, -1):
        prev = update_top_env2(row,
                               bra,
                               ket,
                               left_bmpo[2*row],
                               left_bmpo[2*row+1],
                               right_bmpo[2*row],
                               right_bmpo[2*row+1],
                               prev,
                               chi=chi,
                               truncate=truncate,
                               contracted_env=contracted_env)
        top_env[row] = prev
    return top_env
def update_bot_env2(row,bra,ket,left1,left2,right1,right2,prev_env,chi=10,truncate=True,contracted_env=False):
"""
Doing the following contraction:
s t l v n x
| | | | | |
| | | | | |
+----+ +----+ | +----+ | +----+
| l2 |-p-| k1 |----q---^----| k2 |---r----^-----| r2 |
+----+ +----+ | +----+ | +----+
| | \ | | \ | |
| | \ | | | |
# Repository: andreacosolo/granite
#!/usr/bin/env python
#################################################################
#
# vcf_parser
# <NAME>
# Harvard Medical School
# <EMAIL>
#
#################################################################
#################################################################
#
# LIBRARIES
#
#################################################################
import sys, os
import re
import gzip
#################################################################
#
# Vcf
# -> Header
# -> Variant
#
#################################################################
class Vcf(object):
''' object to read and manipulate vcf file format '''
    def __init__(self, inputfile):
        ''' open input vcf, read header lines and save
        information as Header object to initialize Vcf object '''
        # path to the vcf file (plain or gzipped)
        self.inputfile = inputfile
        # parse_header reads the file up to the first data line
        self.header = self.parse_header()
    #end def
class Header(object):
''' object to store vcf header information '''
def __init__(self, definitions, columns, IDs_genotypes):
''' initialize Header object '''
self.definitions = definitions
self.columns = columns
self.IDs_genotypes = IDs_genotypes
#end def
def add_tag_definition(self, tag_definition, tag_type='INFO'):
''' add tag_definition to the header on top
of the block specified by tag_type (e.g. FORMAT, INFO) '''
added_tag, new_definitions = False, ''
for line in self.definitions.split('\n')[:-1]:
if line.startswith('##' + tag_type) and not added_tag:
added_tag = True
new_definitions += tag_definition + '\n'
#end if
new_definitions += line + '\n'
#end for
self.definitions = new_definitions
#end def
def remove_tag_definition(self, tag, tag_type='INFO'):
''' remove tag definition from header,
block specified by tag_type (e.g. FORMAT, INFO) '''
new_definitions = ''
for line in self.definitions.split('\n')[:-1]:
if line.startswith('##' + tag_type + '=<ID=' + tag + ','): ##<tag_type>=<ID=<tag>,...
continue
#end if
new_definitions += line + '\n'
#end for
self.definitions = new_definitions
#end def
def get_tag_field_idx(self, tag, field, tag_type='INFO', sep='|'):
''' get idx for value field in tag from definition,
block specified by tag_type (e.g. FORMAT, INFO) '''
for line in self.definitions.split('\n')[:-1]:
if line.startswith('##' + tag_type + '=<ID=' + tag + ','):
try:
format = line.split('Format:')[1]
# Cleaning format
format = format.replace('\'', '')
format = format.replace('\"', '')
format = format.replace('>', '')
except Exception:
raise ValueError('\nERROR in VCF header structure, {0} tag definition has no format specification\n'
.format(tag))
#end try
# Search exact match
# if not exact match, search for partial match (included in field name)
for i, field_i in enumerate(format.split(sep)):
if field == field_i.strip(): return i # exact match
#end if
#end for
for i, field_i in enumerate(format.split(sep)):
if field in field_i.strip(): return i # partial match
#end if
#end for
#end if
#end for
raise ValueError('\nERROR in VCF header structure, {0} tag definition is missing\n'
.format(tag))
#end def
def check_tag_definition(self, tag, tag_type='INFO', sep='|'):
''' check if tag is standalone or field of another leading tag,
return leading tag and field index, if any, to acces requested tag '''
for line in self.definitions.split('\n')[:-1]:
if line.startswith('##' + tag_type):
if ('=<ID=' + tag + ',') in line: ##<tag_type>=<ID=<tag>,..
# tag is already a standalone tag
return tag, 0
elif tag in line and 'Format:' in line: ##<tag_type>=<ID=<lead_tag>,...,Description="... Format:<tag>">
# tag is a field, get leading tag and field index
lead_tag = line.split('=<ID=')[1].split(',')[0]
idx = self.get_tag_field_idx(lead_tag, tag, tag_type, sep)
return lead_tag, idx
#end if
#end if
#end for
raise ValueError('\nERROR in VCF header structure, {0} tag definition is missing\n'
.format(tag))
#end def
#end class Header
    class Variant(object):
        ''' object to store information for variant in vcf format '''
        def __init__(self, line_strip, IDs_genotypes):
            ''' initialize Variant object from one tab-separated vcf data line '''
            line_split = line_strip.split('\t')
            # the eight fixed vcf columns plus FORMAT
            self.CHROM = line_split[0]
            self.POS = int(line_split[1])
            self.ID = line_split[2]
            self.REF = line_split[3]
            self.ALT = line_split[4]
            self.QUAL = line_split[5]
            self.FILTER = line_split[6]
            self.INFO = line_split[7]
            self.FORMAT = line_split[8]
            self.IDs_genotypes = IDs_genotypes
            # map each sample ID to its genotype column (columns 10+)
            self.GENOTYPES = {k: v for k, v in zip(IDs_genotypes, line_split[9:])}
        #end def
        def to_string(self):
            ''' variant as string representation (one vcf data line) '''
            genotypes_as_list = []
            variant_as_string = '{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t'.format(self.CHROM,
                                                                                       self.POS,
                                                                                       self.ID,
                                                                                       self.REF,
                                                                                       self.ALT,
                                                                                       self.QUAL,
                                                                                       self.FILTER,
                                                                                       self.INFO,
                                                                                       self.FORMAT)
            # genotypes keep the original header column order
            for IDs_genotype in self.IDs_genotypes:
                genotypes_as_list.append(self.GENOTYPES[IDs_genotype])
            #end for
            return variant_as_string + '\t'.join(genotypes_as_list) + '\n'
        #end def
        def repr(self):
            ''' variant representation as CHROM:POSREF>ALT '''
            return '{0}:{1}{2}>{3}'.format(self.CHROM,
                                           self.POS,
                                           self.REF,
                                           self.ALT)
        #end def
        def remove_tag_genotype(self, tag_to_remove, sep=':'):
            ''' remove tag field from FORMAT and GENOTYPES '''
            idx_tag_to_remove, new_format = -1, []
            # Removing tag field from FORMAT
            for i, tag in enumerate(self.FORMAT.split(sep)):
                if tag_to_remove == tag:
                    idx_tag_to_remove = i
                else:
                    new_format.append(tag)
                #end if
            #end for
            # Error if tag_to_remove not found in FORMAT
            if idx_tag_to_remove == -1:
                raise ValueError('\nERROR in variant FORMAT field, {0} tag is missing\n'
                                .format(tag_to_remove))
            #end if
            # Updating FORMAT
            self.FORMAT = sep.join(new_format)
            # Removing tag field from GENOTYPES
            for ID_genotype, genotype in self.GENOTYPES.items():
                genotype_as_list = genotype.split(sep)
                try:
                    del genotype_as_list[idx_tag_to_remove]
                except Exception: # del will fail for trailing fields that are dropped
                    # field to remove is missing already
                    pass
                #end try
                self.GENOTYPES[ID_genotype] = sep.join(genotype_as_list)
            #end for
        #end def
        def complete_genotype(self, sep=':'):
            ''' fill the trailing fields dropped in GENOTYPES,
            based on FORMAT structure '''
            len_FORMAT = len(self.FORMAT.split(sep))
            for ID_genotype, genotype in self.GENOTYPES.items():
                genotype_as_list = genotype.split(sep)
                # pad with '.' until the genotype matches FORMAT length
                for i in range(len_FORMAT - len(genotype_as_list)):
                    genotype_as_list.append('.')
                #end for
                self.GENOTYPES[ID_genotype] = sep.join(genotype_as_list)
            #end for
        #end def
        def empty_genotype(self, sep=':'):
            ''' return an empty genotype based on FORMAT structure '''
            len_FORMAT = len(self.FORMAT.split(sep))
            return './.' + (sep + '.') * (len_FORMAT - 1)
        #end def
        def remove_tag_info(self, tag_to_remove, sep=';'):
            ''' remove tag field from INFO '''
            new_INFO = []
            for tag in self.INFO.split(sep):
                if tag.startswith(tag_to_remove + '='):
                    continue # drop this tag
                #end if
                new_INFO.append(tag)
            #end for
            self.INFO = sep.join(new_INFO)
        #end def
        def add_tag_format(self, tag_to_add, sep=':'):
            ''' add tag field to FORMAT '''
            self.FORMAT += sep + tag_to_add
        #end def
        def add_values_genotype(self, ID_genotype, values, sep=':'):
            ''' add values field to genotype specified by corresponding ID '''
            self.GENOTYPES[ID_genotype] += sep + values
        #end def
        def add_tag_info(self, tag_to_add, sep=';'):
            ''' add tag field and value (tag_to_add) to INFO '''
            # tag_to_add -> tag=<value>
            if self.INFO.endswith(sep): # if INFO ending is wrongly formatted
                self.INFO += tag_to_add
            else:
                self.INFO += sep + tag_to_add
            #end if
        #end def
        def get_tag_value(self, tag_to_get, sep=';'):
            ''' get value from tag (tag_to_get) in INFO '''
            for tag in self.INFO.split(sep):
                if tag.startswith(tag_to_get + '='):
                    try:
                        return tag.split(tag_to_get + '=')[1]
                    except Exception: # tag field is in a wrong format
                        raise ValueError('\nERROR in variant INFO field, {0} tag is in the wrong format\n'
                                        .format(tag_to_get))
                    #end try
                #end if
            #end for
            # tag_to_get not found
            raise ValueError('\nERROR in variant INFO field, {0} tag is missing\n'.format(tag_to_get))
        #end def
        def get_genotype_value(self, ID_genotype, tag_to_get, sep=':'):
            ''' get value from tag (tag_to_get) in genotype specified by corresponding ID '''
            # Get index from FORMAT
            idx_tag_to_get = -1
            for i, tag in enumerate(self.FORMAT.split(sep)):
                if tag_to_get == tag:
                    idx_tag_to_get = i
                    break
                #end if
            #end for
            # Error if tag_to_get not found in FORMAT
            if idx_tag_to_get == -1:
                raise ValueError('\nERROR in variant FORMAT field, {0} tag is missing\n'
                                .format(tag_to_get))
            #end if
            # Get value from index in genotype by ID
            try:
                return self.GENOTYPES[ID_genotype].split(sep)[idx_tag_to_get]
            except Exception:
                raise ValueError('\nERROR in GENOTYPES identifiers, {0} identifier is missing in VCF\n'
                                .format(ID_genotype))
            #end try
        #end def
    #end class Variant
@staticmethod
def read_vcf(inputfile):
''' read vcf file, gzipped or ungzipped,
return a generator '''
if inputfile.endswith('.gz'):
with gzip.open(inputfile, 'rb') as fz:
for byteline in fz:
yield byteline.decode()
#end for
#end with
else:
with open(inputfile) as fi:
for line in fi:
yield line
#end for
#end with
#end if
#end def
def parse_header(self):
''' read header and save information as Header object '''
definitions, columns, IDs_genotypes = '', '', ''
for line in self.read_vcf(self.inputfile):
if line.startswith('#'): # reading a header line
line_strip = line.rstrip()
if line_strip.startswith('##'): # header definition line
definitions += line_strip + '\n'
elif line_strip.startswith('#CHROM'): # header columns line
columns += line_strip + '\n'
IDs_genotypes = line_strip.split('\t')[9:]
#end if
else: # finished to read the header
break # exit and close buffer
#end if
#end for
# Checking header is correct
if definitions and columns and IDs_genotypes:
return self.Header(definitions, columns, IDs_genotypes)
else:
raise ValueError('\nERROR in VCF header structure, missing essential lines\n')
#end if
#end def
def parse_variants(self): # generator
''' return a generator to variants stored as Variant objects '''
for line in self.read_vcf(self.inputfile):
if not line.startswith('#'):
line_strip = line.rstrip()
if line_strip:
try:
yield self.Variant(line_strip, self.header.IDs_genotypes)
except Exception:
raise ValueError('\nERROR in variant VCF structure, missing essential columns\n')
#end try
#end if
#end if
#end for
#end def
    def write_definitions(self, outputfile_obj):
        ''' write header definitions to outputfile_obj buffer '''
        # definitions already carries trailing newlines, write verbatim
        outputfile_obj.write(self.header.definitions)
    #end def
def write_columns(self, outputfile_obj):
''' | |
import keyboard
import time
import sys
import random
from utils import clear
from utils import GoBack
from utils import User
from rich import print
from rich.console import Console
from rich.align import Align
from rich.prompt import Prompt
from rich.panel import Panel
from rich.layout import Layout
from rich.text import Text
from rendering import render_chat_rooms, render_menu_screen
"""
'id, password, size, stype = create_box_tui(User, sessions_data)'
To run the create_box_tui - the variables can be changed (i.e. to a list), by
returning a different value at lines 322 and 352. To be called on main.py, hover_on = 1
'join_box_tui(User, sessions_data)'
To run the join_box_tui - no variables returned, as the session id's, from sessions_data
is used to print and select a valid session. May need to be edited to return session_id if needed.
To be called on main.py, hover_on = 2
"""
# Shared rich console used by all TUI rendering in this module
console = Console()
"""
This will be passed into the join_box_tui() function when called. List(?) to be filled
from the database. After database is created, it needs to be implemented into this code.
currently a temp variable
"""
sessions_data = [[1234]]
# NOTE(review): a module-level `global` statement is a no-op, and it names
# `colours` while the dict below is `colour` -- confirm intent
global layout, colours, private_box_art, public_box_art
# rich style strings for the unselected / selected box art
colour = {
    "box_default": "bold white",
    "box_hover": "bold yellow"
}
public_box_art = ("""
+--+
| |
| |
| |
| |
| |
+-+ +-+
+-----\ /-------+
/| \ / /|
/ | \/ / |
+--+---------------+ |
| | |
| PUBLIC BOX | +
| | /
+------------------+/
""")
private_box_art = ("""
.------.
/ .----. \
_| |____| |_
.' ____ '.
| / \ |
| \____/ |
'.____________.'
+------------------+
/ /|
/ / |
+--+---------------+ |
| | |
| PRIVATE BOX | +
| | /
+------------------+/
""")
# Splitting the console to place the boxes side by side:
# a fixed-height title bar, the two box panels, and a help/status area
layout = Layout()
layout.visible = True
layout.split_column(
    Layout(name="title"),
    Layout(name="boxes"),
    Layout(name="screen")
)
layout["title"].size = 5
layout["boxes"].size = 16
layout["screen"].size = 5
# the boxes row holds the public (left) and private (right) box art
layout["boxes"].split_row(
    Layout(name="left"),
    Layout(name="right")
)
"""
temp values for testing
sessions_data = [
{
"room_name": "1234",
"private" : True,
"password" : "<PASSWORD>",
"capacity" : "1",
"room_owner" : "bob"
},
{
"room_name": "2222",
"private" : True,
"password" : "<PASSWORD>",
"capacity" : "1",
"room_owner" : "lob"
},
{
"room_name": "3333",
"private" : True,
"password" : "<PASSWORD>",
"capacity" : "1",
"room_owner" : "tob"
},
{
"room_name": "4444",
"private" : False,
"password" : None,
"capacity" : "1",
"room_owner" : "mob"
},
{
"room_name": "5555",
"private" : False,
"password" : None,
"capacity" : "1",
"room_owner" : "cob"
},
]
"""
def is_already_an_id(sessions_data, session_id, server_type="all"):
    """
    Return True when session_id matches an existing room name.

    server_type selects the search scope: "all" searches every room,
    a falsy value searches public rooms only, any other truthy value
    searches private rooms only.
    """
    for room in sessions_data:
        if server_type == "all":
            in_scope = True
        elif not server_type:
            in_scope = not room["private"]
        else:
            in_scope = room["private"]
        if in_scope and room["room_name"] == session_id:
            return True
    return False
def is_correct_password(sessions_data, password, session_id):
    """
    Return True if any room named session_id has exactly this password.
    """
    return any(room["room_name"] == session_id and room["password"] == password
               for room in sessions_data)
def layout_setup(select: str, text: str): # DONE
    """
    Render the box-selection menu: title, the two ASCII box arts and the
    key-binding help text. The box named by `select` ("left" = public box)
    is drawn in the hover colour, the other in the default colour.
    """
    global layout, private_box_art, public_box_art, colour
    clear()
    layout["title"].update(Align(text, align="center"))
    layout["screen"].update(
        "\n\n[bold white]! Use [bold magenta] right arrow[/] or [bold magenta]left arrow[/] to select a box.[/]"
        + "\n[bold white]! [bold magenta]ENTER[/] to confirm your choice.[/]"
        + "\n[bold white]! Press [bold magenta]BACKSPACE[/] to return to Menu.[/]")
    # Pick styles once, then update both panels the same way
    left_style = colour["box_hover"] if select == "left" else colour["box_default"]
    right_style = colour["box_default"] if select == "left" else colour["box_hover"]
    layout["left"].update(Align(Text(f"{public_box_art}", style=left_style), align="center"))
    layout["right"].update(Align(Text(f"{private_box_art}", style=right_style), align="center"))
    console.print(layout)
def tui_navigation(select: str, sessions_data): # DONE
    """
    Poll the keyboard until the user moves the selection or confirms it.

    Returns "left"/"right" when an arrow key changes the selection, or
    "public"/"private" when ENTER confirms the current box. Raises GoBack
    on BACKSPACE. (sessions_data is accepted for interface parity.)
    """
    # Small delay so the keypress that opened this screen is not re-read
    time.sleep(0.2)
    while True:
        if keyboard.is_pressed("backspace"):
            raise GoBack
        if keyboard.is_pressed("left_arrow") and select != "left":
            return "left"
        if keyboard.is_pressed("right_arrow") and select != "right":
            return "right"
        if keyboard.is_pressed("enter"):
            if select == "left":
                return "public"
            if select == "right":
                return "private"
def create_session_id(sessions_data): # DONE
    """
    Return a new zero-padded 4-digit session id that is not already taken.

    Draws uniformly from 1..1000 (the original id space) and retries until
    the id is free. Uses a loop instead of the previous recursive retry,
    which could grow the call stack when many ids collide.
    """
    while True:
        session_id = str(random.randint(1, 1000)).zfill(4)
        if not is_already_an_id(sessions_data, session_id):
            return session_id
def enter_session_id(prompt: str, alignment: str, password_prompt, sessions_data):
    """
    Prompt the user for a valid 4-digit session ID and return it.

    Typing 'back' returns to the join menu. For private rooms
    (password_prompt truthy) control passes to enter_password and its
    result is returned; for public rooms the validated id is returned.
    Invalid input re-prompts recursively.
    """
    given_id = console.input(prompt)
    if given_id.lower() == "back":
        return join_box_tui(User, sessions_data)
    if len(given_id) == 4 and given_id.isdigit():
        # password_prompt doubles as the server_type scope for the lookup
        if is_already_an_id(sessions_data, given_id, password_prompt):
            if password_prompt:
                # BUG FIX: the (session_id, password) result of
                # enter_password was previously discarded, making this
                # branch return None
                return enter_password(sessions_data, password_prompt, given_id)
            console.print(Align("\n✔️ Joining ThaBox...", align=alignment))
            time.sleep(0.5)
            clear()
            return given_id
        console.print(Align("❌ The room you are trying to join doesn't exist\n", align=alignment))
        time.sleep(0.5)
        return enter_session_id(prompt, alignment, password_prompt, sessions_data)
    console.print(Align("❌ Session ID's can only be 4 digit numbers!\n", align=alignment))
    time.sleep(0.5)
    return enter_session_id(prompt, alignment, password_prompt, sessions_data)
def enter_password(sessions_data, prompt, session_id):
    """
    Ask for the room password; on success clear the screen and return
    (session_id, password). A wrong password re-prompts recursively.
    """
    time.sleep(0.3) # Input delay
    given_password = console.input(prompt)
    if not is_correct_password(sessions_data, given_password, session_id):
        console.print(Align("❌ Incorrect Password!", align="center"))
        return enter_password(sessions_data, prompt, session_id)
    console.print(Align("\n✔️ Joining ThaBox...", align="center"))
    time.sleep(1)
    clear()
    return session_id, given_password
def enter_room_size(user):  # DONE
    """
    Validates the user input, so they enter a correct size, and
    returns the room_size as a string ("2".."6").
    """
    while True:
        size = console.input(" " * ((console.width // 2) - 14) + "[bold red]Enter room size (2 - 6): [/]")
        if size.lower() == "back":
            # NOTE(review): `sessions_data` is not defined in this scope, so
            # typing BACK raises NameError — it should probably be a
            # parameter of this function. Preserved as-is; confirm with the
            # caller (create_box_tui has sessions_data in scope).
            return create_box_tui(user, sessions_data)
        if size in {"2", "3", "4", "5", "6"}:
            return str(size)
        console.print(Align("❌ Enter a valid number!\n", align="center"))
def join_box_tui(user: User, sessions_data, select="left"):
    """Join-box menu.

    ``select`` is "left"/"right" while the user navigates the menu and
    becomes "public"/"private" once a choice is confirmed.  Returns the
    chosen chatroom name (session id).
    """
    if select == "public":
        # PLAN - print only 4 sessions at once; users navigate with arrow
        # keys, then type the session id of the room they want to join.
        # DATA NEEDED - public session id, max user count for the room,
        # current amount of users in the room.
        clear()
        console.print(Align("\n█▀█ █ █ █▄▄ █ █ █▀▀ █▄▄ █▀█ ▀▄▀ █▀▀ █▀\n"
                            + "█▀▀ █▄█ █▄█ █▄▄ █ █▄▄ █▄█ █▄█ █ █ ██▄ ▄█\n",
                            align="center"),
                      style=user.preferences.preference_dict['Border Colour'])
        for elem in sessions_data:
            if not elem["private"]:
                console.print(Align(Panel("Session ID = " + elem["room_name"]
                                          + "\nOwner = " + elem["room_owner"]
                                          + "\nCapacity = " + elem["capacity"], expand=False), align="center"),
                              style="bold white")
        console.print(
            "\n\n[bold white]! Use [bold magenta] right arrow[/] or [bold magenta]left arrow[/] to find more rooms.[/]\n"
            + "[bold white]! [bold magenta]SPACE[/] to type in a session ID.[/]\n"
            + "[bold white]! Press [bold magenta]BACKSPACE[/] to go back.[/]")
        time.sleep(0.2)
        while True:
            if keyboard.is_pressed("backspace"):
                select = "left"
                return join_box_tui(user, sessions_data, select)
            if keyboard.is_pressed("space"):
                console.print()
                # user needs to be redirected to another chat room
                chatroom_name = enter_session_id(
                    "[bold red]> Enter the session ID or type 'BACK' to go back:[/] ", "left", False, sessions_data)
                return chatroom_name
    elif select == "private":
        clear()
        console.print(Align("\n█▀█ █▀█ █ █ █ ▄▀█ ▀█▀ █▀▀ █▄▄ █▀█ ▀▄▀\n"
                            + "█▀▀ █▀▄ █ ▀▄▀ █▀█ █ ██▄ █▄█ █▄█ █ █\n", align="center"),
                      style=user.preferences.preference_dict["Border Colour"])
        console.print(Align("\nType [bold magenta]BACK[/] in the Session ID field to go back.\n", align="center"))
        # user needs to be redirected to the chat room
        chatroom_name, room_password = enter_session_id((" " * ((console.width // 2) - 14) + "[bold red]Enter the session ID:[/] "), "center",
                                                        (str("\n" + " " * ((console.width // 2) - 17) + "[bold red]Enter the room password:[/] ")), sessions_data)
        return chatroom_name
    else:
        # JOIN BOX MENU
        layout_setup(select,
                     f"[{user.preferences.preference_dict['Border Colour']}]\n\n █ █▀█ █ █▄ █ █▄▄ █▀█ ▀▄▀\n█▄█ █▄█ █ █ ▀█ █▄█ █▄█ █ █[/]")  # select, "JOIN BOX"
        select = tui_navigation(select, sessions_data)
        # BUG FIX: propagate the recursive result; previously the return
        # value was discarded and this menu always returned None.
        return join_box_tui(user, sessions_data, select)
def create_box_tui(user: User, sessions_data, select="left"):
"""
Allows user to create thier own box - can set the room_size
and add a password, which are both returned along with a random,
unique session id.
"""
if select == "public":
clear()
public_session_id = create_session_id(sessions_data)
console.print(Align("\n█▄▄ █▀█ ▀▄▀ █▀ █▀▀ ▀█▀ ▀█▀ █ █▄ █ █▀▀ █▀\n"
+ "█▄█ █▄█ █ █ ▄█ ██▄ █ █ █ █ ▀█ █▄█ ▄█\n", align="center"),
style=user.preferences.preference_dict['Border Colour'])
console.print(Align("Type [bold magenta]BACK[/] in any of the fields to go back.\n", align="center"))
console.print(Align(f"[bold red]Session ID:[/] {public_session_id}", align="center"))
room_size = enter_room_size(user)
console.print(Align("\n✔️ Creating ThaBox...", align="center"))
time.sleep(0.5)
is_private = False
password = None
return "create", user, public_session_id, password, room_size, is_private
elif select == "private":
clear()
private_session_id = create_session_id(sessions_data)
console.print(Align("\n█▄▄ █▀█ ▀▄▀ █▀ █▀▀ ▀█▀ ▀█▀ █ █▄ █ █▀▀ █▀\n"
+ "█▄█ █▄█ █ █ ▄█ ██▄ █ █ █ █ ▀█ █▄█ ▄█\n", align="center"),
style=user.preferences.preference_dict['Border Colour'])
console.print(Align("Type [bold magenta]BACK[/] in any of the fields to go back.\n", align="center"))
console.print(Align(f"[bold red]Session ID:[/] {private_session_id}", align="center"))
while True:
time.sleep(0.2)
password = console.input(" " * ((console.width // 2) - 12) + "[bold red]Create a Password: [/]")
if password.lower() == "back":
return create_box_tui(user, sessions_data)
elif password == "":
console.print(Align("❌ Password can't be blank!\n", align="center"))
elif " " in password:
console.print(Align("❌ Password can't have | |
# pylint: disable=global-statement,redefined-outer-name
import argparse
import csv
import glob
import json
import os
import yaml
from flask import Flask, jsonify, redirect, render_template, send_from_directory
from flask_frozen import Freezer
from flaskext.markdown import Markdown
# Loaded site data (JSON/CSV/YAML files), populated once by main().
site_data = {}
# Per-type lookup tables keyed by record UID, populated by main().
by_uid = {}
# Root directory (and site_data key) holding past-years archive content.
archive_path_root = "archive"
# Set True by main() once at least one archive year with data is found.
archive_data_exists = False
# Year subdirectories found under <archive_path_root>/sitedata.
archive_directories = []
def main(site_data_path):
    """Load current + archived site data into the module-level dicts.

    Populates ``site_data`` and ``by_uid`` from the files found under
    *site_data_path* and (optionally) ``archive/sitedata/<year>``, and
    returns the list of files Frozen-Flask should watch for changes.
    """
    global site_data, extra_files, archive_path_root, archive_data_exists, archive_directories
    extra_files = ["README.md"]
    # Load all for your sitedata one time.
    for f in glob.glob(site_data_path + "/*"):
        extra_files.append(f)
        name, typ = f.split("/")[-1].split(".")
        # Use context managers so file handles are closed deterministically
        # instead of leaking until garbage collection.
        if typ == "json":
            with open(f) as fh:
                site_data[name] = json.load(fh)
        elif typ in {"csv", "tsv"}:
            with open(f) as fh:
                site_data[name] = list(csv.DictReader(fh))
        elif typ == "yml":
            with open(f) as fh:
                site_data[name] = yaml.load(fh.read(), Loader=yaml.SafeLoader)
    for typ in ["papers", "speakers", "tutorials", "proceedings", "workshops", "sponsors", "symposiums"]:
        by_uid[typ] = {}
        for p in site_data[typ]:
            by_uid[typ][p["UID"]] = p
    print("Current Data Successfully Loaded")
    # check if archive data directory exists
    archive_path_sitedata = archive_path_root + "/sitedata"
    archive_dir_exists = archive_directory_check(archive_path_sitedata)
    if archive_dir_exists:
        archive_directories = os.listdir(archive_path_sitedata)
        site_data[archive_path_root] = {}
        archive_root_dict = {}
        archive_year_summary_dict = {}
        archive_dict = {}
        by_uid_archive_path_root_dict = {}
        if len(archive_directories) > 0:
            for archive_year in archive_directories:
                archive_path = archive_path_sitedata + "/" + str(archive_year)
                archive_data_types = []
                # check if the archive year has data
                if os.path.isdir(archive_path):
                    if not os.listdir(archive_path):
                        print(str(archive_path) + " directory is empty")
                    else:
                        # BUG FIX: the original used hasattr(dict, key),
                        # which never reports dict *keys* and was therefore
                        # always False; test membership instead.
                        if archive_year not in archive_dict:
                            archive_dict[str(archive_year)] = {}
                            archive_year_summary_dict[str(archive_year)] = False
                        # Load all archive data
                        for f in glob.glob(archive_path + "/*"):
                            extra_files.append(f)
                            name, typ = f.split("/")[-1].split(".")
                            if typ == "json":
                                with open(f) as fh:
                                    archive_dict[archive_year][name] = json.load(fh)
                                archive_data_types.append(name)
                            elif typ in {"csv", "tsv"}:
                                with open(f) as fh:
                                    archive_dict[archive_year][name] = list(csv.DictReader(fh))
                                archive_data_types.append(name)
                            elif typ == "yml":
                                with open(f) as fh:
                                    archive_dict[archive_year][name] = yaml.load(fh.read(), Loader=yaml.SafeLoader)
                                archive_data_types.append(name)
                            elif typ == "md" and name == "highlights":
                                archive_year_summary_dict[str(archive_year)] = True
                                with open(f"./{archive_path_root}/sitedata/{archive_year}/{name}.md") as fh:
                                    archive_dict[archive_year][name] = fh.read()
                        if len(archive_data_types) > 0:
                            archive_data_exists = True
                            by_uid_archive_year_type = {}
                            if archive_year not in by_uid_archive_path_root_dict:
                                by_uid_archive_path_root_dict[str(archive_year)] = {}
                            # list of archived site data file names
                            for typ in archive_data_types:
                                by_uid_archive_year_type[str(typ)] = {
                                    str(p["UID"]): p for p in archive_dict[archive_year][typ]
                                }
                            by_uid_archive_path_root_dict[archive_year] = by_uid_archive_year_type
                            by_uid[str(archive_path_root)] = by_uid_archive_path_root_dict
            archive_root_dict.update(archive_dict)
        site_data["archive"] = archive_root_dict
        site_data["archive"]["years_list"] = archive_directories
        site_data["archive"]["has_data"] = archive_data_exists
        site_data["archive"]["has_summary"] = archive_year_summary_dict
        print("Archive Data Successfully Loaded")
    return extra_files
# ------------- SERVER CODE -------------------->
app = Flask(__name__)
app.config.from_object(__name__)
freezer = Freezer(app)  # Frozen-Flask: renders every route to static HTML
markdown = Markdown(app)  # registers the `markdown` Jinja filter
# MAIN PAGES
def _data():
    """Base template context shared by every page (config, archive, sponsors)."""
    return {
        "config": site_data["config"],
        "archive": site_data["archive"],
        "sponsors": site_data["sponsors"],
    }
@app.route("/")
def index():
    # The site is frozen to static files, so the root just redirects to
    # the pre-rendered index page.
    return redirect("/index.html")
@app.route("/favicon.ico")
def favicon():
    # NOTE(review): `site_data_path` is only visible as a parameter of
    # main(), not as a module-level name — this handler likely raises
    # NameError when hit; confirm where site_data_path is bound.
    return send_from_directory(site_data_path, "favicon.ico")
# REDIRECTS TO SUPPORT EARLIER LINKS
# Each handler maps an old URL from a previous site layout onto the
# current static page (302 so crawlers keep checking the old URL).
@app.route("/registration")
def registration():
    return redirect("/register.html", code=302)
@app.route("/agenda")
def agenda():
    return redirect("/calendar.html", code=302)
@app.route("/keynote")
def keynote():
    return redirect("/calendar.html", code=302)
@app.route("/toc")
def toc():
    return redirect("/papers.html", code=302)
@app.route("/acm-chil-track-1-cfp")
def track1():
    return redirect("/call-for-papers.html", code=302)
@app.route("/acm-chil-track-2-cfp")
def track2():
    return redirect("/call-for-papers.html", code=302)
@app.route("/acm-chil-track-3-cfp")
def track3():
    return redirect("/call-for-papers.html", code=302)
@app.route("/acm-chil-track-4-cfp")
def track4():
    return redirect("/call-for-papers.html", code=302)
@app.route("/call-for-tutorials")
def call_tutorials():
    return redirect("/call-for-papers.html", code=302)
@app.route("/doctoral-consortium-call-for-phd-students")
def call_doctoral():
    return redirect("/call-for-papers.html", code=302)
@app.route("/financial-support")
def financial_support():
    return redirect("/sponsor.html", code=302)
@app.route("/acm-chil-2020-sponsorship-policy")
def sponsorship_policy():
    return redirect("/sponsor.html", code=302)
@app.route("/organizing-committees")
def organizing_committee():
    return redirect("/committee.html", code=302)
@app.route("/reviewers")
def reviewers():
    return redirect("/committee.html#tab-reviewers", code=302)
@app.route("/faqs")
def faqs():
    return redirect("/help.html", code=302)
# TOP LEVEL PAGES
@app.route("/index.html")
def home():
    """Landing page: markdown intro plus the committee roster."""
    data = _data()
    data["index"] = open("./templates/content/index.md").read()
    data["committee"] = site_data["committee"]["committee"]
    return render_template("index.html", **data)
@app.route("/help.html")
def about():
    """Help / FAQ page."""
    data = _data()
    data["FAQ"] = site_data["faq"]["FAQ"]
    return render_template("help.html", **data)
@app.route("/papers.html")
def papers():
    """Browsable list of all accepted papers."""
    data = _data()
    data["papers"] = site_data["papers"]
    return render_template("papers.html", **data)
@app.route("/paper_vis.html")
def paper_vis():
    """Interactive paper visualisation (data fetched client-side)."""
    data = _data()
    return render_template("papers_vis.html", **data)
@app.route("/calendar.html")
def schedule():
    """Conference calendar: speakers, highlighted papers, and sessions."""
    data = _data()
    data["day"] = {
        "speakers": site_data["speakers"],
        "highlighted": [
            format_paper(by_uid["papers"][h["UID"]]) for h in site_data["highlighted"]
        ],
    }
    data["speakers"] = site_data["speakers"]
    data["tutorials"] = [
        format_workshop(tutorial) for tutorial in site_data["tutorials"]
    ]
    data["proceedings"] = [
        format_workshop(proceeding) for proceeding in site_data["proceedings"]
    ]
    data["workshops"] = [
        format_workshop(workshop) for workshop in site_data["workshops"]
    ]
    data["schedule"] = {
        "thursday": site_data['schedule']['thursday'],
        "friday": site_data['schedule']['friday']
    }
    data["schedule_content"] = open("./templates/content/schedule.md").read()
    return render_template("schedule.html", **data)
@app.route("/program.html")
def program():
    """Full program: speakers plus all formatted event types."""
    data = _data()
    data["speakers"] = site_data["speakers"]
    data["tutorials"] = [
        format_workshop(tutorial) for tutorial in site_data["tutorials"]
    ]
    data["proceedings"] = [
        format_workshop(proceeding) for proceeding in site_data["proceedings"]
    ]
    data["workshops"] = [
        format_workshop(workshop) for workshop in site_data["workshops"]
    ]
    return render_template("program.html", **data)
@app.route("/proceedings.html")
def proceedings():
    """Proceedings listing."""
    data = _data()
    data["proceedings"] = [
        format_workshop(proceeding) for proceeding in site_data["proceedings"]
    ]
    return render_template("proceedings.html", **data)
@app.route("/symposiums.html")
def symposiums():
    """Symposiums listing."""
    data = _data()
    data["symposiums"] = [
        format_workshop(symposium) for symposium in site_data["symposiums"]
    ]
    return render_template("symposiums.html", **data)
@app.route("/workshops.html")
def workshops():
    """Workshops listing."""
    data = _data()
    data["workshops"] = [
        format_workshop(workshop) for workshop in site_data["workshops"]
    ]
    return render_template("workshops.html", **data)
@app.route("/register.html")
def register():
    """Registration info rendered from markdown content."""
    data = _data()
    data["register"] = open("./templates/content/register.md").read()
    return render_template("register.html", **data)
@app.route("/sponsor.html")
def sponsor():
    """Sponsorship info rendered from markdown content."""
    data = _data()
    data["sponsor"] = open("./templates/content/sponsor.md").read()
    return render_template("sponsor.html", **data)
@app.route("/call-for-papers.html")
def call_for_papers():
    """Call for papers: one markdown file per section/track."""
    data = _data()
    data["call_for_papers"] = open("./templates/content/call-for-papers.md").read()
    data["call_for_papers_author_info"] = open("./templates/content/call-for-papers-author-info.md").read()
    data["call_for_papers_track_1"] = open("./templates/content/call-for-papers-track-1.md").read()
    data["call_for_papers_track_2"] = open("./templates/content/call-for-papers-track-2.md").read()
    data["call_for_papers_track_3"] = open("./templates/content/call-for-papers-track-3.md").read()
    return render_template("call-for-papers.html", **data)
@app.route("/committee.html")
def committee():
    """Committee page: main roster plus governing/steering boards."""
    data = _data()
    data["committee"] = open("./templates/content/committee.md").read()
    data["committee_governing_board"] = open(
        "./templates/content/committee-governing-board.md"
    ).read()
    data["committee_steering_committee"] = open(
        "./templates/content/committee-steering-committee.md"
    ).read()
    return render_template("committee.html", **data)
@app.route("/live.html")
def live():
    """Live-stream page rendered from markdown content."""
    data = _data()
    data["live"] = open("./templates/content/live.md").read()
    return render_template("live.html", **data)
@app.route("/<year>/<template>.html")
def archive(year, template):
    """Render an archived (past-year) page for the given template type.

    Event-list templates are run through format_workshop; speakers and
    highlights are passed through unchanged.  Unknown year/template
    combinations render the error page.
    """
    global archive_path_root
    data = _data()
    data["isArchive"] = True
    data["archive_year"] = year
    # Template types whose records need format_workshop normalisation.
    formatted_types = {"proceedings", "symposiums", "workshops", "tutorials"}
    # Template types whose data is passed straight to the template.
    passthrough_types = {"speakers", "highlights"}
    if ((year in site_data[archive_path_root]) and (template in site_data[archive_path_root][year])):
        payload = site_data[archive_path_root][year][template]
        if template in formatted_types:
            data[template] = [format_workshop(item) for item in payload]
            return render_template(f"past-events-{template}.html", **data)
        elif template in passthrough_types:
            data[template] = payload
            return render_template(f"past-events-{template}.html", **data)
        # NOTE: any other template type with data falls through and returns
        # None, exactly as the original branch chain did.
    else:
        error = {
            "title": "Oops!",
            "type": "routing",
            "message": f"No archive data for {template} in {year}"
        }
        data["error"] = error
        return render_template("error.html", **data)
def archive_directory_check(dir_path):
    """Return True if *dir_path* exists and is a directory."""
    # os.path.isdir already returns False for nonexistent paths, so the
    # separate exists() check (and the `True if ... else False` wrapper)
    # were redundant.
    return os.path.isdir(dir_path)
def extract_list_field(v, key):
    """Return ``v[key]`` as a list, splitting '|'-delimited strings.

    A missing key yields ``[""]`` (the split of the empty string).
    """
    raw = v.get(key, "")
    return raw if isinstance(raw, list) else raw.split("|")
def format_paper(v):
    """Normalise a raw paper record into the dict shape the templates expect."""
    authors, keywords, sessions = (
        extract_list_field(v, field) for field in ("authors", "keywords", "sessions")
    )
    return {
        "UID": v["UID"],
        "title": v["title"],
        "forum": v["UID"],
        "authors": authors,
        "keywords": keywords,
        "abstract": v["abstract"],
        "TLDR": v["abstract"],
        "recs": [],
        "sessions": sessions,
        # links to external content per poster
        "pdf_url": v.get("pdf_url", ""),  # render poster from this PDF
        "code_link": "https://github.com/Mini-Conf/Mini-Conf",  # link to code
        "link": "https://arxiv.org/abs/2007.12238",  # link to paper
    }
def format_workshop(v):
    """Normalise a workshop/tutorial/proceeding/symposium record for templates."""
    formatted_workshop = {
        "id": v["UID"],
        "title": v["title"],
        "organizers": extract_list_field(v, "authors"),
        "abstract": v["abstract"],
    }
    # Optional fields are copied through only when present in the source.
    for optional_key in ("bio", "slideslive_id", "slideslive_active_date",
                         "rocketchat_id", "doi_link"):
        if optional_key in v:
            formatted_workshop[optional_key] = v[optional_key]
    return formatted_workshop
# ITEM PAGES
@app.route("/poster_<poster>.html")
def poster(poster):
    """Detail page for a single paper/poster."""
    uid = poster
    v = by_uid["papers"][uid]
    data = _data()
    data["paper"] = format_paper(v)
    return render_template("poster.html", **data)
@app.route("/speaker_<speaker>.html")
def speaker(speaker):
    """Detail page for a single speaker."""
    uid = speaker
    v = by_uid["speakers"][uid]
    data = _data()
    data["speaker"] = v
    data["by_uid"] = by_uid
    return render_template("speaker.html", **data)
@app.route("/<year>/speaker_<speaker>.html")
def past_speaker(year, speaker):
    """Archived-year variant of the speaker detail page."""
    uid = speaker
    v = by_uid["archive"][year]["speakers"][uid]
    data = _data()
    data["speaker"] = v
    data["year"] = year
    data["isArchive"] = True
    data["by_uid"] = by_uid
    return render_template("speaker.html", **data)
@app.route("/workshop_<workshop>.html")
def workshop(workshop):
    """Detail page for a single workshop."""
    uid = workshop
    v = by_uid["workshops"][uid]
    data = _data()
    data["workshop"] = format_workshop(v)
    return render_template("workshop.html", **data)
@app.route("/<year>/workshop_<workshop>.html")
def past_workshop(year, workshop):
    """Archived-year variant of the workshop detail page."""
    uid = workshop
    v = by_uid["archive"][year]["workshops"][uid]
    data = _data()
    data["year"] = year
    data["isArchive"] = True
    data["workshop"] = format_workshop(v)
    return render_template("workshop.html", **data)
@app.route("/tutorial_<tutorial>.html")
def tutorial(tutorial):
    """Detail page for a single tutorial."""
    uid = tutorial
    v = by_uid["tutorials"][uid]
    data = _data()
    data["tutorial"] = format_workshop(v)
    return render_template("tutorial.html", **data)
@app.route("/<year>/tutorial_<tutorial>.html")
def past_tutorial(year,tutorial):
    """Archived-year variant of the tutorial detail page."""
    uid = tutorial
    v = by_uid["archive"][year]["tutorials"][uid]
    data = _data()
    data["year"] = year
    data["isArchive"] = True
    data["tutorial"] = format_workshop(v)
    return render_template("tutorial.html", **data)
@app.route("/proceeding_<proceeding>.html")
def proceeding(proceeding):
    """Detail page for a single proceeding entry."""
    uid = proceeding
    v = by_uid["proceedings"][uid]
    data = _data()
    data["proceeding"] = format_workshop(v)
    return render_template("proceeding.html", **data)
@app.route("/<year>/proceeding_<proceeding>.html")
def past_proceeding(year, proceeding):
    """Archived-year variant of the proceeding detail page."""
    uid = proceeding
    v = by_uid["archive"][year]["proceedings"][uid]
    data = _data()
    data["year"] = year
    data["isArchive"] = True
    data["proceeding"] = format_workshop(v)
    return render_template("proceeding.html", **data)
@app.route("/symposium_<symposium>.html")
def symposium(symposium):
    """Detail page for a single symposium."""
    uid = symposium
    v = by_uid["symposiums"][uid]
    data = _data()
    data["symposium"] = format_workshop(v)
    return render_template("symposium.html", **data)
@app.route("/<year>/symposium_<symposium>.html")
def past_symposium(year, symposium):
    """Archived-year variant of the symposium detail page."""
    uid = symposium
    v = by_uid["archive"][year]["symposiums"][uid]
    data = _data()
    data["year"] = year
    data["isArchive"] = True
    data["symposium"] = format_workshop(v)
    return render_template("symposium.html", **data)
@app.route("/chat.html")
def chat():
    """Chat embed page (rocketchat client side)."""
    data = _data()
    return render_template("chat.html", **data)
# FRONT END SERVING
@app.route("/papers.json")
def paper_json():
    """JSON list of all formatted papers, consumed by the front-end JS."""
    # Renamed the local from `json` so the imported json module is not
    # shadowed inside this function.
    papers = [format_paper(v) for v in site_data["papers"]]
    return jsonify(papers)
@app.route("/static/<path:path>")
def send_static(path):
    # Serve bundled static assets (send_from_directory guards traversal).
    return send_from_directory("static", path)
@app.route("/serve_<path>.json")
def serve(path):
    # Expose a raw site_data section as JSON for client-side rendering.
    return jsonify(site_data[path])
# --------------- DRIVER CODE -------------------------->
# Code to turn it all static
@freezer.register_generator
def generator():
    """Yield every parameterised URL so Frozen-Flask can freeze the site.

    Each (endpoint, kwargs) pair freezes one detail page; archive pages
    are yielded per year/template combination.
    """
    for paper in site_data["papers"]:
        yield "poster", {"poster": str(paper["UID"])}
    for speaker in site_data["speakers"]:
        yield "speaker", {"speaker": str(speaker["UID"])}
    for tutorial in site_data["tutorials"]:
        yield "tutorial", {"tutorial": str(tutorial["UID"])}
    for proceeding in site_data["proceedings"]:
        yield "proceeding", {"proceeding": str(proceeding["UID"])}
    for symposium in site_data["symposiums"]:
        yield "symposium", {"symposium": str(symposium["UID"])}
    for workshop in site_data["workshops"]:
        yield "workshop", {"workshop": str(workshop["UID"])}
    for year in site_data["archive"]["years_list"]:
        # Highlights pages exist only for years that shipped a highlights.md.
        if site_data["archive"]["has_summary"][year] is True:
            yield f"/{year}/highlights.html"
        for typ in site_data["archive"][year]:
            if not typ == "highlights":
                yield "archive", {"year": year, "template": typ}
routeName = | |
can\'t figure out how to log proper error codes, Ask him why !song isnt working')
#from JPARKZ
elif cmd == "rl":
if cmdargs == []:
self.post_message(usr + ', Does your carer know you are on the internet right now?')
else:
self.post_message(cmdargs[0] + ', Does your carer know you are on the internet right now?')
elif cmd == 'case':
self.open_csgo_case(usr)
elif cmd == 'csgostats':
steam_id = None
if cmdargs == []:
self.post_message("!csgostats <category> - Categories are KD, WL, LastMatch, Rifle, Pistol, SMG, Shotgun, Maps, Knife, 1337Boi, Nades, BrassandLead, Bomb")
else:
stats_category = cmdargs[0].lower()
if stats_category == 'kd':
try:
kills, deaths, timeplayed, kdratio = csgo_stats_kd(steam_id)
self.post_message ('Total Kills: ' + kills)
self.post_message ('Total Deaths: ' + deaths)
self.post_message ('KDR: ' + kdratio)
self.post_message ('Time Played: ' + timeplayed + ' hours')
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
elif stats_category == 'wl':
try:
total_matches_won, total_matches_played, total_wins_pistolround, total_rounds_played = csgo_stats_wl(steam_id)
self.post_message ('Win/Loss Stats:')
self.post_message ('Matches won: ' + total_matches_won)
self.post_message ('Matches played: ' + total_matches_played)
self.post_message ('Pistol round wins: ' + total_wins_pistolround)
self.post_message ('Total rounds played: ' + total_rounds_played)
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
elif stats_category == 'lastmatch':
try:
lm_kills, lm_deaths, lm_damge, lm_money_spent = csgo_stats_lastmatch(steam_id)
self.post_message ('Stats from the last match:')
self.post_message ('Kills: ' + lm_kills)
self.post_message ('Deaths: ' + lm_deaths)
self.post_message ('Damage Dealt: ' + lm_damge)
self.post_message ('Money Spent: $' + lm_money_spent)
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
elif stats_category == 'rifle':
try:
total_kills_m4a1, total_kills_ak47, total_kills_awp, total_kills_aug, total_kills_sg556 = csgo_stats_rifle(steam_id)
self.post_message ('Rifle Kills:')
self.post_message ('M4A1: ' + total_kills_m4a1)
self.post_message ('AK47: ' + total_kills_ak47)
self.post_message ('AWP: ' + total_kills_awp)
self.post_message ('AUG: ' + total_kills_aug)
self.post_message ('SG556: ' + total_kills_sg556)
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
elif stats_category == 'pistol':
try:
total_kills_glock, total_kills_hkp2000, total_kills_deagle, total_kills_fiveseven, total_kills_p250, total_kills_tec9 = csgo_stats_pistol(steam_id)
self.post_message ('Pistol Kills:')
self.post_message ('Glock: ' + total_kills_glock)
self.post_message ('P2000: ' + total_kills_hkp2000)
self.post_message ('Desert Eagle: ' + total_kills_deagle)
self.post_message ('Five-Seven: ' + total_kills_fiveseven)
self.post_message ('P250: ' + total_kills_p250)
self.post_message ('Tec 9: ' + total_kills_tec9)
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
elif stats_category == 'smg':
try:
total_kills_mac10, total_kills_mp7, total_kills_mp9, total_kills_ump45, total_kills_p90, total_kills_bizon = csgo_stats_smg(steam_id)
self.post_message ('SMG Kills:')
self.post_message ('Mac 10: ' + total_kills_mac10)
self.post_message ('MP7: ' + total_kills_mp7)
self.post_message ('MP9: ' + total_kills_mp9)
self.post_message ('UMP 45: ' + total_kills_ump45)
self.post_message ('P90: ' + total_kills_p90)
self.post_message ('PP Bizon: ' + total_kills_bizon)
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
elif stats_category == 'shotgun':
try:
total_kills_xm1014, total_kills_nova, total_kills_sawedoff, total_kills_mag7 = csgo_stats_shotgun(steam_id)
self.post_message ('Shotgun Kills:')
self.post_message ('XM1014: ' + total_kills_xm1014)
self.post_message ('Nova: ' + total_kills_nova)
self.post_message ('Sawed Off: ' + total_kills_sawedoff)
self.post_message ('Mag7: ' + total_kills_mag7)
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
elif stats_category == 'maps':
try:
mapsmsg, total_wins_map_de_dust2, total_wins_map_de_inferno, total_wins_map_de_train, total_wins_map_de_nuke, total_wins_map_de_cbble = csgo_stats_maps(steam_id)
self.post_message ('Map Wins:')
self.post_message ('Dust 2: ' + total_wins_map_de_dust2)
self.post_message ('Inferno: ' + total_wins_map_de_inferno)
self.post_message ('Train: ' + total_wins_map_de_train)
self.post_message ('Nuke: ' + total_wins_map_de_nuke)
self.post_message ('Cobblestone: ' + total_wins_map_de_cbble)
self.post_message (mapsmsg)
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
elif stats_category == 'knife':
try:
total_kills_knife, total_kills_knife_fight = csgo_stats_knife(steam_id)
self.post_message ('Knife Kills: ' + total_kills_knife)
self.post_message ('Knife VS Knife Kills: ' + total_kills_knife_fight)
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
elif stats_category == '1337boi':
try:
total_broken_windows, total_mvps, total_kills_against_zoomed_sniper, total_weapons_donated, total_kills_enemy_blinded, total_damage_done, total_money_earned, total_kills_headshot, total_kills_enemy_weapon = csgo_stats_1337boi(steam_id)
self.post_message ('Broken Windows: ' + total_broken_windows)
self.post_message ('MVPs: ' + total_mvps)
self.post_message ('Kills VS Zoomed in snipers: ' + total_kills_against_zoomed_sniper)
self.post_message ('Donated Weapons: ' + total_weapons_donated)
self.post_message ('Kills VS Blind enemies : ' + total_kills_enemy_blinded)
self.post_message ('Damage Dealt: ' + total_damage_done)
self.post_message ('Career Earnings: $' + total_money_earned)
self.post_message ('Headshot kills: ' + total_kills_headshot)
self.post_message ('Kills w/ Enemy weapons: ' + total_kills_enemy_weapon)
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
elif stats_category == 'nades':
try:
total_kills_hegrenade, total_kills_molotov = csgo_stats_nades(steam_id)
self.post_message ('Grenade Kills:')
self.post_message ('HE Grenade: ' + total_kills_hegrenade)
self.post_message ('Molotov/Incendiary: ' + total_kills_molotov)
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
elif stats_category == 'brassandlead':
try:
total_shots_hit, total_shots_fired = csgo_stats_brassandlead(steam_id)
self.post_message ('Shots fired:')
self.post_message ('Total Shots hit: ' + total_shots_hit)
self.post_message ('Total Shots fired: ' + total_shots_fired)
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
elif stats_category == 'bomb':
try:
total_planted_bombs, total_defused_bombs = csgo_stats_bomb(steam_id)
self.post_message ('C4 Planted: ' + total_planted_bombs)
self.post_message ('C4 Defused: ' + total_defused_bombs)
except ValueError:
self.post_message('API Error - Try changing your steam privacy settings')
else:
pass
elif cmd == 'stats':
viewers, subcount, views, followers = self.get_channel_stats(self.channel_id)
self.post_message(str(viewers) + ' Viewers')
self.post_message(str(followers) + ' Followers')
self.post_message(str(subcount) + ' Subscribers')
self.post_message(str(views) + ' Channel Views')
elif cmd == 'debug':
self.is_channel_live(self.channel_id)
else:
print ('Ignored Command: ' + cmd)
def do_mod_command(self, e, cmd, cmdargs, usr):
    """Dispatch a moderator-only chat command.

    Supports !addcom/!delcom (custom commands), !title/!game (stream
    metadata), shoutouts, and /clear.
    """
    global chat_db
    # Removed the no-op `cmdargs = cmdargs` self-assignment.
    if cmd == "addcom":
        self.add_command(chat_db, cmdargs)
    elif cmd == "delcom":
        self.delete_command(chat_db, cmdargs)
    elif cmd == "title":
        self.set_stream_title(cmdargs)
    elif cmd == "game":
        self.set_stream_game(cmdargs)
    elif cmd in ['caster','shoutout', 'streamer', 'so' ]:
        url_base = 'https://www.twitch.tv/'
        caster = cmdargs[0]
        caster_id = self.get_channel_id(caster)
        channels_url = 'https://api.twitch.tv/kraken/channels/' + caster_id
        headers = {'Client-ID': self.client_id, 'Accept': 'application/vnd.twitchtv.v5+json'}
        r = requests.get(channels_url, headers=headers).json()
        self.post_message('You should checkout ' + caster + ' over at their channel - ' + url_base + caster.lower() + " - Don't forget to drop them a follow!" )
        sleep (2)
        # `is not None` (identity test) instead of `!= None`.
        if r['game'] is not None:
            self.post_message(caster + ' was last playing ' + r['game'])
    elif cmd == "clear":
        self.post_message('/clear')
    else:
        print ('Ignored Mod Command: ' + cmd)
def add_command(self, chat_db, cmdargs):
    """Create a custom chat command: cmdargs[0] is the name, the rest
    is the response text.  Uses a parameterized INSERT.
    """
    parsedcom = " ".join(map(str, cmdargs[1:]))
    print (parsedcom)
    sql ="""
    INSERT INTO chat_commands (command, command_result)
    VALUES (?, ?)"""
    con = sqlite3.connect(chat_db)
    try:
        cursor = con.cursor()
        cursor.execute(sql, (cmdargs[0].lower(), parsedcom))
        con.commit()
    finally:
        # Close the connection even if the INSERT raises (the original
        # leaked it on error).
        con.close()
    self.post_message ('New command created: !' + cmdargs[0])
    print ('New command created: !' + cmdargs[0])
def delete_command(self, chat_db, cmdargs):
    """Delete a custom chat command by name (cmdargs[0], case-insensitive)."""
    sql = "DELETE FROM chat_commands WHERE command = ?"
    cmdname = cmdargs[0].lower()
    con = sqlite3.connect(chat_db)
    try:
        cursor = con.cursor()
        cursor.execute(sql, [cmdname])
        con.commit()
    finally:
        # Close the connection even if the DELETE raises (the original
        # leaked it on error).
        con.close()
    self.post_message ('Command deleted: !' + cmdargs[0])
def set_stream_title(self, cmdargs):
    """Set the channel's stream title via the Twitch Kraken API."""
    # NOTE(review): `channel` is not defined in this scope — presumably a
    # module-level global naming the broadcaster; confirm.
    channel_token = self.get_twitch_token(channel)
    new_title = " ".join(map(str, cmdargs))
    url = 'https://api.twitch.tv/kraken/channels/' + self.channel_id
    title_headers = {'Client-ID': self.client_id, 'Accept': 'application/vnd.twitchtv.v5+json', 'Authorization': 'OAuth '+ channel_token, 'Content-Type': 'application/json'}
    body_data = {'channel': {'status': new_title}}
    title_payload = json.dumps(body_data)
    # Response is not checked; failures are silent.
    r = requests.put(url, data=title_payload, headers=title_headers)
def set_stream_game(self, cmdargs):
    """Set the channel's current game via the Twitch Kraken API."""
    # NOTE(review): same undefined `channel` name as set_stream_title.
    channel_token = self.get_twitch_token(channel)
    new_game = " ".join(map(str, cmdargs))
    print (new_game)
    url = 'https://api.twitch.tv/kraken/channels/' + self.channel_id
    title_headers = {'Client-ID': self.client_id, 'Accept': 'application/vnd.twitchtv.v5+json', 'Authorization': 'OAuth '+ channel_token, 'Content-Type': 'application/json'}
    body_data = {'channel': {'game': new_game}}
    title_payload = json.dumps(body_data)
    # Response is not checked; failures are silent.
    r = requests.put(url, data=title_payload, headers=title_headers)
def get_channel_subcount(self):
    """Return the channel's total subscriber count from the Kraken API."""
    # NOTE(review): same undefined `channel` name as set_stream_title.
    channel_token = self.get_twitch_token(channel)
    url = 'https://api.twitch.tv/kraken/channels/' + self.channel_id + '/subscriptions'
    headers = {'Client-ID': self.client_id, 'Accept': 'application/vnd.twitchtv.v5+json', 'Authorization': 'OAuth '+ channel_token, 'Content-Type': 'application/json'}
    r = requests.get(url, headers=headers).json()
    return r["_total"]
def open_csgo_case(self, usr):
#See Wiki
#Total 376
con = sqlite3.connect(csgo_case_db)
cursor = con.cursor()
cursor.execute("SELECT last_case_time FROM last_case WHERE display_name = ?", [usr])
raw_user_last_case_time = cursor.fetchall()
if raw_user_last_case_time == []:
Local_user_last_case_time = datetime.datetime.now() - datetime.timedelta(hours=6)
#users with unlimited cases
elif usr in ['rephl3x','phl3xbot']:
Local_user_last_case_time = datetime.datetime.now() - datetime.timedelta(hours=6)
else:
Local_user_last_case_time = datetime.datetime.strptime(raw_user_last_case_time[0][0], '%Y-%m-%d %H:%M:%S.%f')
if not Local_user_last_case_time <= (datetime.datetime.now() - datetime.timedelta(minutes=29)):
time_diff = datetime.datetime.now() - Local_user_last_case_time
minute_diff = time_diff.seconds / 60
self.post_message ('You only get one case every 30 minutes my dude rephl3Xwhut (last case was ' + str(math.floor(minute_diff)) + ' minutes ago)' )
else:
twitch_colours = {"yellow" : "goldenrod", "red" : "red", "pink" : "hotpink", "purple" : "blueviolet", "blue" : "blue", }
item_wear = random.choice(wear)
x_stattrak = random.randint(1, 10)
if x_stattrak == 10:
item_stattrak = stattrak[0]
else:
item_stattrak = None
cheats = []
if usr in cheats:
x_item = random.randint(1, 376)
else:
x_item = random.randint(1, 376)
if 1 <= x_item <= 300:
c = 'blue'
elif 301 <= x_item | |
# <gh_stars>0  (scraper metadata artifact; commented out -- a bare tag is invalid Python syntax)
################################################################################
# Copyright (c) 2009-2019, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Delay model and correction.
This implements the basic delay model used to calculate the delay
contribution from each antenna, as well as a class that performs
delay correction for a correlator.
"""
from __future__ import print_function, division, absolute_import
from builtins import object, zip
from past.builtins import basestring
import logging
import json
import numpy as np
from .model import Parameter, Model
from .conversion import azel_to_enu
from .ephem_extra import lightspeed, is_iterable, _just_gimme_an_ascii_string
from .target import construct_radec_target
# Speed of EM wave in fixed path (typically due to cables / clock distribution).
# This number is not critical - only meant to convert delays to "nice" lengths.
# Typical factors are: fibre = 0.7, coax = 0.84.
FIXEDSPEED = 0.7 * lightspeed
logger = logging.getLogger(__name__)
class DelayModel(Model):
    """Model of the delay contribution from a single antenna.

    This object is purely a repository for model parameters, supporting easy
    construction, inspection and saving of the delay model. The actual
    calculations happen in :class:`DelayCorrection`, which is more efficient
    as it handles multiple antenna delays simultaneously.

    Parameters
    ----------
    model : file-like or model object, sequence of floats, or string, optional
        Model specification. A file-like or model object is loaded directly;
        a sequence of floats is taken as the parameter values (defaults to
        zeroes); a string is interpreted as a comma- or whitespace-separated
        description string. The default is an empty model.
    """

    def __init__(self, model=None):
        # Register the six delay parameters (all in metres) with the base class.
        specs = [
            ('POS_E', 'antenna position: offset East of reference position'),
            ('POS_N', 'antenna position: offset North of reference position'),
            ('POS_U', 'antenna position: offset above reference position'),
            ('FIX_H', 'fixed additional path length for H feed due to electronics / cables'),
            ('FIX_V', 'fixed additional path length for V feed due to electronics / cables'),
            ('NIAO', 'non-intersecting axis offset - distance between az and el axes'),
        ]
        Model.__init__(self, [Parameter(name, 'm', doc) for name, doc in specs])
        self.set(model)
        # EM wave speed per parameter: position offsets and NIAO propagate at
        # lightspeed, the two fixed paths at FIXEDSPEED (cable/fibre factor).
        self._speeds = np.array([lightspeed] * 3 + [FIXEDSPEED] * 2 + [lightspeed])

    @property
    def delay_params(self):
        """The model parameters converted to delays in seconds."""
        return np.array(self.values()) / self._speeds

    def fromdelays(self, delays):
        """Update model from a sequence of delay parameters.

        Parameters
        ----------
        delays : sequence of floats
            Model parameters in delay form (i.e. in seconds)
        """
        self.fromlist(delays * self._speeds)
class DelayCorrection(object):
"""Calculate delay corrections for a set of correlator inputs / antennas.
This uses delay models from multiple antennas connected to a correlator to
produce delay and phase corrections for a given target and timestamp, for
all correlator inputs at once. The delay corrections are guaranteed to be
strictly positive. Each antenna is assumed to have two polarisations (H
and V), resulting in two correlator inputs per antenna.
For now, the reference antenna position must match the reference positions
of each antenna in the array, so that the ENU offset in each antenna's
delay model directly represent the baseline between that antenna and the
reference antenna. This should be fine as this is the standard case, but
may cause problems for e.g. VLBI with a geocentric reference antenna.
Parameters
----------
ants : sequence of *M* :class:`Antenna` objects or string
Sequence of antennas forming an array and connected to correlator;
alternatively, a description string representing the entire object
ref_ant : :class:`Antenna` object or None, optional
Reference antenna for the array (only optional if `ants` is a string)
sky_centre_freq : float, optional
RF centre frequency that serves as reference for fringe phase
extra_delay : None or float, optional
Additional delay, in seconds, added to all inputs to ensure strictly
positive delay corrections (automatically calculated if None)
Attributes
----------
ant_models : dict mapping string to :class:`DelayModel` object
Dict mapping antenna name to corresponding delay model
Raises
------
ValueError
If all antennas do not share the same reference position as `ref_ant`
or `ref_ant` was not specified, or description string is invalid
"""
# Maximum size for delay cache
CACHE_SIZE = 1000
def __init__(self, ants, ref_ant=None, sky_centre_freq=0.0, extra_delay=None):
    """Build the corrector from a sequence of antennas or a description string.

    When `ants` is a string it is parsed as the JSON produced by the
    :attr:`description` property, and the remaining arguments are
    overridden by the values stored in that string.
    """
    # Unpack JSON-encoded description string
    if isinstance(ants, basestring):
        try:
            descr = json.loads(ants)
        except ValueError:
            raise ValueError("Trying to construct DelayCorrection with an "
                             "invalid description string %r" % (ants,))
        # JSON only returns Unicode, even on Python 2... Remedy this.
        ref_ant_str = _just_gimme_an_ascii_string(descr['ref_ant'])
        # Antenna needs DelayModel which also lives in this module...
        # This is messy but avoids a circular dependency and having to
        # split this file into two small bits.
        from .antenna import Antenna
        ref_ant = Antenna(ref_ant_str)
        sky_centre_freq = descr['sky_centre_freq']
        extra_delay = descr['extra_delay']
        # Rebuild one DelayModel per antenna from its description string.
        ant_models = {}
        for ant_name, ant_model_str in descr['ant_models'].items():
            ant_model = DelayModel()
            ant_model.fromstring(_just_gimme_an_ascii_string(ant_model_str))
            ant_models[_just_gimme_an_ascii_string(ant_name)] = ant_model
    else:
        # `ants` is a sequence of Antennas - verify and extract delay models
        if ref_ant is None:
            raise ValueError('No reference antenna provided')
        # Tolerances translate to micrometre differences (assume float64)
        # NOTE(review): the check includes ref_ant itself, i.e. the reference
        # antenna's own reference position must equal its position --
        # presumably intentional; confirm.
        if any([not np.allclose(ant.ref_position_wgs84,
                                ref_ant.position_wgs84, rtol=0., atol=1e-14)
                for ant in list(ants) + [ref_ant]]):
            msg = "Antennas '%s' do not all share the same reference " \
                  "position of the reference antenna %r" % \
                  ("', '".join(ant.description for ant in ants),
                   ref_ant.description)
            raise ValueError(msg)
        ant_models = {ant.name: ant.delay_model for ant in ants}
    # Initialise private attributes
    # Two correlator inputs per antenna: H and V polarisations.
    self._inputs = [ant + pol for ant in ant_models for pol in 'hv']
    # One row of delay parameters (seconds) per antenna.
    self._params = np.array([ant_models[ant].delay_params
                             for ant in ant_models])
    # With no antennas, let params still have correct shape
    # (len(DelayModel()) is presumably the parameter count -- confirm in Model)
    if not ant_models:
        self._params = np.empty((0, len(DelayModel())))
    # Per-timestamp delay cache used by _cached_delays (bounded by CACHE_SIZE).
    self._cache = {}
    # Now calculate and store public attributes
    self.ant_models = ant_models
    self.ref_ant = ref_ant
    self.sky_centre_freq = sky_centre_freq
    # Add a 1% safety margin to guarantee positive delay corrections
    self.extra_delay = 1.01 * self.max_delay \
        if extra_delay is None else extra_delay
@property
def max_delay(self):
    """The maximum (absolute) delay achievable in the array, in seconds."""
    if not self.ant_models:
        return 0.0
    # Geometric part: worst case is the wavefront travelling straight
    # along the baseline from antenna to reference antenna.
    geometric = np.sqrt((self._params[:, :3] ** 2).sum(axis=1))
    # Largest of the two fixed (H/V) path delays per antenna.
    fixed = self._params[:, 3:5].max(axis=1)
    # Worst case for the NIAO term is pointing at the horizon.
    niao = self._params[:, 5]
    return (geometric + fixed + niao).max()
@property
def description(self):
    """Complete string representation of object that allows reconstruction."""
    # Serialise every antenna's delay model as its own description string.
    model_strings = {name: model.description
                     for name, model in self.ant_models.items()}
    state = {
        'ref_ant': self.ref_ant.description,
        'sky_centre_freq': self.sky_centre_freq,
        'extra_delay': self.extra_delay,
        'ant_models': model_strings,
    }
    # sort_keys makes the output deterministic and diff-friendly.
    return json.dumps(state, sort_keys=True)
def _calculate_delays(self, target, timestamp, offset=None):
    """Calculate delays for all inputs / antennas for a given target.

    Parameters
    ----------
    target : :class:`Target` object
        Target providing direction for geometric delays
    timestamp : :class:`Timestamp` object or equivalent
        Timestamp in UTC seconds since Unix epoch
    offset : dict or None, optional
        Keyword arguments for :meth:`Target.plane_to_sphere` to offset
        delay centre relative to target (see method for details)

    Returns
    -------
    delays : sequence of *2M* floats
        Delays (one per correlator input) in seconds
    """
    # Resolve the delay centre to an (az, el) direction, applying the
    # optional offset in the requested coordinate system.
    if offset:
        coord_system = offset.get('coord_system', 'azel')
        if coord_system == 'radec':
            ra, dec = target.plane_to_sphere(timestamp=timestamp,
                                             antenna=self.ref_ant, **offset)
            offset_target = construct_radec_target(ra, dec)
            az, el = offset_target.azel(timestamp, self.ref_ant)
        else:
            az, el = target.plane_to_sphere(timestamp=timestamp,
                                            antenna=self.ref_ant, **offset)
    else:
        az, el = target.azel(timestamp, self.ref_ant)
    # Unit vector towards the delay centre in local ENU coordinates.
    towards_target = np.array(azel_to_enu(az, el))
    cos_el = np.cos(el)
    # One design-matrix row per polarisation: geometric term (position dotted
    # with the target direction), fixed H or V path, and the NIAO term.
    h_row = np.r_[-towards_target, 1.0, 0.0, cos_el]
    v_row = np.r_[-towards_target, 0.0, 1.0, cos_el]
    design_mat = np.array([h_row, v_row])
    # Result interleaves H and V delays per antenna -> 2M values.
    return np.dot(self._params, design_mat.T).ravel()
def _cached_delays(self, target, timestamp, offset=None):
"""Try to load delays from cache, else calculate it.
This uses the timestamp to look up previously calculated delays in
a cache. If not found, calculate the delays and store it in the
cache instead. Each cache value is used only once. Clean out the
| |
import warnings
class response_handler():
"""
The Class handles the creation of Dialogflow Responses
.. note:: There are 2 types of Rich Responses which can be created using this class. They are: Generic Rich Responses and Google Assistant Rich Responses. Generic Responses work on all platforms except Google Assistant. Functions that create generic responses start with 'generic'. For Google Assistant, you should use Google Assistant Rich Responses. These functions start with 'google_assistant'
"""
def __init__(self):
    """Initialise an empty response: no messages, contexts or events yet."""
    # Accumulators for the various response payloads.
    for list_attr in ('cardbtnlist', 'gsuglist', 'googleijson',
                      'genericmessages', 'contextlist'):
        setattr(self, list_attr, [])
    # Indices of the most recently added card / carousel / table (-1 = none).
    self.gencardindex = self.gcarouselindex = self.gtableindex = -1
    # Flags toggled as the corresponding content is added.
    self.gpermissionavail = self.fulfiltextavail = self.eventavail = \
        self.contextavail = self.gcardadded = False
#Context
def add_context(self, sessionID, contextName, lifespan=0, params=None):
    """Adds/Changes a Dialogflow Context.

    :param sessionID: The Session ID
    :type sessionID: str
    :param contextName: The name of the Context to add/edit
    :type contextName: str
    :param lifespan: The number of conversational turns for which the context
        remains active, defaults to 0
    :type lifespan: int, optional
    :param params: The Dictionary of Data to store in the context, defaults
        to None (treated as an empty dict)
    :type params: dict, optional

    Fix: the previous signature used a mutable default ``params={}``, which
    Python shares between calls -- mutating one stored context's parameters
    would silently leak into later calls. A ``None`` sentinel avoids that
    while remaining backward compatible.
    """
    if params is None:
        params = {}
    self.contextlist.append({
        "name": sessionID + "/contexts/" + contextName,
        "lifespanCount": lifespan,
        "parameters": params,
    })
    self.contextavail = True
#Event Triggers
def trigger_event(self, event, params, langcode="en-US"):
    """Triggers a Dialogflow Event.

    :param event: The Name of the Event to Trigger
    :type event: str
    :param params: The Dictionary of Parameters
    :type params: dict
    :param langcode: The Language Code of the Agent, defaults to "en-US"
    :type langcode: str, optional

    .. note:: When the response contains an event, everything else is
       ignored (except Contexts).
    """
    # Stash the event details; they are serialised when the response is built.
    self.trigeventname, self.trigeventparams = event, params
    self.triglangcode = langcode
    self.eventavail = True
#Generic Responses
def simple_response(self, speech):
    """A generic text response displayed or spoken to the user.

    :param speech: The text to be displayed or said to the user
    :type speech: str

    .. note:: ``simple_response`` works on all platforms including Google
       Assistant, but ``google_assistant_response`` is recommended for
       Google Assistant and ``generic_rich_text_response`` for text
       responses on other platforms.
    """
    self.fulfiltextavail = True
    self.ftext = speech
#Generic Rich Responses
def generic_rich_text_response(self, text):
    """A generic rich text response to display to the user.

    Unlike ``simple_response``, multiple rich text responses may be added.

    :param text: The text to be displayed to the user
    :type text: str
    """
    message = {"text": {"text": [text]}}
    self.genericmessages.append(message)
def generic_card(self, title, **kwargs):
    """A generic card to be displayed to the user.

    :param title: The title of the card
    :type title: str
    :param subtitle: The subtitle of the card
    :type subtitle: str, optional
    :param imageURL: The link of the image to be displayed on the card
    :type imageURL: str, optional
    """
    card = {"title": title, "subtitle": kwargs.get("subtitle", "")}
    image_url = kwargs.get("imageURL", "")
    if image_url != "":
        card["imageUri"] = image_url
    self.genericmessages.append({"card": card})
    # Remember where the card lives so buttons can be attached later.
    self.gencardindex = len(self.genericmessages) - 1
def generic_card_add_button(self, btntitle, btnlink):
    """Adds a button to a generic card. When clicked, directs to a website.

    :param btntitle: The button's title
    :type btntitle: str
    :param btnlink: The link to redirect to on click
    :type btnlink: str
    :raises AttributeError: if no card was created via ``generic_card`` first

    Fix: the "buttons list may not exist yet" case was handled with a bare
    ``except:``, which would also swallow unrelated errors; ``setdefault``
    creates the list on first use without any exception handling.
    """
    if self.gencardindex == -1:
        raise AttributeError("generic_card is not created")
    card = self.genericmessages[self.gencardindex]["card"]
    card.setdefault("buttons", []).append(
        {"text": btntitle, "postback": btnlink})
def generic_add_suggestions(self, suggestionList, **kwargs):
    """Adds suggestion chips / quick replies to be displayed.

    :param suggestionList: The list of suggestions / quick replies
    :type suggestionList: list
    :param title: The title of the suggestions
    :type title: str, optional
    """
    payload = {
        "quick_replies": {
            "title": kwargs.get("title", ""),
            "quickReplies": suggestionList,
        }
    }
    self.genericmessages.append(payload)
def generic_image(self, imageURL, imgalt):
    """Sends an image to the user.

    :param imageURL: The URL of the image
    :type imageURL: str
    :param imgalt: The alt text for the image
    :type imgalt: str
    """
    image = {"image_uri": imageURL, "accessibility_text": imgalt}
    self.genericmessages.append({"image": image})
#Google Assistant Rich Responses
def google_assistant_response(self, speech, **kwargs):
    """A Google Assistant speech to be said (and displayed) to the user.

    :param speech: The text to be said to the user
    :type speech: str
    :param displayText: The text shown in the chat bubble while speaking
    :type displayText: str, optional
    :param endConversation: Whether this response should end the conversation
    :type endConversation: bool

    .. note:: This MUST come before any Google Assistant rich response,
       otherwise Google Assistant reports an error.
    """
    display_text = kwargs.get("displayText", "")
    self.gendcon = kwargs.get("endConversation", False)
    simple = {"textToSpeech": speech}
    # Only attach displayText when the caller actually supplied one.
    if display_text != "":
        simple["displayText"] = display_text
    self.googleijson.append({"simpleResponse": simple})
def google_assistant_card(self, title, **kwargs):
    """A Google Assistant card to be displayed to the user.

    :param title: The title of the card
    :type title: str
    :param subtitle: The subtitle of the card
    :type subtitle: str, optional
    :param formatted_text: The text to be displayed along with the card
    :type formatted_text: str, optional
    :param btnName: The name of the button to be displayed on the card
    :type btnName: str, optional
    :param btnLink: The link to redirect to on button click
    :type btnLink: str, optional
    :param imageURL: The URL of the image to be displayed on the card
    :type imageURL: str, optional
    :param imageAlt: The alt text of the image to be displayed on the card
    :type imageAlt: str, optional
    :param imageDisplayOption: The display options for the image
        (`see Google's ImageDisplayOptions reference
        <https://developers.google.com/assistant/conversational/webhook/reference/rest/Shared.Types/ImageDisplayOptions>`_)
    :type imageDisplayOption: str, optional
    """
    # Google Assistant accepts at most one basic card per response.
    if self.gcardadded:
        warnings.warn("You can have only one Google Assistant Card. More than one cards will lead to an error in Google Assistant")
        return
    self.gcardadded = True
    card = {
        "title": title,
        "subtitle": kwargs.get("subtitle", ""),
        "formatted_text": kwargs.get("formatted_text", ""),
    }
    btn_name = kwargs.get("btnName", "")
    if btn_name != "":
        card["buttons"] = [{"title": btn_name,
                            "openUrlAction": {"url": kwargs.get("btnLink", "")}}]
    img_url = kwargs.get("imageURL", "")
    if img_url != "":
        card["image"] = {"url": img_url,
                         "accessibilityText": kwargs.get("imageAlt", "")}
    display_option = kwargs.get("imageDisplayOption", "")
    if display_option != "":
        card["imageDisplayOptions"] = display_option
    self.googleijson.append({"basicCard": card})
def google_assistant_new_carousel(self):
    """Creates a new (empty) Google Assistant carousel."""
    # Google Assistant accepts at most one carousel per response.
    if self.gcarouselindex != -1:
        warnings.warn("You can have only one Google Assistant Carousel. More than one Carousels will lead to an error in Google Assistant")
        return
    carousel = {"carouselBrowse": {"items": []}}
    self.googleijson.append(carousel)
    # Remember where the carousel lives so items can be appended later.
    self.gcarouselindex = len(self.googleijson) - 1
def google_assistant_carousel_add_item(self, title, url, imageURL, imgalt,
                                       description="", footer=""):
    """Adds a new item to a Google Assistant carousel.

    :param title: The title of the carousel item
    :type title: str
    :param url: The URL to redirect to when the carousel item is clicked
    :type url: str
    :param imageURL: The URL of the image to be displayed on the carousel item
    :type imageURL: str
    :param imgalt: The alt text of the image to be displayed on the carousel item
    :type imgalt: str
    :param description: The description to be displayed on the carousel item,
        defaults to ""
    :type description: str, optional
    :param footer: The footer to be displayed on the carousel item, defaults to ""
    :type footer: str, optional
    :raises AttributeError: if a new item is added before calling
        ``google_assistant_new_carousel``
    """
    # Explicit index check: the previous bare try/except indexed
    # googleijson[gcarouselindex] (i.e. [-1] when no carousel existed),
    # which could touch the wrong message instead of failing cleanly,
    # and swallowed unrelated errors. Mirrors generic_card_add_button.
    if self.gcarouselindex == -1:
        raise AttributeError("google_assistant_new_carousel is not created")
    item = {
        "title": title,
        "openUrlAction": {"url": url},
        "description": description,
        "footer": footer,
        "image": {"url": imageURL, "accessibilityText": imgalt},
    }
    self.googleijson[self.gcarouselindex]["carouselBrowse"]["items"].append(item)
def google_assistant_add_suggestions(self, suggestionList):
    """Adds Google Assistant suggestion chips to be displayed.

    :param suggestionList: The list containing the suggestions to be displayed
    :type suggestionList: list
    """
    # Each chip is a {"title": ...} object in the Assistant payload.
    self.gsuglist.extend({"title": suggestion} for suggestion in suggestionList)
def google_assistant_new_table(self, **kwargs):
    """Creates a new Google Assistant table card.

    :param title: The title of the table card
    :type title: str, optional
    :param subtitle: The subtitle of the table card
    :type subtitle: str, optional
    :param imageURL: The URL of the image to be displayed on the table card
    :type imageURL: str, optional
    :param imageAlt: The alt text of the image to be displayed on the table card
    :type imageAlt: str, optional

    Fix: the table index is now ``len(googleijson) - 1`` instead of
    ``googleijson.index(fjson)`` -- ``index`` is O(n) and compares by
    equality, so it could match an earlier identical message; this also
    matches how the carousel code records its index.
    """
    # Google Assistant accepts at most one table card per response.
    if self.gtableindex != -1:
        warnings.warn("You can have only one Google Assistant Table. More than one Tables will lead to an error in Google Assistant")
        return
    table = {"rows": [], "columnProperties": []}
    img_url = kwargs.get("imageURL", "")
    if img_url != "":
        table["image"] = {"url": img_url,
                          "accessibilityText": kwargs.get("imageAlt", "")}
    title = kwargs.get("title", "")
    if title != "":
        table["title"] = title
    subtitle = kwargs.get("subtitle", "")
    if subtitle != "":
        table["subtitle"] = subtitle
    self.googleijson.append({"tableCard": table})
    self.gtableindex = len(self.googleijson) - 1
def google_assistant_table_add_header_row(self,headerList):
"""
Adds a Header row to a Google Assistant Table Card
:param headerList: The list containing the header rows to be added
:type headerList: list
:raises AttributeError: This Error | |
view.
"""
CONTENT_MODE_BOTTOM_RIGHT = ui_constants.CONTENT_MODE_BOTTOM_RIGHT
"""
The option to align the content in the bottom-right corner of the view.
"""
# MARK: - Horizontal Alignment
HORIZONTAL_ALIGNMENT = ui_constants.HORIZONTAL_ALIGNMENT
HORIZONTAL_ALIGNMENT_CENTER = ui_constants.HORIZONTAL_ALIGNMENT_CENTER
"""
Aligns the content horizontally in the center of the control.
"""
HORIZONTAL_ALIGNMENT_FILL = ui_constants.HORIZONTAL_ALIGNMENT_FILL
"""
Aligns the content horizontally to fill the content rectangles; text may wrap and images may be stretched.
"""
HORIZONTAL_ALIGNMENT_LEADING = ui_constants.HORIZONTAL_ALIGNMENT_LEADING
"""
Aligns the content horizontally from the leading edge of the control.
"""
HORIZONTAL_ALIGNMENT_LEFT = ui_constants.HORIZONTAL_ALIGNMENT_LEFT
"""
Aligns the content horizontally from the left of the control (the default).
"""
HORIZONTAL_ALIGNMENT_RIGHT = ui_constants.HORIZONTAL_ALIGNMENT_RIGHT
"""
Aligns the content horizontally from the right of the control.
"""
HORIZONTAL_ALIGNMENT_TRAILING = ui_constants.HORIZONTAL_ALIGNMENT_TRAILING
"""
Aligns the content horizontally from the trailing edge of the control.
"""
# MARK: - Vertical Alignment
VERTICAL_ALIGNMENT = ui_constants.VERTICAL_ALIGNMENT
VERTICAL_ALIGNMENT_BOTTOM = ui_constants.VERTICAL_ALIGNMENT_BOTTOM
"""
Aligns the content vertically at the bottom in the control.
"""
VERTICAL_ALIGNMENT_CENTER = ui_constants.VERTICAL_ALIGNMENT_CENTER
"""
Aligns the content vertically in the center of the control.
"""
VERTICAL_ALIGNMENT_FILL = ui_constants.VERTICAL_ALIGNMENT_FILL
"""
Aligns the content vertically to fill the content rectangle; images may be stretched.
"""
VERTICAL_ALIGNMENT_TOP = ui_constants.VERTICAL_ALIGNMENT_TOP
"""
Aligns the content vertically at the top in the control (the default).
"""
# MARK: - Button Type
BUTTON_TYPE = ui_constants.BUTTON_TYPE
BUTTON_TYPE_SYSTEM = ui_constants.BUTTON_TYPE_SYSTEM
"""
A system style button, such as those shown in navigation bars and toolbars.
"""
BUTTON_TYPE_CONTACT_ADD = ui_constants.BUTTON_TYPE_CONTACT_ADD
"""
A contact add button.
"""
BUTTON_TYPE_CUSTOM = ui_constants.BUTTON_TYPE_CUSTOM
"""
No button style.
"""
BUTTON_TYPE_DETAIL_DISCLOSURE = ui_constants.BUTTON_TYPE_DETAIL_DISCLOSURE
"""
A detail disclosure button.
"""
BUTTON_TYPE_INFO_DARK = ui_constants.BUTTON_TYPE_INFO_DARK
"""
An information button that has a dark background.
"""
BUTTON_TYPE_INFO_LIGHT = ui_constants.BUTTON_TYPE_INFO_LIGHT
"""
An information button that has a light background.
"""
# MARK: - Text Alignment
TEXT_ALIGNMENT = ui_constants.TEXT_ALIGNMENT
TEXT_ALIGNMENT_LEFT = ui_constants.TEXT_ALIGNMENT_LEFT
"""
Text is visually left aligned.
"""
TEXT_ALIGNMENT_RIGHT = ui_constants.TEXT_ALIGNMENT_RIGHT
"""
Text is visually right aligned.
"""
TEXT_ALIGNMENT_CENTER = ui_constants.TEXT_ALIGNMENT_CENTER
"""
Text is visually center aligned.
"""
TEXT_ALIGNMENT_JUSTIFIED = ui_constants.TEXT_ALIGNMENT_JUSTIFIED
"""
Text is justified.
"""
TEXT_ALIGNMENT_NATURAL = ui_constants.TEXT_ALIGNMENT_NATURAL
"""
Use the default alignment associated with the current localization of the app. The default alignment for left-to-right scripts is left, and the default alignment for right-to-left scripts is right.
"""
# MARK: - Line Break Mode
LINE_BREAK_MODE = ui_constants.LINE_BREAK_MODE
LINE_BREAK_MODE_BY_WORD_WRAPPING = ui_constants.LINE_BREAK_MODE_BY_WORD_WRAPPING
"""
Wrapping occurs at word boundaries, unless the word itself doesn’t fit on a single line.
"""
LINE_BREAK_MODE_BY_CHAR_WRAPPING = ui_constants.LINE_BREAK_MODE_BY_CHAR_WRAPPING
"""
Wrapping occurs before the first character that doesn’t fit.
"""
LINE_BREAK_MODE_BY_CLIPPING = ui_constants.LINE_BREAK_MODE_BY_CLIPPING
"""
Lines are simply not drawn past the edge of the text container.
"""
LINE_BREAK_MODE_BY_TRUNCATING_HEAD = ui_constants.LINE_BREAK_MODE_BY_TRUNCATING_HEAD
"""
The line is displayed so that the end fits in the container and the missing text at the beginning of the line is indicated by an ellipsis glyph. Although this mode works for multiline text, it is more often used for single line text.
"""
LINE_BREAK_MODE_BY_TRUNCATING_TAIL = ui_constants.LINE_BREAK_MODE_BY_TRUNCATING_TAIL
"""
The line is displayed so that the beginning fits in the container and the missing text at the end of the line is indicated by an ellipsis glyph. Although this mode works for multiline text, it is more often used for single line text.
"""
LINE_BREAK_MODE_BY_TRUNCATING_MIDDLE = ui_constants.LINE_BREAK_MODE_BY_TRUNCATING_MIDDLE
"""
The line is displayed so that the beginning and end fit in the container and the missing text in the middle is indicated by an ellipsis glyph. This mode is used for single-line layout; using it with multiline text truncates the text into a single line.
"""
# MARK: - Touch Type
TOUCH_TYPE = ui_constants.TOUCH_TYPE
TOUCH_TYPE_DIRECT = ui_constants.TOUCH_TYPE_DIRECT
"""
A touch resulting from direct contact with the screen.
"""
TOUCH_TYPE_INDIRECT = ui_constants.TOUCH_TYPE_INDIRECT
"""
A touch that did not result from contact with the screen.
"""
TOUCH_TYPE_PENCIL = ui_constants.TOUCH_TYPE_PENCIL
"""
A touch from Apple Pencil.
"""
# MARK: - Gesture State
GESTURE_STATE = ui_constants.GESTURE_STATE
GESTURE_STATE_POSSIBLE = ui_constants.GESTURE_STATE_POSSIBLE
"""
The gesture recognizer has not yet recognized its gesture, but may be evaluating touch events. This is the default state.
"""
GESTURE_STATE_BEGAN = ui_constants.GESTURE_STATE_BEGAN
"""
The gesture recognizer has received touch objects recognized as a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop.
"""
GESTURE_STATE_CHANGED = ui_constants.GESTURE_STATE_CHANGED
"""
The gesture recognizer has received touches recognized as a change to a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop.
"""
GESTURE_STATE_ENDED = ui_constants.GESTURE_STATE_ENDED
"""
The gesture recognizer has received touches recognized as the end of a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop and resets its state to possible.
"""
GESTURE_STATE_CANCELLED = ui_constants.GESTURE_STATE_CANCELLED
"""
The gesture recognizer has received touches resulting in the cancellation of a continuous gesture. It sends its action message (or messages) at the next cycle of the run loop and resets its state to possible.
"""
GESTURE_STATE_RECOGNIZED = ui_constants.GESTURE_STATE_RECOGNIZED
"""
The gesture recognizer has received a multi-touch sequence that it recognizes as its gesture. It sends its action message (or messages) at the next cycle of the run loop and resets its state to possible.
"""
# MARK: - Table View Cell Style
TABLE_VIEW_CELL_STYLE = ui_constants.TABLE_VIEW_CELL_STYLE
TABLE_VIEW_CELL_STYLE_DEFAULT = ui_constants.TABLE_VIEW_CELL_STYLE_DEFAULT
"""
A simple style for a cell with a text label (black and left-aligned) and an optional image view.
"""
TABLE_VIEW_CELL_STYLE_SUBTITLE = ui_constants.TABLE_VIEW_CELL_STYLE_SUBTITLE
"""
A style for a cell with a left-aligned label across the top and a left-aligned label below it in smaller gray text.
"""
TABLE_VIEW_CELL_STYLE_VALUE1 = ui_constants.TABLE_VIEW_CELL_STYLE_VALUE1
"""
A style for a cell with a label on the left side of the cell with left-aligned and black text; on the right side is a label that has smaller blue text and is right-aligned. The Settings application uses cells in this style.
"""
TABLE_VIEW_CELL_STYLE_VALUE2 = ui_constants.TABLE_VIEW_CELL_STYLE_VALUE2
"""
A style for a cell with a label on the left side of the cell with text that is right-aligned and blue; on the right side of the cell is another label with smaller text that is left-aligned and black. The Phone/Contacts application uses cells in this style.
"""
# MARK: - Table View Cell Accessory Type
ACCESSORY_TYPE = ui_constants.ACCESSORY_TYPE
ACCESSORY_TYPE_NONE = ui_constants.ACCESSORY_TYPE_NONE
"""
No accessory view.
"""
ACCESSORY_TYPE_CHECKMARK = ui_constants.ACCESSORY_TYPE_CHECKMARK
"""
A checkmark image.
"""
ACCESSORY_TYPE_DETAIL_BUTTON = ui_constants.ACCESSORY_TYPE_DETAIL_BUTTON
"""
An information button.
"""
ACCESSORY_TYPE_DETAIL_DISCLOSURE_BUTTON = (
ui_constants.ACCESSORY_TYPE_DETAIL_DISCLOSURE_BUTTON
)
"""
An information button and a disclosure (chevron) control.
"""
ACCESSORY_TYPE_DISCLOSURE_INDICATOR = ui_constants.ACCESSORY_TYPE_DISCLOSURE_INDICATOR
"""
A chevron-shaped control for presenting new content.
"""
# MARK: - Table View Style
TABLE_VIEW_STYLE = ui_constants.TABLE_VIEW_STYLE
TABLE_VIEW_STYLE_PLAIN = ui_constants.TABLE_VIEW_STYLE_PLAIN
"""
A plain table view.
"""
TABLE_VIEW_STYLE_GROUPED = ui_constants.TABLE_VIEW_STYLE_GROUPED
"""
A table view whose sections present distinct groups of rows.
"""
# MARK: - Text Field Border Style
TEXT_FIELD_BORDER_STYLE = ui_constants.TEXT_FIELD_BORDER_STYLE
TEXT_FIELD_BORDER_STYLE_NONE = ui_constants.TEXT_FIELD_BORDER_STYLE_NONE
"""
The text field does not display a border.
"""
TEXT_FIELD_BORDER_STYLE_BEZEL = ui_constants.TEXT_FIELD_BORDER_STYLE_BEZEL
"""
Displays a bezel-style border for the text field. This style is typically used for standard data-entry fields.
"""
TEXT_FIELD_BORDER_STYLE_LINE = ui_constants.TEXT_FIELD_BORDER_STYLE_LINE
"""
Displays a thin rectangle around the text field.
"""
TEXT_FIELD_BORDER_STYLE_ROUNDED_RECT = ui_constants.TEXT_FIELD_BORDER_STYLE_ROUNDED_RECT
"""
Displays a rounded-style border for the text field.
"""
# MARK: - Button Item Style
BUTTON_ITEM_STYLE = ui_constants.BUTTON_ITEM_STYLE
BUTTON_ITEM_STYLE_PLAIN = ui_constants.BUTTON_ITEM_STYLE_PLAIN
"""
Glows when tapped. The default item style.
"""
BUTTON_ITEM_STYLE_DONE = ui_constants.BUTTON_ITEM_STYLE_DONE
"""
The style for a done button—for example, a button that completes some task and returns to the previous view.
"""
# MARK: - Button Item System Item
SYSTEM_ITEM = ui_constants.SYSTEM_ITEM
SYSTEM_ITEM_ACTION = ui_constants.SYSTEM_ITEM_ACTION
"""
The system action button.
"""
SYSTEM_ITEM_ADD = ui_constants.SYSTEM_ITEM_ADD
"""
The system plus button containing an icon of a plus sign.
"""
SYSTEM_ITEM_BOOKMARKS = ui_constants.SYSTEM_ITEM_BOOKMARKS
"""
The system bookmarks button.
"""
SYSTEM_ITEM_CAMERA = ui_constants.SYSTEM_ITEM_CAMERA
"""
The system camera button.
"""
SYSTEM_ITEM_CANCEL = ui_constants.SYSTEM_ITEM_CANCEL
"""
The system Cancel button, localized.
"""
SYSTEM_ITEM_COMPOSE = ui_constants.SYSTEM_ITEM_COMPOSE
"""
The system compose button.
"""
SYSTEM_ITEM_DONE = ui_constants.SYSTEM_ITEM_DONE
"""
The system Done button, localized.
"""
SYSTEM_ITEM_EDIT = ui_constants.SYSTEM_ITEM_EDIT
"""
The system Edit button, localized.
"""
SYSTEM_ITEM_FAST_FORWARD = ui_constants.SYSTEM_ITEM_FAST_FORWARD
"""
The system fast forward button.
"""
SYSTEM_ITEM_FLEXIBLE_SPACE = ui_constants.SYSTEM_ITEM_FLEXIBLE_SPACE
"""
Blank space to add between other items. The space is distributed equally between the other items. Other item properties are ignored when this value is set.
"""
SYSTEM_ITEM_ORGANIZE = ui_constants.SYSTEM_ITEM_ORGANIZE
"""
The system organize button.
"""
SYSTEM_ITEM_PAUSE = ui_constants.SYSTEM_ITEM_PAUSE
"""
The system pause button.
"""
SYSTEM_ITEM_PLAY = ui_constants.SYSTEM_ITEM_PLAY
"""
The system play button.
"""
SYSTEM_ITEM_REDO = ui_constants.SYSTEM_ITEM_REDO
"""
The system redo button.
"""
SYSTEM_ITEM_REFRESH = ui_constants.SYSTEM_ITEM_REFRESH
"""
The system refresh button.
"""
SYSTEM_ITEM_REPLY = ui_constants.SYSTEM_ITEM_REPLY
"""
The system reply button.
"""
SYSTEM_ITEM_REWIND = ui_constants.SYSTEM_ITEM_REWIND
"""
The system rewind button.
"""
SYSTEM_ITEM_SAVE = ui_constants.SYSTEM_ITEM_SAVE
"""
The system Save button, localized.
"""
SYSTEM_ITEM_SEARCH = ui_constants.SYSTEM_ITEM_SEARCH
"""
The system search button.
"""
SYSTEM_ITEM_STOP = ui_constants.SYSTEM_ITEM_STOP
"""
The system stop button.
"""
SYSTEM_ITEM_TRASH = ui_constants.SYSTEM_ITEM_TRASH
"""
The system trash button.
"""
SYSTEM_ITEM_UNDO = ui_constants.SYSTEM_ITEM_UNDO
"""
The system undo button.
"""
###############
# MARK: - Other Classes
###############
# MARK: - Color
class Color:
"""
A ``Color`` object represents a color to be displayed on screen.
Example:
.. highlight:: python
.. code-block:: python
import pyto_ui as ui
# RGB
black = ui.Color.rgb(0, 0, 0, 1)
# White
white = ui.Color.white(1, 1)
# Dynamic
background = ui.Color.dynamic(light=white, dark=black)
For pre-defined colors, see `Color <constants.html#ui-elements-colors>`_ constants.
"""
__py_color__ = None
def _hex_to_rgb(self, hx, hsl=False):
if re.compile(r'#[a-fA-F0-9]{3}(?:[a-fA-F0-9]{3})?$').match(hx):
div = 255.0 if hsl else 0
if len(hx) <= 4:
return tuple(int(hx[i]*2, 16) / div if div else
int(hx[i]*2, 16) for i in (1, 2, 3))
return tuple(int(hx[i:i+2], 16) / div if div else
int(hx[i:i+2], 16) for i in (1, 3, 5))
raise ValueError(f'"{hx}" is not a valid HEX code.')
def configure_from_dictionary(self, obj):
cls = Color
if isinstance(obj, str):
if obj.startswith("#"):
color = self._hex_to_rgb(obj)
self.__py_color__ = cls.rgb(color[0]/255, color[1]/255, color[2]/255).__py_color__
else:
name = "color_"+obj
name = name.upper()
self.__py_color__ = globals()[name].__py_color__
elif isinstance(obj, dict):
if "dark" in obj and "light" in obj:
light = cls.__new__()
| |
in opt.get('datatype', ''):
# we only set up optimizers when training
# we only set this up for the original instance or hogwild ones
self.clip = opt.get('gradient_clip', -1)
# set up optimizer
lr = opt['learningrate']
optim_class = Seq2seqAgent.OPTIM_OPTS[opt['optimizer']]
kwargs = {'lr': lr}
if opt.get('momentum') > 0 and opt['optimizer'] in ['sgd', 'rmsprop']:
kwargs['momentum'] = opt['momentum']
if opt['optimizer'] == 'sgd':
kwargs['nesterov'] = True
if opt['optimizer'] == 'adam':
# https://openreview.net/forum?id=ryQu7f-RZ
kwargs['amsgrad'] = True
if opt['embedding_type'].endswith('fixed'):
print('Seq2seq: fixing embedding weights.')
self.model.decoder.lt.weight.requires_grad = False
self.model.encoder.lt.weight.requires_grad = False
if opt['lookuptable'] in ['dec_out', 'all']:
self.model.decoder.e2s.weight.requires_grad = False
self.optimizer = optim_class([p for p in self.model.parameters() if p.requires_grad], **kwargs)
if states.get('optimizer'):
if states['optimizer_type'] != opt['optimizer']:
print('WARNING: not loading optim state since optim class '
'changed.')
else:
try:
self.optimizer.load_state_dict(states['optimizer'])
except ValueError:
print('WARNING: not loading optim state since model '
'params changed.')
if self.use_cuda:
for state in self.optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.cuda()
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer, 'min', factor=0.5, patience=3, verbose=True)
self.reset()
def override_opt(self, new_opt):
"""Set overridable opts from loaded opt file.
Print out each added key and each overriden key.
Only override args specific to the model.
"""
model_args = {'hiddensize', 'embeddingsize', 'numlayers', 'optimizer',
'encoder', 'decoder', 'lookuptable', 'attention',
'attention_length', 'rnn_class'}
for k, v in new_opt.items():
if k not in model_args:
# skip non-model args
continue
if k not in self.opt:
print('[ Adding new option: | {k}: {v} | ]'.format(k=k, v=v))
elif self.opt[k] != v:
print('[ Overriding option: | {k}: {old} => {v} | ]'.format(
k=k, old=self.opt[k], v=v))
self.opt[k] = v
if 'dict_file' in new_opt and not self.opt.get('dict_file'):
print('[ No dictionary path detected, trying to load previous '
'path {} ]'.format(new_opt['dict_file']))
self.opt['dict_file'] = new_opt['dict_file']
return self.opt
def parse(self, text):
"""Convert string to token indices."""
return self.dict.txt2vec(text)
def v2t(self, vec):
"""Convert token indices to string of tokens."""
new_vec = []
for i in vec:
if i == self.END_IDX:
break
elif i != self.START_IDX:
new_vec.append(i)
return self.dict.vec2txt(new_vec)
    def zero_grad(self):
        """Clear gradients accumulated in the optimizer before a new backward pass."""
        self.optimizer.zero_grad()
    def update_params(self):
        """Do one optimization step.

        Clips the global gradient norm to ``self.clip`` first when clipping is
        enabled (``clip > 0``).
        """
        if self.clip > 0:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()
def reset(self):
"""Reset observation and episode_done."""
self.observation = None
self.history.clear()
for i in range(len(self.answers)):
self.answers[i] = None
self.reset_metrics()
def reset_metrics(self):
"""Reset metrics for reporting loss and perplexity."""
self.metrics['loss'] = 0.0
self.metrics['num_tokens'] = 0
self.metrics['correct_tokens'] = 0
def report(self):
"""Report loss and perplexity from model's perspective.
Note that this includes predicting __END__ and __UNK__ tokens and may
differ from a truly independent measurement.
"""
m = {}
num_tok = self.metrics['num_tokens']
if num_tok > 0:
if self.metrics['correct_tokens'] > 0:
m['token_acc'] = self.metrics['correct_tokens'] / num_tok
m['loss'] = self.metrics['loss'] / num_tok
try:
m['ppl'] = math.exp(m['loss'])
except OverflowError:
m['ppl'] = float('inf')
if self.metrics['total_skipped_batches'] > 0:
m['total_skipped_batches'] = self.metrics['total_skipped_batches']
for k, v in m.items():
# clean up: rounds to sigfigs and converts tensors to floats
m[k] = round_sigfigs(v, 4)
return m
    def share(self):
        """Share internal states between parent and child instances.

        Returns the shared dict consumed by a clone's constructor; includes the
        model, dictionary and special-token indices. With multiple threads
        (hogwild) the metrics dict is promoted to a SharedTable and the model
        is moved to shared memory first.
        """
        shared = super().share()
        shared['opt'] = self.opt
        shared['answers'] = self.answers
        shared['dict'] = self.dict
        shared['START_IDX'] = self.START_IDX
        shared['END_IDX'] = self.END_IDX
        shared['NULL_IDX'] = self.NULL_IDX
        shared['model'] = self.model
        if self.opt.get('numthreads', 1) > 1:
            # we're doing hogwild so share the model too
            if type(self.metrics) == dict:
                # move metrics and model to shared memory
                self.metrics = SharedTable(self.metrics)
                self.model.share_memory()
            shared['states'] = {  # don't share optimizer states
                'optimizer_type': self.opt['optimizer'],
            }
        shared['metrics'] = self.metrics  # do after numthreads check
        return shared
    def observe(self, observation):
        """Save observation for act.

        If multiple observations are from the same episode, concatenate them.
        Returns the (shallow-copied) observation with a 'text2vec' entry added.
        """
        # shallow copy observation (deep copy can be expensive)
        obs = observation.copy()
        if not obs.get('preprocessed', False) or 'text2vec' not in obs:
            # build the vectorized dialog history for this batch slot
            obs['text2vec'] = maintain_dialog_history(
                self.history, obs,
                reply=self.answers[self.batch_idx],
                historyLength=self.truncate,
                useReplies=self.opt.get('history_replies'),
                dict=self.dict,
                useStartEndIndices=self.use_person_tokens)
        else:
            # already vectorized upstream; just enforce the truncation window
            obs['text2vec'] = deque(obs['text2vec'], maxlen=self.truncate)
        self.observation = obs
        # clear the stored reply for this slot; it was consumed above
        self.answers[self.batch_idx] = None
        return obs
    def predict(self, xs, ys=None, cands=None, valid_cands=None, is_training=False):
        """Produce a prediction from our model.

        Update the model using the targets if available, otherwise rank
        candidates as well if they are available and param is set.

        Returns a ``(predictions, cand_preds)`` pair; both are None when a
        training batch is skipped due to CUDA OOM.
        """
        predictions, cand_preds = None, None
        if is_training:
            self.model.train()
            self.zero_grad()
            out = None
            try:
                out = self.model(xs, ys, rank_during_training=cands is not None)
                # generated response
                _preds, scores, cand_preds = out[0], out[1], out[2]
                score_view = scores.view(-1, scores.size(-1))
                loss = self.criterion(score_view, ys.view(-1))
                # save loss to metrics; NULL tokens are excluded from the counts
                y_ne = ys.ne(self.NULL_IDX)
                target_tokens = y_ne.long().sum().item()
                correct = ((ys == _preds) * y_ne).sum().item()
                self.metrics['correct_tokens'] += correct
                self.metrics['loss'] += loss.item()
                self.metrics['num_tokens'] += target_tokens
                loss /= target_tokens  # average loss per token
                loss.backward()
            except RuntimeError as e:
                # catch out of memory exceptions during fwd/bck (skip batch)
                if 'out of memory' in str(e):
                    print('| WARNING: ran out of memory, skipping batch. '
                          'if this happens frequently, decrease batchsize or '
                          'truncate the inputs to the model.')
                    self.metrics['total_skipped_batches'] += 1
                    return predictions, cand_preds
                else:
                    raise e
            self.update_params()
        else:
            self.model.eval()
            if valid_cands:
                out = self.model(xs, ys=None, cands=cands, valid_cands=valid_cands, beam_size=self.beam_size, topk=self.topk)
            else:
                out = self.model(xs, ys=None, cands=cands, beam_size=self.beam_size, topk=self.topk)
            predictions, cand_preds = out[0], out[2]
            if ys is not None:
                # calculate loss on targets (second forward pass, teacher-forced)
                out = self.model(xs, ys)
                scores = out[1]
                score_view = scores.view(-1, scores.size(-1))
                loss = self.criterion(score_view, ys.view(-1))
                # save loss to metrics
                target_tokens = ys.ne(self.NULL_IDX).long().sum().item()
                self.metrics['loss'] += loss.item()
                self.metrics['num_tokens'] += target_tokens
        return predictions, cand_preds
def vectorize(self, observations):
"""Convert a list of observations into input & target tensors."""
is_training = any(['labels' in obs for obs in observations])
xs, ys, labels, valid_inds, _, _ = PaddingUtils.pad_text(
observations, self.dict, end_idx=self.END_IDX,
null_idx=self.NULL_IDX, dq=True, eval_labels=True,
truncate=self.truncate)
if xs is None:
return None, None, None, None, None, None, None
xs = torch.LongTensor(xs)
if ys is not None:
ys = torch.LongTensor(ys)
if self.use_cuda:
# copy to gpu
xs = xs.cuda()
if ys is not None:
ys = ys.cuda()
cands = None
valid_cands = None
if not is_training and self.rank:
# set up candidates
cands = []
valid_cands = []
for i, v in enumerate(valid_inds):
if 'label_candidates' in observations[v]:
curr_lcs = list(observations[v]['label_candidates'])
curr_cands = [{'text': c} for c in curr_lcs]
cs, _, _, valid_c_inds, *_ = PaddingUtils.pad_text(curr_cands, self.dict, null_idx=self.NULL_IDX, dq=True, truncate=self.truncate)
valid_cands.append((i, v, [curr_lcs[j] for j in valid_c_inds]))
cs = torch.LongTensor(cs)
if self.use_cuda:
cs = cs.cuda()
cands.append(cs)
return xs, ys, labels, valid_inds, cands, valid_cands, is_training
    def init_cuda_buffer(self, batchsize):
        """Pre-allocate CUDA memory with a dummy max-size batch (once).

        Running a full-size forward/backward pass up front reserves the peak
        buffer so a later large batch does not OOM mid-training. No-op when
        CUDA is disabled or the buffer was already initialized.
        """
        if self.use_cuda and not hasattr(self, 'buffer_initialized'):
            try:
                print('preinitializing pytorch cuda buffer')
                bsz = self.opt.get('batchsize', batchsize)
                maxlen = self.truncate or 180
                dummy = torch.ones(bsz, maxlen).long().cuda()
                sc = self.model(dummy, dummy)[1]
                loss = self.criterion(sc.view(-1, sc.size(-1)), dummy.view(-1))
                loss.backward()
                self.buffer_initialized = True
            except RuntimeError as e:
                if 'out of memory' in str(e):
                    # bsz/maxlen are set before the model call, so they are
                    # defined here whenever the OOM came from the forward pass
                    m = ('CUDA OOM: Lower batch size (-bs) from {} or lower max'
                         ' sequence length (-tr) from {}'.format(bsz, maxlen))
                    raise RuntimeError(m)
                else:
                    raise e
def batch_act(self, observations):
batchsize = len(observations)
self.init_cuda_buffer(batchsize)
# initialize a table of replies with this agent's id
batch_reply = [{'id': self.getID()} for _ in range(batchsize)]
# convert the observations into batches of inputs and targets
# valid_inds tells us the indices of all valid examples
# e.g. for input [{}, {'text': 'hello'}, {}, {}], valid_inds is [1]
# since the other three elements had no 'text' field
xs, ys, labels, valid_inds, cands, valid_cands, is_training = self.vectorize(observations)
if xs is None:
# no valid examples, just return empty responses
return batch_reply
# produce predictions, train on targets if availables
cand_inds = [i[0] for i in valid_cands] if valid_cands is not None else None
predictions, cand_preds = self.predict(xs, ys, cands, cand_inds, is_training)
if is_training:
report_freq = 0
else:
report_freq = self.report_freq
if predictions is not None:
PaddingUtils.map_predictions(
predictions, valid_inds, batch_reply, observations,
self.dict, self.END_IDX, report_freq=report_freq, labels=labels,
answers=self.answers, ys=ys.data if ys is not None else None)
if cand_preds is not None:
if valid_cands is None:
valid_cands = [(None, i, labels) for i in valid_inds]
for i in range(len(valid_cands)):
order = cand_preds[i]
_, batch_idx, curr_cands = valid_cands[i]
curr = batch_reply[batch_idx]
curr['text_candidates'] = [curr_cands[idx] for idx in order
if idx < len(curr_cands)]
#ipdb.set_trace()
return batch_reply
    def act(self):
        """Return a reply for the current observation.

        Delegates to batch_act with a singleton batch.
        """
        # call batch_act with this batch of one
        return self.batch_act([self.observation])[0]
def save(self, path=None):
"""Save model parameters if model_file is set."""
| |
redishash(data))
routes = data.get('routes')
if routes:
for route in routes[:2]:
pipe.sadd(f'engagement:intcon:out:{route}', nameid)
pipe.execute()
response.status_code, result = 200, {'passed': True}
except Exception as e:
response.status_code, result = 500, None
logify(f"module=liberator, space=libreapi, action=create_routing_table, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
finally:
return result
@librerouter.put("/libreapi/routing/table/{identifier}", status_code=200)
def update_routing_table(reqbody: RoutingTableModel, response: Response, identifier: str=Path(..., regex=_NAME_)):
    # Update an existing routing table: re-sync the engagement sets of its
    # primary/secondary routes and remove hash fields no longer present.
    # Renaming a table is rejected.
    result = None
    try:
        pipe = rdbconn.pipeline()
        name = reqbody.name
        _nameid = f'table:{identifier}'; _name_key = f'routing:{_nameid}'
        nameid = f'table:{name}'; name_key = f'routing:{nameid}'
        if not rdbconn.exists(_name_key):
            response.status_code, result = 403, {'error': 'nonexistent routing table identifier'}; return
        if name != identifier:
            if rdbconn.exists(name_key):
                response.status_code, result = 403, {'error': 'existent routing table name'}; return
            else:
                response.status_code, result = 403, {'error': 'change name routing table is not allowed'}; return
        # get current data
        _data = jsonhash(rdbconn.hgetall(_name_key))
        _routes = _data.get('routes')
        # transaction block
        pipe.multi()
        if _routes:
            # detach engagement references held by the OLD routes
            for _route in _routes[:2]:
                pipe.srem(f'engagement:intcon:out:{_route}', _nameid)
        data = jsonable_encoder(reqbody)
        pipe.hmset(name_key, redishash(data))
        routes = data.get('routes')
        if routes:
            # attach the NEW routes (bug fix: previously iterated _routes,
            # re-registering the old routes instead of the updated ones)
            for route in routes[:2]:
                pipe.sadd(f'engagement:intcon:out:{route}', nameid)
        # remove unintended field
        for field in _data:
            if field not in data:
                pipe.hdel(name_key, field)
        # NOTE(review): unreachable while renaming is rejected above; kept in
        # case renames are re-enabled later
        if name != identifier:
            _engaged_key = f'engagement:{_name_key}'
            engaged_key = f'engagement:{name_key}'
            engagements = rdbconn.smembers(_engaged_key)
            for engagement in engagements:
                pipe.hset(f'intcon:{engagement}', 'routing', name)
            if rdbconn.exists(_engaged_key):
                pipe.rename(_engaged_key, engaged_key)
            pipe.delete(_name_key)
        pipe.execute()
        response.status_code, result = 200, {'passed': True}
    except Exception as e:
        response.status_code, result = 500, None
        logify(f"module=liberator, space=libreapi, action=update_routing_table, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
@librerouter.delete("/libreapi/routing/table/{identifier}", status_code=200)
def delete_routing_table(response: Response, identifier: str=Path(..., regex=_NAME_)):
    # Delete a routing table only when it is not engaged by any interconnection
    # and has no routing records left.
    result = None
    try:
        pipe = rdbconn.pipeline()
        _nameid = f'table:{identifier}'; _name_key = f'routing:{_nameid}'
        _engaged_key = f'engagement:{_name_key}'
        if not rdbconn.exists(_name_key):
            response.status_code, result = 403, {'error': 'nonexistent routing table'}; return
        if rdbconn.scard(_engaged_key):
            response.status_code, result = 403, {'error': 'engaged routing table'}; return
        # check if routing records exists in table
        # ('cursor' avoids shadowing the builtin next())
        _ROUTING_KEY_PATTERN = f'routing:record:{identifier}:*'
        cursor, records = rdbconn.scan(0, _ROUTING_KEY_PATTERN, SCAN_COUNT)
        if records:
            response.status_code, result = 400, {'error': 'routing table in used'}; return
        else:
            while cursor:
                cursor, records = rdbconn.scan(cursor, _ROUTING_KEY_PATTERN, SCAN_COUNT)
                if records:
                    response.status_code, result = 400, {'error': 'routing table in used'}; return
        # get current data and detach route engagement references
        _routes = fieldjsonify(rdbconn.hget(_name_key, 'routes'))
        if _routes:
            for _route in _routes[:2]:
                pipe.srem(f'engagement:intcon:out:{_route}', _nameid)
        pipe.delete(_engaged_key)
        pipe.delete(_name_key)
        pipe.execute()
        response.status_code, result = 200, {'passed': True}
    except Exception as e:
        response.status_code, result = 500, None
        logify(f"module=liberator, space=libreapi, action=delete_routing_table, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
@librerouter.get("/libreapi/routing/table/{identifier}", status_code=200)
def detail_routing_table(response: Response, identifier: str=Path(..., regex=_NAME_)):
    # Return a routing table with its expanded routes, all records and the
    # set of interconnections engaging it.
    result = None
    try:
        _name_key = f'routing:table:{identifier}'
        _engaged_key = f'engagement:{_name_key}'
        if not rdbconn.exists(_name_key):
            response.status_code, result = 403, {'error': 'nonexistent routing table identifier'}; return
        data = jsonhash(rdbconn.hgetall(_name_key))
        _routes = data.get('routes')
        if _routes:
            data['routes'] = {'primary': _routes[0], 'secondary': _routes[1], 'load': int(_routes[2])}
        # get records ('cursor' avoids shadowing the builtin next())
        pipe = rdbconn.pipeline()
        KEYPATTERN = f'routing:record:{identifier}:*'
        cursor, mainkeys = rdbconn.scan(0, KEYPATTERN, SCAN_COUNT)
        while cursor:
            cursor, tmpkeys = rdbconn.scan(cursor, KEYPATTERN, SCAN_COUNT)
            mainkeys += tmpkeys
        for mainkey in mainkeys:
            # comparison records live in one hash per table; others are strings
            if mainkey.endswith(':compare:'): pipe.hgetall(mainkey)
            else: pipe.get(mainkey)
        details = pipe.execute()
        records = list()
        for mainkey, detail in zip(mainkeys, details):
            _, _, _, match, value = listify(mainkey)
            if value==__EMPTY_STRING__: value = __DEFAULT_ENTRY__
            if match=='compare':
                for hashfield, valuefield in detail.items():
                    compare, param = listify(hashfield)
                    recordvalue = listify(valuefield)
                    action = recordvalue[0]
                    # NOTE(review): this branch emits key 'matching' while the
                    # other emits 'match' — confirm the asymmetry is intended
                    record = {'matching': compare, 'value': param, 'action': action}
                    if action != _BLOCK:
                        record.update({'routes':{'primary': recordvalue[1], 'secondary': recordvalue[2], 'load': int(recordvalue[3])}})
                    records.append(record)
            else:
                splitdetail = listify(detail)
                action = splitdetail[0]
                record = {'match': match, 'value': value, 'action': action}
                if action != _BLOCK:
                    record.update({'routes': {'primary': splitdetail[1], 'secondary': splitdetail[2], 'load': int(splitdetail[3])}})
                records.append(record)
        engagements = rdbconn.smembers(_engaged_key)
        data.update({'records': records, 'engagements': engagements})
        response.status_code, result = 200, data
    except Exception as e:
        response.status_code, result = 500, None
        logify(f"module=liberator, space=libreapi, action=detail_routing_table, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
@librerouter.get("/libreapi/routing/table", status_code=200)
def list_routing_table(response: Response):
    # List all routing tables with their routes expanded into
    # primary/secondary/load form.
    result = None
    try:
        pipe = rdbconn.pipeline()
        KEYPATTERN = 'routing:table:*'
        # 'cursor' avoids shadowing the builtin next()
        cursor, mainkeys = rdbconn.scan(0, KEYPATTERN, SCAN_COUNT)
        while cursor:
            cursor, tmpkeys = rdbconn.scan(cursor, KEYPATTERN, SCAN_COUNT)
            mainkeys += tmpkeys
        for mainkey in mainkeys: pipe.hgetall(mainkey)
        details = pipe.execute()
        data = list()
        for mainkey, detail in zip(mainkeys, details):
            detail = jsonhash(detail)
            # bug fix: field is stored as 'routes' (see create/update/detail);
            # 'route' always returned None so routes were never expanded
            routes = detail.get('routes')
            if routes:
                detail['routes'] = {'primary': routes[0], 'secondary': routes[1], 'load': int(routes[2])}
            data.append(detail)
        response.status_code, result = 200, data
    except Exception as e:
        response.status_code, result = 500, None
        logify(f"module=liberator, space=libreapi, action=list_routing_table, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
# ROUTING RECORD
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
class MatchingEnum(str, Enum):
    """Matching strategies available for routing records."""
    lpm = 'lpm'  # longest prefix match
    em = 'em'    # exact match
    eq = 'eq'    # equal
    ne = 'ne'    # not equal
    gt = 'gt'    # greater than
    lt = 'lt'    # less than
# comparison-style matches are stored together in one redis hash per table
_COMPARESET = {'eq', 'ne', 'gt', 'lt'}
class RoutingRecordActionEnum(str, Enum):
    """Action stored as the first element of a record's flattened routes list."""
    route = _ROUTE  # forward the call to an outbound interconnection
    block = _BLOCK  # reject the call
    jumps = _JUMPS  # chain to another routing table
class RoutingRecordModel(BaseModel):
    """Request body for routing-record endpoints.

    The root validator checks referential integrity and flattens the payload
    to the list layout persisted in redis: ``[action]`` for block records,
    ``[action, primary, secondary, load]`` otherwise.
    """
    table: str = Field(regex=_NAME_, max_length=32, description='name of routing table')
    match: MatchingEnum = Field(description='matching options, include lpm: longest prefix match, em: exact match, eq: equal, ne: not equal, gt: greater than, lt: less than',)
    value: str = Field(min_length=1, max_length=128, regex=_DIAL_, description=f'value of variable that declared in routing table. {__DEFAULT_ENTRY__} is predefined value for default entry')
    action: RoutingRecordActionEnum = Field(default=_ROUTE, description=f'routing action: {_JUMPS} - jumps to other routing table; {_BLOCK} - block the call; {_ROUTE} - route call to outbound interconnection')
    routes: Optional[RouteModel] = Field(description='route model data')
    # validation and transform data
    @root_validator()
    def routing_record_agreement(cls, values):
        values = jsonable_encoder(values)
        table = values.get('table')
        action = values.pop('action')
        if not rdbconn.exists(f'routing:table:{table}'):
            raise ValueError('nonexistent routing table')
        if action==_BLOCK:
            # bug fix: keep the flat [action] form so downstream handlers can
            # read routes[0]; previously routes was dropped entirely, which
            # made create/update crash (routes[0] on None) for block records
            values['routes'] = [action]
        if action in [_JUMPS, _ROUTE]:
            routes = values.get('routes')
            if not routes:
                raise ValueError(f'routes parameter is required for {action} action')
            else:
                primary = routes.get('primary')
                secondary = routes.get('secondary')
                load = routes.get('load')
                if action == _ROUTE:
                    # both endpoints must be existing outbound interconnections
                    for intconname in [primary, secondary]:
                        if not rdbconn.exists(f'intcon:out:{intconname}'):
                            raise ValueError('nonexistent outbound interconnect')
                else:
                    # jumps: targets must be other existing routing tables
                    for _table in [primary, secondary]:
                        if _table == table:
                            raise ValueError(f'routing loop to itself')
                        if not rdbconn.exists(f'routing:table:{_table}'):
                            raise ValueError('nonexistent routing table')
                values['routes'] = [action, primary, secondary, load]
        return values
@librerouter.post("/libreapi/routing/record", status_code=200)
def create_routing_record(reqbody: RoutingRecordModel, response: Response):
    # Create a routing record; the model validator delivers 'routes' already
    # flattened to a list whose first element is the action.
    result = None
    try:
        pipe = rdbconn.pipeline()
        data = jsonable_encoder(reqbody)
        table = data.get('table')
        match = data.get('match')
        value = data.get('value')
        if value == __DEFAULT_ENTRY__: value = __EMPTY_STRING__
        routes = data.get('routes')
        nameid = f'record:{table}:{match}:{value}'
        hashfield = None
        if match in _COMPARESET:
            # comparison matches share one hash key per table
            record_key = f'routing:record:{table}:compare:'
            hashfield = f'{match}:{value}'
            if rdbconn.hexists(record_key, hashfield):
                response.status_code, result = 403, {'error': 'existent routing record'}; return
        else:
            record_key = f'routing:{nameid}'
            if rdbconn.exists(record_key):
                response.status_code, result = 403, {'error': 'existent routing record'}; return
        # NOTE(review): if 'routes' is ever absent here (e.g. stripped for a
        # block record), routes[0] raises and surfaces as HTTP 500 — confirm
        # the validator always supplies it
        action = routes[0]
        if action==_ROUTE:
            for route in routes[1:3]:
                pipe.sadd(f'engagement:intcon:out:{route}', nameid)
        elif action==_JUMPS:
            for route in routes[1:3]:
                pipe.sadd(f'engagement:routing:table:{route}', nameid)
        if hashfield: pipe.hset(record_key, hashfield, stringify(map(str, routes)))
        else: pipe.set(record_key, stringify(map(str, routes)))
        pipe.execute()
        response.status_code, result = 200, {'passed': True}
    except Exception as e:
        response.status_code, result = 500, None
        logify(f"module=liberator, space=libreapi, action=create_routing_record, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
@librerouter.put("/libreapi/routing/record", status_code=200)
def update_routing_record(reqbody: RoutingRecordModel, response: Response):
    # Update a routing record: detach the engagement references of the stored
    # routes, then attach those of the new routes and overwrite the record.
    result = None
    try:
        pipe = rdbconn.pipeline()
        data = jsonable_encoder(reqbody)
        table = data.get('table')
        match = data.get('match')
        value = data.get('value')
        if value == __DEFAULT_ENTRY__: value = __EMPTY_STRING__
        nameid = f'record:{table}:{match}:{value}'
        hashfield = None
        if match in _COMPARESET:
            # comparison matches share one hash key per table
            record_key = f'routing:record:{table}:compare:'
            hashfield = f'{match}:{value}'
            if not rdbconn.hexists(record_key, hashfield):
                response.status_code, result = 403, {'error': 'non existent routing record'}; return
        else:
            record_key = f'routing:{nameid}'
            if not rdbconn.exists(record_key):
                response.status_code, result = 403, {'error': 'non existent routing record'}; return
        # process old data: drop engagement references held by stored routes
        if hashfield: _routes = listify(rdbconn.hget(record_key, hashfield))
        else: _routes = listify(rdbconn.get(record_key))
        _action = _routes[0]
        if _action==_ROUTE:
            for _route in _routes[1:3]:
                pipe.srem(f'engagement:intcon:out:{_route}', nameid)
        elif _action==_JUMPS:
            for _route in _routes[1:3]:
                pipe.srem(f'engagement:routing:table:{_route}', nameid)
        else: pass
        # process new data: register engagement references for the new routes
        routes = data.get('routes')
        action = routes[0]
        if action==_ROUTE:
            for route in routes[1:3]:
                pipe.sadd(f'engagement:intcon:out:{route}', nameid)
        elif action==_JUMPS:
            for route in routes[1:3]:
                pipe.sadd(f'engagement:routing:table:{route}', nameid)
        else: pass
        if hashfield: pipe.hset(record_key, hashfield, stringify(map(str, routes)))
        else: pipe.set(record_key, stringify(map(str, routes)))
        pipe.execute()
        response.status_code, result = 200, {'passed': True}
    except Exception as e:
        response.status_code, result = 500, None
        logify(f"module=liberator, space=libreapi, action=update_routing_record, requestid={get_request_uuid()}, exception={e}, traceback={traceback.format_exc()}")
    finally:
        return result
@librerouter.delete("/libreapi/routing/record/{table}/{match}/{value}", status_code=200)
def delete_routing_record(response: Response, value:str=Path(..., regex=_DIAL_), table:str=Path(..., regex=_NAME_),
match:str=Path(..., regex='^(em|lpm|eq|ne|gt|lt)$')):
result = None
try:
pipe = rdbconn.pipeline()
if value == __DEFAULT_ENTRY__: value = __EMPTY_STRING__
nameid = f'record:{table}:{match}:{value}'
hashfield = None
if match in _COMPARESET:
record_key = f'routing:record:{table}:compare:'
hashfield = f'{match}:{value}'
if not rdbconn.hexists(record_key, hashfield):
response.status_code, result = 403, {'error': 'non existent routing record'}; return
else:
record_key = f'routing:{nameid}'
if not rdbconn.exists(record_key):
response.status_code, result = 403, {'error': 'non existent routing record'}; return
if hashfield: _routes = listify(rdbconn.hget(record_key, hashfield))
else: _routes = listify(rdbconn.get(record_key))
_action = _routes[0]
if _action==_ROUTE:
for _route in _routes[1:3]:
pipe.srem(f'engagement:intcon:out:{_route}', nameid)
if _action==_JUMPS:
for _route in _routes[1:3]:
pipe.srem(f'engagement:routing:table:{_route}', nameid)
if hashfield: pipe.hdel(record_key, hashfield)
else: pipe.delete(record_key)
pipe.execute()
response.status_code, result = | |
<gh_stars>0
# *************************************************************************
# Copyright (c) 2013 Cisco Systems, Inc. All rights reserved.
# *************************************************************************
from cobraj.mit.request import ConfigRequestType
from cobra.mit.naming import Dn
from .jsoncodec import toJSONStr
from .xmlcodec import toXMLStr
class AbstractRequest(object):
    """
    AbstractRequest is the base class for all other request types, including
    AbstractQuery, ConfigRequest, UploadPackage, LoginRequest and RefreshRequest
    """
    def __init__(self):
        self.__options = {}
        self.id = None
        self.__uriBase = ""
    @classmethod
    def makeOptions(cls, options):
        """
        Return the key=value pairs of *options* joined with '&', skipping
        entries with falsy values; '' when options is empty/None
        """
        if not options:
            return ''
        pairs = ('%s=%s' % (key, str(val)) for key, val in options.items() if val)
        return '&'.join(pairs)
    def getUriPathAndOptions(self, session):
        """
        Return the URI path (base + format) plus the query string, if any
        """
        opts = self.options
        prefix = '?' if opts else ''
        return "{0}.{1}{2}{3}".format(self.uriBase, session.formatStr, prefix, opts)
    @property
    def options(self):
        """
        Return the HTTP request query string for this object
        """
        return AbstractRequest.makeOptions(self.__options)
    # property setters / getters for this class
    @property
    def id(self):
        """
        Returns the id of this query if set, else None
        """
        return self.__options.get('_dc', None)
    @id.setter
    def id(self, value):
        """
        Sets the id of this query. The id is an internal troubleshooting
        value useful for tracing the processing of a request within the cluster
        """
        self.__options['_dc'] = value
    @property
    def uriBase(self):
        return self.__uriBase
    @uriBase.setter
    def uriBase(self, value):
        self.__uriBase = value
    def getUrl(self, session):
        """
        Returns the dn query URL containing all the query options defined on
        this object
        """
        return session.url + self.getUriPathAndOptions(session)
class DeploymentQuery(AbstractRequest):
    """
    Class to create a deployment query to find the impacted deployment entities for changes in specific dn.
    """
    def __init__(self, dn):
        """
        Args:
            dn (str or Dn): DN to query
        """
        super(DeploymentQuery, self).__init__()
        self.__dnStr = str(dn)
        self.__options = {'rsp-subtree-include': 'full-deployment'}
        self.uriBase = "/api/mo/%s" % self.__dnStr
    @property
    def options(self):
        """
        Returns the concatenation of the class and base class options for HTTP
        request query string
        """
        return '&'.join(filter(None, [AbstractRequest.makeOptions(
            self.__options), super(DeploymentQuery, self).options]))
    # property setters / getters for this class
    @property
    def dnStr(self):
        """
        Returns the base dnString for this DnQuery
        """
        return self.__dnStr
    @property
    def targetNode(self):
        """
        Returns the target node id if set, else None
        """
        return self.__options.get('target-node', None)
    @targetNode.setter
    def targetNode(self, value):
        """
        Args:
            value (int): id of the node
        """
        self.__options['target-node'] = value
    @property
    def targetPath(self):
        """
        Returns the path selected for the query
        """
        return self.__options.get('target-path', None)
    # bug fix: was @targetNode.setter, which replaced targetNode's setter and
    # paired targetPath's setter with targetNode's getter
    @targetPath.setter
    def targetPath(self, value):
        """
        Args:
            value (str): specific target path for this query
        """
        self.__options['target-path'] = value
    @property
    def includeRelations(self):
        """
        Returns whether relations are included in the query ('yes'/'no')
        """
        return self.__options.get('include-relns', None)
    @includeRelations.setter
    def includeRelations(self, value):
        """
        Args:
            value (bool): truthy to include relations in the query response
        """
        value = 'yes' if value else 'no'
        self.__options['include-relns'] = value
class AbstractQuery(AbstractRequest):
"""
Class representing an abstract query. The class is used by classQuery
and Dnquery.
"""
    def __init__(self):
        super(AbstractQuery, self).__init__()
        # query options specific to this class; name-mangled so they stay
        # separate from the base class's option dict
        self.__options = {}
    @property
    def options(self):
        """
        Returns the concatenation of the class and base class options for HTTP
        request query string
        """
        return '&'.join(filter(None, [AbstractRequest.makeOptions(
            self.__options), super(AbstractQuery, self).options]))
# property setters / getters for this class
    @property
    def propInclude(self):
        """
        Returns the current response property include filter
        """
        return self.__options.get('rsp-prop-include', None)
    @propInclude.setter
    def propInclude(self, value):
        """
        Filters that can specify the properties that should be included in the
        response body
        """
        allowedValues = {'_all_', 'naming-only', 'config-explicit',
                         'config-all', 'config-only', 'oper'}
        if value not in allowedValues:
            raise ValueError('"%s" is invalid, valid values are "%s"' %
                             (value, str(allowedValues)))
        self.__options['rsp-prop-include'] = value
    @property
    def subtreePropFilter(self):
        """
        Returns the subtree prop filter.
        """
        return self.__options.get('rsp-subtree-filter', None)
    @subtreePropFilter.setter
    def subtreePropFilter(self, pFilter):
        """
        Sets the subtree prop filter (stringified before storing).
        """
        self.__options['rsp-subtree-filter'] = str(pFilter)
    @property
    def subtreeClassFilter(self):
        """
        Returns the current subtree class filter.
        """
        return self.__options.get('rsp-subtree-class', None)
    @subtreeClassFilter.setter
    def subtreeClassFilter(self, value):
        """
        Filters the subtree response to the given class(es); accepts a list
        or a comma-separated string.
        """
        if isinstance(value, list):
            value = ','.join(value)
        self.__options['rsp-subtree-class'] = value
    @property
    def subtreeInclude(self):
        """
        Returns the current subtree query values.
        """
        return self.__options.get('rsp-subtree-include', None)
    @subtreeInclude.setter
    def subtreeInclude(self, value):
        """
        Specifies optional values for a subtree query (accepts a
        comma-separated string), including:
        *audit-logs
        *event-logs
        *faults
        *fault-records
        *ep-records
        *health
        *health-records
        *relations
        *stats
        *tasks
        *count
        *no-scoped
        *required
        *subtree
        """
        allowedValues = {'audit-logs', 'event-logs', 'faults', 'fault-records', 'ep-records',
                         'health', 'health-records', 'deployment-records', 'relations', 'stats',
                         'tasks', 'count', 'no-scoped', 'required', 'subtree'}
        # every comma-separated token must be an allowed value
        allValues = value.split(',')
        for v in allValues:
            if v not in allowedValues:
                raise ValueError('"%s" is invalid, valid values are "%s"' %
                                 (value, str(allowedValues)))
        self.__options['rsp-subtree-include'] = value
    @property
    def queryTarget(self):
        """
        Returns the query type.
        """
        return self.__options.get('query-target', None)
    @queryTarget.setter
    def queryTarget(self, value):
        """
        Sets the query type. You can query the object (self), child objects
        (children), or all objects lower in the hierarchy (subtree).
        """
        allowedValues = {'self', 'children', 'subtree'}
        if value not in allowedValues:
            raise ValueError('"%s" is invalid, valid values are "%s"' %
                             (value, str(allowedValues)))
        self.__options['query-target'] = value
    @property
    def classFilter(self):
        """
        Returns the current class filter type.
        """
        return self.__options.get('target-subtree-class', None)
    @classFilter.setter
    def classFilter(self, value):
        """
        Filters by a specified class; accepts a list or a comma-separated
        string. Dots are stripped from class names ('fv.Tenant' -> 'fvTenant').
        """
        if isinstance(value, str):
            value = value.split(',')
        value = [name.replace('.', '') for name in value]
        value = ','.join(value)
        self.__options['target-subtree-class'] = value
@property
def propFilter(self):
"""
Returns the current property filter type.
"""
return self.__options.get('query-target-filter', None)
@propFilter.setter
def propFilter(self, pFilter):
"""
Filters by a specified property value.
"""
self.__options['query-target-filter'] = str(pFilter)
@property
def subtree(self):
"""
Returns the current type of subtree filter.
"""
return self.__options.get('rsp-subtree', None)
@subtree.setter
def subtree(self, value):
"""
Filters query values within a subtree- you can filter by MOs (children)
or return the entire subtree (full).
"""
allowedValues = {'no', 'children', 'full'}
if value not in allowedValues:
raise ValueError('"%s" is invalid, valid values are "%s"' %
(value, str(allowedValues)))
self.__options['rsp-subtree'] = value
@property
def replica(self):
"""
Returns the current value for the replica option.
"""
return self.__options.get('replica', None)
@replica.setter
def replica(self, value):
"""
Direct the query to a specific replica
"""
allowedValues = set([1, 2, 3])
if value not in allowedValues:
raise ValueError('"%s" is invalid, valid values are "%s"' %
(value, str(allowedValues)))
self.__options['replica'] = value
@property
def orderBy(self):
"""Get the orderBy sort specifiers string.
Returns:
str: The order-by string of sort specifiers.
"""
return self.__options.get('order-by', None)
@orderBy.setter
def orderBy(self, sortSpecifiers):
"""Set the orderBy sort specifiers.
Args:
sortSpecifiers (str or list of str): A list of sort specifier strings
or a comma separated string of sort specifiers.
"""
if isinstance(sortSpecifiers, list):
sortSpecifiers = ','.join(sortSpecifiers)
self.__options['order-by'] = sortSpecifiers
@property
def pageSize(self):
"""Get the pageSize value.
Returns:
int: The number of results to be returned by a query.
"""
return self.__options.get('page-size', None)
@pageSize.setter
def pageSize(self, pageSize):
"""Set the pageSize value.
Args:
pageSize (int): The number of results to be returned by a query.
"""
try:
numVal = int(pageSize)
except:
raise ValueError('{} pageSize needs to be an integer'.format(pageSize))
self.__options['page-size'] = str(numVal)
@property
def page(self):
"""Get the page value.
Returns:
int: The number of the page returned in the query.
"""
return self.__options.get('page', None)
@page.setter
def page(self, value):
"""Set the page value.
Args:
page (int): The position in the query which should be retrieved.
"""
try:
numVal = int(value)
except:
raise ValueError('{} page needs to be an integer'.format(value))
self.__options['page'] = str(numVal)
@property
def cacheId(self):
return self.__options.get('cache-session', None)
@cacheId.setter
def cacheId(self, value):
if value is None and 'cache-session' in self.__options:
del self.__options['cache-session']
return
try:
numVal = int(value)
except:
raise ValueError('{} cache id needs to be an integer'.format(value))
self.__options['cache-session'] = str(numVal)
@property
def deleteCacheId(self):
return self.__options.get('delete-session', None)
@deleteCacheId.setter
def deleteCacheId(self, value):
try:
numVal = int(value)
except:
raise ValueError('{} delete cache id needs to be an integer'.format(value))
self.__options['delete-session'] = str(numVal)
class DnQuery(AbstractQuery):
"""
Class to create a query based on distinguished name (Dn).
"""
    def __init__(self, dn):
        """
        Args:
            dn (str or Dn): distinguished name to query; stored internally as
                its string form and used to build the /api/mo/<dn> URI base
        """
        super(DnQuery, self).__init__()
        self.__dnStr = str(dn)
        self.__options = {}
        self.uriBase = "/api/mo/{0}".format(self.__dnStr)
@property
def options(self):
"""
Returns the concatenation of the class and base class options for HTTP
request query string
"""
return '&'.join(filter(None, [AbstractRequest.makeOptions(
self.__options), super(DnQuery, self).options]))
    # property setters / getters for this class
    @property
    def dnStr(self):
        """
        str: the distinguished-name string this DnQuery was built from
        (read-only; set once in __init__).
        """
        return self.__dnStr
def __hash__(self):
url = '{0}/{1}'.format(self.__dnStr, | |
import inspect
import torch
import collections
import textwrap
import functools
import warnings
from typing import Dict, List, Set, Type
import torch._jit_internal as _jit_internal
from torch.jit.frontend import get_default_args, get_jit_def, get_class_properties
from torch.jit._builtins import _find_builtin
from torch.nn import Module
from torch._six import get_function_from_type, bind_method
# Pairs a parsed TorchScript def AST (`def_`) with the resolution callback
# used to resolve free names in it; `original_method` keeps the original
# Python function around (e.g. for default-argument extraction).
ScriptMethodStub = collections.namedtuple('ScriptMethodStub', ('resolution_callback', 'def_', 'original_method'))
# Same idea for @property definitions; no original method is retained.
PropertyStub = collections.namedtuple('Property', ('resolution_callback', 'def_'))
# TODO: there should be a more principled way of doing this.
# nn.Module bookkeeping attributes that must never be treated as script
# attributes when inferring a module's concrete type.
ignored_attributes = [
    "_version",
    "_parameters",
    "_buffers",
    "_modules",
    "_initializing",
    "_backward_hooks",
    "_forward_hooks",
    "_forward_pre_hooks",
    "_state_dict_hooks",
    "_load_state_dict_pre_hooks",
    "dump_patches",
]
def make_stub(func, name):
    """Parse `func` into a TorchScript def AST named `name` and wrap it,
    together with its name-resolution callback, in a ScriptMethodStub."""
    ast = get_jit_def(func, name, self_name="RecursiveScriptModule")
    rcb = _jit_internal.createResolutionCallbackFromClosure(func)
    return ScriptMethodStub(rcb, ast, func)
def make_stub_from_method(nn_module, method_name):
    """Create a ScriptMethodStub for `nn_module.<method_name>`.

    If the attribute is already a stub it is returned unchanged.
    """
    method = getattr(nn_module, method_name)
    if isinstance(method, ScriptMethodStub):
        return method
    # Make sure the name present in the resulting AST will match the name
    # requested here. The only time they don't match is if you do something
    # like:
    #   def _forward(self):
    #       pass
    #   forward = _forward
    # In this case, the actual function object will have the name `_forward`,
    # even though we requested a stub for `forward`.
    return make_stub(method, method_name)
def make_stubs_from_exported_methods(mod):
    """Collect ScriptMethodStubs for every attribute of `mod` that carries the
    @torch.jit.export modifier, in dir() order."""
    export = _jit_internal.FunctionModifiers.EXPORT
    return [
        make_stub_from_method(mod, name)
        for name in dir(mod)
        if _jit_internal.get_torchscript_modifier(getattr(mod, name, None)) is export
    ]
# base types that can be constants
# in addition, tuples and lists of these base types are also considered constants
# If you edit this list, then you also need to edit the handlers in
# ConstantValue in jit/script/init.cpp
_constant_types = (bool, float, int, str, type(None), torch.device, torch.layout, torch.dtype)
def _get_valid_constant(attr, v, owner_type):
if isinstance(v, _constant_types):
return v
elif isinstance(v, tuple) or isinstance(v, list):
return tuple(_get_valid_constant(attr, x, owner_type) for x in v)
constants = ", ".join(torch.typename(typ) for typ in _constant_types)
raise TypeError(textwrap.dedent("""
'{}' object in attribute '{}.{}' is not a valid constant.
Valid constants are:
1. a nn.ModuleList
2. a value of type {{{}}}
3. a list or tuple of (2)
""".format(torch.typename(type(v)), owner_type, attr, constants)))
class SourceContext(torch._C._jit_tree_views.SourceRangeFactory):
    """Python-named wrapper over the C++ SourceRangeFactory, used to build
    source ranges (for error reporting) while compiling TorchScript."""
    def __init__(self, source, filename, file_lineno, leading_whitespace_len):
        super(SourceContext, self).__init__(source, filename, file_lineno, leading_whitespace_len)
def infer_concrete_type_builder(nn_module, share_types=True):
    """
    Build a ConcreteModuleTypeBuilder from an nn.Module. This
    ConcreteModuleType doesn't have a JIT type associated with it yet, it
    must be filled in by the caller.

    Args:
        nn_module: the nn.Module instance to describe.
        share_types: forwarded to get_module_concrete_type for submodules so
            structurally identical modules can share a JIT type.
    """
    concrete_type_builder = torch._C.ConcreteModuleTypeBuilder(type(nn_module))
    if isinstance(nn_module, (torch.nn.ModuleDict)):
        concrete_type_builder.set_module_dict()
    if isinstance(nn_module, (torch.nn.ModuleList, torch.nn.Sequential)):
        concrete_type_builder.set_module_list()
    class_annotations = getattr(nn_module, '__annotations__', {})
    if isinstance(nn_module, (torch.quantization.QuantWrapper)):
        class_annotations = {}
    # Get user-annotated ignored attributes.
    user_annotated_ignored_attributes = getattr(nn_module, "__jit_ignored_attributes__", list())
    concrete_type_builder.add_ignored_attributes(user_annotated_ignored_attributes)

    # try to infer the type from type annotation or from the object itself
    def infer_type(name, item):
        # The forward function from Module is special; never use this annotations; we
        # need to infer type directly using JIT. I originally wanted to write
        # this test as isinstance(class_annotations[name], Callable) but
        # isinstance on typing things doesn't seem to work: isinstance(list, Callable)
        # is also true!
        inferred = False
        if name in class_annotations and class_annotations[name] != torch.nn.Module.__annotations__["forward"]:
            ann_to_type = torch.jit.annotations.ann_to_type(class_annotations[name], _jit_internal.fake_range())
            attr_type = torch._C.InferredType(ann_to_type)
        elif isinstance(item, torch.jit.Attribute):
            ann_to_type = torch.jit.annotations.ann_to_type(item.type, _jit_internal.fake_range())
            attr_type = torch._C.InferredType(ann_to_type)
        else:
            attr_type = torch._C._jit_try_infer_type(item)
            inferred = True
        return attr_type, inferred

    added_names = set()
    for name, item in nn_module._parameters.items():
        if name in user_annotated_ignored_attributes:
            continue
        assert item is None or isinstance(item, torch.Tensor)
        attr_type, _ = infer_type(name, item)
        # We currently have the invariant in various places in our code
        # that parameters must be Tensors. However, the nn.Module API also
        # allows NoneType parameters. These parameters are not returned as
        # part of `parameters()` and its variants, but are available
        # through direct attribute access.
        concrete_type_builder.add_attribute(name, attr_type.type(), True, False)
        added_names.add(name)
    for name, item in nn_module._buffers.items():
        if name in user_annotated_ignored_attributes:
            continue
        assert item is None or isinstance(item, torch.Tensor)
        attr_type, _ = infer_type(name, item)
        concrete_type_builder.add_attribute(name, attr_type.type(), False, True)
        added_names.add(name)
    for name, item in nn_module._modules.items():
        if name in user_annotated_ignored_attributes:
            continue
        attr_type, _ = infer_type(name, item)
        if item is None:
            # Modules can be None. We don't have direct support for optional
            # Modules, so the register it as an NoneType attribute instead.
            concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
            continue
        if attr_type.success():
            assert attr_type.type().is_interface_type()
            # if the type can be inferred, it should be a module interface type
            sub_concrete_type = torch._C.ConcreteModuleType.from_jit_type(attr_type.type())
        else:
            # otherwise we get the concrete module type for item and add it to concrete_type
            sub_concrete_type = get_module_concrete_type(item, share_types)
        concrete_type_builder.add_module(name, sub_concrete_type)
        added_names.add(name)
    # populate constants_set
    # BUGFIX: copy into a fresh set. `__constants__` is conventionally a list,
    # so the old `getattr(nn_module, "__constants__", set())` crashed on
    # `.add()` below whenever a `Final` annotation was present, and when it
    # *was* a set, mutating it in place leaked names onto the class attribute
    # shared by every instance.
    constants_set = set(getattr(nn_module, "__constants__", ()))
    # Constants annotated via `Final[T]` rather than being added to `__constants__`
    for name, ann in class_annotations.items():
        if torch._jit_internal.is_final(ann):
            constants_set.add(name)
    for name in constants_set:
        if name in added_names:
            # TODO: We should really error in this case, but its bc-breaking so
            # we need to warn for at least one release
            if name in nn_module._modules:
                hint = "submodule"
            elif name in nn_module._buffers:
                hint = "buffer"
            elif name in nn_module._parameters:
                hint = "parameter"
            else:
                raise AssertionError("added_names must be submodule, parameter, or buffer")
            warnings.warn("'{}' was found in ScriptModule constants, "
                          " but it is a non-constant {}. Consider removing it.".format(name, hint))
            continue
        if not hasattr(nn_module, name):
            # TODO: We should really error in this case, but its bc-breaking so
            # we need to warn for at least one release
            warnings.warn("'{}' was found in ScriptModule constants, "
                          "but was not actually set in __init__. "
                          "Consider removing it.".format(name))
            continue
        value = getattr(nn_module, name)
        concrete_type_builder.add_constant(name, _get_valid_constant(name, value, type(nn_module).__name__))
        added_names.add(name)
    # populate overloads
    overloads = getattr(nn_module, "__overloads__", {})
    # update with any annotated overloads
    overloads.update(get_overload_name_mapping(get_overload_annotations(nn_module)))
    for name, overloaded_names in overloads.items():
        concrete_type_builder.add_overload(name, overloaded_names)
    for name, value in nn_module.__dict__.items():
        if name in ignored_attributes or name.startswith("__"):
            # Python objects have lots of random attributes attached to them;
            # PyTorch adds a few more. Prevent these from getting compiled.
            continue
        if name in user_annotated_ignored_attributes:
            continue
        if name in added_names:
            # Don't re-add anything we already added
            continue
        # Handle Python function attributes
        if inspect.isfunction(value):
            try:
                scripted_fn = torch.jit.script(value)
                concrete_type_builder.add_function_attribute(
                    name,
                    torch._C._jit_try_infer_type(scripted_fn).type(),
                    value)
            except Exception as e:
                # If we fail to script the function, it isn't a hard error.
                # Instead, we will add it to the list of attributes we failed
                # to convert, with the compilation error.
                hint = ("(This function exists as an attribute on the Python module, "
                        "but we failed to compile it to a TorchScript function. "
                        "\nThe error stack is reproduced here:\n{}").format(e)
                concrete_type_builder.add_failed_attribute(name, hint)
            continue
        # Handle calls to builtin functions (either bespoke builtins from torch.jit._builtins or
        # a call to an aten function like torch.add)
        builtin_symbol_name = _find_builtin(value)
        if builtin_symbol_name:
            concrete_type_builder.add_builtin_function(name, builtin_symbol_name)
            continue
        # Handle Script function attributes
        if isinstance(value, torch.jit.ScriptFunction):
            concrete_type_builder.add_function_attribute(
                name,
                torch._C._jit_try_infer_type(value).type(),
                value)
            continue
        # If we got here, this is a regular "data" attribute, Add it to the concrete type
        attr_type, inferred = infer_type(name, value)
        if attr_type.success():
            concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
        else:
            # TODO: could add more detail here. For example, what the user should do
            # when the pytype is `list` or `NoneType`
            inferred_msg = "Its type was inferred; try adding a type annotation for the attribute." if inferred else ""
            additional_info = f"{attr_type.reason()}. {inferred_msg}"
            hint = "(This attribute exists on the Python module, " \
                f"but we failed to convert Python type: '{torch.typename(type(value))}' " \
                f"to a TorchScript type. {additional_info})"
            concrete_type_builder.add_failed_attribute(name, hint)
    # add hooks to concrete type
    for hook in nn_module._forward_hooks.values():
        concrete_type_builder.add_forward_hook(hook)
    for pre_hook in nn_module._forward_pre_hooks.values():
        concrete_type_builder.add_forward_pre_hook(pre_hook)
    return concrete_type_builder
class ConcreteTypeStore(object):
    """Cache of ConcreteModuleTypes so that structurally identical nn.Module
    instances can share a single underlying JIT type."""
    type_store: Dict[Type[Module], List[torch._C.ConcreteModuleType]]
    methods_compiled: Set[torch._C.ConcreteModuleType]

    def __init__(self):
        # Python module type => List[ConcreteModuleType)]
        self.type_store = {}
        # ConcreteTypes that have had their methods already compiled
        self.methods_compiled = set()

    def get_or_create_concrete_type(self, nn_module):
        """
        Infer a ConcreteType from this `nn.Module` instance. Underlying JIT
        types are re-used if possible.
        """
        builder = infer_concrete_type_builder(nn_module)
        candidates = self.type_store.setdefault(type(nn_module), [])
        # Reuse an already-built JIT type when an equal one is cached
        for candidate in candidates:
            if candidate.equals(builder):
                return candidate
        # Nothing cached matched; build a fresh JIT type and remember it
        fresh_type = builder.build()
        candidates.append(fresh_type)
        return fresh_type

concrete_type_store = ConcreteTypeStore()
def create_methods_and_properties_from_stubs(concrete_type, method_stubs, property_stubs):
method_defs = [m.def_ for m in method_stubs]
method_rcbs = [m.resolution_callback for m in method_stubs]
method_defaults = [get_default_args(m.original_method) for m in method_stubs]
property_defs = [p.def_ for p in property_stubs]
property_rcbs = | |
import sys
import warnings
from collections import defaultdict
from itertools import product, combinations
import numpy as np
import pandas as pd
import scipy.stats
from networkx import Graph
from plotly.graph_objs import Scatter, Figure, Layout
from scipy.stats._continuous_distns import _distn_names
from scipy.stats.kde import gaussian_kde
from lyner.click_extras import Pipe
# Shorthand for building pandas MultiIndex slices, e.g. df.loc[:, idx[group, param]].
idx = pd.IndexSlice
class Density(Scatter):
    """Plotly Scatter trace drawing a kernel-density estimate of the input.

    With only `x` (or only `y`) supplied, a 1-d KDE is sampled at 100 points;
    with both `x` and `y`, a 2-d KDE is evaluated on a 100x100 grid.
    `orientation='h'` swaps the axes and fills toward x=0 instead of y=0.
    """
    def __init__(self, *args, kde=gaussian_kde, bw_method=None, **kwargs):
        orientation = kwargs.pop('orientation', 'v')
        super(Density, self).__init__(*args, **kwargs)
        # Fall back to the other axis so one sequence may be given as x OR y.
        # NOTE(review): if neither 'x' nor 'y' is passed, x is None and the
        # masking below fails -- presumably callers always pass at least one.
        x = kwargs.get('x', kwargs.get('y', None))
        y = kwargs.get('y', kwargs.get('x', None))
        # Drop NaN/inf entries before estimating the density
        x = x[~(np.isnan(x) | np.isinf(x))]
        y = y[~(np.isnan(y) | np.isinf(y))]
        xmin, xmax = np.min(x), np.max(x)
        ymin, ymax = np.min(y), np.max(y)
        data = x
        if 'x' in kwargs and 'y' in kwargs:
            # 2-d KDE: evaluate the kernel on a 100x100 grid over the data range
            data = np.vstack([data, y])
            xpos, ypos = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
            positions = np.vstack([xpos.ravel(), ypos.ravel()])
        else:
            # 1-d KDE: sample at 100 evenly spaced points
            xpos = np.linspace(xmin, xmax, 100)
            positions = xpos
        kernel = kde(data, bw_method=bw_method)
        Z = np.reshape(kernel(positions).T, xpos.shape)
        if orientation == 'h':
            self.x = Z
            self.y = xpos
            self.fill = 'tozerox'
        else:
            self.x = xpos
            self.y = Z
            self.fill = 'tozeroy'
        self.mode = 'lines'
# mostly taken from by https://gist.github.com/arose13/bc6eb9e6b76e8bd940eefd7a0989ac81
def find_best_fit_distribution(data, distributions=None, criterion='sse', nbins=None):
    """Fit candidate scipy distributions to `data` and return the best fit.

    :param data: 1-d array-like sample.
    :param distributions: optional list of scipy.stats distributions to try;
        defaults to (almost) all continuous distributions.
    :param criterion: 'sse' (error against the data histogram) or 'aicc'.
    :param nbins: histogram bin count; estimated from the data when None.
    :return: (best_distribution, best_params, best_criterion_value)
    """
    if nbins is None:
        nbins = estimate_n_bins(data)
    y, x = np.histogram(data, bins=nbins, density=True)
    x = (x + np.roll(x, -1))[:-1] / 2.0  # bin centres
    # Distributions to check
    dist_names = set(_distn_names) - {'vonmises', 'digamma', 'dweibull'}
    distributions = [getattr(scipy.stats, dist_name, None) for dist_name in
                     dist_names] if distributions is None else distributions
    # Best holders
    best_distributions = {'aicc': scipy.stats.norm, 'sse': scipy.stats.norm}
    best_params = {'aicc': (0.0, 1.0), 'sse': (0.0, 1.0)}
    best_criterionvalue = {'aicc': np.inf, 'sse': np.inf}

    def aicc(distr, args, loc, scale, data, *_):
        # Corrected Akaike information criterion; lower is better.
        # Fixes: use the `args` parameter (the old code read the enclosing
        # loop variable `arg`), define the sample size n (previously a
        # NameError for criterion='aicc'), and drop a stray debug print.
        llh = np.sum(distr.logpdf(data, *args, loc=loc, scale=scale))
        k = len(args) + (1 if loc else 0) + (1 if scale else 0)
        n = len(data)
        aic = 2 * k - 2 * llh
        return aic + 2 * k * (k + 1) / (n - k - 1)

    def sse(distr, args, loc, scale, data, x, y, *_):
        # Sum of squared errors between the fitted pdf and the data histogram.
        pdf = distr.pdf(x, *args, loc=loc, scale=scale)
        return np.sum(np.power(y - pdf, 2.0))

    crit_calculation = {'aicc': aicc, 'sse': sse}
    for distribution in distributions:
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                params = distribution.fit(data)
                arg, loc, scale = params[:-2], params[-2], params[-1]
                value = crit_calculation[criterion](distribution, arg, loc, scale, data, x, y)
                if best_criterionvalue[criterion] > value > -np.inf:
                    best_distributions[criterion] = distribution
                    best_params[criterion] = params
                    best_criterionvalue[criterion] = value
        except NotImplementedError:
            pass
    return best_distributions[criterion], best_params[criterion], best_criterionvalue[criterion]
def estimate_n_bins(x):
    """
    Uses the Freedman Diaconis Rule for generating the number of bins required
    https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule
    Bin Size = 2 IQR(x) / (n)^(1/3)
    """
    from scipy.stats import iqr
    x = np.asarray(x)
    bin_width = 2 * iqr(x, nan_policy='omit') / (np.power(x.shape[0], 1.0 / 3))
    # A zero-width bin (constant data) falls back to sqrt(n) bins
    if bin_width == 0:
        n_bins = int(np.sqrt(x.shape[0]))
    else:
        n_bins = int(np.ceil((np.nanmax(x) - np.nanmin(x)) / bin_width))
    # Last-resort fallback for degenerate results
    if not n_bins or np.isnan(n_bins) or n_bins <= 0:
        n_bins = x.shape[0] // 2
    return n_bins
def _mk_networkx_figure(G: Graph, pos, use_weights=True, node_cliques=None):
    """Build a plotly Figure for graph `G` laid out at positions `pos`.

    :param G: networkx graph to draw.
    :param pos: mapping node -> (x, y) position.
    :param use_weights: draw edges as weight-shaded line shapes instead of a
        single plain line trace.
    :param node_cliques: optional mapping node -> list of cliques; nodes are
        coloured by their largest clique.
    """
    # BUGFIX: the default used to be `node_cliques=defaultdict(list)`, a
    # mutable default shared across calls (reading a missing key inserts into
    # a defaultdict, so the shared default grew on every call).
    if node_cliques is None:
        node_cliques = defaultdict(list)
    nodelist = list(G.nodes())
    edgelist = list(G.edges())
    node_xy = np.asarray([pos[v] for v in nodelist])
    edge_xy = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
    if use_weights:
        # Normalize weights into (0, 1] (the +0.05 keeps the minimum visible)
        weights = np.asarray([G.get_edge_data(e[0], e[1])['weight'] for e in edgelist])
        weights = (weights - np.min(weights) + 0.05) / (np.max(weights) - np.min(weights))
    else:
        weights = np.zeros_like(edge_xy)
    edge_trace = Scatter(
        x=[],
        y=[],
        text=[],
        line=dict(width=0.0, color='rgba(0,0,0,0)') if use_weights else dict(width=1.0, color='rgba(0,0,0,255)'),
        hoverinfo='text',
        mode='lines')
    shapes = []
    for (((x0, y0), (x1, y1)), w) in zip(edge_xy, weights):
        edge_trace['x'] += tuple([x0, x1, None])
        edge_trace['y'] += tuple([y0, y1, None])
        if use_weights:
            edge_trace['text'] += (f'{w}',)
            # Weighted edges are drawn as alpha-scaled line shapes below the markers
            shapes.append(dict(type='line',
                               x0=x0, x1=x1, y0=y0, y1=y1,
                               line=dict(width=w, color=f'rgba(0, 0, 0, {w})'),
                               layer='below'))
    node_trace = Scatter(
        x=[],
        y=[],
        text=[],
        mode='markers',
        hoverinfo='text',
        marker=dict(
            # showscale=True,
            # colorscale options
            # 'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
            # 'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
            # 'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
            # colorscale='YlGnBu',
            # reversescale=True,
            # color=[],
            size=10,
            # colorbar=dict(
            #     thickness=15,
            #     title='Node Connections',
            #     xanchor='left',
            #     titleside='right'
            # ),
            line=dict(width=2)))
    # Assign a stable integer id to every distinct clique
    clique_map = {}
    i = 0
    for cliques in node_cliques.values():
        for clique in cliques:
            k = tuple(sorted(clique))
            if k not in clique_map:
                clique_map[k] = i
                i += 1
    num_cliques = len(clique_map)
    # Spread clique colours evenly around the hue circle
    clique_colors = {c: f'hsv({(360 / (num_cliques + 1)) * i}, 0.75, 0.75)' for c, i in clique_map.items()}
    colors = []
    for i, ((x, y), node) in enumerate(zip(node_xy, nodelist)):
        possible_cliques = node_cliques[node]
        if len(possible_cliques) > 0:
            # Colour the node by its largest clique
            largest_clique = sorted(possible_cliques, key=lambda x: len(x), reverse=True)[0]
            colors.append(clique_colors[tuple(sorted(largest_clique))])
        else:
            colors.append('hsv(0, 0, 0)')
        node_trace['x'] += tuple([x])
        node_trace['y'] += tuple([y])
        node_trace['text'] += tuple([nodelist[i]])
    node_trace['marker']['color'] = colors
    return Figure(data=[edge_trace, node_trace],
                  layout=Layout(
                      showlegend=False,
                      hovermode='closest',
                      shapes=shapes,
                      # margin=dict(b=20, l=5, r=5, t=40),
                      xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
                      yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)
                  ))
class AutoDist(Scatter):
    """Scatter trace of the best-fitting scipy distribution's pdf over `x`.

    The chosen (distribution, params) pair is stashed in `self.customdata`.
    """
    def __init__(self, criterion='sse', nbins=None, distributions=None, *args, **kwargs):
        super(AutoDist, self).__init__(*args, **kwargs)
        data = kwargs.pop('x', None)
        n = data.shape[0] if hasattr(data, 'shape') else len(data)
        # BUGFIX: resolve the bin count up front. `nbins` is reused below for
        # the pdf sampling density, and with the default None the expression
        # `nbins * 2` raised a TypeError.
        if nbins is None:
            nbins = estimate_n_bins(data)
        best_distribution, best_param, best_critvalue = find_best_fit_distribution(data, criterion=criterion,
                                                                                  nbins=nbins,
                                                                                  distributions=distributions)
        print(f"Using {best_distribution.name} with params {best_param}, ({criterion} = {best_critvalue})",
              file=sys.stderr)
        args, loc, scale = best_param[:-2], best_param[-2], best_param[-1]
        self.x = np.linspace(np.min(data), np.max(data), np.max([n, nbins * 2]))
        self.y = best_distribution.pdf(self.x, *args, loc=loc, scale=scale) if args else best_distribution.pdf(self.x,
                                                                                                               loc=loc,
                                                                                                               scale=scale)
        self.mode = 'lines'
        self.fill = 'tozeroy'
        self.customdata = (best_distribution, best_param)
def _figure_add_changes(pipe: Pipe, fig):
    """Attach a horizontal bar trace of per-gene changes to `fig` (mutated in
    place) and return a separate histogram + fitted-density figure of the
    log per-cluster changes, with dashed lines at the fitted 2.5%/97.5%
    quantiles."""
    from plotly.graph_objs import Histogram, XAxis, Figure, Layout, Bar
    matrix = pipe.matrix
    chngs = pipe.changes
    values = chngs[chngs.columns[0]].values
    # BUGFIX: `np.float` was a deprecated alias of builtin float and was
    # removed in numpy 1.24.
    values = np.array(values, dtype=float)
    # Replace +-inf by the nearest finite percentile of the data
    minvalue = np.nanmin(values)
    i = 1
    while minvalue <= -np.inf:
        minvalue = np.nanpercentile(values, i)
        i += 1
    values[values == -np.inf] = minvalue
    maxvalue = np.nanmax(values)
    i = 99
    while maxvalue >= np.inf:
        maxvalue = np.nanpercentile(values, i)
        i -= 1
    values[values == np.inf] = maxvalue
    # (The old `values[values == np.nan] = 0` was a no-op -- NaN never
    # compares equal -- nan_to_num is what actually clears the NaNs.)
    values = np.nan_to_num(values)
    # Map values to hue/saturation for the per-gene bar colours
    ha, hb = (0, 90)
    hues = (hb - ha) * (values - minvalue) / (maxvalue - minvalue) + ha
    sa, sb = (10, 100)
    saturations = (sb - sa) * (values - minvalue) / (np.max(values) - minvalue) + sa
    colors = [f'hsl({hb - v}, {int(s)}%, 60%)' for v, s in zip(hues, saturations)]
    hist = Bar(x=values, y=list(map(str, matrix.genes)),
               xaxis='x3',
               yaxis='y',
               orientation='h',
               opacity=0.66,
               marker=dict(color=colors),
               )
    fig['data'].append(hist)
    fig['layout']['xaxis'] = XAxis(side='left', domain=[0, 0.85])
    fig['layout']['xaxis3'] = XAxis(side='right', domain=[0.86, 1])
    chngs['cluster'] = pipe.matrix.index.get_level_values(0)
    # One representative change value per cluster
    histdata = []
    for g in chngs.groupby(by='cluster'):
        cluster, data = g
        histdata.append(data[data.columns[0]].values[0])
    histdata = np.asarray(histdata, dtype=float)
    histdata = histdata[~(np.isinf(histdata) | np.isnan(histdata))]  # remove nans
    histdata = np.log(histdata)
    histdata = histdata[~(np.isinf(histdata) | np.isnan(histdata))]  # remove -np.inf (log(0))
    nbins = estimate_n_bins(histdata) * 2
    hist = Histogram(x=histdata, histnorm='probability density', nbinsx=nbins)
    dist = AutoDist(x=histdata, nbins=nbins, criterion='sse')
    distr, distr_params = dist.customdata
    dist.customdata = None
    p_low = distr.ppf(0.025, *distr_params)
    p_high = distr.ppf(0.975, *distr_params)
    changes_fig = Figure(data=[hist, dist],
                         layout=Layout(shapes=
                                       [dict(type='line', x0=p_low, x1=p_low, y0=0, y1=1, line=dict(dash='dash')),
                                        dict(type='line', x0=p_high, x1=p_high, y0=0, y1=1,
                                             line=dict(dash='dash'))]))
    return changes_fig
def _figure_get_estimates(pipe):
from plotly.graph_objs import Histogram, Scatter, XAxis, YAxis, Figure, Layout
groups = np.unique(pipe.estimate.columns.get_level_values(0))
param_names = np.unique(pipe.estimate.columns.get_level_values(1))
from plotly import tools
# FIXME: prone to fail for len(groups) not in scales (or if 'Set1' not in scales[len(groups)])
# colors = {group: color for group, color in zip(groups, cl.scales[f'{len(groups)}']['qual']['Set1'])}
colors = defaultdict(None)
colors['all'] = None
if len(param_names) > 2:
param_tuples = list(product(param_names, param_names))
n = len(param_tuples)
nn = int(np.ceil(np.sqrt(n)))
supplementary_fig = tools.make_subplots(rows=nn, cols=nn,
subplot_titles=[(f'{a} vs {b}' if a != b else f'{a}')
for a, b in param_tuples],
print_grid=False)
for i, pt in enumerate(param_tuples):
a, b = pt
row, col = divmod(i, nn)
if a == b:
traces = [Histogram(x=pipe.estimate.loc[:, idx[group, a]].values.ravel(),
marker=dict(color=colors[group]),
name=group,
legendgroup=group,
showlegend=False)
for group in groups]
else:
traces = [Scatter(x=pipe.estimate.loc[:, idx[group, a]].values.ravel(),
y=pipe.estimate.loc[:, idx[group, b]].values.ravel(),
mode='markers',
marker=dict(color=colors[group]),
name=group,
legendgroup=group,
showlegend=row == 0 and col == 1)
# show only a single legend for all plots (with the same groups)
for group in groups]
for trace in traces:
supplementary_fig.append_trace(trace, row + 1, col + 1)
supplementary_fig['layout'][f'xaxis{i + 1}'].update(dict(title=a))
if a != b:
supplementary_fig['layout'][f'yaxis{i + 1}'].update(dict(title=b))
else:
supplementary_fig['layout'][f'yaxis{i + 1}'].update(dict(title='freq'))
supplementary_fig['layout']['title'] = pipe.distribution.name
yield (supplementary_fig, "scattermatrix")
# TODO: proper labels for triangles
triangles = defaultdict(set)
for id, row in pipe.estimate.iterrows():
for group_a, group_b in combinations(groups, 2):
x_a, y_a = row.loc[group_a, param_names[0]], row.loc[group_a, param_names[1]]
x_b, y_b = row.loc[group_b, param_names[0]], row.loc[group_b, param_names[1]]
triangles[id].add((x_a, y_a, group_a))
triangles[id].add((x_b, y_b, group_b))
areas = {}
centroids = {}
tcolors = {}
max_tcolor = 0
max_area = 0
for (k, tri) in triangles.items():
| |
max number of lines has been reached yet
if nlines is not None:
if line_count >= nlines:
break
# Bundle and save data set
# TODO: [nfr] remove this from here, return dictionary and make assignment in __init__
return dict(section_list=pd.DataFrame(section_list), feature_mapping=pd.DataFrame(feature_section_mapping),
feature_list=feature_list)
# TODO: add tests, alterate file formats
def read_file_data(self, filename: str, nlines: int = None, start_line: int = 0) -> dict:
"""
Reads in un-annotated files expecting organization to be one document per line
Sentence tokenizer will take care of splitting documents into sentences
ex. parsed_data = read_file_data(filename='../tests/data/parse_and_model/oneLinePerDoc.txt', nlines=3)
:param filename: Filename for the un-annotated data set
:param nlines: Maximum number of lines from the file to read or None to read all lines
:param start_line: Optional parameter, specific line number to start at, mostly for testing purposes
:return: a dictionary with the following data
section_list: DataFrame with the following form
| doc_id (int) | section_id (int) | section_text (str) | title (bool) |
doc_id: integer id for the document
section_id: integer id for the section
section_text: cleaned (lowercase, trimmed) section text
title: True if the line is a title, False otherwise
"""
logging.info("Reading data from file: ", filename)
doc_id = -1
section_id = 0
section_list = []
line_number = 0
with open(filename, 'r') as input_file:
for line in input_file:
# Skip line if before specified start
if line_number < start_line:
# Increment line number
line_number += 1
continue
else:
# Increment line number
line_number += 1
# Each line is new doc
doc_id += 1
# Parse doc and split into sentences
# TODO: add a sentence tokenizer here
sentence_list = sent_tokenize(line)
for sentence in sentence_list:
logging.debug(sentence)
# Add section line to data set
section_list.append(
{"doc_id": doc_id, "section_id": section_id, "section_text": sentence})
# Increment section id
section_id += 1
logging.debug(line)
# Check if max number of lines has been reached yet
if nlines is not None:
if doc_id + 1 >= nlines:
break
# Bundle and return data set
return dict(section_list=pd.DataFrame(section_list))
# TODO: Slow, needs to be optimized, unit tests need to be added
def build_explicit_models(self, remove_stopwords: bool = True,
lemmatize_words: bool = True,
log_base: int = None) -> dict:
"""
This function builds a background model, set of topic models and summarizes the counts of words in each sentence
to prepare for EM optimization
:param remove_stopwords: Set to true if stop words should be removed from document sections before models are
created
:param lemmatize_words: Set to true if lemmatization should be performed on document sections before models are
created
:param log_base: Optional parameter to specify log base, defaults to ln if not set
:return: a dictionary with six entries -
model_background: background model estimated from the entire document collection as described in section 4.2
model_feature: feature models estimated from explicit mention sections as described in section 4.2
model_background_matrix: background model estimated from the entire document collection as described in
section 4.2 in dense ndarray form as follows:
Word | Background probability
----------------------------------------
word 1 | p(w1 | B)
... ... | ...
word v | p(wv | B)
model_feature_matrix: feature models estimated from explicit mention sections as described in section 4.2 in
dense ndarray form as follows:
Word/feature | feature 1 ... ... feature k
-----------------------------------------------------
word 1 | p(w1 | f1) ... ... p(w1 | fk)
... ... ... ... ...
word v | p(wv | fk) ... ... p(wv | fk)
section_word_counts_matrix: sparse csr matrix with word counts in each section as needed by the EM alg as
follows:
Section/Word | word 1 ... ... ... ... word v
---------------------------------------------------
Section 1 | count(s_1,w_1) ... ... count(s_1, w_v)
Section 2 | count(s_2,w_2) ... ... count(s_2, w_v)
... ... ... ... ... ... ...
Section m | count(s_m, w_1)... ... count(s_m, w_v)
vocabulary_lookup: a dictionary with
key: word id used in models, matrices, etc.
value: actual word
feature_section_mapping: a data frame with the mapping between explicit features and sections
explicit_feature_id | section_id
---------------------------------------
explicit feature id 1 | section id 1
explicit feature id 2 | section id 2
"""
logging.info("Building explicit models.")
text_set = self.parsed_text["section_list"]
feature_set = self.formatted_feature_list
section_word_list = dict() # list of all words in each section
section_word_counts = dict() # count of words in each section
collection_word_counts = Counter() # count of all words in all section
word_section_counter = Counter() # count of number of sections with word
feature_word_counter = defaultdict(
Counter) # keep track of words appearing in section w/ explicit feature mention
feature_section_mapping = [] # keeps a list of the sentence ids associated with each feature (many-to-many mapping)
vocabulary = OrderedDict()
current_word_id = -1
unique_feature_ids = feature_set.feature_id.unique()
# initialize Spacy model
nlp = en_core_web_sm.load()
annotated_text = text_set['section_text'].values.tolist()
docs = nlp.pipe(annotated_text, batch_size=1000, n_threads=4)
section_list = []
for item in docs:
section_list.append(item)
# loop over all rows in input data set
for index, row in text_set.iterrows():
# print the current text for debugging
logging.debug(str(row["section_id"]) + ": " + row["section_text"])
# input the sentence into Spacy
section = section_list[index] # nlp(row["section_text"])
# add each parsed word into a list
current_section_words = []
for word in section:
if word.lower_.strip() == '':
continue
# convert word to lowercase, strip stop words and lemmatize if requested
if (not word.is_stop or not remove_stopwords) and not word.is_punct:
cleaned_word = word.lemma_ if (word.lemma_ != '-PRON-' and lemmatize_words) else word.lower_
current_section_words.append(cleaned_word)
# assign word an id if it doesn't have one already
if cleaned_word not in vocabulary:
current_word_id += 1
vocabulary[cleaned_word] = current_word_id
# get a count of distinct words in the section - this might need to be switched to default dict later
current_section_word_counts = Counter(current_section_words)
# get keys for distinct words to add to idf counter
word_section_counter.update(current_section_word_counts.keys())
# add these counts to the all section counter
collection_word_counts.update(current_section_words)
# add to section counts dictionary
section_word_counts[row["section_id"]] = current_section_word_counts
# add to dictionary holding word parsing
section_word_list[row["section_id"]] = current_section_words
# initialize list to keep track of found features (in case of synonyms)
found_features = set()
# get all explicit topics for this sentence and add these words to the list
for index_f, row_f in feature_set.iterrows():
# word was found in the section, record find and add words to feature topic model
if row_f["feature"] in current_section_words:
logging.debug("feature " + str(row_f["feature_id"]))
if row_f["feature_id"] in found_features:
# already found explicit feature mention in sentence as synonym, skip
continue
else:
# feature has not been found yet, add to the list
found_features.add(row_f["feature_id"])
# record that feature was explicitly found
feature_section_mapping.append({"section_id": row["section_id"], "explicit_feature_id": row_f["feature_id"]})
# if we only count each word once
# feature_word_counter[row_f["feature_id"]].update(current_section_word_counts.keys())
# if we count each words as many times as it occurs (consistent with Santu's code)
feature_word_counter[row_f["feature_id"]].update(current_section_words)
# At this point we have all the counts we need to build the topic models
####################################
# Calculations for background model
####################################
# total number of words
vocabulary_size = len(collection_word_counts.values())
total_word_count = sum(collection_word_counts.values())
# change counter to dictionary for calculations
collection_word_counts = dict(collection_word_counts)
# calculate background model - ensure words are in key order
model_background = []
for word, word_id in vocabulary.items():
model_background.append(collection_word_counts[word] / total_word_count)
###############################
# Calculations for topic model
###############################
tfidf_feature = defaultdict(dict)
model_feature_norms = Counter()
# count of sentences
section_count = len(section_word_list)
for word in collection_word_counts.keys():
for current_feature in unique_feature_ids:
#######################################################################################################
# Formula 4, section 4.2, using base e logs by default but can be changed, also adds +1 from Formula 5
#######################################################################################################
if log_base is None:
tfidf = math.log(1 + feature_word_counter[current_feature][word]) \
* math.log(1 + section_count / word_section_counter[word]) \
+ 1
else:
tfidf = math.log(1 + feature_word_counter[current_feature][word], log_base) \
* math.log(1 + section_count / word_section_counter[word], log_base) \
+ 1
logging.debug(str(tfidf))
tfidf_feature[current_feature][word] = tfidf
model_feature_norms[current_feature] += tfidf
# normalize values of all dictionaries with totals
model_feature = []
for index in feature_set["feature_id"].unique():
logging.debug("normalizing " + str(index))
#########################################################################################################
# Formula 5, section 4.2, using base e logs by default, +1 in | |
= ds.set_coords(
['yyyyddd', 'mission_time', 'start_time', 'start_latitude', 'start_longitude', 'start_altitude', 'end_time',
'end_latitude', 'end_longitude', 'end_altitude', ])
return ds
def l2_v5_1_5_2_binary_to_dataset(file) -> xr.Dataset:
"""
Read the Level 2 Solar Event Species Profiles for a version 5.1/5.2 SAGE III or SAGE III ISS binary file.
https://eosweb.larc.nasa.gov/sites/default/files/project/sage3/guide/Data_Product_User_Guide.pdf
"""
print(file)
# Read all the data into memory
with open(file, 'rb') as f:
# Read the File Header
(event_id, date, fraction_time, latitude, longitude, time, fill_value_int, fill_value_float, mission_id) = \
unpack('>iifffiifi', f.read(9 * 4))
# Read the Version Tracking data
(L0DO_ver, CCD_ver, L0_ver, software_ver, dataproduct_ver, spectroscopy_ver, gram95_ver, met_ver) = \
unpack('>fiffffff', f.read(8 * 4))
# Read the File Description
(altitude_spacing, num_bins, num_met_grid, num_aer_wavelengths, num_ground_tracks, num_aer_bins) = \
unpack('>fiiiii', f.read(6 * 4))
# Read the Event Type data
(event_type_spacecraft, event_type_earth, beta_angle, aurora_flag, ephemeris_source) = \
unpack('>iifii', f.read(5 * 4))
# Read Ground Track Information
gt_date = np.array(unpack('>' + 'i' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.int32)
gt_time = np.array(unpack('>' + 'i' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.int32)
gt_latitude = np.array(unpack('>' + 'f' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.float32)
gt_longitude = np.array(unpack('>' + 'f' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.float32)
gt_ray_dir = np.array(unpack('>' + 'f' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.float32)
# Read Space Craft Information
sc_latitude = np.array(unpack('>' + 'f' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.float32)
sc_longitude = np.array(unpack('>' + 'f' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.float32)
sc_altitude = np.array(unpack('>' + 'f' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.float32)
# Read Profile Altitude Levels data
homogeneity = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
altitude = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
geopotential_altitude = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
# Read the Input Temp/Pres/ND for Retrievals
input_temperature = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
input_temperature_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
input_pressure = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
input_pressure_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
input_neutral_density = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
input_neutral_density_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
input_tp_source_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the Derived Tropopause data
(temperature_tropopause, altitude_tropopause, pressure_tropopause) = unpack('>fff', f.read(3 * 4))
# Read the Met Temp/Press profiles
met_pressure = np.array(unpack('>' + 'f' * num_met_grid, f.read(num_met_grid * 4)), dtype=np.float32)
met_temperature = np.array(unpack('>' + 'f' * num_met_grid, f.read(num_met_grid * 4)), dtype=np.float32)
met_temperature_error = np.array(unpack('>' + 'f' * num_met_grid, f.read(num_met_grid * 4)), dtype=np.float32)
met_altitude = np.array(unpack('>' + 'f' * num_met_grid, f.read(num_met_grid * 4)), dtype=np.float32)
(met_source, ) = unpack('>i', f.read(4))
# Read the CCD and Bit flags
(ccd_temperature, spectrometer_zenith_temperature, ccd_temperature_minus_tec, ephemeris_quality, speccalshift,
speccalstretch, qaflag) = unpack('>ffffffi', f.read(7 * 4))
qaflag_altitude = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the Composite Ozone data
o3_composite = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_composite_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_composite_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the Mesospheric Ozone data
o3_mesospheric = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_mesospheric_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_mesospheric_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the MLR Ozone data
o3_mlr = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_mlr_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_mlr_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the Ozone Least Squares data
o3 = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read Water Vapor data
water_vapor = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
water_vapor_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
water_vapor_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the NO2 data
no2 = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
no2_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
no2_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the Retrieved T/P data
temperature = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
temperature_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
pressure = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
pressure_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
tp_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the Aerosol Information
aerosol_wavelengths = np.array(unpack('>' + 'f' * num_aer_wavelengths,
f.read(num_aer_wavelengths * 4)), dtype=np.float32)
aerosol_half_bandwidths = np.array(unpack('>' + 'f' * num_aer_wavelengths,
f.read(num_aer_wavelengths * 4)), dtype=np.float32)
molecular_sct = np.array(unpack('>' + 'f' * num_aer_wavelengths,
f.read(num_aer_wavelengths * 4)), dtype=np.float32)
molecular_sct_uncert = np.array(unpack('>' + 'f' * num_aer_wavelengths,
f.read(num_aer_wavelengths * 4)), dtype=np.float32)
stratospheric_optical_depth = np.array(unpack('>' + 'f' * num_aer_wavelengths,
f.read(num_aer_wavelengths * 4)), dtype=np.float32)
stratospheric_optical_depth_error = np.array(unpack('>' + 'f' * num_aer_wavelengths,
f.read(num_aer_wavelengths * 4)), dtype=np.float32)
stratospheric_optical_depth_qa_flags = np.array(unpack('>' + 'i' * num_aer_wavelengths,
f.read(num_aer_wavelengths * 4)), dtype=np.int32)
# Read the Aerosol Extinction data
aerosol_extinction = np.ndarray((num_aer_wavelengths, num_aer_bins), dtype=np.float32)
aerosol_extinction_error = np.ndarray((num_aer_wavelengths, num_aer_bins), dtype=np.float32)
aerosol_extinction_qa_flags = np.ndarray((num_aer_wavelengths, num_aer_bins), dtype=np.int32)
for i in range(num_aer_wavelengths):
aerosol_extinction[i] = np.array(unpack('>' + 'f' * num_aer_bins,
f.read(num_aer_bins * 4)))
aerosol_extinction_error[i] = np.array(unpack('>' + 'f' * num_aer_bins,
f.read(num_aer_bins * 4)))
aerosol_extinction_qa_flags[i] = np.array(unpack('>' + 'i' * num_aer_bins,
f.read(num_aer_bins * 4)))
# Decode QABits
(HexErrFlag, ContWindowClosedFlag, TimeQualFlag, DMPExoFlag, BlockExoFlag, SpectCalFlag, SolarEclipseFlag) = \
(lambda x: [x >> bit & 1 for bit in range(7)])(qaflag)
event_datetime = yyyymmddhhmmss_to_datetime(date, time) if date != fill_value_int else np.datetime64('NaT')
gt_datetime = [yyyymmddhhmmss_to_datetime(date, time) if date != fill_value_int else np.datetime64('NaT')
for (date, time) in zip(gt_date, gt_time)]
# Return the data as an xarray dataset
ds = xr.Dataset(
{
'time': event_datetime,
'year_fraction': np.float32(fraction_time),
'latitude': np.float32(latitude),
'longitude': np.float32(longitude),
'event_type_spacecraft': np.int32(event_type_spacecraft),
'event_type_earth': np.int32(event_type_earth),
'beta_angle': np.float32(beta_angle),
'aurora_flag': np.int32(aurora_flag),
'ephemeris source': np.int32(ephemeris_source),
'gt_time': (['num_ground_tracks'], gt_datetime),
'gt_latitude': (['num_ground_tracks'], gt_latitude),
'gt_longitude': (['num_ground_tracks'], gt_longitude),
'gt_ray_dir': (['num_ground_tracks'], gt_ray_dir),
'sc_latitude': (['num_ground_tracks'], sc_latitude),
'sc_longitude': (['num_ground_tracks'], sc_longitude),
'sc_altitude': (['num_ground_tracks'], sc_altitude),
'homogeneity': (['altitude'], np.int32(homogeneity)),
'geopotential_alt': (['altitude'], geopotential_altitude),
'input_temperature': (['altitude'], input_temperature),
'input_temperature_error': (['altitude'], input_temperature_error),
'input_pressure': (['altitude'], input_pressure),
'input_pressure_error': (['altitude'], input_pressure_error),
'input_neutral_density': (['altitude'], input_neutral_density),
'input_neutral_density_error': (['altitude'], input_neutral_density_error),
'input_tp_source_flags': (['altitude'], input_tp_source_flags),
'temperature_tropopause': np.float32(temperature_tropopause),
'altitude_tropopause': np.float32(altitude_tropopause),
'pressure_tropopause': np.float32(pressure_tropopause),
'met_temperature': (['met_pressure'], met_temperature),
'met_temperature_error': (['met_pressure'], met_temperature_error),
'met_altitude': (['met_pressure'], met_altitude),
'met_source': np.float32(met_source),
'CCD_Temperature': np.float32(ccd_temperature),
'Spectrometer_Zenith_Temperature': np.float32(spectrometer_zenith_temperature),
'CCD_Temperature_minus_TEC': np.float32(ccd_temperature_minus_tec),
'Ephemeris_Quality': np.float32(ephemeris_quality),
'SpecCalShift': np.float32(speccalshift),
'SpecCalStretch': np.float32(speccalstretch),
'o3_composite': (['altitude'], o3_composite),
'o3_composite_error': (['altitude'], o3_composite_error),
'o3_composite_qa_flags': (['altitude'], o3_composite_qa_flags),
'o3_mesospheric': (['altitude'], o3_mesospheric),
'o3_mesospheric_error': (['altitude'], o3_mesospheric_error),
'o3_mesospheric_qa_flags': (['altitude'], o3_mesospheric_qa_flags),
'o3_mlr': (['altitude'], o3_mlr),
'o3_mlr_error': (['altitude'], o3_mlr_error),
'o3_mlr_qa_flags': (['altitude'], o3_mlr_qa_flags),
'o3': (['altitude'], o3),
'o3_error': (['altitude'], o3_error),
'o3_qa_flags': (['altitude'], o3_qa_flags),
'water_vapor': (['altitude'], water_vapor),
'water_vapor_error': (['altitude'], water_vapor_error),
'water_vapor_qa_flags': (['altitude'], water_vapor_qa_flags),
'no2': (['altitude'], no2),
'no2_error': (['altitude'], no2_error),
'no2_qa_flags': (['altitude'], no2_qa_flags),
'temperature': (['altitude'], temperature),
'temperature_error': (['altitude'], temperature_error),
'pressure': (['altitude'], pressure),
'pressure_error': (['altitude'], pressure_error),
'tp_qa_flags': (['altitude'], tp_qa_flags),
'aerosol_wavelengths': (['Aerosol_channel'], aerosol_wavelengths),
'Half Bandwidths of Aerosol Channels': (['Aerosol_channel'], aerosol_half_bandwidths),
'Molecular_SCT': (['Aerosol_channel'], molecular_sct),
'Molecular_SCT_uncert': (['Aerosol_channel'], molecular_sct_uncert),
'stratospheric_optical_depth': (['Aerosol_channel'], stratospheric_optical_depth),
'stratospheric_optical_depth_error': (['Aerosol_channel'], stratospheric_optical_depth_error),
'stratospheric_optical_depth_qa_flags': (['Aerosol_channel'], stratospheric_optical_depth_qa_flags),
'aerosol_extinction': (['Aerosol_channel', 'Aerosol_altitude'], aerosol_extinction),
'aerosol_extinction_error': (['Aerosol_channel', 'Aerosol_altitude'], aerosol_extinction_error),
'aerosol_extinction_qa_flags': (['Aerosol_channel', 'Aerosol_altitude'],
aerosol_extinction_qa_flags),
'HexErrFlag': np.int8(HexErrFlag),
'ContWindowClosedFlag': np.int8(ContWindowClosedFlag),
'TimeQualFlag': np.int8(TimeQualFlag),
'DMPExoFlag': np.int8(DMPExoFlag),
'BlockExoFlag': np.int8(BlockExoFlag),
'SpectralCalFlag': np.int8(SpectCalFlag),
'SolarEclipseFlag': np.int8(SolarEclipseFlag),
'DMPAltFlag': (['altitude'], np.int8(qaflag_altitude))
},
coords={
'event_id': np.int32(event_id),
'altitude': altitude,
'met_pressure': [1.00e+03, 9.75e+02, 9.50e+02, 9.25e+02, 9.00e+02, 8.75e+02, 8.50e+02,
8.25e+02, 8.00e+02, 7.75e+02, 7.50e+02, 7.25e+02, 7.00e+02, 6.50e+02,
6.00e+02, 5.50e+02, 5.00e+02, 4.50e+02, 4.00e+02, 3.50e+02, 3.00e+02,
2.50e+02, 2.00e+02, 1.50e+02, 1.00e+02, 7.00e+01, 5.00e+01, 4.00e+01,
3.00e+01, 2.00e+01, 1.00e+01, 7.00e+00, 5.00e+00, 4.00e+00, 3.00e+00,
2.00e+00, 1.00e+00, 7.00e-01, 5.00e-01, 4.00e-01, 3.00e-01, 1.00e-01],
'Aerosol_channel': range(num_aer_wavelengths),
'Aerosol_altitude': altitude[:num_aer_bins]
},
attrs={
'Mission Identification': mission_id,
'Version: Definitive Orbit Processing': np.float32(L0DO_ver),
'Version: CCD Table': np.float32(CCD_ver),
'Version: Level 0 Processing': np.float32(L0_ver),
'Version: Software Processing': np.float32(software_ver),
'Version: Data Product': np.float32(dataproduct_ver),
'Version: Spectroscopy': np.float32(spectroscopy_ver),
'Version: GRAM 95': np.float32(gram95_ver),
'Version: Meteorological': np.float32(met_ver),
'Altitude Based Grid Spacing': altitude_spacing,
'_FillValue': fill_value_int
})
# Assert dimension lengths are correct
assert(len(ds.num_ground_tracks) == num_ground_tracks)
assert(len(ds.altitude) == num_bins)
assert(len(ds.met_pressure) == num_met_grid)
assert(len(ds.Aerosol_channel) == num_aer_wavelengths)
for var in ds.variables:
if | |
timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Closure
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["closure_id"] = closure_id
return self.get_closure_endpoint.call_with_http_info(**kwargs)
def get_parking_lorry(self, lorry_id, **kwargs):
"""Details eines Rastplatzes # noqa: E501
Gibt Details eines konkreten Rastplatzes zurück. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_parking_lorry(lorry_id, async_req=True)
>>> result = thread.get()
Args:
lorry_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ParkingLorry
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["lorry_id"] = lorry_id
return self.get_parking_lorry_endpoint.call_with_http_info(**kwargs)
def get_roadwork(self, roadwork_id, **kwargs):
"""Details einer Baustelle # noqa: E501
Gibt Details zu einer konkreten Baustelle zurück. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_roadwork(roadwork_id, async_req=True)
>>> result = thread.get()
Args:
roadwork_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Roadwork
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["roadwork_id"] = roadwork_id
return self.get_roadwork_endpoint.call_with_http_info(**kwargs)
def get_warning(self, warning_id, **kwargs):
"""Details zu einer Verkehrsmeldung # noqa: E501
Gibt Details zu einer konkreten Verkehrsmeldung zurück. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_warning(warning_id, async_req=True)
>>> result = thread.get()
Args:
warning_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Warning
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["warning_id"] = warning_id
return self.get_warning_endpoint.call_with_http_info(**kwargs)
def get_webcam(self, webcam_id, **kwargs):
"""Details einer Webcam # noqa: E501
Gibt Details einer konkreten Webcam zurück. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_webcam(webcam_id, async_req=True)
>>> result = thread.get()
Args:
webcam_id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Webcam
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["webcam_id"] = webcam_id
return self.get_webcam_endpoint.call_with_http_info(**kwargs)
def list_autobahnen(self, **kwargs):
"""Liste verfügbarer Autobahnen # noqa: E501
Gibt eine Liste der verfügbaren Autobahnen zurück. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_autobahnen(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Roads
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.list_autobahnen_endpoint.call_with_http_info(**kwargs)
def list_charging_stations(self, road_id, **kwargs):
"""Liste aktueller Ladestationen # noqa: E501
Gibt eine Liste der Ladestationen zu einer Autobahn zurück. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_charging_stations(road_id, async_req=True)
>>> result = thread.get()
Args:
road_id (RoadId):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
| |
<filename>rmsd/calculate_rmsd.py
#!/usr/bin/env python
__doc__ = \
"""
Calculate Root-mean-square deviation (RMSD) of Two Molecules Using Rotation
===========================================================================
Calculate Root-mean-square deviation (RMSD) between structure A and B, in XYZ
or PDB format, using transformation and rotation. The order of the atoms *must*
be the same for both structures.
For more information, usage, example and citation read more at
https://github.com/charnley/rmsd
"""
__version__ = '1.2.5'
import re
import numpy as np
# Python 2/3 compatibility
# Make range a iterator in Python 2
try:
    # Python 2: alias the lazy ``xrange`` to ``range`` so the loops in this
    # module iterate without materializing whole lists.
    range = xrange
except NameError:
    # Python 3: ``range`` is already lazy; nothing to do.
    pass
def kabsch_rmsd(P, Q):
    """
    Align P onto Q with the optimal Kabsch rotation, then report their RMSD.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        root-mean squared deviation
    """
    aligned = kabsch_rotate(P, Q)
    return rmsd(aligned, Q)
def kabsch_rotate(P, Q):
    """
    Return a copy of P rotated by the optimal Kabsch rotation onto Q.

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    P : array
        (N,D) matrix, where N is points and D is dimension,
        rotated
    """
    rotation = kabsch(P, Q)
    return np.dot(P, rotation)
def kabsch(P, Q):
    """
    Compute the optimal rotation matrix U that maps P onto Q.

    Both point sets must already be centered on their centroids and contain
    paired points. Each set is an (N,D) matrix, N points in D dimensions.

    The Kabsch algorithm works in three steps:
    - build the covariance matrix C = P^T Q
    - take its singular value decomposition
    - correct for a possible reflection, then compose the rotation

    http://en.wikipedia.org/wiki/Kabsch_algorithm

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    U : matrix
        Rotation matrix (D,D)
    """
    # Covariance between the two centered point sets.
    C = np.dot(P.T, Q)
    V, S, W = np.linalg.svd(C)
    # det(V)*det(W) < 0 means the decomposition encodes an improper
    # rotation (a reflection); flip the last singular direction so the
    # result stays a proper rotation.
    if (np.linalg.det(V) * np.linalg.det(W)) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]
    # Optimal rotation matrix.
    return np.dot(V, W)
def quaternion_rmsd(P, Q):
    """
    Rotate matrix P unto Q and calculate the RMSD
    based on doi:10.1016/1049-9660(91)90036-O

    Parameters
    ----------
    P : array
        (N,D) matrix, where N is points and D is dimension.
    Q : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
    """
    rot = quaternion_rotate(P, Q)
    P = np.dot(P, rot)
    return rmsd(P, Q)
def quaternion_transform(r):
    """
    Build the 3x3 rotation matrix encoded by quaternion ``r``.

    note: translation will be zero when the centroids of each molecule are the
    same
    """
    # W(r)^T Q(r) is a 4x4 matrix whose upper-left 3x3 block is the rotation.
    return makeW(*r).T.dot(makeQ(*r))[:3, :3]
def makeW(r1, r2, r3, r4=0):
    """
    Build the 4x4 W matrix used in the quaternion-rotation derivation.
    """
    rows = (
        (r4, r3, -r2, r1),
        (-r3, r4, r1, r2),
        (r2, -r1, r4, r3),
        (-r1, -r2, -r3, r4),
    )
    return np.asarray(rows)
def makeQ(r1, r2, r3, r4=0):
    """
    Build the 4x4 Q matrix used in the quaternion-rotation derivation.
    """
    rows = (
        (r4, -r3, r2, r1),
        (r3, r4, -r1, r2),
        (-r2, r1, r4, r3),
        (-r1, -r2, -r3, r4),
    )
    return np.asarray(rows)
def quaternion_rotate(X, Y):
    """
    Calculate the rotation matrix that optimally maps X onto Y.

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.
    Y : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rot : matrix
        Rotation matrix (D,D)
    """
    N = X.shape[0]
    W = np.asarray([makeW(*Y[k]) for k in range(N)])
    Q = np.asarray([makeQ(*X[k]) for k in range(N)])
    # A = sum_k Q_k^T W_k; the eigenvector of A with the largest
    # eigenvalue is the optimal unit quaternion.
    # (The original also computed W[k] - Q[k] for every k, but that
    # result was never used — dead work, removed.)
    A = np.sum(np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)]), axis=0)
    eigen = np.linalg.eigh(A)
    r = eigen[1][:, eigen[0].argmax()]
    return quaternion_transform(r)
def centroid(X):
    """
    Calculate the centroid from a vectorset X.

    https://en.wikipedia.org/wiki/Centroid
    The centroid is the mean position of all the points in all of the
    coordinate directions: C = sum(X)/len(X).

    Parameters
    ----------
    X : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    C : ndarray
        The D-dimensional centroid.
    """
    return X.mean(axis=0)
def rmsd(V, W):
    """
    Calculate Root-mean-square deviation from two sets of vectors V and W.

    Parameters
    ----------
    V : array
        (N,D) matrix, where N is points and D is dimension.
    W : array
        (N,D) matrix, where N is points and D is dimension.

    Returns
    -------
    rmsd : float
        Root-mean-square deviation
    """
    # Vectorized replacement for the original pure-Python double loop
    # (which also shadowed this function's own name with a local).
    V = np.asarray(V, dtype=float)
    W = np.asarray(W, dtype=float)
    N = len(V)
    # Sum of squared per-coordinate differences, averaged over points.
    return np.sqrt(((V - W) ** 2).sum() / N)
def write_coordinates(atoms, V, title=""):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.

    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    """
    n_atoms = V.shape[0]
    # XYZ header: atom count, then a free-form title line.
    print(str(n_atoms))
    print(title)
    line_fmt = "{0:2s} {1:15.8f} {2:15.8f} {3:15.8f}"
    for idx in range(n_atoms):
        label = atoms[idx]
        # Capitalize only the first character of the element symbol.
        label = label[0].upper() + label[1:]
        print(line_fmt.format(label, V[idx, 0], V[idx, 1], V[idx, 2]))
def get_coordinates(filename, fmt):
    """
    Get coordinates from filename in format fmt. Supports XYZ and PDB.

    Parameters
    ----------
    filename : string
        Filename to read
    fmt : string
        Format of filename. Either xyz or pdb.

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    # Dispatch on the declared file format; bail out on anything else.
    parsers = {"xyz": get_coordinates_xyz, "pdb": get_coordinates_pdb}
    parser = parsers.get(fmt)
    if parser is not None:
        return parser(filename)
    exit("Could not recognize file format: {:s}".format(fmt))
def get_coordinates_pdb(filename):
"""
Get coordinates from the first chain in a pdb file
and return a vectorset with all the coordinates.
Parameters
----------
filename : string
Filename to read
Returns
-------
atoms : list
List of atomic types
V : array
(N,3) where N is number of atoms
"""
# PDB files tend to be a bit of a mess. The x, y and z coordinates
# are supposed to be in column 31-38, 39-46 and 47-54, but this is
# not always the case.
# Because of this the three first columns containing a decimal is used.
# Since the format doesn't require a space between columns, we use the
# above column indices as a fallback.
x_column = None
V = list()
# Same with atoms and atom naming.
# The most robust way to do this is probably
# to assume that the atomtype is given in column 3.
atoms = list()
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
if line.startswith("TER") or line.startswith("END"):
break
if line.startswith("ATOM"):
tokens = line.split()
# Try to get the atomtype
try:
atom = tokens[2][0]
if atom in ("H", "C", "N", "O", "S", "P"):
atoms.append(atom)
else:
# e.g. 1HD1
atom = tokens[2][1]
if atom == "H":
atoms.append(atom)
else:
raise Exception
except:
exit("Error parsing atomtype for the following line: \n{0:s}".format(line))
if x_column == None:
try:
# look for x column
for i, x in enumerate(tokens):
if "." in x and "." in tokens[i + 1] and "." in tokens[i + 2]:
x_column = i
break
except IndexError:
exit("Error parsing coordinates for the following line: \n{0:s}".format(line))
# Try to read the coordinates
try:
V.append(np.asarray(tokens[x_column:x_column + 3], dtype=float))
except:
# If that doesn't work, use hardcoded indices
try:
x = line[30:38]
y = line[38:46]
z = line[46:54]
V.append(np.asarray([x, y, z], dtype=float))
except:
exit("Error parsing input for the following line: \n{0:s}".format(line))
V = np.asarray(V)
atoms = np.asarray(atoms)
assert (V.shape[0] | |
# example_1 --> calculator
"""
print("\tCALCULATOR\n")
sayi1 = float(input("enter a value:"))
operator = input("operator:")
sayi2 = float(input("enter a value"))
if operator == "+":
print(sayi1+sayi2)
elif operator == "-":
print(sayi1-sayi2)
elif operator == "*":
print(sayi1*sayi2)
elif operator == "/":
print(sayi1/sayi2)
else:
print("THIS CALCULATOR CAN'T DO YOUR OPERATION")
"""
# example_2 --> hipotenus
"""
import math
a = float(input("ucgenin birinci dik kenarinin uzunlugu : "))
b = float(input("ucgenin ikinci dik kenarinin uzunlugu : "))
print(math.sqrt(a*a+b**2))
"""
# example_3 --> akim
"""
V = float(input("potansiyel : "))
R = float(input("direnc : "))
print("akim :",(V/R))
"""
# example_4 --> dictionary
"""
while True:
dictionary = dict()
word = input("word : ")
meaning = input("meaning : ")
dictionary[word] = [meaning]
y_n = input("add another (y,n):")
if y_n == "n":
break
print(dictionary)
"""
# example_5 --> bir dosyaya ogrencinin not ortalamasini yazdirma
"""
ad = input("ogrenci : ")
vize = int(input("vize notu : "))
final = int(input("final : "))
donem_sonu_notu = (vize*0.4)+(final*0.6)
if donem_sonu_notu >= 85:
harf_not = "AA"
elif donem_sonu_notu >= 70:
harf_not = "BA"
elif donem_sonu_notu >= 60:
harf_not = "BB"
elif donem_sonu_notu >= 50:
harf_not = "CB"
elif donem_sonu_notu >= 40:
harf_not = "CC"
else:
harf_not = "FF"
# 'a' !!
with open("notlar.txt","a") as dosya:
dosya.writelines("{} ogrencisinin notu : {} --> {}\n".format(ad,donem_sonu_notu,harf_not))
print(donem_sonu_notu,harf_not)
"""
# example_6 --> yazi-tura
"""
tekrar = int(input("bilgisayar kac kez yazi tura atsin : "))
yazi = 0
tura = 0
import random
parayuzeyi = ("yazi", "tura")
while tekrar > 0:
tekrar -= 1
secim = random.choice(parayuzeyi)
if secim == "yazi":
yazi += 1
else:
tura += 1
print("{} atis icerisinden {} kez yazi, {} kez tura geldi.".format((yazi + tura), yazi, tura))
"""
# example_7 --> asal sayi yazdirma
"""
sinir = int(input("hangi sayiya kadar asal sayilari yazdirayim : "))
list = []
for sayi in range(2, sinir):
i = 2
while i <= int(sayi / 2) and sayi % i != 0:
i += 1
if i == int(sayi / 2) + 1:
list.append(sayi)
print("{}'den kucuk asal sayilar --> ".format(sinir), *list)
"""
# example_8 --> faktoriyel
"""
faktoriyeli_alinacak_sayi = int(input("Kac faktoriyel : "))
total = 1
for x in range(1,faktoriyeli_alinacak_sayi+1):
total *= x
print(faktoriyeli_alinacak_sayi,"! = ",total,sep="")
print("{}! = {}".format(faktoriyeli_alinacak_sayi,total))
"""
# example_9 --> ATM
"""
bakiye = 2000
bilgi = "<NAME>"
while 1:
islem = int(input("1)Para cekme\n2)Para yatirma\n3)Kart bilgileri\n4)Kart iade\nSeciniz : "))
if islem == 1:
miktar = int(input("Cekilecek miktar : "))
if miktar > bakiye:
print("Bakiye yetersiz.")
else :
bakiye -= miktar
print("Yeni bakiyeniz : {}".format(bakiye))
elif islem == 2:
miktar = int(input("Yatirilacak miktar : "))
bakiye += miktar
print("Yeni bakiyeniz : {}".format(bakiye))
elif islem == 3:
print(bilgi,bakiye)
elif islem == 4:
break
else:
print("Islem secemediniz. Tekrar deneyiniz.")
"""
# example_10 --> girilen sayinin tam sayi bolenlerini bulma
"""
def bulma(sayi):
for i in range(1,sayi+1):
if sayi % i == 0:
dizi.append(i)
return dizi
dizi = []
sayi = int(input("sayi giriniz : "))
print(bulma(sayi))
"""
# example_11 --> ekok
"""
sayi1 = int(input("Birinci sayi : "))
sayi2 = int(input("Ikinci sayi : "))
carpım = sayi1 * sayi2
for i in range (max(sayi1,sayi2),carpım+1):
if i % sayi1 == 0 and i % sayi2 == 0:
print("{} ve {} sayilarinin ekoku : {}".format(sayi1, sayi2, i))
break
"""
# example_12 --> sayi tahmin oyunu
"""
from random import randint
devam = "e"
while devam == "e":
print("7 tahmin hakkiniz var.\nSayi (1,100) arasindadir.")
sayi = randint(1, 100)
deneme = 1
while deneme < 8:
tahmin = int(input("{}. tahmininizi giriniz : ".format(deneme)))
deneme += 1
if tahmin > sayi:
print("Asagi")
elif tahmin < sayi:
print("Yukari")
else:
print("Tebrikler bildiniz.")
break
if deneme == 8:
print("Bulamadiginiz sayi {} idi".format(sayi))
devam = input("Tekrar oynamak istiyor musunuz? (e,h): ")
"""
# example_13 --> class sirket dosya
"""
class sirket():
def __init__(self, calisans = [],butce = 0):
self.Calisanlar = calisans
self.butce = butce
def calisan_sayisi_bul(self):
return len(self.Calisanlar)
def calisan_ekleme(self,ekle):
self.Calisanlar.append(ekle)
def calisan_cikarma(self, cikar):
self.Calisanlar.remove(cikar)
def butce_ekle_cikar(self, sayi):
self.butce += sayi
def output(self):
print("\nCalisanlar :", *self.Calisanlar, sep=",")
print("butce = {}".format(self.butce))
        print("*************CREATED BY OUT OF WORLD COMPANY*************")
calisanlarR = []
sirketlerR = []
butcelerR = []
try:
with open("sirketlerin_bilgileri.txt", "r") as fpr:
sirketlerin_bilgileri = fpr.read()
bol = sirketlerin_bilgileri.split("/")
count = 0
print(bol)
while count < 3:
if count == 0:
sirketlerR.append(bol[0].split(","))
elif count == 1:
workers = bol[1].split(".")
for i in workers:
calisanlarR.append(i.split(","))
elif count == 2:
butcelerR.append(bol[2].split(","))
count += 1
print(sirketlerR,calisanlarR,butcelerR)
a = 0
for name in sirketlerR[0]:
y = name
vars()[y] = sirket(calisanlarR[a],int(butcelerR[0][a]))
a += 1
except IOError:
print("Have not opened yet")
with open("sirketlerin_bilgileri.txt", "a") as fpr:
pass
finally:
fpr.close()
while 1:
sec = int(input("Var olan sirket uzerinden islem mi yapicaksiniz (1) yoksa yeni sirket bilgileri mi ekleyeceksiniz (2) , cikis icin (3) :"))
if sec == 1:
s = input("Sirket adi : ")
eval(s)
elif sec == 2:
y_s = input("Sirket adi : ")
s = y_s
s = sirket()
elif sec == 3:
break
while 1:
if sec == 1:eval(s).output()
if sec == 2:s.output()
print("1-Calisan kaydetme 2-Calisan silme 3-Toplam maas",
"4-Sirket butce ogrenme 5-Butce ekleme 6-Butce silme","\t\t\t\t7-Cikis",sep="\n")
secim = int(input("Seciniz : "))
if secim == 1:
ekle = input("Yeni calisan : ")
if sec == 1:eval(s).calisan_ekleme(ekle)
if sec == 2:s.calisan_ekleme(ekle)
elif secim == 2:
cikar = input("Silinecek calisan : ")
if sec == 1:eval(s).calisan_cikarma(cikar)
if sec == 2:s.calisan_cikarma(cikar)
elif secim == 3:
maas = int(input("Ortalama bir calisanin maasi nedir : "))
if sec == 1:print("Yaklasik toplam maas =", maas * len(eval(s).Calisanlar))
if sec == 2:print("Yaklasik toplam maas =", maas * len(s.Calisanlar))
elif secim == 4:
if sec == 1:print(eval(s).butce)
if sec == 2:print(s.butce)
elif secim == 5:
sayi = int(input("Ne kadar eklenecek : "))
if sec == 1:eval(s).butce_ekle_cikar(sayi)
if sec == 2:s.butce_ekle_cikar(sayi)
elif secim == 6:
sayi = -int(input("Ne kadar azalacak : "))
if sec == 1:eval(s).butce_ekle_cikar(sayi)
if sec == 2:s.butce_ekle_cikar(sayi)
elif secim == 7:
if sec == 2:
sirketlerR[0].append(y_s)
calisanlarR.append(s.Calisanlar)
butcelerR[0].append(str(s.butce))
else:
i = 0
while s != sirketlerR[0][i]:
i += 1
calisanlarR[i] = eval(s).Calisanlar
butcelerR[0][i] = (str(eval(s).butce))
break
#print(*sirketlerR[0],*calisanlarR,*butcelerR)
yazilacak = ""
i = 0
for company in sirketlerR[0]:
yazilacak += company+","
i += 1
yazilacak = yazilacak[:-1]
yazilacak += "/"
for x in range(0, i):
for worker in calisanlarR[x]:
yazilacak += worker+","
yazilacak = yazilacak[:-1]
yazilacak += "."
yazilacak = yazilacak[:-1]
yazilacak += "/"
for money in butcelerR[0]:
yazilacak += money+","
yazilacak += "/"
yazilacak = yazilacak[:-2]
#print(yazilacak)
with open("sirketlerin_bilgileri.txt", "w") as fpw:
fpw.write(yazilacak)
"""
# example_14 --> basit mp3 calar
"""
import random
import os
class Mp3Calar:
def __init__(self,sarkilar_g=[], ses_g=50, calan="YOK"):
self.ses = ses_g
self.sarkilar = sarkilar_g
self.calan_sarki = calan
def sesArttir(self):
if self.ses == 100:
print("Max ses duzeyi")
else:
self.ses += 10
def sesAzalt(self):
if self.ses == 0:
print("Min ses duzeyi")
else:
self.ses -= 10
def SarkiSec(self):
sarki = input("Calacak sarki : ")
while self.sarkilar.count(sarki) == 0 and sarki != "-":
sarki = input("Sectiginiz sarki listede yok tekrar girin (cikmak icin '-') : ")
self.calan_sarki = sarki
def rastgeleSarkiSec(self):
self.calan_sarki = random.choice(self.sarkilar)
choice = 1
mp3 = Mp3Calar()
while choice != 7:
choice = int(input("Sarki listesi : {}\n".format(mp3.sarkilar) +
"Suan calan sarki : {}\n".format(mp3.calan_sarki) +
"Ses : {}\n".format(mp3.ses) +
"1-Sarki sec\n"
"2-Ses arttir\n"
"3-Ses azalt\n"
"4-Rastgele sarki sec\n"
"5-Sarki ekle\n"
"6-Sarki sil\n"
"7-Kapat\n"
"Seciniz : "))
if choice == 1:
mp3.SarkiSec()
elif choice == 2:
mp3.sesArttir()
elif choice == 3:
mp3.sesAzalt()
elif choice == 4:
mp3.rastgeleSarkiSec()
elif choice == 5:
sarki = "1"
print("Sarki eklemeyi bitirdiginizde '-' giriniz.")
while sarki != "-":
sarki = input("Yeni Sarki : ")
if mp3.sarkilar.count(sarki) == 1:
print("Sarki zaten bulunmakta.")
continue
mp3.sarkilar.append(sarki)
mp3.sarkilar.pop()
elif choice == 6:
sarki = "1"
print("Sarki silmeyi bitirdiginizde '-' giriniz.")
while sarki != "-":
sarki = input("Sarki listesi : {}".format(mp3.sarkilar) + "\nSilinecek Sarki : ")
try:
mp3.sarkilar.remove(sarki)
except:
pass
os.system("clear")
"""
# example_15 --> X-O-X Game
"""
class Game:
def __init__(self):
self.table = {
11: "-", 12: "-", 13: "-",
21: "-", 22: "-", 23: "-",
31: "-", 32: "-", 33: "-"}
self.bitmedi = 1
def hamleal(self, sira):
print("\nHamle sirasi O")
satir = int(input("satir : "))
sutun = int(input("sutun : "))
key = satir*10 +sutun
while self.table[key] != "-":
print("DOLU")
satir = int(input("satir : "))
sutun = int(input("sutun : "))
key = satir * 10 + sutun
self.table[key] = sira
def bitti_mi(self):
if self.table[11] == self.table[12] == self.table[13] != "-":
self.bitmedi = 0
elif self.table[21] == self.table[22] == self.table[23] != "-":
self.bitmedi = 0
elif self.table[31] == self.table[32] == self.table[33] != "-":
self.bitmedi = 0
elif self.table[11] == self.table[21] == self.table[31] != "-":
self.bitmedi = 0
elif self.table[12] == self.table[22] == self.table[32] != "-":
self.bitmedi = 0
elif self.table[13] == self.table[23] == self.table[33] != "-":
self.bitmedi = 0
elif self.table[11] == self.table[22] | |
<reponame>Sim7b/jellyfin_ha<filename>__init__.py
"""The jellyfin component."""
import logging
import time
import re
import traceback
import collections.abc
from typing import Mapping, MutableMapping, Optional, Sequence, Iterable, List, Tuple
import voluptuous as vol
from homeassistant.exceptions import ConfigEntryNotReady
from jellyfin_apiclient_python import JellyfinClient
from jellyfin_apiclient_python.connection_manager import CONNECTION_STATE
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ( # pylint: disable=import-error
ATTR_ENTITY_ID,
CONF_URL,
CONF_USERNAME,
CONF_PASSWORD,
CONF_VERIFY_SSL,
CONF_CLIENT_ID,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv # pylint: disable=import-error
from homeassistant.helpers.dispatcher import ( # pylint: disable=import-error
async_dispatcher_send,
)
from .const import (
DOMAIN,
SIGNAL_STATE_UPDATED,
SERVICE_SCAN,
STATE_OFF,
STATE_IDLE,
STATE_PAUSED,
STATE_PLAYING,
)
from .device import JellyfinDevice
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "media_player"]
UPDATE_UNLISTENER = None
USER_APP_NAME = "Home Assistant"
CLIENT_VERSION = "1.0"
PATH_REGEX = re.compile("^(https?://)?([^/:]+)(:[0-9]+)?(/.*)?$")
SCAN_SERVICE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
}
)
def autolog(message):
    """Log *message* together with the calling function's name and location."""
    import inspect
    # Get the previous frame in the stack, otherwise it would
    # be this function!!!
    func = inspect.currentframe().f_back.f_code
    # Pass format args lazily to the logger (instead of eagerly building
    # the string with %) so the work is skipped when DEBUG is disabled.
    _LOGGER.debug(
        "%s: %s in %s:%i",
        message,
        func.co_name,
        func.co_filename,
        func.co_firstlineno,
    )
async def async_setup(hass: HomeAssistant, config: dict):
    """Initialize the integration's shared storage (YAML setup entry point)."""
    hass.data.setdefault(DOMAIN, {})
    return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry):
    """Set up Jellyfin from a config entry: connect, register service and platforms."""
    global UPDATE_UNLISTENER
    # Drop any previous options-update listener so they don't stack up
    # across reloads.
    if UPDATE_UNLISTENER:
        UPDATE_UNLISTENER()
    if not config_entry.unique_id:
        hass.config_entries.async_update_entry(
            config_entry, unique_id=config_entry.title
        )
    # Merge entry data and options into one dict; options win on collisions.
    config = {}
    for key, value in config_entry.data.items():
        config[key] = value
    for key, value in config_entry.options.items():
        config[key] = value
    if config_entry.options:
        # Fold options back into data and clear the options dict.
        hass.config_entries.async_update_entry(config_entry, data=config, options={})
    UPDATE_UNLISTENER = config_entry.add_update_listener(_update_listener)
    # Per-server storage, keyed by the configured URL.
    hass.data[DOMAIN][config.get(CONF_URL)] = {}
    _jelly = JellyfinClientManager(hass, config)
    try:
        await _jelly.connect()
        hass.data[DOMAIN][config.get(CONF_URL)]["manager"] = _jelly
    except:
        # NOTE(review): bare except — re-raises, but would be clearer as
        # `except Exception`.
        _LOGGER.error("Cannot connect to Jellyfin server.")
        raise
    async def service_trigger_scan(service):
        # Forward a library-scan service call to the matching sensor entity.
        entity_id = service.data.get(ATTR_ENTITY_ID)
        for sensor in hass.data[DOMAIN][config.get(CONF_URL)]["sensor"]["entities"]:
            if sensor.entity_id == entity_id:
                await sensor.async_trigger_scan()
    hass.services.async_register(
        DOMAIN,
        SERVICE_SCAN,
        service_trigger_scan,
        schema=SCAN_SERVICE_SCHEMA,
    )
    # Create per-platform storage and forward the entry to each platform.
    for component in PLATFORMS:
        hass.data[DOMAIN][config.get(CONF_URL)][component] = {}
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, component)
        )
    async_dispatcher_send(hass, SIGNAL_STATE_UPDATED)
    async def stop_jellyfin(event):
        """Stop Jellyfin connection."""
        await _jelly.stop()
    # Make sure the websocket/client is shut down with Home Assistant.
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_jellyfin)
    await _jelly.start()
    return True
async def _update_listener(hass, config_entry):
"""Update listener."""
await hass.config_entries.async_reload(config_entry.entry_id)
class JellyfinClientManager(object):
    def __init__(self, hass: HomeAssistant, config_entry):
        """Hold the Jellyfin client plus session/device bookkeeping."""
        self.hass = hass
        # No-op default; replaced externally to receive raw client events.
        self.callback = lambda client, event_name, data: None
        self.jf_client: JellyfinClient = None
        # True until start() completes; also set again by stop().
        self.is_stopping = True
        self._event_loop = hass.loop
        self.host = config_entry[CONF_URL]
        # Server System/Info payload, filled in by start().
        self._info = None
        self.config_entry = config_entry
        # Normalized URL, filled in by login().
        self.server_url = ""
        # Latest session list pushed by the server websocket.
        self._sessions = None
        self._devices: Mapping[str, JellyfinDevice] = {}
        # Callbacks
        self._new_devices_callbacks = []
        self._stale_devices_callbacks = []
        self._update_callbacks = []
@staticmethod
def expo(max_value = None):
n = 0
while True:
a = 2 ** n
if max_value is None or a < max_value:
yield a
n += 1
else:
yield max_value
@staticmethod
def clean_none_dict_values(obj):
"""
Recursively remove keys with a value of None
"""
if not isinstance(obj, collections.abc.Iterable) or isinstance(obj, str):
return obj
queue = [obj]
while queue:
item = queue.pop()
if isinstance(item, collections.abc.Mapping):
mutable = isinstance(item, collections.abc.MutableMapping)
remove = []
for key, value in item.items():
if value is None and mutable:
remove.append(key)
elif isinstance(value, str):
continue
elif isinstance(value, collections.abc.Iterable):
queue.append(value)
if mutable:
# Remove keys with None value
for key in remove:
item.pop(key)
elif isinstance(item, collections.abc.Iterable):
for value in item:
if value is None or isinstance(value, str):
continue
elif isinstance(value, collections.abc.Iterable):
queue.append(value)
return obj
async def connect(self):
autolog(">>>")
is_logged_in = await self.hass.async_add_executor_job(self.login)
if is_logged_in:
_LOGGER.info("Successfully added server.")
else:
raise ConfigEntryNotReady
    @staticmethod
    def client_factory(config_entry):
        """Build a JellyfinClient identified as this Home Assistant instance."""
        client = JellyfinClient(allow_multiple_clients=True)
        client.config.data["app.default"] = True
        # App name/version plus the per-entry client id.
        client.config.app(
            USER_APP_NAME, CLIENT_VERSION, USER_APP_NAME, config_entry[CONF_CLIENT_ID]
        )
        # NOTE(review): "auth.ssl" is fed from CONF_VERIFY_SSL — presumably it
        # controls certificate verification; confirm against the client docs.
        client.config.data["auth.ssl"] = config_entry[CONF_VERIFY_SSL]
        return client
    def login(self):
        """Normalize the configured URL and authenticate (blocking).

        Returns True on success, False when no access token was issued.
        """
        autolog(">>>")
        self.server_url = self.config_entry[CONF_URL]
        # Strip a single trailing slash so later joins don't double up.
        if self.server_url.endswith("/"):
            self.server_url = self.server_url[:-1]
        protocol, host, port, path = PATH_REGEX.match(self.server_url).groups()
        if not protocol:
            _LOGGER.warning("Adding http:// because it was not provided.")
            protocol = "http://"
        if protocol == "http://" and not port:
            # Default to Jellyfin's standard HTTP port when none was given.
            _LOGGER.warning("Adding port 8096 for insecure local http connection.")
            _LOGGER.warning(
                "If you want to connect to standard http port 80, use :80 in the url."
            )
            port = ":8096"
        if protocol == "https://" and not port:
            port = ":443"
        # filter(bool, ...) drops the pieces that are None/empty (e.g. no path).
        self.server_url = "".join(filter(bool, (protocol, host, port, path)))
        self.jf_client = self.client_factory(self.config_entry)
        self.jf_client.auth.connect_to_address(self.server_url)
        result = self.jf_client.auth.login(self.server_url, self.config_entry[CONF_USERNAME], self.config_entry[CONF_PASSWORD])
        if "AccessToken" not in result:
            return False
        # Re-authenticate with the stored credentials to finish the handshake.
        credentials = self.jf_client.auth.credentials.get_credentials()
        self.jf_client.authenticate(credentials)
        return True
    async def start(self):
        """Start the client and websocket; prime server info and session list."""
        autolog(">>>")
        def event(event_name, data):
            # Runs on the client's own thread: route websocket events.
            _LOGGER.debug("Event: %s", event_name)
            if event_name == "WebSocketConnect":
                # Ask the server to start pushing session updates.
                self.jf_client.wsc.send("SessionsStart", "0,1500")
            elif event_name == "WebSocketDisconnect":
                # Reconnect with exponential backoff (capped at 100s) until
                # either login succeeds or the manager is stopping.
                timeout_gen = self.expo(100)
                while not self.is_stopping:
                    timeout = next(timeout_gen)
                    _LOGGER.warning(
                        "No connection to server. Next try in {0} second(s)".format(
                            timeout
                        )
                    )
                    self.jf_client.stop()
                    time.sleep(timeout)
                    if self.login():
                        break
            elif event_name == "Sessions":
                # Scrub None values before caching the session list.
                self._sessions = self.clean_none_dict_values(data)["value"]
                self.update_device_list()
            else:
                # Everything else goes to the externally registered callback.
                self.callback(self.jf_client, event_name, data)
        self.jf_client.callback = event
        self.jf_client.callback_ws = event
        await self.hass.async_add_executor_job(self.jf_client.start, True)
        self.is_stopping = False
        # Prime server info and the initial session list synchronously.
        self._info = await self.hass.async_add_executor_job(self.jf_client.jellyfin._get, "System/Info")
        self._sessions = self.clean_none_dict_values(await self.hass.async_add_executor_job(self.jf_client.jellyfin.get_sessions))
    async def stop(self):
        """Stop the client and flag the manager as stopping."""
        autolog(">>>")
        await self.hass.async_add_executor_job(self.jf_client.stop)
        # Set after the stop completes; also ends the reconnect loop.
        self.is_stopping = True
    def update_device_list(self):
        """Sync self._devices with the latest session list and fire callbacks."""
        autolog(">>>")
        # _LOGGER.debug("sessions: %s", str(sessions))
        if self._sessions is None:
            _LOGGER.error('Error updating Jellyfin devices.')
            return
        try:
            new_devices = []
            active_devices = []
            dev_update = False
            for device in self._sessions:
                # _LOGGER.debug("device: %s", str(device))
                # Device key: "<DeviceId>.<Client>".
                dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])
                try:
                    _LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',
                                  dev_name, device['NowPlayingItem']['Type'],
                                  device['NowPlayingItem']['IsThemeMedia'])
                except KeyError:
                    # Session has no NowPlayingItem — nothing to log.
                    pass
                active_devices.append(dev_name)
                # Skip our own client id in both branches below.
                if dev_name not in self._devices and \
                   device['DeviceId'] != self.config_entry[CONF_CLIENT_ID]:
                    _LOGGER.debug('New Jellyfin DeviceID: %s. Adding to device list.',
                                  dev_name)
                    new = JellyfinDevice(device, self)
                    self._devices[dev_name] = new
                    new_devices.append(new)
                elif device['DeviceId'] != self.config_entry[CONF_CLIENT_ID]:
                    # Before we send in new data check for changes to state
                    # to decide if we need to fire the update callback
                    if not self._devices[dev_name].is_active:
                        # Device wasn't active on the last update;
                        # we need to fire a device callback to let subs know.
                        dev_update = True
                    do_update = self.update_check(
                        self._devices[dev_name], device)
                    self._devices[dev_name].update_data(device)
                    self._devices[dev_name].set_active(True)
                    if dev_update:
                        self._do_new_devices_callback(0)
                        dev_update = False
                    if do_update:
                        self._do_update_callback(dev_name)
            # Need to check for new inactive devices and flag
            for dev_id in self._devices:
                if dev_id not in active_devices:
                    # Device no longer active
                    if self._devices[dev_id].is_active:
                        self._devices[dev_id].set_active(False)
                        self._do_update_callback(dev_id)
                        self._do_stale_devices_callback(dev_id)
            # Call device callback if new devices were found.
            if new_devices:
                self._do_new_devices_callback(0)
        except Exception as e:
            # Log the full traceback before propagating — this runs from the
            # websocket thread where errors would otherwise vanish.
            _LOGGER.critical(traceback.format_exc())
            raise
    def update_check(self, existing: JellyfinDevice, new: Mapping):
        """ Check device state to see if we need to fire the callback.

        ``new`` is a raw session mapping from the server (the original
        annotation said JellyfinDevice, but the body indexes it like a dict).

        True if either state is 'Playing'
        False if both states are: 'Paused', 'Idle', or 'Off'
        False whenever theme media is involved (old or new)
        True on any state transition.
        """
        autolog(">>>")
        old_state = existing.state
        # Theme flag of whatever was playing before, if anything.
        if 'NowPlayingItem' in existing.session_raw:
            try:
                old_theme = existing.session_raw['NowPlayingItem']['IsThemeMedia']
            except KeyError:
                old_theme = False
        else:
            old_theme = False
        # Derive the new state and theme flag from the raw session.
        if 'NowPlayingItem' in new:
            if new['PlayState']['IsPaused']:
                new_state = STATE_PAUSED
            else:
                new_state = STATE_PLAYING
            try:
                new_theme = new['NowPlayingItem']['IsThemeMedia']
            except KeyError:
                new_theme = False
        else:
            new_state = STATE_IDLE
            new_theme = False
        # Theme media never triggers an update.
        if old_theme or new_theme:
            return False
        elif old_state == STATE_PLAYING or new_state == STATE_PLAYING:
            return True
        elif old_state != new_state:
            return True
        else:
            return False
@property
def info(self):
if self.is_stopping:
return None
return self._info
    async def trigger_scan(self):
        """Ask the server to refresh its media library."""
        await self.hass.async_add_executor_job(self.jf_client.jellyfin._post, "Library/Refresh")
    async def get_item(self, id):
        """Fetch a single library item by id (``id`` shadows the builtin)."""
        return await self.hass.async_add_executor_job(self.jf_client.jellyfin.get_item, id)
    async def get_items(self, query=None):
        """Query /Items for the logged-in user and return the Items list."""
        response = await self.hass.async_add_executor_job(self.jf_client.jellyfin.users, "/Items", "GET", query)
        #_LOGGER.debug("get_items: %s | %s", str(query), str(response))
        return response["Items"]
    async def set_playstate(self, session_id, state, params):
        """Send a Playing/<state> command (pause, stop, ...) to a session."""
        await self.hass.async_add_executor_job(self.jf_client.jellyfin.post_session, session_id, "Playing/%s" % state, params)
    async def play_media(self, session_id, media_id):
        """Start immediate playback of media_id on the given session."""
        params = {
            "playCommand": "PlayNow",
            "itemIds": media_id
        }
        await self.hass.async_add_executor_job(self.jf_client.jellyfin.post_session, session_id, "Playing", params)
    async def get_artwork(self, media_id) -> Tuple[Optional[str], Optional[str]]:
        """Return (primary image data, mime type), or (None, None) when absent."""
        query = {
            "format": "PNG",
            "maxWidth": 500,
            "maxHeight": 500
        }
        image = await self.hass.async_add_executor_job(self.jf_client.jellyfin.items, "GET", "%s/Images/Primary" % media_id, query)
        if image is not None:
            return (image, "image/png")
        return (None, None)
    async def get_artwork_url(self, media_id) -> str:
        """Return a URL for an item's primary artwork (500px)."""
        return await self.hass.async_add_executor_job(self.jf_client.jellyfin.artwork, media_id, "Primary", 500)
    @property
    def api(self):
        """ Return the low-level Jellyfin API object. """
        return self.jf_client.jellyfin
    @property
    def devices(self) -> Mapping[str, JellyfinDevice]:
        """ Return devices dictionary, keyed by "<DeviceId>.<Client>". """
        return self._devices
    @property
    def is_available(self):
        """True while the client is started and not shutting down."""
        return not self.is_stopping
    # Callbacks
    def add_new_devices_callback(self, callback):
        """Register as callback for when new devices are added. """
        self._new_devices_callbacks.append(callback)
        _LOGGER.debug('Added new devices callback to %s', callback)
    def _do_new_devices_callback(self, msg):
        """Call registered callback functions."""
        for callback in self._new_devices_callbacks:
            _LOGGER.debug('Devices callback %s', callback)
            # Schedule on the HA event loop; we may be on the client thread.
            self._event_loop.call_soon(callback, msg)
    def add_stale_devices_callback(self, callback):
        """Register as callback for when stale devices exist. """
        self._stale_devices_callbacks.append(callback)
        _LOGGER.debug('Added stale devices callback to %s', callback)
    def _do_stale_devices_callback(self, msg):
        """Call registered callback functions."""
        for callback in self._stale_devices_callbacks:
            _LOGGER.debug('Stale Devices callback %s', callback)
            self._event_loop.call_soon(callback, msg)
    def add_update_callback(self, callback, device):
        """Register as callback for when a matching device changes."""
        self._update_callbacks.append([callback, device])
        _LOGGER.debug('Added update callback to %s on %s', callback, device)
def remove_update_callback(self, callback, device):
""" Remove a registered update callback. """
if | |
will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(NatGateway, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'nat_gateway',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_natgateways_post" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_natgateways_post`") # noqa: E501
# verify the required parameter 'nat_gateway' is set
if self.api_client.client_side_validation and ('nat_gateway' not in local_var_params or # noqa: E501
local_var_params['nat_gateway'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `nat_gateway` when calling `datacenters_natgateways_post`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_natgateways_post`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_natgateways_post`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
query_params = list(local_var_params.get('query_params', {}).items())
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'nat_gateway' in local_var_params:
body_params = local_var_params['nat_gateway']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'NatGateway'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/natgateways', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def datacenters_natgateways_put(self, datacenter_id, nat_gateway_id, nat_gateway, **kwargs):  # noqa: E501
    """Modify NAT Gateways  # noqa: E501

    Modify the properties of the specified NAT Gateway within the data center.
    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the result.

    >>> thread = api.datacenters_natgateways_put(datacenter_id, nat_gateway_id, nat_gateway, async_req=True)
    >>> result = thread.get()

    :param datacenter_id: The unique ID of the data center. (required)
    :type datacenter_id: str
    :param nat_gateway_id: The unique ID of the NAT Gateway. (required)
    :type nat_gateway_id: str
    :param nat_gateway: The modified NAT Gateway. (required)
    :type nat_gateway: NatGatewayPut
    :param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
    :type pretty: bool
    :param depth: Controls the detail depth of the response objects, from 0
                  (no children) up to 10.
    :type depth: int
    :param x_contract_number: Users with multiple contracts must provide the
                              contract number for which the request runs.
    :type x_contract_number: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response data.
                             Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout (one number) or a
                             (connection, read) pair of timeouts.
    :return: NatGateway, or the request thread when async_req=True.
    :rtype: NatGateway
    """
    # This convenience wrapper always yields the deserialized body only,
    # never the (data, status_code, headers) triple.
    kwargs['_return_http_data_only'] = True
    return self.datacenters_natgateways_put_with_http_info(datacenter_id, nat_gateway_id, nat_gateway, **kwargs)  # noqa: E501
def datacenters_natgateways_put_with_http_info(self, datacenter_id, nat_gateway_id, nat_gateway, **kwargs): # noqa: E501
"""Modify NAT Gateways # noqa: E501
Modify the properties of the specified NAT Gateway within the data center. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_natgateways_put_with_http_info(datacenter_id, nat_gateway_id, nat_gateway, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param nat_gateway_id: The unique ID of the NAT Gateway. (required)
:type nat_gateway_id: str
:param nat_gateway: The modified NAT Gateway. (required)
:type nat_gateway: NatGatewayPut
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(NatGateway, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'nat_gateway_id',
'nat_gateway',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_natgateways_put" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_natgateways_put`") # noqa: E501
# verify the required parameter 'nat_gateway_id' is set
if self.api_client.client_side_validation and ('nat_gateway_id' not in local_var_params or # noqa: E501
local_var_params['nat_gateway_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `nat_gateway_id` when calling `datacenters_natgateways_put`") # noqa: E501
# verify the required parameter 'nat_gateway' is set
if self.api_client.client_side_validation and ('nat_gateway' not in local_var_params or # noqa: E501
local_var_params['nat_gateway'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `nat_gateway` when calling `datacenters_natgateways_put`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_natgateways_put`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_natgateways_put`, must be a value greater | |
not typed_ast3.BinOp:
raise NotImplementedError('root operation initialisation')
root_operation = typed_ast3.BinOp(
left=None, op=root_operator_type(), right=operand)
operation = root_operation
continue
if operation_type is not None:
assert operation_type is root_operation_type, (operation_type, root_operation_type)
operation.left = typed_ast3.BinOp(left=None, op=operator_type(), right=operand)
operation = operation.left
else:
operation.left = operand
return root_operation
def _operation_multiary_boolean(
        self, operators_and_operands: t.Sequence[t.Union[typed_ast3.AST, t.Tuple[
            t.Type[typed_ast3.BoolOp], t.Type[typed_ast3.AST]]]]) -> typed_ast3.BoolOp:
    """Collapse a flat operand/operator sequence into a single BoolOp node.

    The input alternates operands and ``(operation_type, operator_type)``
    pairs, e.g. ``[a, (BoolOp, And), b, (BoolOp, And), c]``.  All operators
    must be the same boolean operator; the result is one ``BoolOp`` whose
    ``values`` list holds every operand.

    :raises NotImplementedError: if the first operator is not a BoolOp.
    """
    # Work on a copy: ``+=`` on the parameter would mutate the caller's list
    # in place.  The (None, None) sentinel pairs up the final operand.
    operators_and_operands = list(operators_and_operands) + [(None, None)]
    root_operation = None
    root_operation_type = None
    root_operator_type = None
    zipped = zip(operators_and_operands[::2], operators_and_operands[1::2])
    for operand, (operation_type, operator_type) in zipped:
        if root_operation is None:
            root_operation_type = operation_type
            root_operator_type = operator_type
            if root_operation_type is not typed_ast3.BoolOp:
                raise NotImplementedError('root operation initialisation')
            root_operation = typed_ast3.BoolOp(
                op=root_operator_type(), values=[operand])
            continue
        if operation_type is not None:
            assert operation_type is root_operation_type, (operation_type, root_operation_type)
            assert operator_type is root_operator_type, (operator_type, root_operator_type)
        # Append unconditionally: the last operand arrives paired with the
        # (None, None) sentinel and must not be dropped from the BoolOp.
        root_operation.values.append(operand)
    return root_operation
def _operation_multiary_comparison(
        self, operators_and_operands: t.Sequence[t.Union[typed_ast3.AST, t.Tuple[
            t.Type[typed_ast3.Compare], t.Type[typed_ast3.AST]]]]) -> typed_ast3.Compare:
    """Build a Compare node from a single operator between two operands."""
    assert len(operators_and_operands) == 3, operators_and_operands
    left, middle, right = operators_and_operands
    operation_type, operator_type = middle
    assert operation_type is typed_ast3.Compare
    assert not isinstance(right, list), right
    return typed_ast3.Compare(
        left=left, ops=[operator_type()], comparators=[right])
def _operation_unary(self, node: ET.Element):
    """Transform a unary operation node into the matching AST node."""
    parts = self.transform_all_subnodes(node, skip_empty=True, ignored={
        'signed-operand', 'and-operand', 'parenthesized_expr', 'primary'})
    assert isinstance(parts, list), parts
    assert len(parts) == 2, parts
    operation_type, operator_type = parts[0]
    if operation_type is typed_ast3.BinOp:
        # A binary +/- appearing in unary position maps to UAdd/USub.
        remap = {
            (typed_ast3.BinOp, typed_ast3.Add): (typed_ast3.UnaryOp, typed_ast3.UAdd),
            (typed_ast3.BinOp, typed_ast3.Sub): (typed_ast3.UnaryOp, typed_ast3.USub)}
        operation_type, operator_type = remap[operation_type, operator_type]
    return operation_type(op=operator_type(), operand=parts[1])
def _operand(self, node: ET.Element) -> t.Any:
    """Transform a node that must contain exactly one operand expression."""
    candidates = self.transform_all_subnodes(node, ignored={
        'add-operand__add-op', 'mult-operand__mult-op', 'and-operand__not-op'})
    if len(candidates) == 1:
        return candidates[0]
    # Not exactly one operand: dump diagnostics before failing.
    _LOG.warning('%s', ET.tostring(node).decode().rstrip())
    _LOG.error([typed_astunparse.unparse(_).rstrip() for _ in candidates])
    raise SyntaxError(
        'expected exactly one operand but got {} in:\n{}'
        .format(len(candidates), ET.tostring(node).decode().rstrip()))
def _operator(
        self, node: ET.Element) -> t.Tuple[t.Type[typed_ast3.AST], t.Type[typed_ast3.AST]]:
    """Look up the (operation, operator) AST node types for a Fortran operator."""
    key = node.attrib['operator'].lower()
    return FORTRAN_PYTHON_OPERATORS[key]
def _array_constructor(self, node: ET.Element) -> typed_ast3.ListComp:
    """Transform an implied-do array constructor into a list comprehension."""
    elements = []
    for value_node in self.get_all(node, './value'):
        transformed = self.transform_all_subnodes(value_node)
        if not transformed:
            continue
        assert len(transformed) == 1
        elements.append(transformed[0])
    if len(elements) != 1:
        raise NotImplementedError(
            'not implemented handling of {} in:\n{}'
            .format(elements, ET.tostring(node).decode().rstrip()))
    header = self.transform_all_subnodes(self.get_one(node, './header'),
                                         ignored={'ac-implied-do-control'})
    assert len(header) == 1
    # The header transforms to a (target, iterable) pair of the implied do.
    comp_target, comp_iter = header[0]
    generator = typed_ast3.comprehension(
        target=comp_target, iter=comp_iter, ifs=[], is_async=0)
    return typed_ast3.ListComp(elt=elements[0], generators=[generator])
def _array_constructor_values(self, node: ET.Element) -> typed_ast3.List:
    """Transform explicit array constructor values into a List literal."""
    elements = []
    for value_node in self.get_all(node, './value'):
        transformed = self.transform_all_subnodes(value_node)
        if not transformed:
            continue
        assert len(transformed) == 1
        elements.append(transformed[0])
    return typed_ast3.List(elts=elements, ctx=typed_ast3.Load())
def _range(self, node: ET.Element) -> typed_ast3.Slice:
    """Transform lower-bound/upper-bound/step subnodes into a Slice node.

    Each bound is optional; an absent bound becomes ``None`` in the Slice.
    """
    def transform_bound(subnode):
        # Each bound, when present, must transform to exactly one expression.
        if subnode is None:
            return None
        args = self.transform_all_subnodes(subnode)
        assert len(args) == 1, args
        return args[0]

    lower_bound = transform_bound(node.find('./lower-bound'))
    upper_bound = transform_bound(node.find('./upper-bound'))
    step = transform_bound(node.find('./step'))
    return typed_ast3.Slice(lower=lower_bound, upper=upper_bound, step=step)
def _dimensions(self, node: ET.Element) -> t.List[typed_ast3.AST]:
    """Transform every dimension subnode, skipping the array-spec wrapper."""
    return self.transform_all_subnodes(node, ignored={'array-spec'})
def _dimension(self, node: ET.Element) -> t.Union[typed_ast3.Index, typed_ast3.Slice]:
    """Transform one array dimension according to its declared type."""
    dim_type = node.attrib['type']
    if dim_type == 'assumed-shape':
        # e.g. dimension(:) - a fully open slice.
        return typed_ast3.Slice(lower=None, upper=None, step=None)
    if dim_type == 'assumed-size':
        _LOG.warning('generating invalid Python')
        return typed_ast3.Index(value=typed_ast3.Ellipsis())
    if dim_type == 'simple':
        values = self.transform_all_subnodes(node, ignored={'array-spec-element'})
        if len(values) != 1:
            _LOG.error('simple dimension should have exactly one value, but it has %i',
                       len(values))
        return typed_ast3.Index(value=values[0])
    if dim_type == 'range':
        ranges = self.transform_all_subnodes(node, ignored={'array-spec-element'})
        assert len(ranges) == 1, ranges
        return ranges[0]
    if dim_type == 'upper-bound-assumed-shape':
        args = self.transform_all_subnodes(node, ignored={'array-spec-element'})
        assert len(args) == 1, args
        return typed_ast3.Slice(lower=args[0], upper=None, step=None)
    raise NotImplementedError(
        'dimension type "{}" not supported in:\n{}'
        .format(dim_type, ET.tostring(node).decode().rstrip()))
def _type(self, node: ET.Element) -> typed_ast3.AST:
    """Transform a Fortran type declaration into a Python type expression.

    Dispatches on the type name and the optional ``length`` (character
    types) or ``kind`` (numeric types) attribute, using the module-level
    FORTRAN_PYTHON_TYPE_PAIRS table to produce the Python/numpy equivalent.

    :raises SyntaxError: if both length and kind are given.
    """
    name = node.attrib['name'].lower()
    # 'hasLength'/'hasKind' flags gate whether the subnodes are present.
    length = self.transform_one(self.get_one(node, './length')) \
        if node.attrib['hasLength'] == 'true' else None
    kind = self.transform_one(self.get_one(node, './kind')) \
        if node.attrib['hasKind'] == 'true' else None
    if length is not None and kind is not None:
        raise SyntaxError(
            'only one of "length" and "kind" can be provided, but both were given'
            ' ("{}" and "{}" respectively) in:\n{}'
            .format(length, kind, ET.tostring(node).decode().rstrip()))
    if name == 'character':
        # Character type: subscript the base string type with a literal
        # length; non-literal lengths are dropped with a warning.
        type_ = typed_ast3.parse(FORTRAN_PYTHON_TYPE_PAIRS[name, t.Any], mode='eval').body
        if length is not None:
            if isinstance(length, typed_ast3.Num):
                # length = length.n
                type_ = typed_ast3.Subscript(
                    value=type_, slice=typed_ast3.Index(value=length), ctx=typed_ast3.Load())
            else:
                _LOG.warning(
                    'ignoring string length "%s" in:\n%s',
                    length, ET.tostring(node).decode().rstrip())
        return type_
    elif length is not None:
        # Non-character type with a length: maps to a numpy type.
        self.ensure_import('numpy', 'np')
        return typed_ast3.parse(FORTRAN_PYTHON_TYPE_PAIRS[name, length], mode='eval').body
    elif kind is not None:
        self.ensure_import('numpy', 'np')
        if isinstance(kind, typed_ast3.Num):
            kind = kind.n
        if not isinstance(kind, int):
            # Non-literal kind: emit st.<python_type>[kind] via static_typing.
            # _LOG.warning('%s', ET.tostring(node).decode().rstrip())
            # raise NotImplementedError('non-literal kinds are not supported')
            python_type = typed_ast3.parse(
                FORTRAN_PYTHON_TYPE_PAIRS[name, None], mode='eval').body
            self.ensure_import('static_typing', 'st')
            static_type = typed_ast3.Attribute(
                value=typed_ast3.Name(id='st', ctx=typed_ast3.Load()),
                attr=python_type, ctx=typed_ast3.Load())
            return typed_ast3.Subscript(
                value=static_type, slice=typed_ast3.Index(value=kind), ctx=typed_ast3.Load())
            # typed_ast3.parse({
            #     'integer': 'st.int[0]'.format(kind),
            #     'real': lambda kind: 'st.float[0]'.format(kind)}[name](kind), mode='eval')
            # custom_kind_type.
            # return custom_kind_type
        return typed_ast3.parse(FORTRAN_PYTHON_TYPE_PAIRS[name, kind], mode='eval').body
    else:
        # No length and no kind: either a derived type (wrapped in type(...))
        # or a plain intrinsic type.
        if node.attrib['type'] == 'derived':
            return typed_ast3.Call(func=typed_ast3.Name(id='type', ctx=typed_ast3.Load()),
                                   args=[typed_ast3.Name(id=name, ctx=typed_ast3.Load())],
                                   keywords=[])
        assert node.attrib['type'] == 'intrinsic'
        return typed_ast3.parse(FORTRAN_PYTHON_TYPE_PAIRS[name, None], mode='eval').body
    # NOTE(review): every branch above returns or raises, so this final
    # raise appears unreachable - confirm and consider removing.
    raise NotImplementedError(
        'not implemented handling of:\n{}'.format(ET.tostring(node).decode().rstrip()))
def _length(self, node):
    """Transform a character-length specification into one expression."""
    transformed = self.transform_all_subnodes(node, ignored={'char-length'})
    if len(transformed) != 1:
        raise NotImplementedError(
            'not implemented handling of:\n{}'.format(ET.tostring(node).decode().rstrip()))
    return transformed[0]
def _kind(self, node):
    """Transform a kind specification, falling back to the value attribute."""
    transformed = self.transform_all_subnodes(node, ignored={'kind-selector'})
    if len(transformed) == 1:
        return transformed[0]
    # No transformable subnode - use the literal attribute if present.
    if 'value' in node.attrib:
        return int(node.attrib['value'])
    raise NotImplementedError(
        'not implemented handling of:\n{}'.format(ET.tostring(node).decode().rstrip()))
def _null_init(self, node: ET.Element) -> typed_ast3.NameConstant:
    """Transform a null() initialisation into a None constant."""
    assert node.attrib['function-reference'] == 'null', node.attrib
    return typed_ast3.NameConstant(None)
def _variable(self, node: ET.Element) -> t.Tuple[typed_ast3.Name, t.Any]:
    """Transform a variable declaration into a (Name, initial value) pair."""
    initial_value = None
    value_node = node.find('./initial-value')
    if value_node is not None:
        transformed = self.transform_all_subnodes(value_node, ignored={'initialization'})
        assert len(transformed) == 1, transformed
        initial_value = transformed[0]
    variable = typed_ast3.Name(id=node.attrib['name'], ctx=typed_ast3.Load())
    metadata = {}
    dimensions_node = node.find('./dimensions')
    if dimensions_node is not None:
        metadata['dimensions'] = self.transform_one(dimensions_node)
    if metadata:
        # Attach Fortran-specific details onto the AST node for later passes.
        variable.fortran_metadata = metadata
    return variable, initial_value
def _names(self, node: ET.Element) -> typed_ast3.arguments:
    """Transform a name list into arguments with unannotated arg nodes."""
    arguments = self._arguments(node)
    for index, name in enumerate(arguments.args):
        assert isinstance(name, typed_ast3.Name), type(name)
        # Replace each Name node with an equivalent plain arg node.
        arguments.args[index] = typed_ast3.arg(arg=name.id, annotation=None)
    return arguments
def _intrinsic_identity(self, call):
return call
def _intrinsic_getenv(self, call):
    """Convert Fortran getenv(name, target) into target = os.environ[name]."""
    assert isinstance(call, typed_ast3.Call), type(call)
    assert len(call.args) + len(call.keywords) == 2, (call.args, call.keywords)
    self.ensure_import('os')
    args_and_keywords = call.args + call.keywords
    target = args_and_keywords[1]
    if isinstance(target, typed_ast3.keyword):
        # Unwrap keyword arguments to their underlying value node.
        target = target.value
    environ = typed_ast3.Attribute(
        value=typed_ast3.Name(id='os', ctx=typed_ast3.Load()),
        attr='environ', ctx=typed_ast3.Load())
    lookup = typed_ast3.Subscript(
        value=environ, slice=typed_ast3.Index(value=args_and_keywords[0]),
        ctx=typed_ast3.Load())
    return typed_ast3.Assign(targets=[target], value=lookup, type_comment=None)
def _intrinsic_trim(self, call):
    """Convert Fortran trim(s) into s.rstrip()."""
    rstrip = typed_ast3.Attribute(
        value=call.args[0], attr='rstrip', ctx=typed_ast3.Load())
    return typed_ast3.Call(func=rstrip, args=call.args[1:], keywords=[])
def _intrinsic_count(self, call):
    """Convert Fortran count(mask) into mask.sum()."""
    assert isinstance(call, typed_ast3.Call), type(call)
    assert len(call.args) == 1, call.args
    summed = typed_ast3.Attribute(
        value=call.args[0], attr='sum', ctx=typed_ast3.Load())
    return typed_ast3.Call(func=summed, args=[], keywords=[])
def _intrinsic_converter_rename(self, call, name: str):
    """Rewrite the called function's name in place and return the call."""
    assert isinstance(call.func, typed_ast3.Name)
    call.func.id = name
    return call
def _intrinsic_converter_not_implemented(self, call):
    """Fail for intrinsics that have no converter implemented yet."""
    message = "cannot convert intrinsic call from raw AST:\n{}".format(
        typed_astunparse.unparse(call))
    raise NotImplementedError(message)
def _intrinsic_numpy_call(self, call, members=None):
    """Redirect the call to np.<members...>, preserving args and keywords.

    Without explicit *members*, the original function name is reused.
    """
    attribute_path = members if members else (call.func.id,)
    func = typed_ast3.Name(id='np', ctx=typed_ast3.Load())
    for attribute_name in attribute_path:
        func = typed_ast3.Attribute(value=func, attr=attribute_name, ctx=typed_ast3.Load())
    return typed_ast3.Call(func=func, args=call.args, keywords=call.keywords)
# Intrinsic name -> handwritten converter, for intrinsics that need more
# than a simple rename (consulted by _convgen via INTRINSICS_SPECIAL_CASES).
_convgen_specials = {
    'getenv': _intrinsic_getenv,
    'trim': _intrinsic_trim,
    'count': _intrinsic_count}
@classmethod
def _convgen(cls, case, value):
    """Generate the converter callable for one intrinsic mapping entry.

    *value* may be None (unimplemented), the same name (identity), another
    name (rename), or a ('numpy', member...) tuple (redirect to np).
    """
    if value is None:
        return cls._intrinsic_converter_not_implemented
    if case in INTRINSICS_SPECIAL_CASES:
        return cls._convgen_specials[case]
    if case == value:
        return cls._intrinsic_identity
    if isinstance(value, str):
        return functools.partial(cls._intrinsic_converter_rename, name=value)
    assert isinstance(value, tuple), type(value)
    assert len(value) >= 2, value
    package, *members = value
    if package == 'numpy':
        return functools.partial(cls._intrinsic_numpy_call, members=members)
    raise NotImplementedError((case, value))
# Registry of lower-case intrinsic name -> converter callable, read by
# _name(); presumably filled in after class creation via _convgen - confirm.
_intrinsics_converters = {}
def _name(self, node: ET.Element) -> typed_ast3.AST:
    """Transform a name reference into a Name, Call or Subscript node.

    Fortran syntax cannot always distinguish array indexing from function
    calls, so both interpretations are prepared and the node's 'type'
    attribute (variable / function / procedure / ambiguous) picks one.
    """
    name_str = node.attrib['id']
    name = typed_ast3.Name(id=name_str, ctx=typed_ast3.Load())
    name_str = name_str.lower()
    name_type = node.attrib['type'] if 'type' in node.attrib else None
    is_intrinsic = name_str in self._intrinsics_converters
    subscripts_node = node.find('./subscripts')
    try:
        # Call interpretation: subscripts become arguments.
        # NOTE(review): truth-testing an Element (`if subscripts_node`) is
        # False for an element with no children and is deprecated in
        # ElementTree - `is not None` may be intended here; confirm.
        args = self._subscripts(subscripts_node, postprocess=False) if subscripts_node else []
        args, keywords = separate_args_and_keywords(args)
        call = typed_ast3.Call(func=name, args=args, keywords=keywords)
        if is_intrinsic:
            if subscripts_node is None:
                _LOG.warning('found intrinsic name "%s" without any subscripts', name_str)
            else:
                name_type = 'function'
                call = self._intrinsics_converters[name_str](self, call)
    except SyntaxError:
        # Fall through to the subscript interpretation below.
        # NOTE(review): if this triggers, `call` stays unbound and the
        # `return call` branches below would raise NameError - confirm.
        _LOG.info('transforming name to call failed as below (continuing despite that)',
                  exc_info=True)
    # Subscript interpretation: subscripts become an index/slice.
    slice_ = self._subscripts(subscripts_node) if subscripts_node else None
    subscript = typed_ast3.Subscript(value=name, slice=slice_, ctx=typed_ast3.Load())
    if name_type in ('procedure', 'function'):
        return call
    if not subscripts_node:
        return name
    if name_type in ('variable',):
        return subscript
    if not slice_:
        return call
    if name_type in ('ambiguous',):
        return subscript
    if name_type is not None:
        raise NotImplementedError('unrecognized name type "{}" in:\n{}'
                                  .format(name_type, ET.tostring(node).decode().rstrip()))
    if name_type is None:
        raise NotImplementedError('no name type in:\n{}'
                                  .format(ET.tostring(node).decode().rstrip()))
    raise NotImplementedError('not implemented handling of:\n{}'
                              .format(ET.tostring(node).decode().rstrip()))
def _subscripts(self, node: ET.Element, postprocess: bool = True) -> t.Union[
typed_ast3.Index, typed_ast3.Slice, typed_ast3.ExtSlice]:
subscripts = self.transform_all_subnodes(node, ignored={
'section-subscript-list__begin', 'section-subscript-list'})
assert len(subscripts) == int(node.attrib['count'])
if not postprocess:
return subscripts
if any(isinstance(_, typed_ast3.Slice) for _ in subscripts):
if len(subscripts) == 1:
return subscripts[0]
return typed_ast3.ExtSlice(dims=[
(_ if isinstance(_, (typed_ast3.Index, typed_ast3.Slice))
else | |
+ m.b145 - m.b262 <= 0)
# Auto-generated Pyomo constraints of the form -b_i + b_j - b_k <= 0 over
# binary variables.  NOTE(review): presumably a linearization tying each
# pairwise indicator b_k to its pair of individual indicators (b_i, b_j) -
# confirm against the model generator; do not edit by hand.
m.c3747 = Constraint(expr= - m.b138 + m.b146 - m.b263 <= 0)
m.c3748 = Constraint(expr= - m.b138 + m.b147 - m.b264 <= 0)
m.c3749 = Constraint(expr= - m.b139 + m.b140 - m.b265 <= 0)
m.c3750 = Constraint(expr= - m.b139 + m.b141 - m.b266 <= 0)
m.c3751 = Constraint(expr= - m.b139 + m.b142 - m.b267 <= 0)
m.c3752 = Constraint(expr= - m.b139 + m.b143 - m.b268 <= 0)
m.c3753 = Constraint(expr= - m.b139 + m.b144 - m.b269 <= 0)
m.c3754 = Constraint(expr= - m.b139 + m.b145 - m.b270 <= 0)
m.c3755 = Constraint(expr= - m.b139 + m.b146 - m.b271 <= 0)
m.c3756 = Constraint(expr= - m.b139 + m.b147 - m.b272 <= 0)
m.c3757 = Constraint(expr= - m.b140 + m.b141 - m.b273 <= 0)
m.c3758 = Constraint(expr= - m.b140 + m.b142 - m.b274 <= 0)
m.c3759 = Constraint(expr= - m.b140 + m.b143 - m.b275 <= 0)
m.c3760 = Constraint(expr= - m.b140 + m.b144 - m.b276 <= 0)
m.c3761 = Constraint(expr= - m.b140 + m.b145 - m.b277 <= 0)
m.c3762 = Constraint(expr= - m.b140 + m.b146 - m.b278 <= 0)
m.c3763 = Constraint(expr= - m.b140 + m.b147 - m.b279 <= 0)
m.c3764 = Constraint(expr= - m.b141 + m.b142 - m.b280 <= 0)
m.c3765 = Constraint(expr= - m.b141 + m.b143 - m.b281 <= 0)
m.c3766 = Constraint(expr= - m.b141 + m.b144 - m.b282 <= 0)
m.c3767 = Constraint(expr= - m.b141 + m.b145 - m.b283 <= 0)
m.c3768 = Constraint(expr= - m.b141 + m.b146 - m.b284 <= 0)
m.c3769 = Constraint(expr= - m.b141 + m.b147 - m.b285 <= 0)
m.c3770 = Constraint(expr= - m.b142 + m.b143 - m.b286 <= 0)
m.c3771 = Constraint(expr= - m.b142 + m.b144 - m.b287 <= 0)
m.c3772 = Constraint(expr= - m.b142 + m.b145 - m.b288 <= 0)
m.c3773 = Constraint(expr= - m.b142 + m.b146 - m.b289 <= 0)
m.c3774 = Constraint(expr= - m.b142 + m.b147 - m.b290 <= 0)
m.c3775 = Constraint(expr= - m.b143 + m.b144 - m.b291 <= 0)
m.c3776 = Constraint(expr= - m.b143 + m.b145 - m.b292 <= 0)
m.c3777 = Constraint(expr= - m.b143 + m.b146 - m.b293 <= 0)
m.c3778 = Constraint(expr= - m.b143 + m.b147 - m.b294 <= 0)
m.c3779 = Constraint(expr= - m.b144 + m.b145 - m.b295 <= 0)
m.c3780 = Constraint(expr= - m.b144 + m.b146 - m.b296 <= 0)
m.c3781 = Constraint(expr= - m.b144 + m.b147 - m.b297 <= 0)
m.c3782 = Constraint(expr= - m.b145 + m.b146 - m.b298 <= 0)
m.c3783 = Constraint(expr= - m.b145 + m.b147 - m.b299 <= 0)
m.c3784 = Constraint(expr= - m.b146 + m.b147 - m.b300 <= 0)
m.c3785 = Constraint(expr= - m.b148 + m.b149 - m.b165 <= 0)
m.c3786 = Constraint(expr= - m.b148 + m.b150 - m.b166 <= 0)
m.c3787 = Constraint(expr= - m.b148 + m.b151 - m.b167 <= 0)
m.c3788 = Constraint(expr= - m.b148 + m.b152 - m.b168 <= 0)
m.c3789 = Constraint(expr= - m.b148 + m.b153 - m.b169 <= 0)
m.c3790 = Constraint(expr= - m.b148 + m.b154 - m.b170 <= 0)
m.c3791 = Constraint(expr= - m.b148 + m.b155 - m.b171 <= 0)
m.c3792 = Constraint(expr= - m.b148 + m.b156 - m.b172 <= 0)
m.c3793 = Constraint(expr= - m.b148 + m.b157 - m.b173 <= 0)
m.c3794 = Constraint(expr= - m.b148 + m.b158 - m.b174 <= 0)
m.c3795 = Constraint(expr= - m.b148 + m.b159 - m.b175 <= 0)
m.c3796 = Constraint(expr= - m.b148 + m.b160 - m.b176 <= 0)
m.c3797 = Constraint(expr= - m.b148 + m.b161 - m.b177 <= 0)
m.c3798 = Constraint(expr= - m.b148 + m.b162 - m.b178 <= 0)
m.c3799 = Constraint(expr= - m.b148 + m.b163 - m.b179 <= 0)
m.c3800 = Constraint(expr= - m.b148 + m.b164 - m.b180 <= 0)
m.c3801 = Constraint(expr= - m.b149 + m.b150 - m.b181 <= 0)
m.c3802 = Constraint(expr= - m.b149 + m.b151 - m.b182 <= 0)
m.c3803 = Constraint(expr= - m.b149 + m.b152 - m.b183 <= 0)
m.c3804 = Constraint(expr= - m.b149 + m.b153 - m.b184 <= 0)
m.c3805 = Constraint(expr= - m.b149 + m.b154 - m.b185 <= 0)
m.c3806 = Constraint(expr= - m.b149 + m.b155 - m.b186 <= 0)
m.c3807 = Constraint(expr= - m.b149 + m.b156 - m.b187 <= 0)
m.c3808 = Constraint(expr= - m.b149 + m.b157 - m.b188 <= 0)
m.c3809 = Constraint(expr= - m.b149 + m.b158 - m.b189 <= 0)
m.c3810 = Constraint(expr= - m.b149 + m.b159 - m.b190 <= 0)
m.c3811 = Constraint(expr= - m.b149 + m.b160 - m.b191 <= 0)
m.c3812 = Constraint(expr= - m.b149 + m.b161 - m.b192 <= 0)
m.c3813 = Constraint(expr= - m.b149 + m.b162 - m.b193 <= 0)
m.c3814 = Constraint(expr= - m.b149 + m.b163 - m.b194 <= 0)
m.c3815 = Constraint(expr= - m.b149 + m.b164 - m.b195 <= 0)
m.c3816 = Constraint(expr= - m.b150 + m.b151 - m.b196 <= 0)
m.c3817 = Constraint(expr= - m.b150 + m.b152 - m.b197 <= 0)
m.c3818 = Constraint(expr= - m.b150 + m.b153 - m.b198 <= 0)
m.c3819 = Constraint(expr= - m.b150 + m.b154 - m.b199 <= 0)
m.c3820 = Constraint(expr= - m.b150 + m.b155 - m.b200 <= 0)
m.c3821 = Constraint(expr= - m.b150 + m.b156 - m.b201 <= 0)
m.c3822 = Constraint(expr= - m.b150 + m.b157 - m.b202 <= 0)
m.c3823 = Constraint(expr= - m.b150 + m.b158 - m.b203 <= 0)
m.c3824 = Constraint(expr= - m.b150 + m.b159 - m.b204 <= 0)
m.c3825 = Constraint(expr= - m.b150 + m.b160 - m.b205 <= 0)
m.c3826 = Constraint(expr= - m.b150 + m.b161 - m.b206 <= 0)
m.c3827 = Constraint(expr= - m.b150 + m.b162 - m.b207 <= 0)
m.c3828 = Constraint(expr= - m.b150 + m.b163 - m.b208 <= 0)
m.c3829 = Constraint(expr= - m.b150 + m.b164 - m.b209 <= 0)
m.c3830 = Constraint(expr= - m.b151 + m.b152 - m.b210 <= 0)
m.c3831 = Constraint(expr= - m.b151 + m.b153 - m.b211 <= 0)
m.c3832 = Constraint(expr= - m.b151 + m.b154 - m.b212 <= 0)
m.c3833 = Constraint(expr= - m.b151 + m.b155 - m.b213 <= 0)
m.c3834 = Constraint(expr= - m.b151 + m.b156 - m.b214 <= 0)
m.c3835 = Constraint(expr= - m.b151 + m.b157 - m.b215 <= 0)
m.c3836 = Constraint(expr= - m.b151 + m.b158 - m.b216 <= 0)
m.c3837 = Constraint(expr= - m.b151 + m.b159 - m.b217 <= 0)
m.c3838 = Constraint(expr= - m.b151 + m.b160 - m.b218 <= 0)
m.c3839 = Constraint(expr= - m.b151 + m.b161 - m.b219 <= 0)
m.c3840 = Constraint(expr= - m.b151 + m.b162 - m.b220 <= 0)
m.c3841 = Constraint(expr= - m.b151 + m.b163 - m.b221 <= 0)
m.c3842 = Constraint(expr= - m.b151 + m.b164 - m.b222 <= 0)
m.c3843 = Constraint(expr= - m.b152 + m.b153 - m.b223 <= 0)
m.c3844 = Constraint(expr= - m.b152 + m.b154 - m.b224 <= 0)
m.c3845 = Constraint(expr= - m.b152 + m.b155 - m.b225 <= 0)
m.c3846 = Constraint(expr= - m.b152 + m.b156 - m.b226 <= 0)
m.c3847 = Constraint(expr= - m.b152 + m.b157 - m.b227 <= 0)
m.c3848 = Constraint(expr= - m.b152 + m.b158 - m.b228 <= 0)
m.c3849 = Constraint(expr= - m.b152 + m.b159 - m.b229 <= 0)
m.c3850 = Constraint(expr= - m.b152 + m.b160 - m.b230 <= 0)
m.c3851 = Constraint(expr= - m.b152 + m.b161 - m.b231 <= 0)
m.c3852 = Constraint(expr= - m.b152 + m.b162 - m.b232 <= 0)
m.c3853 = Constraint(expr= - m.b152 + m.b163 - m.b233 <= 0)
m.c3854 = Constraint(expr= - m.b152 + m.b164 - m.b234 <= 0)
m.c3855 = Constraint(expr= - m.b153 + m.b154 - m.b235 <= 0)
m.c3856 = Constraint(expr= - m.b153 + m.b155 - m.b236 <= 0)
m.c3857 = Constraint(expr= - m.b153 + m.b156 - m.b237 <= 0)
m.c3858 = Constraint(expr= - m.b153 + m.b157 - m.b238 <= 0)
m.c3859 = Constraint(expr= - m.b153 + m.b158 - m.b239 <= 0)
m.c3860 = Constraint(expr= - m.b153 + m.b159 - m.b240 <= 0)
m.c3861 = Constraint(expr= - m.b153 + m.b160 - m.b241 <= 0)
m.c3862 = Constraint(expr= - m.b153 + m.b161 - m.b242 <= 0)
m.c3863 = Constraint(expr= - m.b153 + m.b162 - m.b243 <= 0)
m.c3864 = Constraint(expr= - m.b153 + m.b163 - m.b244 <= 0)
m.c3865 = Constraint(expr= - m.b153 + m.b164 - m.b245 <= 0)
m.c3866 = Constraint(expr= - m.b154 + m.b155 - m.b246 <= 0)
m.c3867 = Constraint(expr= - m.b154 + m.b156 - m.b247 <= 0)
m.c3868 = Constraint(expr= - m.b154 + m.b157 - m.b248 <= 0)
m.c3869 = Constraint(expr= - m.b154 + m.b158 - m.b249 <= 0)
m.c3870 = Constraint(expr= - m.b154 + m.b159 - m.b250 <= 0)
m.c3871 = Constraint(expr= - m.b154 + m.b160 - m.b251 <= 0)
m.c3872 = Constraint(expr= - m.b154 + m.b161 - m.b252 <= 0)
m.c3873 = Constraint(expr= - m.b154 + m.b162 - m.b253 <= 0)
m.c3874 = Constraint(expr= - m.b154 | |
fluid, wall material.
The reference area must always be the fluid-wall-contact-area for
consistency with other calculations.
Parameters:
-----------
A_i : float, int
The fluid-wall-contact area PER CELL. Calculated with:
A_i = np.pi * r_i * 2 * grid_spacing
r_i : float, int
Radius in [m] of the fluid-wall-contact-area.
r_o : float, int
Radius in [m] of the outer wall surface.
alpha_i : int, float, np.ndarray
Heat transfer coefficient in [W / (m**2K)] between the fluid inside the
pipe and the wall. The shape must equal the fluid temperature array
shape, if given as array.
lam_wll : int, float, np.ndarray
Wall heat conductivity in [W / (mK)]. The shape must equal the fluid or
wall temperature array shape, if given as array.
"""
# the log mean outer diameter is taken for length of lam_wll:
# np.log((r_o / r_i + 1) / 2) = np.log((r_o + r_i)/ 2 / r_i)
# with r_wll = (r_o + r_i) / 2
print('UA_fld_wll -> replace np.log with const!')
UA[:] = A_i / (1 / alpha_i + r_i * np.log((r_o / r_i + 1) / 2) / lam_wll)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_wll_ins_amb_cyl(
    A_i, r_i, r_o, r_ln_ins, r_rins, alpha_inf, lam_wll, lam_ins, UA
):
    """Fill ``UA`` with the U*A-value wall -> insulation -> ambient (radial).

    The wall of a cylinder (pipe or round TES) is treated as a single finite
    volume per cell, so heat is taken from/to the wall mid-point, conducted
    through the insulation (or any other extra material layer) and finally
    exchanged with the ambient. Every resistance is referred to the
    fluid-wall-contact area ``A_i`` for consistency with the other U*A
    helpers.

    Parameters:
    -----------
    A_i : float, int
        The fluid-wall-contact area PER CELL, pre-computed as
        ``A_i = np.pi * r_i * 2 * grid_spacing``.
    r_i : float, int
        Radius in [m] of the fluid-wall-contact-area.
    r_o : float, int
        Radius in [m] of the outer wall surface.
    r_ln_ins : float, int
        Radial thickness factor of the insulation conductivity referred to
        the reference area, pre-computed as
        ``r_ln_ins = r_i * np.log((r_o + s_ins) / r_o)``.
    r_rins : float, int
        Radial thickness factor of the insulation-to-ambient heat transfer
        coefficient referred to the reference area, pre-computed as
        ``r_rins = r_i / (r_o + s_ins)``.
    alpha_inf : int, float, np.ndarray
        Heat transfer coefficient in [W / (m**2K)] between the outer layer
        and the ambient; shape must match the fluid temperature array when
        given as array.
    lam_wll : int, float, np.ndarray
        Wall heat conductivity in [W / (mK)]; shape must match the fluid or
        wall temperature array when given as array.
    lam_ins : int, float, np.ndarray
        Outer material layer heat conductivity in [W / (mK)]; shape must
        match the fluid temperature array when given as array.
    UA : np.ndarray
        Output array, overwritten in place with U*A in [W/K].
    """
    # Wall conduction uses the log-mean outer diameter for the lam_wll path:
    # np.log(2 / (r_i / r_o + 1)) == np.log(r_o * 2 / (r_o + r_i)),
    # with r_wll = (r_o + r_i) / 2.
    res_wll = r_i * np.log(2 / (r_i / r_o + 1)) / lam_wll
    res_ins = r_ln_ins / lam_ins  # conduction through the insulation layer
    res_amb = r_rins / alpha_inf  # convection to the ambient
    UA[:] = A_i / (res_wll + res_ins + res_amb)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_wll_amb_cyl(A_i, r_i, r_o, alpha_inf, lam_wll, UA):
    """Fill ``UA`` with the U*A-value wall -> ambient (radial, no insulation).

    The wall of a cylinder (pipe or round TES) is treated as a single finite
    volume per cell, so heat is taken from the wall mid-point and exchanged
    directly with the ambient. Every resistance is referred to the
    fluid-wall-contact area ``A_i`` for consistency with the other U*A
    helpers.

    Parameters:
    -----------
    A_i : float, int
        The fluid-wall-contact area PER CELL, pre-computed as
        ``A_i = np.pi * r_i * 2 * grid_spacing``.
    r_i : float, int
        Radius in [m] of the fluid-wall-contact-area.
    r_o : float, int
        Radius in [m] of the outer wall surface.
    alpha_inf : int, float, np.ndarray
        Heat transfer coefficient in [W / (m**2K)] between the outer layer
        and the ambient; shape must match the fluid temperature array when
        given as array.
    lam_wll : int, float, np.ndarray
        Wall heat conductivity in [W / (mK)]; shape must match the fluid or
        wall temperature array when given as array.
    UA : np.ndarray
        Output array, overwritten in place with U*A in [W/K].
    """
    # Wall conduction uses the log-mean outer diameter for the lam_wll path:
    # np.log(2 / (r_i / r_o + 1)) == np.log(r_o * 2 / (r_o + r_i)),
    # with r_wll = (r_o + r_i) / 2.
    res_wll = r_i * np.log(2 / (r_i / r_o + 1)) / lam_wll
    res_amb = r_i / (r_o * alpha_inf)  # convection at the outer wall surface
    UA[:] = A_i / (res_wll + res_amb)
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def UA_fld_wll_plate(A, s_wll, alpha_fld, lam_wll):
    """
    Calculates the U*A-value for the heat flow between a fluid and the wall
    of a plate.
    Layers which are considered: fluid, wall material.
    The reference area must always be the cross section area.

    Parameters:
    -----------
    A : float, int
        The fluid-wall-contact area in [m^2].
    s_wll : float, int
        Wall thickness in [m].
    alpha_fld : int, float
        Heat transfer coefficient in [W/(m^2K)] between the fluid and the wall.
    lam_wll : int, float
        Wall heat conductivity in [W/(mK)].

    Returns:
    --------
    float
        U*A in [W/K]. (Unlike the cylinder helpers, this function returns
        the value instead of writing into an output array.)
    """
    # Two resistances in series: fluid-side convection + wall conduction.
    return A / (1 / alpha_fld + s_wll / lam_wll)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_fld_wll_ins_amb_plate(
    A, s_wll, s_ins, alpha_fld, alpha_inf, lam_wll, lam_ins
):
    """
    Calculates the U*A-value for the heat flow to or from a fluid at a plate
    with or without insulation to or from the ambient.
    Layers which are considered: fluid, wall material, insulation, ambient.
    The reference area must always be the cross section area.

    Parameters:
    -----------
    A : float, int
        The fluid-wall-contact area in [m^2].
    s_wll : float, int
        Wall thickness in [m].
    s_ins : float, int
        Insulation thickness in [m]. Can be zero.
    alpha_fld : int, float
        Heat transfer coefficient in [W/(m^2K)] between the fluid and the wall.
    alpha_inf : int, float
        Heat transfer coefficient in [W/(m^2K)] between the outer layer and
        the ambient.
    lam_wll : int, float
        Wall heat conductivity in [W/(mK)].
    lam_ins : int, float
        Insulation heat conductivity in [W/(mK)].

    Returns:
    --------
    float
        U*A in [W/K]. (The previous docstring documented ``lam_fld`` and a
        ``UA`` output array, neither of which exists in the signature.)
    """
    # Four resistances in series: fluid convection, wall conduction,
    # insulation conduction (zero when s_ins == 0), ambient convection.
    return A / (
        1 / alpha_fld + s_wll / lam_wll + s_ins / lam_ins + 1 / alpha_inf
    )
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_wll_ins_amb_plate(A, s_wll, s_ins, lam_wll, lam_ins, alpha_inf):
    """
    Calculates the U*A-value for the heat flow to or from a plate with or
    without insulation to or from the ambient.
    Layers which are considered: wall material, insulation, ambient.
    The reference area must always be the cross section area.

    Parameters:
    -----------
    A : float, int
        The fluid-wall-contact area in [m^2].
    s_wll : float, int
        Wall thickness in [m].
    s_ins : float, int
        Insulation thickness in [m].
    lam_wll : int, float
        Wall heat conductivity in [W/(mK)].
    lam_ins : int, float
        Insulation heat conductivity in [W/(mK)].
    alpha_inf : int, float
        Heat transfer coefficient in [W/(m^2K)] between the insulation and the
        ambient.

    Returns:
    --------
    float
        U*A in [W/K]. (The previous docstring documented a ``UA`` output
        array that does not exist in the signature.)
    """
    # Three resistances in series: wall conduction, insulation conduction,
    # ambient convection. No fluid-side term in this variant.
    return A / (s_wll / lam_wll + s_ins / lam_ins + 1 / alpha_inf)
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def surface_temp_steady_state_inplace(T, T_inf, A_s, alpha_inf, UA, T_s):
"""
Parameters:
-----------
A_s : float, int
The outer surface area (air-contact-area) PER CELL. Calculated with:
A_s = np.pi * r_s * 2 * grid_spacing
alpha_inf : np.ndarray
Heat transfer coefficient in [W / (m**2K)] between the outer layer and
the ambient. The shape must equal the fluid temperature array shape or
be a single array cell. This array is used to calculate the new outer
surface temperature and to get the new alpha_inf value | |
}
return s.charAt(i++);
},
'__iter__': function() {
return this;
}
};
};
} else {
String.prototype.__iter__ = function() {
var i = 0;
var s = this;
return {
'__array': this,
'next': function(noStop) {
if (i >= s.length) {
if (noStop === true) {
return;
}
throw $pyce(@{{StopIteration}}());
}
return s.charAt(i++);
},
'__iter__': function() {
return this;
}
};
};
}
// str.strip([chars]) -- remove leading and trailing characters; with no
// argument, whitespace is stripped.
String.prototype.strip = function(chars) {
    return this.lstrip(chars).rstrip(chars);
};
// str.lstrip([chars]) -- remove leading characters.
String.prototype.lstrip = function(chars) {
    // Default: strip leading whitespace with a regex.
    if (typeof chars == 'undefined') return this.replace(/^\s+/, "");
    // An empty strip set removes nothing.
    if (chars.length == 0) return this;
    var start = 0;
    for (; start < this.length; start++) {
        // First character NOT in the strip set marks the kept suffix.
        if (chars.indexOf(this[start]) < 0) {
            return this.slice(start);
        }
    }
    // Every character was in the strip set.
    return '';
};
// str.rstrip([chars]) -- remove trailing characters (mirror of lstrip).
String.prototype.rstrip = function(chars) {
    if (typeof chars == 'undefined') return this.replace(/\s+$/, "");
    if (chars.length == 0) return this;
    var end = this.length;
    while(end--) {
        // Last character NOT in the strip set marks the kept prefix.
        if (chars.indexOf(this[end]) < 0) {
            return this.slice(0, end + 1);
        }
    }
    return '';
};
// str.startswith(prefix[, start[, end]]) -- test inside the [start, end) window.
String.prototype.startswith = function(prefix, start, end) {
    // FIXME: accept tuples as suffix (since 2.5)
    if (typeof start == 'undefined') start = 0;
    if (typeof end == 'undefined') end = this.length;
    // A window shorter than the prefix can never match.
    if ((end - start) < prefix.length) return false;
    if (this.substr(start, prefix.length) == prefix) return true;
    return false;
};
// str.endswith(suffix[, start[, end]]) -- mirror of startswith, anchored at end.
String.prototype.endswith = function(suffix, start, end) {
    // FIXME: accept tuples as suffix (since 2.5)
    if (typeof start == 'undefined') start = 0;
    if (typeof end == 'undefined') end = this.length;
    if ((end - start) < suffix.length) return false;
    if (this.substr(end - suffix.length, suffix.length) == suffix) return true;
    return false;
};
// str.ljust(width[, fillchar]) -- left-justify in a field of `width` chars.
String.prototype.ljust = function(width, fillchar) {
    // pyjs number tags: 0x02/0x04 mark boxed int/long wrappers (unwrapped
    // via valueOf); 0x01 marks a native JS number, accepted only if integral.
    switch (width.__number__) {
        case 0x02:
        case 0x04:
            width = width.valueOf();
            break;
        case 0x01:
            if (Math.floor(width) == width) break;
        default:
            throw $pyce(@{{TypeError}}("an integer is required (got '" + width + "')"));
    }
    if (typeof fillchar == 'undefined') fillchar = ' ';
    // Python requires the fill to be exactly one character.
    if (typeof(fillchar) != 'string' ||
        fillchar.length != 1) {
        throw $pyce(@{{TypeError}}("ljust() argument 2 must be char, not " + typeof(fillchar)));
    }
    // Already wide enough: return unchanged.
    if (this.length >= width) return this;
    // new Array(n + 1).join(c) builds a run of n fill characters.
    return this + new Array(width+1 - this.length).join(fillchar);
};
// str.rjust(width[, fillchar]) -- right-justify (pad on the left).
String.prototype.rjust = function(width, fillchar) {
    switch (width.__number__) {
        case 0x02:
        case 0x04:
            width = width.valueOf();
            break;
        case 0x01:
            if (Math.floor(width) == width) break;
        default:
            throw $pyce(@{{TypeError}}("an integer is required (got '" + width + "')"));
    }
    if (typeof fillchar == 'undefined') fillchar = ' ';
    if (typeof(fillchar) != 'string' ||
        fillchar.length != 1) {
        throw $pyce(@{{TypeError}}("rjust() argument 2 must be char, not " + typeof(fillchar)));
    }
    if (this.length >= width) return this;
    return new Array(width + 1 - this.length).join(fillchar) + this;
};
// str.center(width[, fillchar]) -- centre the string; with odd padding the
// extra fill character goes on the right (right = ceil(padlen / 2)).
String.prototype.center = function(width, fillchar) {
    switch (width.__number__) {
        case 0x02:
        case 0x04:
            width = width.valueOf();
            break;
        case 0x01:
            if (Math.floor(width) == width) break;
        default:
            throw $pyce(@{{TypeError}}("an integer is required (got '" + width + "')"));
    }
    if (typeof fillchar == 'undefined') fillchar = ' ';
    if (typeof(fillchar) != 'string' ||
        fillchar.length != 1) {
        throw $pyce(@{{TypeError}}("center() argument 2 must be char, not " + typeof(fillchar)));
    }
    if (this.length >= width) return this;
    var padlen = width - this.length;
    var right = Math.ceil(padlen / 2);
    var left = padlen - right;
    return new Array(left+1).join(fillchar) + this + new Array(right+1).join(fillchar);
};
String.prototype.__getitem__ = function(idx) {
if (@{{isinstance}}(idx, @{{slice}})) {
var lower = idx.start, upper = idx.stop;
if (lower === null) {
lower = 0;
} else if (lower < 0) {
lower = this.length + lower;
}
if (upper === null) {
upper=this.length;
} else if (upper < 0) {
upper = this.length + upper;
}
return this.substring(lower, upper);
} else {
if (idx < 0) idx += this.length;
if (idx < 0 || idx > this.length) {
throw $pyce(@{{IndexError}}("string index out of range"));
}
return this.charAt(idx);
}
};
// Strings are immutable: item assignment always raises TypeError.
String.prototype.__setitem__ = function(idx, val) {
    throw $pyce(@{{TypeError}}("'str' object does not support item assignment"));
};
// upper()/lower() map directly onto the native JS case-conversion methods.
String.prototype.upper = String.prototype.toUpperCase;
String.prototype.lower = String.prototype.toLowerCase;
String.prototype.capitalize = function() {
    // Python str.capitalize(): first character upper-cased AND the rest
    // lower-cased ("hELLO" -> "Hello"). The old code left the tail's case
    // untouched, deviating from Python semantics.
    return this.charAt(0).toUpperCase() + this.substring(1).toLowerCase();
};
// str.zfill(width) -- pad on the left with zeros.
// NOTE(review): Python inserts the zeros AFTER a leading '+'/'-' sign
// ('-42'.zfill(5) == '-0042'); this version would yield '00-42'. Confirm
// whether signed input ever reaches zfill here.
String.prototype.zfill = function(width) {
    return this.rjust(width, '0');
};
String.prototype.__add__ = function(y) {
    // '+' only concatenates str with str; everything else is a TypeError.
    if (typeof y != "string") {
        throw $pyce(@{{TypeError}}("cannot concatenate 'str' and non-str objects"));
    }
    return this + y;
};
String.prototype.__mul__ = function(y) {
    // 's' * n -- n must be an integer (boxed int/long or integral JS number;
    // see the __number__ tag convention used throughout this runtime).
    switch (y.__number__) {
        case 0x02:
        case 0x04:
            y = y.valueOf();
            break;
        case 0x01:
            if (Math.floor(y) == y) break;
        default:
            throw $pyce(@{{TypeError}}("can't multiply sequence by non-int of type 'str'"));
    }
    var s = '';
    while (y-- > 0) {
        s += this;
    }
    return s;
};
// n * 's' behaves exactly like 's' * n.
String.prototype.__rmul__ = String.prototype.__mul__;
// Runtime metadata so native JS strings act as pyjs 'str' instances.
String.prototype.__number__ = null;
String.prototype.__name__ = 'str';
String.prototype.__class__ = String.prototype;
String.prototype.__$super_cache__ = @{{str}}.__$super_cache__;
String.prototype.$H = @{{str}}.$H;
String.prototype.__is_instance__ = null;
String.prototype.__str__ = function () {
    // Invoked on the type object itself (a function) -> "<type 'str'>".
    if (typeof this == 'function') return "<type '" + this.__name__ + "'>";
    return this.toString();
};
String.prototype.__repr__ = function () {
    if (typeof this == 'function') return "<type '" + this.__name__ + "'>";
    return "'" + this.toString() + "'";
};
// Method resolution order mirrors CPython: str -> basestring -> object.
String.prototype.__mro__ = @{{tuple}}([@{{basestring}}, @{{object}}]);
""")
# Patching of the standard javascript Boolean object
JS("""
// Tag Boolean with the 0x01 numeric marker (native JS number family) and
// attach Python-style metadata so isinstance()/str()/repr() work on bools.
Boolean.prototype.__number__ = 0x01;
Boolean.prototype.__name__ = 'bool';
Boolean.prototype.__class__ = Boolean.prototype;
Boolean.prototype.__$super_cache__ = @{{bool}}.__$super_cache__;
Boolean.prototype.$H = @{{bool}}.$H;
Boolean.prototype.__is_instance__ = null;
Boolean.prototype.__str__= function () {
    // On the type object itself -> "<type 'bool'>"; on instances ->
    // Python-style capitalised literals.
    if (typeof this == 'function') return "<type '" + this.__name__ + "'>";
    if (this == true) return "True";
    return "False";
};
Boolean.prototype.__repr__ = Boolean.prototype.__str__;
// Bitwise operators.
// NOTE(review): these return a JS number (0/1), not a bool, whereas
// Python's True & True is True -- confirm callers only use them numerically.
Boolean.prototype.__and__ = function (y) {
    return this & y.valueOf();
};
Boolean.prototype.__or__ = function (y) {
    return this | y.valueOf();
};
Boolean.prototype.__xor__ = function (y) {
    return this ^ y.valueOf();
};
""")
# Patching of the standard javascript Array object
# This makes it impossible to use for (k in Array())
JS("""
// Fallback shim for pre-ES5 engines that lack Array.prototype.indexOf
// (the added enumerable property is what breaks for-in, per the note above).
if (typeof Array.prototype.indexOf != 'function') {
    Array.prototype.indexOf = function(elt /*, from*/) {
        // >>> 0 applies the spec's ToUint32 conversion to length.
        var len = this.length >>> 0;
        var from = Number(arguments[1]) || 0;
        // Truncate toward zero, as the spec algorithm does.
        from = (from < 0)
             ? Math.ceil(from)
             : Math.floor(from);
        // A negative start counts back from the end of the array.
        if (from < 0)
            from += len;
        for (; from < len; from++) {
            // `from in this` skips holes in sparse arrays; comparison is
            // strict (===), matching the native method.
            if (from in this &&
                this[from] === elt)
                return from;
        }
        return -1;
    };
};
""")
# Patching of the standard javascript RegExp
JS("""
// Exec(): wrapper around RegExp.exec() that normalises non-participating
// capture groups from `undefined` to `null`, so absent groups have one
// consistent representation.
RegExp.prototype.Exec = function(pat) {
    var m = this.exec(pat);
    if (m !== null) {
        var len = m.length >>> 0;
        for (var i = 0; i < len; i++) {
            if (typeof(m[i]) == 'undefined')
                m[i] = null;
        }
    }
    return m;
};
""")
JS("""
// Bind the Python builtin abs() directly to Math.abs.
// NOTE(review): Math.abs coerces boxed int/long wrappers via valueOf and
// returns a plain JS number -- confirm integer results need no reboxing.
@{{abs}} = Math.abs;
""")
class Class:
    """Minimal named-object placeholder; stringifies to its name.

    Fixed: ``__str___`` (three trailing underscores) was a typo, so the
    method was never dispatched by ``str()`` -- renamed to ``__str__``.
    """
    def __init__(self, name):
        # name: label reported by str(instance).
        self.name = name

    def __str__(self):
        return self.name
def open(fname, mode='r'):
    """Stub shadowing the builtin ``open``: there is no file system in the
    browser, so any file I/O attempt fails loudly instead of silently."""
    raise NotImplementedError("open is not implemented in browsers")
cmp = JS("""function(a, b) {
if (typeof a == typeof b) {
switch (typeof a) {
case 'number':
case 'string':
case 'boolean':
return a == b ? 0 : (a < b ? -1 : 1);
}
if (a === b) return 0;
}
if (a === null) {
if (b === null) return 0;
return -1;
}
if (b === null) {
return 1;
}
switch ((a.__number__ << 8)|b.__number__) {
case 0x0202:
a = a.__v;
b = b.__v;
case 0x0101:
return a == b ? 0 : (a < b ? -1 : 1);
case 0x0100:
case 0x0200:
case 0x0400:
if (typeof b.__cmp__ == 'function') {
return -b.__cmp__(a);
}
return -1;
case 0x0001:
case 0x0002:
case 0x0004:
if (typeof a.__cmp__ == 'function') {
return a.__cmp__(b);
}
return 1;
case 0x0102:
return -b.__cmp__(new @{{int}}(a));
case 0x0104:
return -b.__cmp__(new @{{long}}(a));
case 0x0201:
return a.__cmp__(new @{{int}}(b));
case 0x0401:
return a.__cmp__(new @{{long}}(b));
case 0x0204:
return -b.__cmp__(new @{{long}}(a));
case 0x0402:
return a.__cmp__(new @{{long}}(b));
case 0x0404:
return a.__cmp__(b);
}
if (typeof a.__class__ == typeof b.__class__ && typeof a.__class__ == 'function') {
if (a.__class__.__name__ < b.__class__.__name__) {
return -1;
}
if (a.__class__.__name__ > b.__class__.__name__) {
return 1;
}
}
// use lt, gt and eq if defined on one of the two objects
if ((typeof a == 'object' || typeof a == 'function') && typeof a.__eq__ == 'function' &&
typeof a.__lt__ == 'function' && typeof a.__gt__ == 'function') {
eq_result = @{{bool}}(a.__eq__(b));
if (eq_result) {
return 0;
}
else {
lt_result = @{{bool}}(a.__lt__(b));
if (lt_result) {
return -1;
}
else {
return 1;
}
}
} else if ((typeof b == 'object' || typeof b == 'function') && typeof b.__eq__ == 'function' &&
typeof b.__lt__ == 'function' && typeof b.__gt__ == 'function') {
eq_result = @{{bool}}(b.__eq__(a));
if (eq_result) {
return 0;
}
else {
lt_result = @{{bool}}(b.__lt__(a));
if (lt_result) {
return 1;
}
else {
return -1;
}
}
}
if ((typeof a == 'object' || typeof a == 'function') && typeof a.__cmp__ == 'function') | |
number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# Flag unapproved C++14 headers.
if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):
error(filename, linenum, 'build/c++14', 5,
('<%s> is an unapproved C++14 header.') % include.group(1))
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=None):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error.
                           Defaults to None (no extra checks).
  """
  # Avoid the shared-mutable-default-argument pitfall of the old
  # `extra_check_functions=[]` signature.
  if extra_check_functions is None:
    extra_check_functions = []

  # Pad both ends so line numbers and list indices both start at 1 and the
  # end of file is unambiguous.
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])

  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = NestingState()

  ResetNolintSuppressions()

  CheckForCopyright(filename, lines, error)
  ProcessGlobalSuppresions(lines)  # (sic: upstream function name is misspelled)
  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)

  if IsHeaderExtension(file_extension):
    CheckForHeaderGuard(filename, clean_lines, error)

  for line in xrange(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions)
    FlagCxx11Features(filename, clean_lines, line, error)
  nesting_state.CheckCompletedBlocks(filename, error)

  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)

  # Check that the .cc file has included its header if it exists.
  if _IsSourceExtension(file_extension):
    CheckHeaderFileIncluded(filename, include_state, error)

  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)

  CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
  """ Loads the configuration files and processes the config overrides.

  Walks from the file's directory up toward the filesystem root, reading a
  CPPLINT.cfg in each directory if present, until a config sets
  'set noparent' (or an unreadable config / the root stops the walk).

  Args:
    filename: The name of the file being processed by the linter.

  Returns:
    False if the current |filename| should not be processed further.
  """
  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.

    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    # Step up one directory for the next iteration regardless of whether a
    # config file exists at this level.
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue

    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue

          # Each non-empty line is a "name=value" pair ('set noparent' has
          # no value and partitions to an empty val).
          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                if _cpplint_state.quiet:
                  # Suppress "Ignoring file" warning when using --quiet.
                  return False
                sys.stderr.write('Ignoring "%s": file excluded by "%s". '
                                 'File path component "%s" matches '
                                 'pattern "%s"\n' %
                                 (filename, cfg_file, base_name, val))
                return False
          elif name == 'linelength':
            global _line_length
            try:
              _line_length = int(val)
            except ValueError:
              sys.stderr.write('Line length must be numeric.')
          elif name == 'root':
            global _root
            # root directories are specified relative to CPPLINT.cfg dir.
            _root = os.path.join(os.path.dirname(cfg_file), val)
          elif name == 'headers':
            ProcessHppHeadersOption(val)
          else:
            sys.stderr.write(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))

    except IOError:
      sys.stderr.write(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False

  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  # NOTE(review): the loop variable `filter` shadows the builtin -- harmless
  # here, but worth renaming upstream.
  for filter in reversed(cfg_filters):
    _AddFilters(filter)

  return True
def ProcessFile(filename, vlevel, extra_check_functions=None):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.

    vlevel: The level of errors to report.  Every error of confidence
    >= verbose_level will be reported.  0 is a good default.

    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error.
                           Defaults to None (no extra checks).
  """
  # Avoid the shared-mutable-default-argument pitfall of the old
  # `extra_check_functions=[]` signature.
  if extra_check_functions is None:
    extra_check_functions = []

  _SetVerboseLevel(vlevel)
  _BackupFilters()
  old_errors = _cpplint_state.error_count

  if not ProcessConfigOverrides(filename):
    _RestoreFilters()
    return

  lf_lines = []
  crlf_lines = []
  try:
    # Support the UNIX convention of using "-" for stdin.  Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      # Close the handle deterministically; the previous code leaked the
      # codecs.open() file object until garbage collection.
      with codecs.open(filename, 'r', 'utf8', 'replace') as target_file:
        lines = target_file.read().split('\n')

    # Remove trailing '\r'.
    # The -1 accounts for the extra trailing blank line we get from split()
    for linenum in range(len(lines) - 1):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        crlf_lines.append(linenum + 1)
      else:
        lf_lines.append(linenum + 1)

  except IOError:
    sys.stderr.write(
        "Skipping input '%s': Can't open for reading\n" % filename)
    _RestoreFilters()
    return

  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]

  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in _valid_extensions:
    sys.stderr.write('Ignoring %s; not a valid file name '
                     '(%s)\n' % (filename, ', '.join(_valid_extensions)))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)

    # If end-of-line sequences are a mix of LF and CR-LF, issue
    # warnings on the lines with CR.
    #
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
    # since critique can handle these just fine, and the style guide
    # doesn't dictate a particular end of line sequence.
    #
    # We can't depend on os.linesep to determine what the desired
    # end-of-line sequence should be, since that will return the
    # server-side end-of-line sequence.
    if lf_lines and crlf_lines:
      # Warn on every line with CR.  An alternative approach might be to
      # check whether the file is mostly CRLF or just LF, and warn on the
      # minority, we bias toward LF here since most tools prefer LF.
      for linenum in crlf_lines:
        Error(filename, linenum, 'whitespace/newline', 1,
              'Unexpected \\r (^M) found; better to use only \\n')

  # Suppress printing anything if --quiet was passed unless the error
  # count has increased after processing this file.
  if not _cpplint_state.quiet or old_errors != _cpplint_state.error_count:
    sys.stdout.write('Done processing %s\n' % filename)
  _RestoreFilters()
def PrintUsage(message):
  """Write the usage text to stderr and terminate the process.

  Args:
    message: The optional error message.
  """
  sys.stderr.write(_USAGE)
  # sys.exit with a string prints it and exits with status 1; with an int it
  # exits with that status silently.
  exit_arg = ('\nFATAL ERROR: ' + message) if message else 1
  sys.exit(exit_arg)
def PrintCategories():
  """Write every error category used by --filter to stderr, then exit 0.

  These are the categories used to filter messages via --filter.
  """
  formatted = [' %s\n' % cat for cat in _ERROR_CATEGORIES]
  sys.stderr.write(''.join(formatted))
  sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions=',
'headers=',
'quiet'])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
quiet = _Quiet()
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--quiet':
quiet = True
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--extensions':
global | |
# Repository: wikimedia/abstract-wikipedia-data-science
import sys
import mwapi
import toolforge
import pandas as pd
import pymysql
import numpy as np
import argparse
from urllib.parse import unquote
import utils.db_access as db_acc
import constants
# Teach pymysql how to escape numpy int64 values (pandas frames emit them),
# then rebuild the combined conversions table from the patched encoders plus
# the standard decoders.
pymysql.converters.encoders[np.int64] = pymysql.converters.escape_int
pymysql.converters.conversions = pymysql.converters.encoders.copy()
pymysql.converters.conversions.update(pymysql.converters.decoders)
## define constants
MIN_IDX = 0  # smallest (0-based) wiki index
def get_wiki_list(start_idx, end_idx, user_db_port=None, user=None, password=None):
    """
    Fetches urls of all wikis and chooses the ones in the given indexes (both start and end indexes are included).

    :param start_idx: starting index of the wikis, which should be processed.
    :param end_idx: ending index of the wikis, which should be processed (inclusive).
    :param user_db_port: port for connecting to local Sources table through ssh tunneling, if used.
    :param user: Toolforge username of the tool.
    :param password: Toolforge password of the tool.
    :return: list of wikis' urls within given indexes
    """
    try:
        conn = db_acc.connect_to_user_database(
            constants.DATABASE_NAME, user_db_port, user, password
        )
        with conn.cursor() as cur:
            cur.execute(
                "select url from Sources where url is not NULL"
            )  # all, except 'meta'
            # Slice after fetching; both bounds are inclusive, hence the +1.
            ret = [wiki[0] for wiki in cur][start_idx : end_idx + 1]
        conn.close()
        return ret
    except Exception as err:
        # Any DB failure is fatal for the whole run.
        print("Something went wrong.\n", err)
        sys.exit(1)
def save_content(
    wiki, data_list, in_api, in_database, user_db_port=None, user=None, password=None
):
    """
    Saves data into Scripts table.

    :param wiki: The wiki project corresponding to the data provided.
    :param data_list: The data to be saved in Scripts table.
    :param in_database: Whether data was collected from databases.
    :param in_api: Whether data was collected from API.
    :param user_db_port: port for connecting to local Sources table through ssh tunneling, if used.
    :param user: Toolforge username of the tool.
    :param password: Toolforge password of the tool.
    :return: None
    """
    data_df = pd.DataFrame(
        data_list,
        columns=[
            "id",
            "title",
            "url",
            "length",
            "content",
            "content_model",
            "touched",
            "lastrevid",
        ],
    )
    # Upsert: the first 11 placeholders feed the INSERT column list; the
    # remaining 10 repeat the same values for the ON DUPLICATE KEY UPDATE
    # branch (which also resets is_missed back to 0).
    query = (
        "INSERT INTO Scripts(dbname, page_id, title, sourcecode, touched, "
        "in_api, in_database, length, content_model, lastrevid, url) "
        "VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) "
        "ON DUPLICATE KEY UPDATE title = %s, sourcecode = %s, touched = %s, in_api = %s, in_database = %s, "
        "length = %s, content_model = %s, lastrevid = %s, url = %s, is_missed=%s"
    )
    try:
        conn = db_acc.connect_to_user_database(
            constants.DATABASE_NAME, user_db_port, user, password
        )
        with conn.cursor() as cur:
            # Resolve the wiki url to its canonical dbname once per call.
            cur.execute("SELECT dbname FROM Sources WHERE url = %s", wiki)
            dbname = cur.fetchone()[0]
            for index, elem in data_df.iterrows():
                # Turn the API timestamp into a MySQL-friendly form by
                # replacing the 'T' separator and trailing 'Z' with spaces.
                time = elem["touched"].replace("T", " ").replace("Z", " ")
                cur.execute(
                    query,
                    [
                        dbname,
                        elem["id"],
                        elem["title"],
                        elem["content"],
                        time,
                        in_api,
                        in_database,
                        elem["length"],
                        elem["content_model"],
                        elem["lastrevid"],
                        elem["url"],
                        # Values repeated for the UPDATE branch:
                        elem["title"],
                        elem["content"],
                        time,
                        in_api,
                        in_database,
                        elem["length"],
                        elem["content_model"],
                        elem["lastrevid"],
                        elem["url"],
                        0,
                    ],
                )
            conn.commit()
        conn.close()
    except Exception as err:
        # Non-fatal: log and continue with the next wiki.
        print("Error saving pages from", wiki)
        print(err)
def save_missed_content(wiki, missed, user_db_port=None, user=None, password=None):
    """
    Mark missed pages as is_missed=True in Scripts table.

    :param wiki: The wiki project corresponding to the data provided.
    :param missed: List of single-element lists, each holding a missed page id.
    :param user_db_port: port for connecting to local Sources table through ssh tunneling, if used.
    :param user: Toolforge username of the tool.
    :param password: Toolforge password of the tool.
    :return: None
    """
    # Upsert: first four placeholders feed the INSERT, the last two repeat
    # the flags for the ON DUPLICATE KEY UPDATE branch.
    query = (
        "INSERT INTO Scripts(dbname, page_id, in_api, is_missed) "
        "values(%s, %s, %s, %s) "
        "ON DUPLICATE KEY UPDATE in_api = %s, is_missed = %s"
    )
    try:
        conn = db_acc.connect_to_user_database(
            constants.DATABASE_NAME, user_db_port, user, password
        )
        with conn.cursor() as cur:
            # Resolve the wiki url to its canonical dbname once per call.
            cur.execute("SELECT dbname FROM Sources WHERE url = %s", wiki)
            dbname = cur.fetchone()[0]
            # Iterate the plain list directly: the previous code built a
            # one-column DataFrame only to iterrows() over it, which added
            # pandas overhead without any benefit.
            for (page_id,) in missed:
                cur.execute(query, [dbname, page_id, 1, 1, 1, 1])
            conn.commit()
        conn.close()
    except Exception as err:
        # Any DB failure is fatal for the whole run.
        print("Something went wrong.\n", err)
        sys.exit(1)
def needs_update(wiki, pageid, title, touched, revid):
    """
    Decide whether a stored page must be re-fetched.

    Placeholder implementation: always returns True, so every page is
    (re)collected even in revise mode. The parameters are kept so a real
    change-detection check (e.g. comparing ``touched``/``revid`` against the
    stored values) can be added later without touching the callers.
    """
    return True
def get_contents(wikis, revise=False, user_db_port=None, user=None, password=None):
    """
    Connects to the wiki by using API, fetches Scribunto modules and additional info from there
    and saves them to the user's database.

    Possible ways for the process to fail:
    1. Failed to connect to wiki (See from output)
    2. Connected but could not GET wiki (See from output)
    3. Could not grab a page (Listed in missed pages)

    :param wikis: list of urls of wikis, from which the modules will be collected
    :param revise: `False` collects all contents and saves fresh
                   `True` only collects those that have been edited
    :param user_db_port: port for connecting to local Sources table through ssh tunneling, if used.
    :param user: Toolforge username of the tool.
    :param password: Toolforge password of the tool.
    :return: None
    """
    for wiki in wikis:
        try:
            session = mwapi.Session(wiki, user_agent="abstract-wiki-ds")
        except Exception as e:
            # Failure mode 1: skip this wiki entirely.
            print("Failed to connect to", wiki, "\n", e)
            continue

        data_list = []
        cnt_data_list = 0
        missed = []
        cnt_missed = 0
        _gapcontinue = ""
        _continue = ""
        # Paginate through the module namespace until the API stops handing
        # out continuation tokens.
        while True:
            # Generator query: namespace 828 pages (filtered to Scribunto
            # content below), 300 at a time, with page info and full url.
            params = {
                "action": "query",
                "generator": "allpages",
                "gapnamespace": 828,
                "gaplimit": 300,
                "format": "json",
                "prop": "info",
                "inprop": "url",
                "gapcontinue": _gapcontinue,
                "continue": _continue,
            }
            try:
                result = session.get(params)
            except Exception as e:
                # Failure mode 2: abandon pagination for this wiki.
                print("Could not GET", wiki, "\n", e)
                break

            if "query" in result.keys():
                for page in list(result["query"]["pages"].values()):
                    try:
                        pageid = page["pageid"]
                        title = page["title"]
                        touched = page["touched"]
                        length = page["length"]
                        url = unquote(page["fullurl"])
                        revid = page["lastrevid"]
                        # In revise mode only changed pages are re-fetched
                        # (needs_update is currently a stub returning True).
                        if (not revise) or needs_update(
                            wiki, pageid, title, touched, revid
                        ):
                            # Second request: the actual source of the page's
                            # latest revision.
                            params = {
                                "action": "query",
                                "format": "json",
                                "prop": "revisions",
                                "revids": revid,
                                "rvprop": "content",
                                "rvslots": "main",
                                "formatversion": 2,
                            }
                            rev_result = session.get(params)
                            content_info = rev_result["query"]["pages"][0]["revisions"][
                                0
                            ]["slots"]["main"]
                            content = content_info["content"]
                            content_model = content_info["contentmodel"]
                            # Keep only real Lua modules, not e.g. doc pages.
                            if content_model == "Scribunto":
                                data_list.append(
                                    [
                                        pageid,
                                        title,
                                        url,
                                        length,
                                        content,
                                        content_model,
                                        touched,
                                        revid,
                                    ]
                                )
                    except Exception as err:
                        # Failure mode 3: record the page id for later retry.
                        if "pageid" in page.keys():
                            missed.append([pageid])
                        print("Miss:", wiki, title, pageid, "\n", err)

                # Flush this batch to the database and reset the buffers.
                cnt_data_list += len(data_list)
                cnt_missed += len(missed)
                save_missed_content(wiki, missed, user_db_port, user, password)
                save_content(wiki, data_list, 1, 0, user_db_port, user, password)
                print(cnt_data_list, "pages loaded...")
                data_list, missed = [], []

            # NOTE(review): bare except -- a missing 'continue' key is taken
            # as "no more pages", but this also swallows unrelated errors.
            try:
                _continue = result["continue"]["continue"]
                _gapcontinue = (
                    result["continue"]["gapcontinue"]
                    if "gapcontinue" in result["continue"]
                    else ""
                )
            except:
                break

        print(
            "All pages loaded for %s. Missed: %d, Loaded: %d"
            % (wiki, cnt_missed, cnt_data_list)
        )

    print("Done loading!")
def get_db_map(wikis=[], dbs=[], user_db_port=None, user=None, password=None):
    """
    Fetches info from the users database about the wikis with given dbnames or urls.

    Searches by url when `wikis` is non-empty, otherwise searches by the
    dbnames listed in `dbs`.

    :param wikis: list of wikis' urls, whose info needed.
    :param dbs: list of wikis' dbnames, whose info needed.
    :param user_db_port: port for connecting to local Sources table through ssh tunneling, if used.
    :param user: Toolforge username of the tool.
    :param password: Toolforge password of the tool.
    :return: dictionary of fetched info in form {dbname1: url1, dbname2: url2, ...},
        and a comma separated placeholder (%s) string matching the query input length.
    """
    if wikis:
        column, query_input = "url", wikis
    else:
        column, query_input = "dbname", dbs
    placeholders = ",".join("%s" for _ in query_input)
    query = "SELECT dbname, url FROM Sources WHERE %s IN (%s)" % (column, placeholders)

    db_map = {}
    try:
        conn = db_acc.connect_to_user_database(
            constants.DATABASE_NAME, user_db_port, user, password
        )
        with conn.cursor() as cur:
            cur.execute(query, query_input)
            db_map = {dbname: url for dbname, url in cur}
        conn.close()
    except Exception as err:
        print("Something went wrong.\n", err)
        exit(1)
    return db_map, placeholders
def get_pages(df, in_api, in_database, user_db_port=None, user=None, password=None):
"""
Connects to the wikis from wiki field and fetches infomation for the pages with given page_id,
then saving fetched content and missing content to the user's database.
:param df: dataframe with columns page_id, dbname, wiki (represents url of wiki). dbname is not required.
:param in_api: the value to which in_api field will be set
:param in_database: the value to which in_database field will be set
:param user_db_port: port for connecting to local Sources table through ssh tunneling, if used.
:param user: Toolforge username of the tool.
:param password: Toolforge password of the tool.
:return: None
"""
for wiki, w_df in df.groupby("wiki"):
try:
session = mwapi.Session(wiki, user_agent="abstract-wiki-ds")
except Exception as e:
print("Failed to connect to", wiki, "\n", e)
continue
pageids = w_df["page_id"].values
data_list = []
missed = []
for pageid in list(pageids):
params = {
"action": "query",
"format": "json",
"prop": "revisions|info",
"pageids": pageid,
"rvprop": "content",
"rvslots": "main",
"inprop": "url",
"formatversion": 2,
}
try:
result = session.get(params)
page = result["query"]["pages"][0]
if page["lastrevid"] != 0:
url = unquote(page["fullurl"])
title = page["title"]
| |
<filename>gui/communication/esp32serial.py
"""
Library to interface with the ESP32
"""
from threading import Lock
import serial # pySerial
from . import ESP32Alarm, ESP32Warning
__all__ = ("ESP32Serial", "ESP32Exception")
class ESP32Exception(Exception):
    """
    Exception raised on protocol decoding and hardware failures.

    Attributes:
    - verb      the transmit verb = {get, set}
    - line      the line transmitted to the ESP32 that is failing
    - output    what the ESP32 is replying
    - details   additional information (typically the original error text)
    """

    def __init__(self, verb=None, line=None, output=None, details=None):
        """
        Constructor

        All arguments are stringified and stored as attributes; a
        formatted summary becomes the exception message.
        """
        self.verb, self.line = str(verb), str(line)
        self.output, self.details = str(output), str(details)
        super().__init__(
            "ERROR in %s: line: %s; output: %s; details: %s" %
            (self.verb, self.line, self.output, self.details))
def _parse(result):
"""
Parses the message from ESP32
arguments:
- result what the ESP replied as a binary buffer
returns the requested value as a string
"""
check_str, value = result.decode().split('=')
check_str = check_str.strip()
if check_str != 'valore':
raise ESP32Exception("", "", "protocol error: 'valore=' expected")
return value.strip()
class ESP32Serial:
"""
Main class for interfacing with the ESP32 via a serial connection.
"""
def __init__(self, config, **kwargs):
"""
Contructor
Opens a serial connection to the MVM ESP32
arguments:
- config the configuration object containing at least the
"port" and "get_all_fields" keys
named arguments:
- any argument available for the serial.Serial pySerial class
- baudrate the preferred baudrate, default 115200
- terminator the line terminator, binary encoded, default
b'\n'
- timeout sets the read() timeout in seconds
"""
self.lock = Lock()
self.term = kwargs["terminator"] if "terminator" in kwargs else b'\n'
self._port = config["port"]
self._port_kwargs = kwargs
self.reconnect()
self.get_all_fields = config["get_all_fields"]
def reconnect(self):
"""
Reconnects to the ESP32 serial based on initialized settings.
"""
try:
self._close_connection()
baudrate = self._port_kwargs["baudrate"] if "baudrate" in self._port_kwargs else 115200
timeout = self._port_kwargs["timeout"] if "timeout" in self._port_kwargs else 1
self.connection = serial.Serial(port=self._port,
baudrate=baudrate, timeout=timeout,
**self._port_kwargs)
while self.connection.read():
pass
except Exception as exc: # pylint: disable=W0703
raise ESP32Exception("reconnect", None, None, str(exc))
def _close_connection(self):
"""
Closes the connection.
"""
with self.lock:
if hasattr(self, "connection"):
self.connection.close()
    def __del__(self):
        """
        Destructor.

        Closes the serial connection when the object is garbage
        collected.  Note: ``__del__`` is not guaranteed to run at
        interpreter shutdown, so critical cleanup should not rely on it.
        """

        self._close_connection()
def _write(self, cmd):
"""
Writes the un-encoded message to the ESP32.
The command is stored as the last cmd.
arguments:
- cmd the unencoded command
"""
result = b""
try:
result = self.connection.write(cmd.encode())
except Exception as exc: # pylint: disable=W0703
raise ESP32Exception("write", cmd, result.decode(), str(exc))
    def set(self, name, value):
        """
        Set command wrapper

        arguments:
        - name           the parameter name as a string
        - value          the value to assign to the variable as any type
                         convertible to string

        returns: an "OK" string in case of success.

        Raises ESP32Exception if the write fails or the reply cannot be
        parsed.
        """

        print("ESP32Serial-DEBUG: set %s %s" % (name, value))

        with self.lock:
            # I know about Python 3.7 magic string formatting capability
            # but I don't really remember now the version running on
            # Raspbian
            command = 'set ' + name + ' ' + str(value) + '\r\n'
            self._write(command)
            result = b""
            try:
                # read the ESP reply up to the configured line terminator
                result = self.connection.read_until(terminator=self.term)
                return _parse(result)
            except Exception as exc:  # pylint: disable=W0703
                raise ESP32Exception("set", command, result.decode(), str(exc))
    def set_watchdog(self):
        """
        Set the watchdog polling command

        Delegates to set("watchdog_reset", 1); presumably this must be
        called periodically to keep the hardware watchdog from firing --
        confirm with the firmware documentation.

        returns: an "OK" string in case of success.
        """

        return self.set("watchdog_reset", 1)
    def get(self, name):
        """
        Get command wrapper

        arguments:
        - name           the parameter name as a string

        returns: the requested value as a string.

        Raises ESP32Exception if the write fails or the reply cannot be
        parsed.
        """

        print("ESP32Serial-DEBUG: get %s" % name)

        with self.lock:
            command = 'get ' + name + '\r\n'
            self._write(command)
            result = b""
            try:
                # read the ESP reply up to the configured line terminator
                result = self.connection.read_until(terminator=self.term)
                return _parse(result)
            except Exception as exc:  # pylint: disable=W0703
                raise ESP32Exception("get", command, result.decode(), str(exc))
    def get_all(self):
        """
        Get the observables as listed in the get_all_fields internal
        object.

        returns: a dict with member keys as written above and values as
        strings.

        Raises ESP32Exception on communication failure or when the
        number of returned values does not match get_all_fields.
        """

        print("ESP32Serial-DEBUG: get all")

        with self.lock:
            self._write("get all\r\n")
            result = b""
            try:
                result = self.connection.read_until(terminator=self.term)
                values = _parse(result).split(',')
                if len(values) != len(self.get_all_fields):
                    # raised inside the try on purpose: the handler below
                    # re-wraps it into an ESP32Exception
                    raise Exception("get_all answer mismatch: expected: %s, got %s" % (
                        self.get_all_fields, values))
                return dict(zip(self.get_all_fields, values))
            except Exception as exc:  # pylint: disable=W0703
                raise ESP32Exception("get", "get all", result.decode(), str(exc))
    def get_alarms(self):
        """
        Get the alarms from the ESP32

        returns: a ESP32Alarm instance describing the possible alarms.

        Raises ESP32Exception on communication failure, ValueError if
        the reply is not an integer.
        """

        return ESP32Alarm(int(self.get("alarm")))
    def get_warnings(self):
        """
        Get the warnings from the ESP32

        returns: a ESP32Warning instance describing the possible warnings.

        Raises ESP32Exception on communication failure, ValueError if
        the reply is not an integer.
        """

        return ESP32Warning(int(self.get("warning")))
    def reset_alarms(self):
        """
        Reset all the raised alarms in ESP32

        Delegates to set("alarm", 0).

        returns: an "OK" string in case of success.
        """

        return self.set("alarm", 0)
    def reset_warnings(self):
        """
        Reset all the raised warnings in ESP32

        Delegates to set("warning", 0).

        returns: an "OK" string in case of success.
        """

        return self.set("warning", 0)
    def raise_gui_alarm(self):
        """
        Raises the GUI alarm in ESP32 by setting the "alarm" variable
        to 1.  (This method takes no arguments; an earlier docstring
        incorrectly documented an alarm_type parameter.)

        returns: an "OK" string in case of success.
        """

        return self.set("alarm", 1)
    def snooze_hw_alarm(self, alarm_type):
        """
        Function to snooze the corresponding alarm in ESP32

        arguments:
        - alarm_type     an integer representing the alarm type. One and
                         only one: it must be a power of two (single set
                         bit); any other value raises KeyError.

        returns: an "OK" string in case of success.
        """

        # yes, the ESP sends alarms as binary-coded struct, but the snooze
        # happens by means of the exponent
        bitmap = {1 << x: x for x in range(32)}  # power-of-two -> bit position
        pos = bitmap[alarm_type]
        return self.set("alarm_snooze", pos)
    def snooze_gui_alarm(self):
        """
        Function to snooze the GUI alarm in ESP32

        returns: an "OK" string in case of success.
        """

        # NOTE(review): 29 appears to be the fixed bit position of the GUI
        # alarm in the firmware's alarm bitmap -- confirm with firmware docs.
        return self.set("alarm_snooze", 29)
    def venturi_calibration(self):
        """
        Generator function to retrieve data for spirometer calibration.

        returns a helper class instance whose .data() generator yields
        the calibration samples.

        NOTE(review): the helper acquires self.lock in its constructor
        and only releases it in its __del__, so the caller must drop the
        helper (let it go out of scope) before using any other method of
        this class -- this relies on prompt refcount-based destruction.
        """

        class VenturiRetriever():
            """
            Helper class to wrap all the complexity and problems raising
            from the protocol used to retrieve the Venturi Calibration
            data.
            """

            def __init__(self, esp32):
                """
                Constructor

                arguments:
                - esp32: an instance of ESP32Serial
                """

                self._esp32 = esp32
                # ask the firmware to flush its pipe before the scan
                self._esp32.set("flush_pipe", 1)

                # from this point, the class effectively OWNS the
                # connection...
                self._esp32.lock.acquire()
                self._previous_timeout = self._esp32.connection.timeout
                self._esp32.connection.timeout = 2
                self._esp32.connection.write("get venturi_scan\r\n".encode())

            def data(self):
                """
                This function is a generator. It yields data as they come
                out and returns when the work is finished.

                Use it like:
                ```
                for data in data():
                    #work on a chunk of data
                ```

                yields a list of (3) floats:
                1. measure index (percentage)
                2. raw measured flow (spirometer)
                3. pressure variation (Sinsirion)
                """

                while True:
                    bresult = self._esp32.connection.read_until(
                        terminator=self._esp32.term)
                    result = bresult.decode().strip()
                    # an empty line means read_until hit the timeout
                    if result == '':
                        raise ESP32Exception("get", "get venturi_scan", "timeout")
                    elif result == 'valore=OK':
                        # the firmware signals the end of the scan
                        return
                    yield [float(datum) for datum in result.split(',')]

            def __del__(self):
                """
                Destructor

                this puts the connection back in normal operation
                """

                # read any possibly remaining data.
                # For example if the generator has not been called till
                # the end of the procedure.
                while self._esp32.connection.read():
                    pass

                # restore the timeout to the previously using value
                self._esp32.connection.timeout = self._previous_timeout
                self._esp32.lock.release()
                # ...and from here it finally releases its ownership
                self._esp32.set("flush_pipe", 0)

        return VenturiRetriever(self)
def leakage_test(self):
"""
Generator function to retrieve data for leakage test.
returns a helper class instance.
"""
class LeakTestRetriever():
"""
Helper class to wrap all the complexity and problems raising
from the protocol used to retrieve the leakage test data.
"""
def __init__(self, esp32):
"""
Constructor
arguments:
- esp32: an istance of ESP32Serial
"""
self._esp32 = esp32
# from this point, the class effectively OWNS the
# connection...
self._esp32.lock.acquire()
self._previous_timeout = self._esp32.connection.timeout
self._esp32.connection.timeout = 2
self._esp32.connection.write("get leakage_test\r\n".encode())
def data(self):
"""
This function is a generator. It yields data as they come
out and returns when the work is finished.
Use it like:
```
for data in data():
#work on a chunk of data
```
yields a list of (3) floats:
1. completed percentage
2. internal pressure
3. pressure at the patient mouth
"""
while True:
bresult = self._esp32.connection.read_until(
terminator=self._esp32.term)
result = bresult.decode().strip()
if result == '':
raise ESP32Exception("get", "get leakage_test", "timeout")
elif result == 'valore=OK':
return
yield [float(datum) for datum in result.split(',')]
def __del__(self):
"""
Destructor
this puts the connection back in normal operation
"""
# read any possibly remaining data.
# For example if the generator has not been called till
# the end of the procedure.
while self._esp32.connection.read():
pass
# restore the timeout to the previously using value
self._esp32.connection.timeout = self._previous_timeout
self._esp32.lock.release()
# ...and from here it finally releases its ownership
return | |
import random
from typing import Union, Sequence, List, Type, Tuple, Optional
import numpy as np
import torch
import torch.nn
import torch.fft as fft
import torch.nn.functional as F
import torchvision.transforms as VT
import torchvision.transforms.functional as VF
from torchvision.utils import save_image, make_grid
from .expression import Expression, ExpressionContext
from .parameters import (
Parameter, SequenceParameter, FrameTimeParameter, _add_transforms_parameters
)
Int = Union[int, Expression]
Float = Union[float, Expression]
Str = Union[str, Expression]
transformations = dict()
class TransformBase:
    """
    Base class of all image transformations.

    Concrete subclasses must set ``NAME`` and ``PARAMS``; defining a
    subclass automatically registers it in the module-level
    ``transformations`` dict under its ``NAME``.
    """

    NAME = None        # registry key; mandatory in subclasses
    IS_RANDOM = False  # marker: transform has random behaviour
    IS_RESIZE = False  # marker: transform changes the resolution
    PARAMS = None      # parameter specification; mandatory in subclasses

    def __init_subclass__(cls, **kwargs):
        # every concrete subclass is registered under its NAME
        assert cls.NAME, f"Must specify {cls.__name__}.NAME"
        assert cls.PARAMS, f"Must specify {cls.__name__}.PARAMS"
        transformations[cls.NAME] = cls
        # TODO: would be nice to have start/end on each transform,
        #   but most transforms only have a single parameter so we'd
        #   like to support both notations, e.g.:
        #       resize: 224
        #   or
        #       resize:
        #         size: 224
        #         start: 10%
        #   which would add FrameTimeParameter entries "start"/"end"
        #   to every cls.PARAMS.

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        """Apply the transformation; must be implemented by subclasses."""
        raise NotImplementedError
class Blur(TransformBase):
    """
    A gaussian blur is applied to the pixels.
    See [torchvision gaussian_blur](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.functional.gaussian_blur).
    """
    NAME = "blur"
    PARAMS = {
        "kernel_size": SequenceParameter(
            int, length=2, default=[3, 3],
            doc="""
            The size of the pixel window. Must be an **odd**, **positive** integer.
            Two numbers define **width** and **height** separately.
            """
        ),
        "sigma": SequenceParameter(
            float, length=2, null=True, default=None,
            doc="""
            Gaussian kernel standard deviation. The larger, the more *blurry*.
            If not specified it will default to `0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8`.
            Two numbers define sigma for **x** and **y** separately.
            """
        ),
    }

    def __init__(self, kernel_size: List[Int], sigma: List[float]):
        super().__init__()
        self.kernel_size = kernel_size
        self.sigma = sigma

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        # bump even kernel sizes to the next odd value and clamp to >= 1
        kernel_size = []
        for k in context(self.kernel_size):
            if k % 2 == 0:
                k = k + 1
            kernel_size.append(max(1, k))

        sigma = self.sigma
        if sigma is not None:
            # keep sigma strictly positive
            sigma = [max(0.0001, s) for s in context(sigma)]

        return VF.gaussian_blur(image, kernel_size, sigma)
class Resize(TransformBase):
    """
    The resolution of the image is changed.
    """
    NAME = "resize"
    IS_RESIZE = True
    PARAMS = {
        "size": SequenceParameter(
            int, length=2, default=None,
            doc="""
            One integer for square images, two numbers to specify **width** and **height**.
            """
        ),
    }

    def __init__(self, size: List[Int]):
        super().__init__()
        self.size = size

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        # our parameter is [width, height]; torchvision expects [height, width]
        width, height = context(self.size)
        return VF.resize(image, [height, width])
class CenterCrop(TransformBase):
    """
    Crops an image of the given resolution from the center.
    See [torchvision center_crop](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.functional.center_crop).
    """
    NAME = "center_crop"
    IS_RESIZE = True
    PARAMS = {
        "size": SequenceParameter(
            int, length=2, default=None,
            doc="""
            One integer for square images, two numbers to specify **width** and **height**.
            """
        ),
    }

    def __init__(self, size: List[Int]):
        super().__init__()
        self.size = size

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        size = context(self.size)
        # `size` is documented as [width, height] but torchvision's
        # center_crop expects [height, width] (cf. Resize, which swaps).
        # Previously the list was passed unswapped, so non-square crops
        # used the axes in the wrong order.
        return VF.center_crop(image, [size[1], size[0]])
class RandomCrop(TransformBase):
    """
    Crops a section of the specified resolution from a random position in the image.
    See [torchvision random_crop](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.functional.random_crop)
    """
    NAME = "random_crop"
    IS_RESIZE = True
    PARAMS = {
        "size": SequenceParameter(
            int, length=2, default=None,
            doc="""
            One integer for square images, two numbers to specify **width** and **height**.
            """
        ),
    }

    def __init__(self, size: List[Int]):
        super().__init__()
        self.size = size

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        size = context(self.size)
        # `size` is documented as [width, height] but torchvision's
        # RandomCrop expects (height, width) (cf. Resize, which swaps).
        # Previously the list was passed unswapped, so non-square crops
        # used the axes in the wrong order.
        return VT.RandomCrop(size=[size[1], size[0]])(image)
class Crop(TransformBase):
    """
    Crops a specified section from the image.
    """
    NAME = "crop"
    IS_RESIZE = True
    PARAMS = {
        "xywh": SequenceParameter(
            float, length=4, default=None,
            doc="""
            4 numbers: **x** and **y** of top-left corner followed by **width** and **height**.
            A number between 0 and 1 is considered a fraction of the full resolution.
            A number greater or equal to 1 is considered a pixel coordinate
            """
        ),
    }

    def __init__(self, xywh: List[Int]):
        super().__init__()
        self.xywh = xywh

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        full_width, full_height = image.shape[-1], image.shape[-2]
        x, y, w, h = context(self.xywh)
        # values below 1 are fractions of the full resolution
        x = x * full_width if x < 1 else x
        y = y * full_height if y < 1 else y
        w = w * full_width if w < 1 else w
        h = h * full_height if h < 1 else h
        return VF.crop(image, int(y), int(x), int(h), int(w))
class Repeat(TransformBase):
    """
    Repeats the image a number of times in the right and bottom direction.
    """
    NAME = "repeat"
    IS_RESIZE = True
    PARAMS = {
        "count": SequenceParameter(
            int, length=2, default=None,
            doc="""
            One integer to specify **x** and **y** at the same time,
            or two integers to specify them separately.
            """
        ),
    }

    def __init__(self, count: List[Int]):
        super().__init__()
        self.count = count

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        count_x, count_y = context(self.count)
        # tiling height and width in a single repeat() call is equivalent
        # to two chained single-axis repeats
        return image.repeat(1, count_y, count_x)
class Pad(TransformBase):
    """
    Pads the image with additional pixels at the borders.
    """
    NAME = "pad"
    IS_RESIZE = True
    PARAMS = {
        "size": SequenceParameter(
            int, length=2, default=None,
            doc="""
            The number of columns/rows to add.
            One integer to specify **x** and **y** at the same time,
            or two integers to specify them separately.
            E.g. `1, 2` would add 1 column left and one column right of
            the image and two rows on top and bottom respectively.
            """
        ),
        "color": SequenceParameter(
            float, length=3, default=[0., 0., 0.],
            doc="""
            The color of the pixels that are padded around the image.
            """
        ),
        "mode": Parameter(
            str, default="fill",
            doc="""
            The way the padded area is filled.

            - `fill`: fills everything with the `color` value
            - `edge`: repeats the edge pixels
            - `wrap`: repeats the image from the opposite edge
            """
        )
    }

    def __init__(self, size: List[Int], color: List[Float], mode: Str):
        super().__init__()
        self.size = size
        self.color = color
        self.mode = mode

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        mode = context(self.mode)
        # negative pad sizes are clamped to zero
        size = [max(0, s) for s in context(self.size)]
        color = torch.Tensor(context(self.color)).to(image.device)

        if mode == "fill":
            # solid-color columns/rows on each side
            if size[0]:
                column = color.reshape(3, 1, 1).repeat(1, image.shape[-2], size[0])
                image = torch.cat([column, image, column], dim=-1)
            if size[1]:
                row = color.reshape(3, 1, 1).repeat(1, size[1], image.shape[-1])
                image = torch.cat([row, image, row], dim=-2)

        elif mode == "edge":
            # replicate the outermost pixel rows/columns
            if size[0]:
                column_left = image[:, :, :1].repeat(1, 1, size[0])
                column_right = image[:, :, -1:].repeat(1, 1, size[0])
                image = torch.cat([column_left, image, column_right], dim=-1)
            if size[1]:
                row_top = image[:, :1, :].repeat(1, size[1], 1)
                row_bottom = image[:, -1:, :].repeat(1, size[1], 1)
                image = torch.cat([row_top, image, row_bottom], dim=-2)

        elif mode == "wrap":
            # borrow the pixels from the opposite edge
            if size[0]:
                column_left = image[:, :, -size[0]:]
                column_right = image[:, :, :size[0]]
                image = torch.cat([column_left, image, column_right], dim=-1)
            if size[1]:
                row_top = image[:, -size[1]:, :]
                row_bottom = image[:, :size[1], :]
                image = torch.cat([row_top, image, row_bottom], dim=-2)

        else:
            # previously an unknown mode silently returned the image
            # unchanged, hiding configuration typos
            raise ValueError(
                f"Unknown pad mode '{mode}', expected 'fill', 'edge' or 'wrap'")

        return image
class Border(TransformBase):
    """
    Draws a border on the edge of the image. The resolution is not changed.
    """
    NAME = "border"
    PARAMS = {
        "size": SequenceParameter(
            int, length=2, default=[1, 1],
            doc="""
            One integer two specify **width** and **height** at the same time,
            or two integers to specify them separately.
            """
        ),
        "color": SequenceParameter(
            float, length=3, default=[0., 0., 0.],
            doc="""
            The color of the border as float numbers in the range `[0, 1]`.
            Three numbers for **red**, **green** and **blue** or a single number
            to specify a gray-scale.
            """
        ),
    }

    def __init__(self, size: List[Int], color: List[Float]):
        super().__init__()
        self.size = size
        self.color = color

    def __call__(self, image: torch.Tensor, context: ExpressionContext) -> torch.Tensor:
        border_w, border_h = context(self.size)
        color = torch.Tensor(context(self.color)).to(image.device)
        image = image.clone()

        # paint the horizontal borders (top and bottom rows)
        rows = min(border_h, image.shape[1])
        row_block = color.reshape(3, 1, 1).repeat(1, rows, 1)
        image[:, :rows, :] = row_block
        image[:, -rows:, :] = row_block

        # paint the vertical borders (left and right columns)
        cols = min(border_w, image.shape[2])
        col_block = color.reshape(3, 1, 1).repeat(1, 1, cols)
        image[:, :, :cols] = col_block
        image[:, :, -cols:] = col_block

        return image
class Noise(TransformBase):
"""
Adds noise to the image.
The noise has a scalable normal distribution around zero.
"""
NAME = "noise"
IS_RANDOM = True
PARAMS = {
"std": SequenceParameter(
float, length=3, default=None,
doc="""
Specifies the standard deviation of the noise distribution.
One value or three values to specify | |
import sys
import os
from collections import OrderedDict
from ttfautohint._compat import (
ensure_binary, ensure_text, basestring, open, IntEnum,
)
# Default values for every user-facing option accepted by ttfautohint();
# validate_options() below fills in any key the caller omitted.
USER_OPTIONS = dict(
    in_file=None,
    in_buffer=None,
    out_file=None,
    control_file=None,
    control_buffer=None,
    reference_file=None,
    reference_buffer=None,
    reference_index=0,
    reference_name=None,
    hinting_range_min=8,
    hinting_range_max=50,
    hinting_limit=200,
    hint_composites=False,
    adjust_subglyphs=False,
    increase_x_height=14,
    x_height_snapping_exceptions="",
    windows_compatibility=False,
    default_script="latn",
    fallback_script="none",
    fallback_scaling=False,
    symbol=False,
    fallback_stem_width=0,
    ignore_restrictions=False,
    family_suffix=None,
    detailed_info=False,
    no_info=False,
    TTFA_info=False,
    dehint=False,
    epoch=None,
    debug=False,
    verbose=False,
)

# Stem width rendering modes; start=-1 maps the members onto the integer
# values used by the underlying library.
StemWidthMode = IntEnum("StemWidthMode",
                        [
                            "NATURAL",  # -1
                            "QUANTIZED",  # 0
                            "STRONG",  # 1
                        ],
                        start=-1)

# Per-rasterizer stem width mode defaults (grayscale, GDI ClearType,
# DW ClearType); insertion order matters for stem_width_mode() parsing.
STEM_WIDTH_MODE_OPTIONS = OrderedDict([
    ("gray_stem_width_mode", StemWidthMode.QUANTIZED),
    ("gdi_cleartype_stem_width_mode", StemWidthMode.STRONG),
    ("dw_cleartype_stem_width_mode", StemWidthMode.QUANTIZED),
])

USER_OPTIONS.update(STEM_WIDTH_MODE_OPTIONS)

# Deprecated; use stem width mode options
STRONG_STEM_WIDTH_OPTIONS = dict(
    gdi_cleartype_strong_stem_width=True,
    gray_strong_stem_width=False,
    dw_cleartype_strong_stem_width=False,
)

# Keys used internally by the C-binding layer; valid in format_varargs()
# but never accepted from the caller (validate_options rejects them).
PRIVATE_OPTIONS = frozenset([
    "in_buffer_len",
    "control_buffer_len",
    "reference_buffer_len",
    "out_buffer",
    "out_buffer_len",
    "error_string",
    "alloc_func",
    "free_func",
    "info_callback",
    "info_post_callback",
    "info_callback_data",
    "progress_callback",
    "progress_callback_data",
    "error_callback",
    "error_callback_data",
])

ALL_OPTIONS = frozenset(USER_OPTIONS) | PRIVATE_OPTIONS

# used when the control file does not have a name on the filesystem
CONTROL_NAME_FALLBACK = u"<control-instructions>"
def validate_options(kwargs):
    """Validate the keyword arguments accepted by the ttfautohint function.

    Unknown keys raise TypeError; mutually exclusive pairs raise
    ValueError.  File arguments (in_file, control_file, reference_file)
    are read into the corresponding *_buffer entries, and the matching
    *_len and *_name entries are filled in.  String options are coerced
    to the binary/text types the underlying library expects.

    Returns the normalized options dict.
    """
    opts = {k: kwargs.pop(k, USER_OPTIONS[k]) for k in USER_OPTIONS}
    if kwargs:
        raise TypeError(
            "unknown keyword argument%s: %s" % (
                "s" if len(kwargs) > 1 else "",
                ", ".join(repr(k) for k in kwargs)))

    if opts["no_info"] and opts["detailed_info"]:
        raise ValueError("no_info and detailed_info are mutually exclusive")

    in_file, in_buffer = opts.pop("in_file"), opts.pop("in_buffer")
    if in_file is None and in_buffer is None:
        raise ValueError("No input file or buffer provided")
    elif in_file is not None and in_buffer is not None:
        raise ValueError("in_file and in_buffer are mutually exclusive")
    if in_file is not None:
        try:
            in_buffer = in_file.read()
        except AttributeError:
            with open(in_file, "rb") as f:
                in_buffer = f.read()
    if not isinstance(in_buffer, bytes):
        raise TypeError("in_buffer type must be bytes, not %s"
                        % type(in_buffer).__name__)
    opts['in_buffer'] = in_buffer
    opts['in_buffer_len'] = len(in_buffer)

    control_file = opts.pop('control_file')
    control_buffer = opts.pop('control_buffer')
    if control_file is not None:
        if control_buffer is not None:
            raise ValueError(
                "control_file and control_buffer are mutually exclusive")
        try:
            control_buffer = control_file.read()
        except AttributeError:
            # control_file is a filesystem path, not a file object:
            # use the path itself as the control name
            with open(control_file, "rt", encoding="utf-8") as f:
                control_buffer = f.read()
            opts["control_name"] = control_file
        else:
            # file object: use its .name attribute, if it has one.
            # NOTE: this try/except/else structure mirrors the
            # reference_file handling below; previously the `else` was
            # attached to the `if`, so the file object itself was stored
            # as control_name and this branch was dead code.
            try:
                opts["control_name"] = control_file.name
            except AttributeError:
                pass

    if control_buffer is not None:
        opts['control_buffer'] = ensure_binary(control_buffer, "utf-8")
        # measure the *encoded* bytes: for non-ASCII control files the
        # str length differs from the UTF-8 byte length
        opts['control_buffer_len'] = len(opts['control_buffer'])

    if "control_name" in opts:
        opts["control_name"] = ensure_text(
            opts["control_name"], encoding=sys.getfilesystemencoding())
    else:
        opts["control_name"] = CONTROL_NAME_FALLBACK

    reference_file = opts.pop('reference_file')
    reference_buffer = opts.pop('reference_buffer')
    if reference_file is not None:
        if reference_buffer is not None:
            raise ValueError(
                "reference_file and reference_buffer are mutually exclusive")
        try:
            reference_buffer = reference_file.read()
        except AttributeError:
            with open(reference_file, "rb") as f:
                reference_buffer = f.read()
            if opts["reference_name"] is None:
                opts["reference_name"] = reference_file
        else:
            if opts["reference_name"] is None:
                try:
                    opts["reference_name"] = reference_file.name
                except AttributeError:
                    pass

    if reference_buffer is not None:
        if not isinstance(reference_buffer, bytes):
            raise TypeError("reference_buffer type must be bytes, not %s"
                            % type(reference_buffer).__name__)
        opts['reference_buffer'] = reference_buffer
        opts['reference_buffer_len'] = len(reference_buffer)

    if opts["reference_name"] is not None:
        opts["reference_name"] = ensure_binary(
            opts["reference_name"], encoding=sys.getfilesystemencoding())

    # these are passed to the C API as NUL-terminated byte strings
    for key in ('default_script', 'fallback_script',
                'x_height_snapping_exceptions'):
        opts[key] = ensure_binary(opts[key])

    if opts['epoch'] is not None:
        from ctypes import c_ulonglong
        opts['epoch'] = c_ulonglong(opts['epoch'])

    if opts["family_suffix"] is not None:
        opts["family_suffix"] = ensure_text(opts["family_suffix"])

    for mode_option in STEM_WIDTH_MODE_OPTIONS:
        # raises ValueError if integer value is not a valid stem width mode
        opts[mode_option] = StemWidthMode(opts[mode_option])

    return opts
def format_varargs(**options):
    """Build the (format_string, values) pair for the C varargs call.

    Only known, non-None options are included, sorted by key; each key
    is converted to its dashed command-line spelling in the format
    string, with the values returned as a parallel tuple.
    """
    items = sorted(
        (key, value) for key, value in options.items()
        if key in ALL_OPTIONS and value is not None
    )
    keys = (ensure_binary(key.replace("_", "-")) for key, _ in items)
    format_string = b", ".join(keys)
    values = tuple(value for _, value in items)
    return format_string, values
def strong_stem_width(s):
    """Parse the deprecated -w/--strong-stem-width option string.

    `s` may contain up to three of the letters 'g', 'G' and 'D'; each
    present letter selects STRONG mode for the matching stem width
    option, every absent one gets QUANTIZED.

    Raises argparse.ArgumentTypeError on any other input.
    """
    if len(s) > 3:
        import argparse
        raise argparse.ArgumentTypeError(
            "string can only contain up to 3 letters")

    valid = {
        "g": "gray_stem_width_mode",
        "G": "gdi_cleartype_stem_width_mode",
        "D": "dw_cleartype_stem_width_mode"}

    chars = set(s)
    invalid = chars - set(valid)
    if invalid:
        import argparse
        raise argparse.ArgumentTypeError(
            "invalid value: %s" % ", ".join(
                repr(v) for v in sorted(invalid)))

    return {
        opt_name: (StemWidthMode.STRONG if char in chars
                   else StemWidthMode.QUANTIZED)
        for char, opt_name in valid.items()
    }
def stem_width_mode(s):
    """Parse the -a/--stem-width-mode option string.

    `s` must be exactly three letters, one per stem width option in
    STEM_WIDTH_MODE_OPTIONS order (grayscale, GDI ClearType,
    DW ClearType); each letter is the lowercased first letter of a
    StemWidthMode member (n/q/s).

    Raises argparse.ArgumentTypeError on malformed input.
    """
    if len(s) != 3:
        import argparse
        raise argparse.ArgumentTypeError(
            "Stem width mode string must consist of exactly three letters")

    modes = {name[0].lower(): mode
             for name, mode in StemWidthMode.__members__.items()}

    result = {}
    for letter, option in zip(s, STEM_WIDTH_MODE_OPTIONS):
        if letter not in modes:
            import argparse
            letters = sorted(repr(k) for k in modes)
            raise argparse.ArgumentTypeError(
                "Stem width mode letter for %s must be %s, or %s"
                % (option, ", ".join(letters[:-1]), letters[-1]))
        result[option] = modes[letter]
    return result
def stdin_or_input_path_type(s):
    """argparse type callable mapping "-" to a binary stdin stream.

    Any other string is returned unchanged and treated as a path.
    Returns None when stdin is interactive or has no usable fileno.
    """
    # the special argument "-" means sys.stdin
    if s == "-":
        try:
            if sys.stdin.isatty():  # ignore if interactive
                return None
            return open(sys.stdin.fileno(), mode="rb", closefd=False)
        except (AttributeError, IOError):
            # if stdin was redirected (e.g. inside pytest), fileno may raise
            # io.UnsupportedOperation
            return None
    return s
def stdout_or_output_path_type(s):
    """argparse type callable mapping "-" to a binary stdout stream.

    Any other string is returned unchanged and treated as a path.
    Returns None when stdout is interactive or has no usable fileno.
    """
    if s != "-":
        return s
    # the special argument "-" means sys.stdout
    try:
        if sys.stdout.isatty():  # ignore if interactive
            return None
        return open(sys.stdout.fileno(), mode="wb", closefd=False)
    except (AttributeError, IOError):
        # if stdout was redirected (e.g. inside pytest), fileno may raise
        # io.UnsupportedOperation
        return None
def parse_args(args=None):
"""Parse command line arguments and return a dictionary of options
for ttfautohint.ttfautohint function.
`args` can be either None, a list of strings, or a single string,
that is split into individual options with `shlex.split`.
When `args` is None, the console's default sys.argv are used, and any
SystemExit exceptions raised by argparse are propagated.
If args is a string list or a string, it is assumed that the function
was not called from a console script's `main` entry point, but from
other client code, and thus the SystemExit exceptions are muted and
a `None` value is returned.
"""
import argparse
from ttfautohint import __version__, libttfautohint
from ttfautohint.cli import USAGE, DESCRIPTION, EPILOG
version_string = "ttfautohint-py %s (libttfautohint %s)" % (
__version__, libttfautohint.version_string)
if args is None:
capture_sys_exit = False
else:
capture_sys_exit = True
if isinstance(args, basestring):
import shlex
args = shlex.split(args)
parser = argparse.ArgumentParser(
prog="ttfautohint",
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"in_file", nargs="?", metavar="IN-FILE", default="-",
type=stdin_or_input_path_type,
help="input file (default: standard input)")
parser.add_argument(
"out_file", nargs="?", metavar="OUT-FILE", default="-",
type=stdout_or_output_path_type,
help="output file (default: standard output)")
parser.add_argument(
"--debug", action="store_true", help="print debugging information")
stem_width_group = parser.add_mutually_exclusive_group(required=False)
stem_width_group.add_argument(
"-a", "--stem-width-mode", type=stem_width_mode, metavar="S",
default=STEM_WIDTH_MODE_OPTIONS,
help=("select stem width mode for grayscale, GDI ClearType, and DW "
"ClearType, where S is a string of three letters with possible "
"values 'n' for natural, 'q' for quantized, and 's' for strong "
"(default: qsq)"))
stem_width_group.add_argument( # deprecated
"-w", "--strong-stem-width", type=strong_stem_width, metavar="S",
help=argparse.SUPPRESS)
parser.add_argument(
"-c", "--composites", dest="hint_composites", action="store_true",
help="hint glyph composites also")
parser.add_argument(
"-d", "--dehint", action="store_true", help="remove all hints")
parser.add_argument(
"-D", "--default-script", metavar="SCRIPT",
default=USER_OPTIONS["default_script"],
help="set default OpenType script (default: %(default)s)")
parser.add_argument(
"-f", "--fallback-script", metavar="SCRIPT",
default=USER_OPTIONS["fallback_script"],
help="set fallback script (default: %(default)s)")
parser.add_argument(
"-F", "--family-suffix", metavar="SUFFIX",
help="append SUFFIX to the family name string(s) in the `name' table")
parser.add_argument(
"-G", "--hinting-limit", type=int, metavar="PPEM",
default=USER_OPTIONS["hinting_limit"],
help=("switch off hinting above this PPEM value (default: "
"%(default)s); value 0 means no limit"))
parser.add_argument(
"-H", "--fallback-stem-width", type=int, metavar="UNITS",
default=USER_OPTIONS["fallback_stem_width"],
help=("set fallback stem width (default: %(default)s font units at "
"2048 UPEM)"))
parser.add_argument(
"-i", "--ignore-restrictions", action="store_true",
help="override font license restrictions")
parser.add_argument(
"-I", "--detailed-info", action="store_true",
help=("add detailed ttfautohint info to the version string(s) in "
"the `name' table"))
parser.add_argument(
"-l", "--hinting-range-min", type=int, metavar="PPEM",
default=USER_OPTIONS["hinting_range_min"],
help="the minimum PPEM value for hint sets (default: %(default)s)")
parser.add_argument(
"-m", "--control-file", metavar="FILE",
help="get control instructions from FILE")
parser.add_argument(
"-n", "--no-info", action="store_true",
help=("don't add ttfautohint info to the version string(s) in the "
"`name' table"))
parser.add_argument(
"-p", "--adjust-subglyphs", action="store_true",
help="handle subglyph adjustments in exotic fonts")
parser.add_argument(
"-r", "--hinting-range-max", type=int, metavar="PPEM",
default=USER_OPTIONS["hinting_range_max"],
help="the maximum PPEM value for hint sets (default: %(default)s)")
parser.add_argument(
"-R", "--reference", dest="reference_file", metavar="FILE",
help="derive blue zones from reference font FILE")
parser.add_argument(
"-s", "--symbol", action="store_true",
help="input is symbol font")
parser.add_argument(
"-S", "--fallback-scaling", action="store_true",
help="use fallback scaling, not hinting")
parser.add_argument(
"-t", "--ttfa-table", action="store_true", dest="TTFA_info",
help="add TTFA information table")
parser.add_argument(
"-T", "--ttfa-info", dest="show_TTFA_info", action="store_true",
help="display TTFA table in IN-FILE and exit")
parser.add_argument(
"-v", "--verbose", action="store_true",
help="show progress information")
parser.add_argument(
"-V", "--version", action="version",
version=version_string,
help="print version information and exit")
parser.add_argument(
"-W", "--windows-compatibility", action="store_true",
help=("add blue zones for `usWinAscent' and `usWinDescent' to avoid "
"clipping"))
parser.add_argument(
"-x", "--increase-x-height", type=int, metavar="PPEM",
default=USER_OPTIONS["increase_x_height"],
help=("increase x height for sizes in the range 6<=PPEM<=N; value "
"0 switches off this feature (default: %(default)s)"))
parser.add_argument(
"-X", "--x-height-snapping-exceptions", metavar="STRING",
default=USER_OPTIONS["x_height_snapping_exceptions"],
help=('specify a comma-separated list of x-height snapping exceptions'
', for example "-9, 13-17, 19" (default: "%(default)s")'))
parser.add_argument(
"-Z", "--reference-index", type=int, metavar="NUMBER",
default=USER_OPTIONS["reference_index"],
help="face | |
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import os
from . import path
# Default MuJoCo viewer camera for the arm environments: track body 0 from
# 1 m away at zero elevation, looking at the origin.  Applied attribute by
# attribute in each env's viewer_setup().
DEFAULT_CAMERA_CONFIG = {
    'trackbodyid': 0,
    'distance': 1.0,
    'lookat': np.array((0.0, 0.0, 0)),
    'elevation': 0,
}
def sin(t, omega=1.5, phi=0.):
    """Sinusoid sin(omega * t + phi); drives the moving target position."""
    phase = omega * t + phi
    return np.sin(phase)
def cos(t, omega=1.5, phi=0.):
    """Cosinusoid cos(omega * t + phi); companion to sin() above."""
    phase = omega * t + phi
    return np.cos(phase)
class VA(mujoco_env.MujocoEnv, utils.EzPickle):
    """Two-joint (shoulder/elbow) vertical planar arm tracking a moving target.

    The target site oscillates sinusoidally along a line tilted -25 degrees
    in the x/z plane.  Reward is a weighted negative fingertip-to-target
    distance plus a weighted negative squared-torque control cost.
    """

    def __init__(self,xml_file='vertical_arm.xml',
                 distance_reward_weight=5.0,
                 ctrl_cost_weight=0.05
                 ):
        """Load the MuJoCo model and store reward weights.

        Args:
            xml_file: model XML name, resolved against the package-level `path`.
            distance_reward_weight: weight on the (negative) distance term.
            ctrl_cost_weight: weight on the (negative) control-cost term.
        """
        print(xml_file)
        #utils.EzPickle.__init__(self)
        # EzPickle records the constructor args; **locals() forwards them
        # (including `self`) so the env can be pickled and rebuilt.
        utils.EzPickle.__init__(**locals())
        self.joint_list = ['shoulder', 'elbow']
        self.real_time=0.01  # simulated seconds per single sim step
        self.frame_skip=2
        self.t=0  # accumulated simulated time; drives the target motion
        self.target_pos = np.asarray([0, 0, 0])
        self.distance_reward_weight= distance_reward_weight
        self.ctrl_cost_weight= ctrl_cost_weight
        global path
        mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
        '''if path is not None:
            mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
        else:
            mujoco_env.MujocoEnv.__init__(self, xml_file, self.frame_skip)'''

    def step(self, a):
        """Apply torques `a`, advance the sim and the target, and return
        (observation, reward, done, info)."""
        #print(a)
        states_angle = []
        for j in self.joint_list:
            states_angle.append(self.sim.data.get_joint_qpos(j))
        # reward uses the target position from BEFORE this step's sim advance
        vec = self.get_body_com("fingertip")- self.target_pos
        reward_dist = - np.linalg.norm(vec)
        reward_ctrl = - np.square(a).sum()
        reward = self.distance_reward_weight*reward_dist +self.ctrl_cost_weight*reward_ctrl
        self.do_simulation(a, self.frame_skip)
        self.t += self.frame_skip * self.real_time
        # Offset the target site along a -25 deg tilted direction with a
        # 0.15-amplitude sinusoid; site_xpos is recomputed by MuJoCo each
        # forward pass, so the offset is re-applied every step.
        self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + [-0.15*sin(self.t,phi=0)*np.sin(-25* np.pi / 180.), 0,
                                                                   0.15*sin(self.t,phi=0)*np.cos(-25* np.pi / 180.)+0.01]#,phi=1
        self.target_pos = self.sim.data.site_xpos[0]
        ob = self._get_obs()
        next_states_angle = []
        for j in self.joint_list:
            next_states_angle.append(self.sim.data.get_joint_qpos(j))
        # "energy" proxy: sum over joints of |torque| * |angle change|
        energy = 0
        for i in range(len(self.joint_list)):
            delta_theta = np.abs(next_states_angle[i] - states_angle[i])
            energy = energy + np.abs(a[i]) * delta_theta
        done = False  # the env never terminates from within step()
        info = {
            'energy': energy,
            'reward_dist': self.distance_reward_weight*reward_dist,
            'reward_ctrl': self.ctrl_cost_weight*reward_ctrl,
            'ori_reward': reward
        }
        return ob, reward, done, info#,reward_ctrl=reward_ctrl

    def viewer_setup(self):
        """Apply DEFAULT_CAMERA_CONFIG to the viewer camera."""
        for key, value in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                # array-valued settings (e.g. lookat) must be copied in place
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)

    def reset_model(self):
        """Reset time and state with small uniform noise; return the first
        observation."""
        self.t=0
        #self.data.site_xpos[0] = [1, 1, 1] -.15 0.01 -.1
        qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
        qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
        self.set_state(qpos, qvel)
        self.target_pos = self.data.site_xpos[0]
        return self._get_obs()

    def _get_obs(self):
        """Observation: cos/sin of the 2 joint angles, any extra qpos,
        the 2 joint velocities, and the fingertip-to-target x/z offsets."""
        theta = self.sim.data.qpos.flat[:2]
        return np.concatenate([
            np.cos(theta),
            np.sin(theta),
            self.sim.data.qpos.flat[2:],
            self.sim.data.qvel.flat[:2],
            [self.get_body_com("fingertip")[0] - self.target_pos[0]],
            [self.get_body_com("fingertip")[2] - self.target_pos[2]]
        ]).ravel()
class VA4dof(mujoco_env.MujocoEnv, utils.EzPickle):
    """Four-joint vertical planar arm tracking a moving target.

    Same task as VA but with redundant shoulder/elbow joints; the target
    site oscillates sinusoidally along a -25 degree tilted line in x/z.
    """

    def __init__(self,xml_file='vertical_arm4dof.xml',
                 distance_reward_weight=5.0,
                 ctrl_cost_weight=0.05
                 ):
        """Load the MuJoCo model and store reward weights.

        Args:
            xml_file: model XML name, resolved against the package-level `path`.
            distance_reward_weight: weight on the (negative) distance term.
            ctrl_cost_weight: weight on the (negative) control-cost term.
        """
        print(xml_file)
        #utils.EzPickle.__init__(self)
        # EzPickle records the constructor args; **locals() forwards them.
        utils.EzPickle.__init__(**locals())
        self.joint_list = ['shoulder','shoulder2', 'elbow', 'elbow2']
        self.real_time=0.01  # simulated seconds per single sim step
        self.frame_skip=2
        self.t=0  # accumulated simulated time; drives the target motion
        self.target_pos = np.asarray([0, 0, 0])
        self.distance_reward_weight= distance_reward_weight
        self.ctrl_cost_weight= ctrl_cost_weight
        global path
        mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
        '''if path is not None:
            mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
        else:
            mujoco_env.MujocoEnv.__init__(self, xml_file, self.frame_skip)'''

    def step(self, a):
        """Apply torques `a`, advance the sim and the target, and return
        (observation, reward, done, info)."""
        #print(a)
        states_angle = []
        for j in self.joint_list:
            states_angle.append(self.sim.data.get_joint_qpos(j))
        # reward uses the target position from BEFORE this step's sim advance
        vec = self.get_body_com("fingertip")- self.target_pos
        reward_dist = - np.linalg.norm(vec)
        reward_ctrl =- np.square(a).sum()
        reward = self.distance_reward_weight*reward_dist +self.ctrl_cost_weight*reward_ctrl
        self.do_simulation(a, self.frame_skip)
        self.t += self.frame_skip * self.real_time
        # Re-apply the sinusoidal target offset (site_xpos is recomputed by
        # MuJoCo each forward pass).
        self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + [-0.15*sin(self.t,phi=0)*np.sin(-25* np.pi / 180.), 0,
                                                                   0.15*sin(self.t,phi=0)*np.cos(-25* np.pi / 180.)+0.01]#,phi=1
        self.target_pos = self.sim.data.site_xpos[0]
        ob = self._get_obs()
        next_states_angle = []
        for j in self.joint_list:
            next_states_angle.append(self.sim.data.get_joint_qpos(j))
        # "energy" proxy: sum over joints of |torque| * |angle change|
        energy = 0
        for i in range(len(self.joint_list)):
            delta_theta = np.abs(next_states_angle[i] - states_angle[i])
            energy = energy + np.abs(a[i]) * delta_theta
        done = False  # the env never terminates from within step()
        info = {
            'energy': energy,
            'reward_dist': self.distance_reward_weight*reward_dist,
            'reward_ctrl': self.ctrl_cost_weight*reward_ctrl,
            'ori_reward': reward
        }
        return ob, reward, done, info#,reward_ctrl=reward_ctrl

    def viewer_setup(self):
        """Apply DEFAULT_CAMERA_CONFIG to the viewer camera."""
        for key, value in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                # array-valued settings (e.g. lookat) must be copied in place
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)

    def reset_model(self):
        """Reset time and state with small uniform noise; return the first
        observation."""
        self.t=0
        #self.data.site_xpos[0] = [1, 1, 1] -.15 0.01 -.1
        qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
        qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
        self.set_state(qpos, qvel)
        self.target_pos = self.data.site_xpos[0]
        return self._get_obs()

    def _get_obs(self):
        """Observation: cos/sin of the 4 joint angles, any extra qpos,
        the 4 joint velocities, and the fingertip-to-target x/z offsets."""
        theta = self.sim.data.qpos.flat[:4]
        return np.concatenate([
            np.cos(theta),
            np.sin(theta),
            self.sim.data.qpos.flat[4:],
            self.sim.data.qvel.flat[:4],
            [self.get_body_com("fingertip")[0] - self.target_pos[0]],
            [self.get_body_com("fingertip")[2] - self.target_pos[2]]
        ]).ravel()
class VA6dof(mujoco_env.MujocoEnv, utils.EzPickle):
    """Six-joint vertical planar arm tracking a moving target.

    Same task as VA with additional redundant joints; the target site
    oscillates sinusoidally along a -25 degree tilted line in x/z.
    """

    def __init__(self,xml_file='vertical_arm6dof.xml',
                 distance_reward_weight=5.0,
                 ctrl_cost_weight=0.05
                 ):
        """Load the MuJoCo model and store reward weights.

        Args:
            xml_file: model XML name, resolved against the package-level `path`.
            distance_reward_weight: weight on the (negative) distance term.
            ctrl_cost_weight: weight on the (negative) control-cost term.
        """
        print(xml_file)
        #utils.EzPickle.__init__(self)
        # EzPickle records the constructor args; **locals() forwards them.
        utils.EzPickle.__init__(**locals())
        self.joint_list = ['shoulder','shoulder2', 'elbow', 'elbow2','elbow3', 'elbow4']
        self.real_time=0.01  # simulated seconds per single sim step
        self.frame_skip=2
        self.t=0  # accumulated simulated time; drives the target motion
        self.target_pos = np.asarray([0, 0, 0])
        self.distance_reward_weight= distance_reward_weight
        self.ctrl_cost_weight= ctrl_cost_weight
        global path
        mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
        '''if path is not None:
            mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
        else:
            mujoco_env.MujocoEnv.__init__(self, xml_file, self.frame_skip)'''

    def step(self, a):
        """Apply torques `a`, advance the sim and the target, and return
        (observation, reward, done, info)."""
        #print(a)
        states_angle = []
        for j in self.joint_list:
            states_angle.append(self.sim.data.get_joint_qpos(j))
        # reward uses the target position from BEFORE this step's sim advance
        vec = self.get_body_com("fingertip")- self.target_pos
        reward_dist = - np.linalg.norm(vec)
        reward_ctrl =- np.square(a).sum()
        reward = self.distance_reward_weight*reward_dist +self.ctrl_cost_weight*reward_ctrl
        self.do_simulation(a, self.frame_skip)
        self.t += self.frame_skip * self.real_time
        # Re-apply the sinusoidal target offset (site_xpos is recomputed by
        # MuJoCo each forward pass).
        self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + [-0.15*sin(self.t,phi=0)*np.sin(-25* np.pi / 180.), 0,
                                                                   0.15*sin(self.t,phi=0)*np.cos(-25* np.pi / 180.)+0.01]#,phi=1
        self.target_pos = self.sim.data.site_xpos[0]
        ob = self._get_obs()
        next_states_angle = []
        for j in self.joint_list:
            next_states_angle.append(self.sim.data.get_joint_qpos(j))
        # "energy" proxy: sum over joints of |torque| * |angle change|
        energy = 0
        for i in range(len(self.joint_list)):
            delta_theta = np.abs(next_states_angle[i] - states_angle[i])
            energy = energy + np.abs(a[i]) * delta_theta
        done = False  # the env never terminates from within step()
        info = {
            'energy': energy,
            'reward_dist': self.distance_reward_weight*reward_dist,
            'reward_ctrl': self.ctrl_cost_weight*reward_ctrl,
            'ori_reward': reward
        }
        return ob, reward, done, info#,reward_ctrl=reward_ctrl

    def viewer_setup(self):
        """Apply DEFAULT_CAMERA_CONFIG to the viewer camera."""
        for key, value in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                # array-valued settings (e.g. lookat) must be copied in place
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)

    def reset_model(self):
        """Reset time and state with small uniform noise; return the first
        observation."""
        self.t=0
        #self.data.site_xpos[0] = [1, 1, 1] -.15 0.01 -.1
        qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
        qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
        self.set_state(qpos, qvel)
        self.target_pos = self.data.site_xpos[0]
        return self._get_obs()

    def _get_obs(self):
        """Observation: cos/sin of the 6 joint angles, any extra qpos,
        the 6 joint velocities, and the fingertip-to-target x/z offsets."""
        theta = self.sim.data.qpos.flat[:6]
        return np.concatenate([
            np.cos(theta),
            np.sin(theta),
            self.sim.data.qpos.flat[6:],
            self.sim.data.qvel.flat[:6],
            [self.get_body_com("fingertip")[0] - self.target_pos[0]],
            [self.get_body_com("fingertip")[2] - self.target_pos[2]]
        ]).ravel()
class VA8dof(mujoco_env.MujocoEnv, utils.EzPickle):
    """Eight-joint vertical planar arm tracking a moving target.

    Same task as VA with four shoulder and four elbow joints; the target
    site oscillates sinusoidally along a -25 degree tilted line in x/z.
    """

    def __init__(self,xml_file='vertical_arm8dof.xml',
                 distance_reward_weight=5.0,
                 ctrl_cost_weight=0.05
                 ):
        """Load the MuJoCo model and store reward weights.

        Args:
            xml_file: model XML name, resolved against the package-level `path`.
            distance_reward_weight: weight on the (negative) distance term.
            ctrl_cost_weight: weight on the (negative) control-cost term.
        """
        print(xml_file)
        #utils.EzPickle.__init__(self)
        # EzPickle records the constructor args; **locals() forwards them.
        utils.EzPickle.__init__(**locals())
        self.joint_list = ['shoulder','shoulder2', 'shoulder3','shoulder4','elbow', 'elbow2','elbow3', 'elbow4']
        self.real_time=0.01  # simulated seconds per single sim step
        self.frame_skip=2
        self.t=0  # accumulated simulated time; drives the target motion
        self.target_pos = np.asarray([0, 0, 0])
        self.distance_reward_weight= distance_reward_weight
        self.ctrl_cost_weight= ctrl_cost_weight
        global path
        mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
        '''if path is not None:
            mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
        else:
            mujoco_env.MujocoEnv.__init__(self, xml_file, self.frame_skip)'''

    def step(self, a):
        """Apply torques `a`, advance the sim and the target, and return
        (observation, reward, done, info)."""
        #print(a)
        states_angle = []
        for j in self.joint_list:
            states_angle.append(self.sim.data.get_joint_qpos(j))
        # reward uses the target position from BEFORE this step's sim advance
        vec = self.get_body_com("fingertip")- self.target_pos
        reward_dist = - np.linalg.norm(vec)
        reward_ctrl =- np.square(a).sum()
        reward = self.distance_reward_weight*reward_dist +self.ctrl_cost_weight*reward_ctrl
        self.do_simulation(a, self.frame_skip)
        self.t += self.frame_skip * self.real_time
        # Re-apply the sinusoidal target offset (site_xpos is recomputed by
        # MuJoCo each forward pass).
        self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + [-0.15*sin(self.t,phi=0)*np.sin(-25* np.pi / 180.), 0,
                                                                   0.15*sin(self.t,phi=0)*np.cos(-25* np.pi / 180.)+0.01]#,phi=1
        self.target_pos = self.sim.data.site_xpos[0]
        ob = self._get_obs()
        next_states_angle = []
        for j in self.joint_list:
            next_states_angle.append(self.sim.data.get_joint_qpos(j))
        # "energy" proxy: sum over joints of |torque| * |angle change|
        energy = 0
        for i in range(len(self.joint_list)):
            delta_theta = np.abs(next_states_angle[i] - states_angle[i])
            energy = energy + np.abs(a[i]) * delta_theta
        done = False  # the env never terminates from within step()
        info = {
            'energy': energy,
            'reward_dist': self.distance_reward_weight*reward_dist,
            'reward_ctrl': self.ctrl_cost_weight*reward_ctrl,
            'ori_reward': reward
        }
        return ob, reward, done, info#,reward_ctrl=reward_ctrl

    def viewer_setup(self):
        """Apply DEFAULT_CAMERA_CONFIG to the viewer camera."""
        for key, value in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                # array-valued settings (e.g. lookat) must be copied in place
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)

    def reset_model(self):
        """Reset time and state with small uniform noise; return the first
        observation."""
        self.t=0
        #self.data.site_xpos[0] = [1, 1, 1] -.15 0.01 -.1
        qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
        qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
        self.set_state(qpos, qvel)
        self.target_pos = self.data.site_xpos[0]
        return self._get_obs()

    def _get_obs(self):
        """Observation: cos/sin of the 8 joint angles, any extra qpos,
        the 8 joint velocities, and the fingertip-to-target x/z offsets."""
        theta = self.sim.data.qpos.flat[:8]
        return np.concatenate([
            np.cos(theta),
            np.sin(theta),
            self.sim.data.qpos.flat[8:],
            self.sim.data.qvel.flat[:8],
            [self.get_body_com("fingertip")[0] - self.target_pos[0]],
            [self.get_body_com("fingertip")[2] - self.target_pos[2]]
        ]).ravel()
# pcx=0.05#0.05#0.32#-0.6
# pcy=0#0.32#-0.2 #-0.2
class RealArm7dof(mujoco_env.MujocoEnv, utils.EzPickle):
    """7-DoF anthropomorphic arm whose fingertip tracks an elliptical orbit.

    The target site moves on an ellipse (0.22 x 0.18) centered at
    (pcx, pcy) in the x/y plane at frequency `f`.  Reward is a weighted
    negative fingertip-target distance minus separate shoulder and wrist
    torque penalties.
    """

    def __init__(self, xml_file='real_arm7dof.xml',distance_reward_weight=5,
                 shoulder_cost_weight=0,wrist_cost_weight=0,pcx=0.05,pcy=0):
        """Load the MuJoCo model and store reward weights.

        Args:
            xml_file: model XML name, resolved against the package-level `path`.
            distance_reward_weight: weight on the (negative) distance term.
            shoulder_cost_weight: weight penalizing shoulder torques.
            wrist_cost_weight: weight penalizing pronation/wrist torques.
            pcx, pcy: x/y center of the elliptical target orbit.
        """
        # utils.EzPickle.__init__(self)
        # EzPickle records the constructor args; **locals() forwards them.
        utils.EzPickle.__init__(**locals())
        self.joint_list = ['s_abduction','s_flexion', 's_rotation','e_flexion','e_pronation', 'w_abduction','w_flexion']
        self.pcx=pcx
        self.pcy=pcy
        self.real_time = 0.01  # simulated seconds per single sim step
        self.frame_skip = 2  # 2
        self.f=0.4  # target orbit frequency in Hz (of simulated time)
        self.t = 0  # accumulated simulated time; drives the target motion
        self.target_pos = np.asarray([0, 0, 0])
        self.distance_reward_weight=distance_reward_weight
        self.shoulder_cost_weight=shoulder_cost_weight
        self.wrist_cost_weight=wrist_cost_weight
        global path
        mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)

    def step(self, a):
        """Apply torques `a`, advance the sim and the target, and return
        (observation, reward, done, info)."""
        #a=0
        states_angle = []
        for j in self.joint_list:
            states_angle.append(self.sim.data.get_joint_qpos(j))
        # reward uses the target position from BEFORE this step's sim advance
        vec = self.get_body_com("fingertip") - self.target_pos
        total_torque = a
        reward_dist = - np.linalg.norm(vec)
        #reward_ctrl = - np.square(total_torque).sum()
        # NOTE(review): indices 0-2 are the shoulder joints and 4-6 the
        # pronation/wrist joints; index 3 (e_flexion) appears in neither
        # penalty -- confirm that elbow flexion is intentionally free.
        reward_shoulder = - np.square(total_torque[0:3]).sum()
        reward_wristrot = - np.square(total_torque[4::]).sum()
        reward = self.distance_reward_weight * reward_dist\
                 + self.shoulder_cost_weight*reward_shoulder\
                 + self.wrist_cost_weight*reward_wristrot
        self.do_simulation(total_torque, self.frame_skip)
        self.t += self.frame_skip * self.real_time
        # Re-apply the elliptical target offset (site_xpos is recomputed by
        # MuJoCo each forward pass): ellipse of radii 0.22/0.18 centered at
        # (pcx, pcy), raised 0.1 in z.
        self.sim.data.site_xpos[0] = self.sim.data.site_xpos[0] + \
                                     [self.pcx + (-0.22 * np.sin(self.t * np.pi * 2 * self.f)),self.pcy
                                      + (-0.18 * np.cos(self.t * np.pi * 2 * self.f)),0.1]
        self.target_pos = self.sim.data.site_xpos[0]
        ob = self._get_obs()
        next_states_angle = []
        for j in self.joint_list:
            next_states_angle.append(self.sim.data.get_joint_qpos(j))
        # "energy" proxy: sum over joints of |torque| * |angle change|
        energy = 0
        for i in range(len(self.joint_list)):
            delta_theta = np.abs(next_states_angle[i] - states_angle[i])
            energy = energy + np.abs(a[i]) * delta_theta
        done = False  # the env never terminates from within step()
        info = {
            'energy': energy,
            'reward_dist': self.distance_reward_weight * reward_dist,
            'penalty_shoulder': self.shoulder_cost_weight * reward_shoulder,
            'penalty_wrist': self.wrist_cost_weight * reward_wristrot,
            'ori_reward': reward
        }
        return ob, reward, done,info

    def viewer_setup(self):
        """Position the camera manually (this env does not use
        DEFAULT_CAMERA_CONFIG)."""
        self.viewer.cam.trackbodyid = 0  # id of the body to track ()
        self.viewer.cam.distance = self.model.stat.extent * 2.0  # how much you "zoom in", model.stat.extent is the max limits of the arena
        self.viewer.cam.lookat[0] += 0  # x,y,z offset from the object (works if trackbodyid=-1)
        self.viewer.cam.lookat[1] += 0
        self.viewer.cam.lookat[2] += 0.5
        self.viewer.cam.elevation = -20  # camera rotation around the axis in the plane going through the frame origin (if 0 you just see a line)
        self.viewer.cam.azimuth = 90  # camera rotation around the camera's vertical axis

    def reset_model(self):
        """Reset time and state with small uniform noise; return the first
        observation.

        NOTE(review): this mutates self.init_qpos in place (elbow flexion
        forced to ~-pi/2), so the change persists across resets -- confirm
        intended.
        """
        self.t = 0
        #self.init_qpos[1] = -3.142 / 2
        self.init_qpos[3] = -3.142 / 2
        qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
        qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
        self.set_state(qpos, qvel)
        self.target_pos = self.data.site_xpos[0]
        return self._get_obs()

    def _get_obs(self):
        """Observation: cos/sin of the 7 joint angles, any extra qpos,
        the 7 joint velocities, and the fingertip-to-target x/y/z offsets."""
        theta = self.sim.data.qpos.flat[:7]
        return np.concatenate([
            np.cos(theta),
            np.sin(theta),
            self.sim.data.qpos.flat[7:],
            self.sim.data.qvel.flat[:7],
            [self.get_body_com("fingertip")[0] - self.target_pos[0]],
            [self.get_body_com("fingertip")[1] - self.target_pos[1]],
            [self.get_body_com("fingertip")[2] - self.target_pos[2]]
        ])
class RealArm6dof(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self, xml_file='real_arm6dof.xml',distance_reward_weight=5,
shoulder_cost_weight=0,wrist_cost_weight=0,pcx=0.05,pcy=0):
# utils.EzPickle.__init__(self)
utils.EzPickle.__init__(**locals())
self.joint_list = ['s_abduction','s_flexion', 's_rotation','e_flexion', 'w_abduction','w_flexion']
self.pcx = pcx
self.pcy = pcy
self.real_time = 0.01
self.frame_skip = 2 # 2
self.f=0.4
self.t = 0
self.target_pos = np.asarray([0, 0, 0])
self.distance_reward_weight=distance_reward_weight
self.shoulder_cost_weight=shoulder_cost_weight
self.wrist_cost_weight=wrist_cost_weight
global path
mujoco_env.MujocoEnv.__init__(self, os.path.join(path, xml_file), self.frame_skip)
def step(self, a):
#a=0
states_angle = []
for j in self.joint_list:
states_angle.append(self.sim.data.get_joint_qpos(j))
vec = self.get_body_com("fingertip") - self.target_pos
total_torque = | |
#!/usr/bin/env python
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import copy, os, pdb, random, shutil, subprocess, time
import h5py
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
import seaborn as sns
from sklearn import preprocessing
import tensorflow as tf
import basenji
'''
basenji_motifs.py
Collect statistics and make plots to explore the first convolution layer
of the given model using the given sequences.
'''
weblogo_opts = '-X NO -Y NO --errorbars NO --fineprint ""'
weblogo_opts += ' -C "#CB2026" A A'
weblogo_opts += ' -C "#34459C" C C'
weblogo_opts += ' -C "#FBB116" G G'
weblogo_opts += ' -C "#0C8040" T T'
################################################################################
# main
################################################################################
def main():
  """Explore the first convolution layer of a trained Basenji model.

  Positional args: <params_file> <model_file> <data_file> (HDF5 test data).
  Restores the model, extracts first-layer filter weights and activations
  on the test sequences, then writes per-filter heatmaps, logos, a MEME
  motif file, tomtom annotations, and a summary table to the output dir.
  """
  usage = 'usage: %prog [options] <params_file> <model_file> <data_file>'
  parser = OptionParser(usage)
  parser.add_option(
      '-a',
      dest='act_t',
      default=0.5,
      type='float',
      help=
      'Activation threshold (as proportion of max) to consider for PWM [Default: %default]'
  )
  parser.add_option(
      '-d',
      dest='model_hdf5_file',
      default=None,
      help='Pre-computed model output as HDF5.')
  parser.add_option('-o', dest='out_dir', default='.')
  parser.add_option(
      '-m',
      dest='meme_db',
      default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'],
      help='MEME database used to annotate motifs')
  parser.add_option(
      '-p',
      dest='plot_heats',
      default=False,
      action='store_true',
      help=
      'Plot heat maps describing filter activations in the test sequences [Default: %default]'
  )
  parser.add_option(
      '-s',
      dest='sample',
      default=None,
      type='int',
      help='Sample sequences from the test set [Default:%default]')
  parser.add_option(
      '-t',
      dest='trim_filters',
      default=False,
      action='store_true',
      help='Trim uninformative positions off the filter ends [Default: %default]'
  )
  (options, args) = parser.parse_args()

  if len(args) != 3:
    parser.error(
        'Must provide Basenji parameters and model files and test data in HDF5'
        ' format.'
    )
  else:
    params_file = args[0]
    model_file = args[1]
    data_file = args[2]

  if not os.path.isdir(options.out_dir):
    os.mkdir(options.out_dir)

  #################################################################
  # load data
  # NOTE(review): data_open is never closed; consider h5py.File(data_file,
  # 'r') in a with-block.
  data_open = h5py.File(data_file)
  test_seqs1 = data_open['test_in']
  test_targets = data_open['test_out']

  try:
    target_names = list(data_open['target_labels'])
  except KeyError:
    # data file carries no labels; synthesize t0, t1, ...
    target_names = ['t%d' % ti for ti in range(test_targets.shape[1])]

  if options.sample is not None:
    # choose sampled indexes
    sample_i = sorted(random.sample(range(test_seqs1.shape[0]), options.sample))

    # filter
    test_seqs1 = test_seqs1[sample_i]
    test_targets = test_targets[sample_i]

  # convert to letters
  test_seqs = basenji.dna_io.hot1_dna(test_seqs1)

  #################################################################
  # model parameters and placeholders
  job = basenji.dna_io.read_job_params(params_file)

  job['seq_length'] = test_seqs1.shape[1]
  job['seq_depth'] = test_seqs1.shape[2]
  job['num_targets'] = test_targets.shape[2]
  job['target_pool'] = int(np.array(data_open.get('pool_width', 1)))

  t0 = time.time()
  dr = basenji.seqnn.SeqNN()
  dr.build(job)
  print('Model building time %ds' % (time.time() - t0))

  # adjust for fourier
  job['fourier'] = 'train_out_imag' in data_open
  if job['fourier']:
    test_targets_imag = data_open['test_out_imag']
    # NOTE(review): no --valid option is defined on this parser, so this
    # branch would raise AttributeError if reached -- confirm intended.
    if options.valid:
      test_targets_imag = data_open['valid_out_imag']

  #################################################################
  # predict

  # initialize batcher
  if job['fourier']:
    batcher_test = basenji.batcher.BatcherF(
        test_seqs1,
        test_targets,
        test_targets_imag,
        batch_size=dr.batch_size,
        pool_width=job['target_pool'])
  else:
    batcher_test = basenji.batcher.Batcher(
        test_seqs1,
        test_targets,
        batch_size=dr.batch_size,
        pool_width=job['target_pool'])

  # initialize saver
  saver = tf.train.Saver()

  with tf.Session() as sess:
    # load variables into session
    saver.restore(sess, model_file)

    # get weights
    filter_weights = sess.run(dr.filter_weights[0])
    # reorder to (filters, depth, positions)
    filter_weights = np.transpose(np.squeeze(filter_weights), [2, 1, 0])
    print(filter_weights.shape)

    # test
    t0 = time.time()
    layer_filter_outs, _ = dr.hidden(sess, batcher_test, layers=[0])
    filter_outs = layer_filter_outs[0]
    print(filter_outs.shape)

  # store useful variables
  num_filters = filter_weights.shape[0]
  filter_size = filter_weights.shape[2]

  #################################################################
  # individual filter plots
  #################################################################
  # also save information contents
  filters_ic = []
  meme_out = meme_intro('%s/filters_meme.txt' % options.out_dir, test_seqs)

  for f in range(num_filters):
    print('Filter %d' % f)

    # plot filter parameters as a heatmap
    plot_filter_heat(filter_weights[f, :, :],
                     '%s/filter%d_heat.pdf' % (options.out_dir, f))

    # write possum motif file
    filter_possum(filter_weights[f, :, :], 'filter%d' % f,
                  '%s/filter%d_possum.txt' % (options.out_dir,
                                              f), options.trim_filters)

    # plot weblogo of high scoring outputs
    plot_filter_logo(
        filter_outs[:, :, f],
        filter_size,
        test_seqs,
        '%s/filter%d_logo' % (options.out_dir, f),
        maxpct_t=options.act_t)

    # make a PWM for the filter
    filter_pwm, nsites = make_filter_pwm('%s/filter%d_logo.fa' %
                                         (options.out_dir, f))

    if nsites < 10:
      # no information
      filters_ic.append(0)
    else:
      # compute and save information content
      filters_ic.append(info_content(filter_pwm))

      # add to the meme motif file
      meme_add(meme_out, f, filter_pwm, nsites, options.trim_filters)

  meme_out.close()

  #################################################################
  # annotate filters
  #################################################################
  # run tomtom
  subprocess.call(
      'tomtom -dist pearson -thresh 0.1 -oc %s/tomtom %s/filters_meme.txt %s' %
      (options.out_dir, options.out_dir, options.meme_db),
      shell=True)

  # read in annotations
  filter_names = name_filters(
      num_filters, '%s/tomtom/tomtom.txt' % options.out_dir, options.meme_db)

  #################################################################
  # print a table of information
  #################################################################
  table_out = open('%s/table.txt' % options.out_dir, 'w')

  # print header for later panda reading
  header_cols = ('', 'consensus', 'annotation', 'ic', 'mean', 'std')
  print('%3s %19s %10s %5s %6s %6s' % header_cols, file=table_out)

  for f in range(num_filters):
    # collapse to a consensus motif
    consensus = filter_motif(filter_weights[f, :, :])

    # grab annotation
    annotation = '.'
    name_pieces = filter_names[f].split('_')
    if len(name_pieces) > 1:
      annotation = name_pieces[1]

    # plot density of filter output scores
    fmean, fstd = plot_score_density(
        np.ravel(filter_outs[:, :, f]),
        '%s/filter%d_dens.pdf' % (options.out_dir, f))

    row_cols = (f, consensus, annotation, filters_ic[f], fmean, fstd)
    print('%-3d %19s %10s %5.2f %6.4f %6.4f' % row_cols, file=table_out)

  table_out.close()

  #################################################################
  # global filter plots
  #################################################################
  if options.plot_heats:
    # plot filter-sequence heatmap
    plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pdf' % options.out_dir)

    # plot filter-segment heatmap
    plot_filter_seg_heat(filter_outs, '%s/filter_segs.pdf' % options.out_dir)
    plot_filter_seg_heat(
        filter_outs, '%s/filter_segs_raw.pdf' % options.out_dir, whiten=False)

    # plot filter-target correlation heatmap
    # NOTE(review): seq_targets is not defined anywhere in this function,
    # so these calls would raise NameError (probably meant test_targets)
    # -- confirm.
    plot_target_corr(filter_outs, seq_targets, filter_names, target_names,
                     '%s/filter_target_cors_mean.pdf' % options.out_dir, 'mean')

    plot_target_corr(filter_outs, seq_targets, filter_names, target_names,
                     '%s/filter_target_cors_max.pdf' % options.out_dir, 'max')
def get_motif_proteins(meme_db_file):
  """Hash motif_id's to protein names using the MEME DB file.

  Args:
    meme_db_file (str): path to a MEME-format motif database.

  Returns:
    dict: motif id -> protein name, parsed from 'MOTIF <id> <name>' lines
    (a parenthesized name '(<protein>)' is unwrapped).
  """
  motif_protein = {}
  # use a context manager so the DB file handle is closed promptly
  # (the original left it open until garbage collection)
  with open(meme_db_file) as meme_db_open:
    for line in meme_db_open:
      a = line.split()
      if len(a) > 0 and a[0] == 'MOTIF':
        if a[2][0] == '(':
          motif_protein[a[1]] = a[2][1:a[2].find(')')]
        else:
          motif_protein[a[1]] = a[2]
  return motif_protein
def info_content(pwm, transpose=False, bg_gc=0.415):
  """ Compute PWM information content.

  In the original analysis, I used a bg_gc=0.5. For any
  future analysis, I ought to switch to the true hg19
  value of 0.415.
  """
  eps = 1e-9  # pseudocount inside log2 to avoid log(0)

  if transpose:
    pwm = np.transpose(pwm)

  # background distribution over A, C, G, T at the given GC content
  background = [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc]

  total = 0
  for row in pwm:
    for j, bg in enumerate(background):
      total += -bg * np.log2(bg) + row[j] * np.log2(eps + row[j])
  return total
def make_filter_pwm(filter_fasta):
  """ Make a PWM for this filter from its top hits

  Args:
    filter_fasta (str): FASTA file of the filter's high-scoring subsequences
      (all sequences are assumed to share the length of the first one).

  Returns:
    (pwm, nsites): position weight matrix as an array of per-position ACGT
    frequency rows (with +1 Laplace pseudocounts), and the number of real
    (non-pseudocount) sites counted.
  """
  nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}

  pwm_counts = []
  nsites = 4  # pseudocounts
  # use a context manager so the FASTA handle is closed promptly
  # (the original left it open until garbage collection)
  with open(filter_fasta) as filter_fasta_open:
    for line in filter_fasta_open:
      if line[0] != '>':
        seq = line.rstrip()
        nsites += 1
        if len(pwm_counts) == 0:
          # initialize with the length
          for i in range(len(seq)):
            pwm_counts.append(np.array([1.0] * 4))

        # count
        for i in range(len(seq)):
          try:
            pwm_counts[i][nts[seq[i]]] += 1
          except KeyError:
            # ambiguous nucleotide: spread the count evenly over ACGT
            pwm_counts[i] += np.array([0.25] * 4)

  # normalize
  pwm_freqs = []
  for i in range(len(pwm_counts)):
    pwm_freqs.append([pwm_counts[i][j] / float(nsites) for j in range(4)])

  return np.array(pwm_freqs), nsites - 4
def meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False):
  """ Print a filter to the growing MEME file

  Attrs:
      meme_out : open file
      f (int) : filter index #
      filter_pwm (array) : filter PWM array
      nsites (int) : number of filter sites
      trim_filters (bool) : drop low-information prefix/suffix positions
  """
  width = filter_pwm.shape[0]

  if trim_filters:
    min_ic = 0.2  # information-content threshold for keeping a position

    # advance past uninformative prefix positions
    ic_start = 0
    while ic_start < width and info_content(filter_pwm[ic_start:ic_start + 1]) < min_ic:
      ic_start += 1

    # retreat past uninformative suffix positions
    ic_end = width - 1
    while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1]) < min_ic:
      ic_end -= 1
  else:
    ic_start, ic_end = 0, width - 1

  # emit only filters with at least two informative positions
  if ic_start < ic_end:
    print('MOTIF filter%d' % f, file=meme_out)
    print(
        'letter-probability matrix: alength= 4 w= %d nsites= %d' %
        (ic_end - ic_start + 1, nsites),
        file=meme_out)

    for i in range(ic_start, ic_end + 1):
      print('%.4f %.4f %.4f %.4f' % tuple(filter_pwm[i]), file=meme_out)
    print('', file=meme_out)
def meme_intro(meme_file, seqs):
""" Open MEME motif format file and print intro
Attrs:
meme_file (str) : filename
seqs [str] : list of strings for obtaining background freqs
Returns:
mem_out : open MEME file
"""
nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
# count
nt_counts = [1] * 4
for i in range(len(seqs)):
for nt in seqs[i]:
try:
nt_counts[nts[nt]] += 1
except KeyError:
pass
# normalize
nt_sum = float(sum(nt_counts))
nt_freqs = [nt_counts[i] / nt_sum for i in range(4)]
# open file for writing
meme_out = open(meme_file, 'w')
# print intro material
print('MEME version 4', file=meme_out)
print('', file=meme_out)
print('ALPHABET= ACGT', | |
"Species_1"
ON "Animal_3".species = "Species_1".uuid
WHERE (
"Animal_1".name LIKE '%%' || %(wanted)s || '%%'
) AND (
"Animal_4".parent IS NOT NULL OR
"Animal_2".uuid IS NULL
) AND (
"Species_1".uuid IS NOT NULL OR
"Animal_3".parent IS NULL
)
"""
check_test_data(
self,
test_data,
expected_match,
expected_gremlin,
expected_mssql,
expected_cypher,
expected_postgresql,
)
    def test_optional_traversal_and_optional_without_traversal(self) -> None:
        """Compile a query mixing a traversed optional edge with a non-traversed one.

        Verifies the expected MATCH (UNIONALL over the optional-edge variants),
        gremlin, MSSQL, and PostgreSQL outputs; the cypher backend is skipped.
        """
        test_data = test_input_data.optional_traversal_and_optional_without_traversal()

        expected_match = """
            SELECT EXPAND($result)
            LET
            $optional__0 = (
                SELECT
                    Animal___1.name AS `animal_name`,
                    if(
                        eval("(Animal__in_Animal_ParentOf___1 IS NOT null)"),
                        Animal__in_Animal_ParentOf___1.name,
                        null
                    ) AS `child_name`
                FROM (
                    MATCH {{
                        class: Animal,
                        where: ((
                            (name LIKE ('%' + ({wanted} + '%')))
                            AND
                            (
                                (out_Animal_ParentOf IS null)
                                OR
                                (out_Animal_ParentOf.size() = 0)
                            )
                        )),
                        as: Animal___1
                    }}.in('Animal_ParentOf') {{
                        optional: true,
                        as: Animal__in_Animal_ParentOf___1
                    }}
                    RETURN $matches
                )
                WHERE (
                    (
                        (Animal___1.in_Animal_ParentOf IS null)
                        OR
                        (Animal___1.in_Animal_ParentOf.size() = 0)
                    )
                    OR
                    (Animal__in_Animal_ParentOf___1 IS NOT null)
                )
            ),
            $optional__1 = (
                SELECT
                    Animal___1.name AS `animal_name`,
                    if(
                        eval("(Animal__in_Animal_ParentOf___1 IS NOT null)"),
                        Animal__in_Animal_ParentOf___1.name,
                        null
                    ) AS `child_name`,
                    Animal__out_Animal_ParentOf___1.name AS `parent_name`,
                    Animal__out_Animal_ParentOf__out_Animal_OfSpecies___1.name
                        AS `parent_species`
                FROM (
                    MATCH {{
                        class: Animal,
                        where: ((name LIKE ('%' + ({wanted} + '%')))),
                        as: Animal___1
                    }}.in('Animal_ParentOf') {{
                        optional: true,
                        as: Animal__in_Animal_ParentOf___1
                    }} ,
                    {{
                        class: Animal,
                        as: Animal___1
                    }}.out('Animal_ParentOf') {{
                        as: Animal__out_Animal_ParentOf___1
                    }}.out('Animal_OfSpecies') {{
                        as: Animal__out_Animal_ParentOf__out_Animal_OfSpecies___1
                    }}
                    RETURN $matches
                )
                WHERE (
                    (
                        (Animal___1.in_Animal_ParentOf IS null)
                        OR
                        (Animal___1.in_Animal_ParentOf.size() = 0)
                    )
                    OR
                    (Animal__in_Animal_ParentOf___1 IS NOT null)
                )
            ),
            $result = UNIONALL($optional__0, $optional__1)
        """
        expected_gremlin = """
            g.V('@class', 'Animal')
            .filter{it, m -> it.name.contains($wanted)}
            .as('Animal___1')
            .ifThenElse{it.in_Animal_ParentOf == null}{null}{it.in('Animal_ParentOf')}
            .as('Animal__in_Animal_ParentOf___1')
            .optional('Animal___1')
            .as('Animal___2')
            .ifThenElse{it.out_Animal_ParentOf == null}{null}{it.out('Animal_ParentOf')}
            .as('Animal__out_Animal_ParentOf___1')
            .ifThenElse{it == null}{null}{it.out('Animal_OfSpecies')}
            .as('Animal__out_Animal_ParentOf__out_Animal_OfSpecies___1')
            .back('Animal__out_Animal_ParentOf___1')
            .optional('Animal___2')
            .as('Animal___3')
            .transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
                animal_name: m.Animal___1.name,
                child_name: (
                    (m.Animal__in_Animal_ParentOf___1 != null) ?
                    m.Animal__in_Animal_ParentOf___1.name : null
                ),
                parent_name: (
                    (m.Animal__out_Animal_ParentOf___1 != null) ?
                    m.Animal__out_Animal_ParentOf___1.name : null
                ),
                parent_species: (
                    (m.Animal__out_Animal_ParentOf__out_Animal_OfSpecies___1 != null) ?
                    m.Animal__out_Animal_ParentOf__out_Animal_OfSpecies___1.name : null
                )
            ])}
        """
        expected_mssql = """
            SELECT
                [Animal_1].name AS animal_name,
                [Animal_2].name AS child_name,
                [Animal_3].name AS parent_name,
                [Species_1].name AS parent_species
            FROM
                db_1.schema_1.[Animal] AS [Animal_1]
                LEFT OUTER JOIN db_1.schema_1.[Animal] AS [Animal_2]
                    ON [Animal_1].parent = [Animal_2].uuid
                LEFT OUTER JOIN db_1.schema_1.[Animal] AS [Animal_3]
                    ON [Animal_1].uuid = [Animal_3].parent
                LEFT OUTER JOIN db_1.schema_1.[Species] AS [Species_1]
                    ON [Animal_3].species = [Species_1].uuid
            WHERE (
                [Animal_1].name LIKE '%' + :wanted + '%'
            ) AND (
                [Species_1].uuid IS NOT NULL OR
                [Animal_3].parent IS NULL
            )
        """
        expected_cypher = SKIP_TEST
        expected_postgresql = """
            SELECT
                "Animal_1".name AS animal_name,
                "Animal_2".name AS child_name,
                "Animal_3".name AS parent_name,
                "Species_1".name AS parent_species
            FROM
                schema_1."Animal" AS "Animal_1"
                LEFT OUTER JOIN schema_1."Animal" AS "Animal_2"
                    ON "Animal_1".parent = "Animal_2".uuid
                LEFT OUTER JOIN schema_1."Animal" AS "Animal_3"
                    ON "Animal_1".uuid = "Animal_3".parent
                LEFT OUTER JOIN schema_1."Species" AS "Species_1"
                    ON "Animal_3".species = "Species_1".uuid
            WHERE (
                "Animal_1".name LIKE '%%' || %(wanted)s || '%%'
            ) AND (
                "Species_1".uuid IS NOT NULL OR
                "Animal_3".parent IS NULL
            )
        """

        check_test_data(
            self,
            test_data,
            expected_match,
            expected_gremlin,
            expected_mssql,
            expected_cypher,
            expected_postgresql,
        )
    def test_coercion_on_interface_within_optional_traversal(self) -> None:
        """Compile a type coercion on an interface inside an optional traversal.

        Verifies the expected MATCH and gremlin outputs; both SQL backends are
        expected to raise NotImplementedError and the cypher backend is skipped.
        """
        test_data = test_input_data.coercion_on_interface_within_optional_traversal()

        expected_match = """
            SELECT EXPAND($result)
            LET
            $optional__0 = (
                SELECT
                    Animal___1.name AS `animal_name`
                FROM (
                    MATCH {{
                        class: Animal,
                        where: ((
                            (in_Animal_ParentOf IS null)
                            OR
                            (in_Animal_ParentOf.size() = 0)
                        )),
                        as: Animal___1
                    }}
                    RETURN $matches
                )
            ),
            $optional__1 = (
                SELECT
                    Animal___1.name AS `animal_name`,
                    Animal__in_Animal_ParentOf__out_Entity_Related__out_Animal_OfSpecies___1.name
                        AS `related_animal_species`
                FROM (
                    MATCH {{
                        class: Animal,
                        as: Animal___1
                    }}.in('Animal_ParentOf') {{
                        class: Animal,
                        as: Animal__in_Animal_ParentOf___1
                    }}.out('Entity_Related') {{
                        class: Animal,
                        as: Animal__in_Animal_ParentOf__out_Entity_Related___1
                    }}.out('Animal_OfSpecies') {{
                        class: Species,
                        as: Animal__in_Animal_ParentOf__out_Entity_Related
                            __out_Animal_OfSpecies___1
                    }}
                    RETURN $matches
                )
            ),
            $result = UNIONALL($optional__0, $optional__1)
        """
        expected_gremlin = """
            g.V('@class', 'Animal')
            .as('Animal___1')
            .ifThenElse{it.in_Animal_ParentOf == null}{null}{it.in('Animal_ParentOf')}
            .as('Animal__in_Animal_ParentOf___1')
            .ifThenElse{it == null}{null}{it.out('Entity_Related')}
            .filter{it, m -> ((it == null) || ['Animal'].contains(it['@class']))}
            .as('Animal__in_Animal_ParentOf__out_Entity_Related___1')
            .ifThenElse{it == null}{null}{it.out('Animal_OfSpecies')}
            .as('Animal__in_Animal_ParentOf__out_Entity_Related
                __out_Animal_OfSpecies___1')
            .back('Animal__in_Animal_ParentOf__out_Entity_Related___1')
            .back('Animal__in_Animal_ParentOf___1')
            .optional('Animal___1')
            .as('Animal___2')
            .transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
                animal_name: m.Animal___1.name,
                related_animal_species: (
                    (m.Animal__in_Animal_ParentOf__out_Entity_Related
                        __out_Animal_OfSpecies___1 != null) ?
                    m.Animal__in_Animal_ParentOf__out_Entity_Related
                        __out_Animal_OfSpecies___1.name : null
                )
            ])}
        """
        expected_sql = NotImplementedError
        expected_cypher = SKIP_TEST

        check_test_data(
            self,
            test_data,
            expected_match,
            expected_gremlin,
            expected_sql,
            expected_cypher,
            expected_sql,
        )
    def test_filter_on_optional_traversal_equality(self) -> None:
        """Compile an equality filter that references a field from an optional traversal.

        Verifies the expected MATCH and gremlin outputs; both SQL backends are
        expected to raise NotImplementedError and the cypher backend is skipped.
        """
        test_data = test_input_data.filter_on_optional_traversal_equality()

        expected_match = """
            SELECT EXPAND($result)
            LET
            $optional__0 = (
                SELECT
                    Animal___1.name AS `animal_name`
                FROM (
                    MATCH {{
                        where: ((@this INSTANCEOF 'Animal')),
                        as: Animal___1
                    }}.out('Animal_ParentOf') {{
                        class: Animal,
                        where: ((
                            (out_Animal_ParentOf IS null)
                            OR
                            (out_Animal_ParentOf.size() = 0)
                        )),
                        as: Animal__out_Animal_ParentOf___1
                    }} ,
                    {{
                        as: Animal___1
                    }}.out('Animal_FedAt') {{
                        as: Animal__out_Animal_FedAt___1
                    }}
                    RETURN $matches
                )
            ),
            $optional__1 = (
                SELECT
                    Animal___1.name AS `animal_name`
                FROM (
                    MATCH {{
                        class: Animal,
                        as: Animal___1
                    }}.out('Animal_ParentOf') {{
                        class: Animal,
                        as: Animal__out_Animal_ParentOf___1
                    }}.out('Animal_ParentOf') {{
                        class: Animal,
                        as: Animal__out_Animal_ParentOf__out_Animal_ParentOf___1
                    }}.out('Animal_FedAt') {{
                        class: FeedingEvent,
                        as: Animal__out_Animal_ParentOf__out_Animal_ParentOf
                            __out_Animal_FedAt___1
                    }} ,
                    {{
                        class: Animal,
                        as: Animal___1
                    }}.out('Animal_FedAt') {{
                        where: ((
                            ($matched.Animal__out_Animal_ParentOf
                                __out_Animal_ParentOf__out_Animal_FedAt___1 IS null)
                            OR
                            (name = $matched.Animal__out_Animal_ParentOf
                                __out_Animal_ParentOf__out_Animal_FedAt___1.name)
                        )),
                        as: Animal__out_Animal_FedAt___1
                    }}
                    RETURN $matches
                )
            ),
            $result = UNIONALL($optional__0, $optional__1)
        """
        expected_gremlin = """
            g.V('@class',
                'Animal')
            .as('Animal___1')
            .out('Animal_ParentOf')
            .as('Animal__out_Animal_ParentOf___1')
            .ifThenElse{it.out_Animal_ParentOf == null}{null}{it.out('Animal_ParentOf')}
            .as('Animal__out_Animal_ParentOf__out_Animal_ParentOf___1')
            .ifThenElse{it == null}{null}{it.out('Animal_FedAt')}
            .as('Animal__out_Animal_ParentOf__out_Animal_ParentOf
                __out_Animal_FedAt___1')
            .back('Animal__out_Animal_ParentOf__out_Animal_ParentOf___1')
            .optional('Animal__out_Animal_ParentOf___1')
            .as('Animal__out_Animal_ParentOf___2')
            .back('Animal___1')
            .out('Animal_FedAt')
            .filter{it, m -> (
                (m.Animal__out_Animal_ParentOf__out_Animal_ParentOf
                    __out_Animal_FedAt___1 == null)
                ||
                (it.name == m.Animal__out_Animal_ParentOf__out_Animal_ParentOf
                    __out_Animal_FedAt___1.name)
            )
            }
            .as('Animal__out_Animal_FedAt___1')
            .transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
                animal_name: m.Animal___1.name
            ])}
        """
        expected_sql = NotImplementedError
        expected_cypher = SKIP_TEST

        check_test_data(
            self,
            test_data,
            expected_match,
            expected_gremlin,
            expected_sql,
            expected_cypher,
            expected_sql,
        )
    def test_filter_on_optional_traversal_name_or_alias(self) -> None:
        """Compile a name-or-alias filter that references a field from an optional traversal.

        Verifies the expected MATCH and gremlin outputs; both SQL backends are
        expected to raise NotImplementedError and the cypher backend is skipped.
        """
        test_data = test_input_data.filter_on_optional_traversal_name_or_alias()

        expected_match = """
            SELECT EXPAND($result)
            LET
            $optional__0 = (
                SELECT
                    Animal__out_Animal_ParentOf___1.name AS `parent_name`
                FROM (
                    MATCH {{
                        class: Animal,
                        where: ((
                            (in_Animal_ParentOf IS null)
                            OR
                            (in_Animal_ParentOf.size() = 0)
                        )),
                        as: Animal___1
                    }} ,
                    {{
                        class: Animal,
                        as: Animal___1
                    }}.out('Animal_ParentOf') {{
                        as: Animal__out_Animal_ParentOf___1
                    }}
                    RETURN $matches
                )
            ),
            $optional__1 = (
                SELECT
                    Animal__out_Animal_ParentOf___1.name AS `parent_name`
                FROM (
                    MATCH {{
                        class: Animal,
                        as: Animal___1
                    }}.in('Animal_ParentOf') {{
                        class: Animal,
                        as: Animal__in_Animal_ParentOf___1
                    }}.in('Animal_ParentOf') {{
                        class: Animal,
                        as: Animal__in_Animal_ParentOf__in_Animal_ParentOf___1
                    }} ,
                    {{
                        class: Animal,
                        as: Animal___1
                    }}.out('Animal_ParentOf') {{
                        where: ((
                            ($matched.Animal__in_Animal_ParentOf
                                __in_Animal_ParentOf___1 IS null)
                            OR
                            (
                                (name = $matched.Animal__in_Animal_ParentOf
                                    __in_Animal_ParentOf___1.name)
                                OR
                                (alias CONTAINS $matched.Animal__in_Animal_ParentOf
                                    __in_Animal_ParentOf___1.name)
                            )
                        )),
                        as: Animal__out_Animal_ParentOf___1
                    }}
                    RETURN $matches
                )
            ),
            $result = UNIONALL($optional__0, $optional__1)
        """
        expected_gremlin = """
            g.V('@class',
                'Animal')
            .as('Animal___1')
            .ifThenElse{it.in_Animal_ParentOf == null}{null}{it.in('Animal_ParentOf')}
            .as('Animal__in_Animal_ParentOf___1')
            .ifThenElse{it == null}{null}{it.in('Animal_ParentOf')}
            .as('Animal__in_Animal_ParentOf__in_Animal_ParentOf___1')
            .back('Animal__in_Animal_ParentOf___1')
            .optional('Animal___1')
            .as('Animal___2')
            .out('Animal_ParentOf')
            .filter{it, m -> (
                (m.Animal__in_Animal_ParentOf__in_Animal_ParentOf___1 == null)
                || (
                    (it.name == m.Animal__in_Animal_ParentOf__in_Animal_ParentOf___1.name)
                    ||
                    it.alias.contains(m.Animal__in_Animal_ParentOf
                        __in_Animal_ParentOf___1.name)
                )
            )}
            .as('Animal__out_Animal_ParentOf___1')
            .transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
                parent_name: m.Animal__out_Animal_ParentOf___1.name
            ])}
        """
        expected_sql = NotImplementedError
        expected_cypher = SKIP_TEST

        check_test_data(
            self,
            test_data,
            expected_match,
            expected_gremlin,
            expected_sql,
            expected_cypher,
            expected_sql,
        )
def test_complex_optional_traversal_variables(self) -> None:
test_data = test_input_data.complex_optional_traversal_variables()
expected_match = """
SELECT EXPAND($result)
LET
$optional__0 = (
SELECT
Animal__in_Animal_ParentOf__out_Animal_FedAt___1
.event_date.format("yyyy-MM-dd'T'HH:mm:ss") AS `grandchild_fed_at`,
if(eval("(Animal__out_Animal_ParentOf__out_Animal_FedAt___1 IS NOT null)"),
Animal__out_Animal_ParentOf__out_Animal_FedAt___1
.event_date.format("yyyy-MM-dd'T'HH:mm:ss"),
null
) AS `parent_fed_at`
FROM (
MATCH {{
class: Animal,
where: ((name = {animal_name})),
as: Animal___1
}}.out('Animal_ParentOf') {{
class: Animal,
where: ((
(in_Animal_ParentOf IS null)
OR
(in_Animal_ParentOf.size() = 0)
)),
as: Animal__out_Animal_ParentOf___1
}}.out('Animal_FedAt') {{
optional: true,
as: Animal__out_Animal_ParentOf__out_Animal_FedAt___1
}},
{{
class: Animal,
as: Animal___1
}}.in('Animal_ParentOf') {{
as: Animal__in_Animal_ParentOf___1
}}.out('Animal_FedAt') {{
where: ((
(
($matched.Animal__out_Animal_ParentOf
__out_Animal_FedAt___1 IS null)
OR
(name = $matched.Animal__out_Animal_ParentOf
__out_Animal_FedAt___1.name)
)
AND
(
($matched.Animal__out_Animal_ParentOf
__out_Animal_FedAt___1 IS null)
OR
(event_date <= $matched.Animal__out_Animal_ParentOf
__out_Animal_FedAt___1.event_date)
)
)),
as: Animal__in_Animal_ParentOf__out_Animal_FedAt___1
}}
RETURN $matches
)
WHERE (
(
(Animal__out_Animal_ParentOf___1.out_Animal_FedAt IS null)
OR
(Animal__out_Animal_ParentOf___1.out_Animal_FedAt.size() = 0)
)
OR
(Animal__out_Animal_ParentOf__out_Animal_FedAt___1 IS NOT null)
)
),
$optional__1 = (
SELECT
Animal__in_Animal_ParentOf__out_Animal_FedAt___1
.event_date.format("yyyy-MM-dd'T'HH:mm:ss") AS `grandchild_fed_at`,
Animal__out_Animal_ParentOf__in_Animal_ParentOf__out_Animal_FedAt___1
.event_date.format("yyyy-MM-dd'T'HH:mm:ss") AS `other_child_fed_at`,
if(eval("(Animal__out_Animal_ParentOf__out_Animal_FedAt___1 IS NOT null)"),
Animal__out_Animal_ParentOf__out_Animal_FedAt___1
.event_date.format("yyyy-MM-dd'T'HH:mm:ss"),
null
) AS `parent_fed_at`
FROM (
MATCH {{
class: Animal,
where: ((name = {animal_name})),
as: Animal___1
}}.out('Animal_ParentOf') {{
as: Animal__out_Animal_ParentOf___1
}}.out('Animal_FedAt') {{
optional: true,
as: Animal__out_Animal_ParentOf__out_Animal_FedAt___1
}} ,
{{
where: ((@this INSTANCEOF 'Animal')),
as: Animal__out_Animal_ParentOf___1
}}.in('Animal_ParentOf') {{
as: Animal__out_Animal_ParentOf__in_Animal_ParentOf___1
}}.out('Animal_FedAt') {{
as: Animal__out_Animal_ParentOf__in_Animal_ParentOf
__out_Animal_FedAt___1
}} ,
{{
class: Animal,
as: Animal___1
}}.in('Animal_ParentOf') {{
as: Animal__in_Animal_ParentOf___1
}}.out('Animal_FedAt') {{
where: ((
(
($matched.Animal__out_Animal_ParentOf
__out_Animal_FedAt___1 IS null)
OR
(name = $matched.Animal__out_Animal_ParentOf
__out_Animal_FedAt___1.name)
)
AND
(
(
($matched.Animal__out_Animal_ParentOf
__in_Animal_ParentOf__out_Animal_FedAt___1 IS null)
OR
(event_date >= $matched.Animal__out_Animal_ParentOf
__in_Animal_ParentOf__out_Animal_FedAt___1.event_date)
)
AND
(
($matched.Animal__out_Animal_ParentOf
__out_Animal_FedAt___1 IS null)
OR
(event_date <= $matched.Animal__out_Animal_ParentOf
__out_Animal_FedAt___1.event_date)
)
)
)),
as: Animal__in_Animal_ParentOf__out_Animal_FedAt___1
}}
RETURN $matches
)
WHERE (
(
(Animal__out_Animal_ParentOf___1.out_Animal_FedAt IS null)
OR
(Animal__out_Animal_ParentOf___1.out_Animal_FedAt.size() = 0)
)
OR
(Animal__out_Animal_ParentOf__out_Animal_FedAt___1 IS NOT null)
)
),
$result = UNIONALL($optional__0, $optional__1)
"""
expected_gremlin = """
g.V('@class',
'Animal')
.filter{it, m -> (it.name == $animal_name)}
.as('Animal___1')
.out('Animal_ParentOf')
.as('Animal__out_Animal_ParentOf___1')
.ifThenElse{it.out_Animal_FedAt == null}{null}{it.out('Animal_FedAt')}
.as('Animal__out_Animal_ParentOf__out_Animal_FedAt___1')
.optional('Animal__out_Animal_ParentOf___1')
.as('Animal__out_Animal_ParentOf___2')
.ifThenElse{it.in_Animal_ParentOf == null}{null}{it.in('Animal_ParentOf')}
.as('Animal__out_Animal_ParentOf__in_Animal_ParentOf___1')
.ifThenElse{it == null}{null}{it.out('Animal_FedAt')}
.as('Animal__out_Animal_ParentOf__in_Animal_ParentOf__out_Animal_FedAt___1')
.back('Animal__out_Animal_ParentOf__in_Animal_ParentOf___1')
.optional('Animal__out_Animal_ParentOf___2')
.as('Animal__out_Animal_ParentOf___3')
.back('Animal___1')
.in('Animal_ParentOf')
.as('Animal__in_Animal_ParentOf___1')
.out('Animal_FedAt')
.filter{it, m -> (
(
| |
"""
Connections
===========
.. Copyright:
Copyright 2019 Wirepas Ltd under Apache License, Version 2.0.
See file LICENSE for full license details.
"""
# pylint: disable=locally-disabled, too-many-lines
try:
import MySQLdb
except ImportError:
print("Failed to import MySQLdb")
pass
import logging
import json
class MySQL(object):
"""
MySQL connection handler
"""
def __init__(
self,
username: str,
password: str,
hostname: str,
database: str,
port: int,
connection_timeout: int,
logger: logging.Logger = None,
):
super(MySQL, self).__init__()
self.logger = logger or logging.getLogger(__name__)
self.hostname = hostname
self.username = username
self.password = password
self.database_name = database
self.database = None
self.port = port
self.cursor = None
self.connection_timeout = connection_timeout
def connect(self, table_creation=True) -> None:
""" Establishes a connection and service loop. """
# pylint: disable=locally-disabled, protected-access
self.logger.info(
"MySQL connection to %s:%s@%s:%s",
self.username,
self.password,
self.hostname,
self.port,
)
self.database = MySQLdb.connect(
host=self.hostname,
user=self.username,
passwd=self.password,
port=self.port,
connect_timeout=self.connection_timeout,
)
self.cursor = self.database.cursor()
self.cursor.execute("SHOW DATABASES")
try:
self.cursor.execute(
"CREATE DATABASE {}".format(self.database_name)
)
except MySQLdb._exceptions.ProgrammingError as error_message:
if error_message.args[0] != 1007:
self.logger.error(
"Could not create database %s", self.database_name
)
raise
self.cursor.execute("USE {}".format(self.database_name))
if table_creation:
self.create_tables()
def close(self: "MySQL") -> None:
""" Handles disconnect from database object """
self.cursor.close()
self.database.close()
def create_tables(self):
"""
Create tables if they do not exist
"""
# pylint: disable=locally-disabled, too-many-statements
query = (
"CREATE TABLE IF NOT EXISTS known_nodes ("
" network_address BIGINT UNSIGNED NOT NULL,"
" node_address INT UNSIGNED NOT NULL,"
" last_time TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),"
" voltage DOUBLE NULL,"
" node_role SMALLINT UNSIGNED NULL,"
" firmware_version INT UNSIGNED NULL,"
" scratchpad_seq INT UNSIGNED NULL,"
" hw_magic INT UNSIGNED NULL,"
" stack_profile INT UNSIGNED NULL,"
" boot_count INT UNSIGNED NULL,"
" file_line_num INT UNSIGNED NULL,"
" file_name_hash INT UNSIGNED NULL,"
" UNIQUE INDEX node (network_address, node_address)"
") ENGINE = InnoDB;"
)
self.cursor.execute(query)
query = (
"CREATE TABLE IF NOT EXISTS received_packets ("
" id BIGINT NOT NULL AUTO_INCREMENT UNIQUE,"
" logged_time TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),"
" launch_time TIMESTAMP(6) NULL,"
" path_delay_ms BIGINT UNSIGNED NOT NULL,"
" network_address BIGINT UNSIGNED NOT NULL,"
" sink_address INT UNSIGNED NOT NULL,"
" source_address INT UNSIGNED NOT NULL,"
" dest_address INT UNSIGNED NOT NULL,"
" source_endpoint SMALLINT UNSIGNED NOT NULL,"
" dest_endpoint SMALLINT UNSIGNED NOT NULL,"
" qos SMALLINT UNSIGNED NOT NULL,"
" num_bytes SMALLINT UNSIGNED NOT NULL,"
" hop_count SMALLINT UNSIGNED DEFAULT NULL,"
" PRIMARY KEY (id),"
" INDEX (logged_time),"
" INDEX (launch_time),"
" INDEX (source_address),"
" INDEX packets_from_node (network_address, source_address)"
") ENGINE = InnoDB;"
)
self.cursor.execute(query)
# See if we need to expand the old received_packets table with
# the hop_count column.
query = "SHOW COLUMNS FROM received_packets;"
self.cursor.execute(query)
self.database.commit()
values = self.cursor.fetchall()
column_names = map(lambda x: x[0], values)
if "hop_count" not in column_names:
# hop_count was not in the table so add it.
query = (
"ALTER TABLE received_packets\n"
"ADD COLUMN hop_count SMALLINT UNSIGNED DEFAULT NULL;"
)
self.cursor.execute(query)
self.database.commit()
query = (
"CREATE TABLE IF NOT EXISTS diagnostics_json ("
" received_packet BIGINT NOT NULL,"
" FOREIGN KEY (received_packet) REFERENCES received_packets(id),"
" apdu JSON NOT NULL"
") ENGINE = InnoDB;"
)
self.cursor.execute(query)
createtable = (
"CREATE TABLE IF NOT EXISTS advertiser_json ("
" received_packet BIGINT NOT NULL,"
" FOREIGN KEY (received_packet) REFERENCES received_packets(id),"
" apdu JSON NOT NULL"
") ENGINE = InnoDB;"
)
self.cursor.execute(createtable)
query = "SHOW COLUMNS FROM advertiser_json;"
self.cursor.execute(query)
self.database.commit()
values = self.cursor.fetchall()
column_names = map(lambda x: x[0], values)
if "received_packet" not in column_names:
# hop_count was not in the table so add it.
query = (
"ALTER TABLE advertiser_json\n"
"ADD COLUMN received_packet BIGINT NOT NULL;"
)
self.cursor.execute(query)
self.database.commit()
# Create test nw app database
default_test_ids = 10
default_column_count = 30
for test_data_id in range(1, default_test_ids):
table_name = f"TestData_ID_{test_data_id}"
query = """
CREATE TABLE IF NOT EXISTS `{}` (
received_packet BIGINT NOT NULL,
`logged_time` DOUBLE DEFAULT NULL,
`launch_time` DOUBLE DEFAULT NULL,
`ID_ctrl` INT UNSIGNED DEFAULT NULL,
`field_count` int DEFAULT 0,
""".format(
table_name
)
for i in range(1, default_column_count + 1):
query += "`DataCol_{}` INT UNSIGNED DEFAULT NULL,".format(i)
query += "INDEX (logged_time),"
query += "INDEX (launch_time),"
query += "INDEX (ID_ctrl),"
query += (
"FOREIGN KEY (received_packet) REFERENCES received_packets(id)"
)
query += ") ENGINE=InnoDB;"
self.cursor.execute(query)
self.database.commit()
query = (
"CREATE TABLE IF NOT EXISTS diagnostic_traffic ("
" received_packet BIGINT NOT NULL,"
" access_cycles INT UNSIGNED NOT NULL,"
" cluster_channel SMALLINT UNSIGNED NOT NULL,"
" channel_reliability SMALLINT UNSIGNED NOT NULL,"
" rx_count INT UNSIGNED NOT NULL,"
" tx_count INT UNSIGNED NOT NULL,"
" aloha_rxs SMALLINT UNSIGNED NOT NULL,"
" resv_rx_ok SMALLINT UNSIGNED NOT NULL,"
" data_rxs SMALLINT UNSIGNED NOT NULL,"
" dup_rxs SMALLINT UNSIGNED NOT NULL,"
" cca_ratio SMALLINT UNSIGNED NOT NULL,"
" bcast_ratio SMALLINT UNSIGNED NOT NULL,"
" tx_unicast_fail SMALLINT UNSIGNED NOT NULL,"
" resv_usage_max SMALLINT UNSIGNED NOT NULL,"
" resv_usage_avg SMALLINT UNSIGNED NOT NULL,"
" aloha_usage_max SMALLINT UNSIGNED NOT NULL,"
" FOREIGN KEY (received_packet) REFERENCES received_packets(id)"
") ENGINE = InnoDB;"
)
self.cursor.execute(query)
# See if we need to expand the old diagnostic_traffic table with
# the cluster_members and/or cluster_headnode_members column.
query = "SHOW COLUMNS FROM diagnostic_traffic;"
self.cursor.execute(query)
self.database.commit()
values = self.cursor.fetchall()
column_names = map(lambda x: x[0], values)
if "cluster_members" not in column_names:
# cluster_members was not in the table so add it.
query = (
"ALTER TABLE diagnostic_traffic\n"
"ADD COLUMN cluster_members SMALLINT UNSIGNED DEFAULT NULL;"
)
self.cursor.execute(query)
self.database.commit()
if "cluster_headnode_members" not in column_names:
# cluster_headnode_members was not in the table so add it.
query = (
"ALTER TABLE diagnostic_traffic\n"
"ADD COLUMN cluster_headnode_members SMALLINT UNSIGNED DEFAULT NULL;"
)
self.cursor.execute(query)
self.database.commit()
query = (
"CREATE TABLE IF NOT EXISTS diagnostic_neighbor ("
" received_packet BIGINT NOT NULL,"
" node_address INT UNSIGNED NOT NULL,"
" cluster_channel SMALLINT UNSIGNED NOT NULL,"
" radio_power SMALLINT UNSIGNED NOT NULL,"
" device_info SMALLINT UNSIGNED NOT NULL,"
" norm_rssi SMALLINT UNSIGNED NOT NULL,"
" FOREIGN KEY (received_packet) REFERENCES received_packets(id)"
") ENGINE = InnoDB;"
)
self.cursor.execute(query)
query = (
"CREATE TABLE IF NOT EXISTS diagnostic_node ("
" received_packet BIGINT NOT NULL,"
" access_cycle_ms INT UNSIGNED NOT NULL,"
" node_role SMALLINT UNSIGNED NOT NULL,"
" voltage DOUBLE NOT NULL,"
" buf_usage_max SMALLINT UNSIGNED NOT NULL,"
" buf_usage_avg SMALLINT UNSIGNED NOT NULL,"
" mem_alloc_fails SMALLINT UNSIGNED NOT NULL,"
" tc0_delay SMALLINT UNSIGNED NOT NULL,"
" tc1_delay SMALLINT UNSIGNED NOT NULL,"
" network_scans SMALLINT UNSIGNED NOT NULL,"
" downlink_delay_avg_0 INT UNSIGNED NOT NULL,"
" downlink_delay_min_0 INT UNSIGNED NOT NULL,"
" downlink_delay_max_0 INT UNSIGNED NOT NULL,"
" downlink_delay_samples_0 INT UNSIGNED NOT NULL,"
" downlink_delay_avg_1 INT UNSIGNED NOT NULL,"
" downlink_delay_min_1 INT UNSIGNED NOT NULL,"
" downlink_delay_max_1 INT UNSIGNED NOT NULL,"
" downlink_delay_samples_1 INT UNSIGNED NOT NULL,"
" dropped_packets_0 SMALLINT UNSIGNED NOT NULL,"
" dropped_packets_1 SMALLINT UNSIGNED NOT NULL,"
" route_address INT UNSIGNED NOT NULL,"
" next_hop_address_0 INT UNSIGNED NOT NULL,"
" cost_0 SMALLINT UNSIGNED NOT NULL,"
" quality_0 SMALLINT UNSIGNED NOT NULL,"
" next_hop_address_1 INT UNSIGNED NOT NULL,"
" cost_1 SMALLINT UNSIGNED NOT NULL,"
" quality_1 SMALLINT UNSIGNED NOT NULL,"
" FOREIGN KEY (received_packet) REFERENCES received_packets(id)"
") ENGINE = InnoDB;"
)
self.cursor.execute(query)
self.update_diagnostic_node_table_4_0()
self.update_diagnostic_node_table_4_2()
query = (
"CREATE TABLE IF NOT EXISTS diagnostic_event ("
" received_packet BIGINT NOT NULL,"
" position SMALLINT NOT NULL,"
" event SMALLINT NOT NULL,"
" FOREIGN KEY (received_packet) REFERENCES received_packets(id),"
" UNIQUE INDEX event_id (received_packet, position)"
") ENGINE = InnoDB;"
)
self.cursor.execute(query)
query = (
"CREATE TABLE IF NOT EXISTS diagnostic_boot ("
" received_packet BIGINT NOT NULL,"
" boot_count INT UNSIGNED NOT NULL,"
" node_role SMALLINT UNSIGNED NOT NULL,"
" firmware_version INT UNSIGNED NOT NULL,"
" scratchpad_seq INT UNSIGNED NOT NULL,"
" hw_magic INT UNSIGNED NOT NULL,"
" stack_profile INT UNSIGNED NOT NULL,"
" otap_enabled BOOL NOT NULL,"
" file_line_num INT UNSIGNED NOT NULL,"
" file_name_hash INT UNSIGNED NOT NULL,"
" stack_trace_0 INT UNSIGNED NOT NULL,"
" stack_trace_1 INT UNSIGNED NOT NULL,"
" stack_trace_2 INT UNSIGNED NOT NULL,"
" current_seq INT UNSIGNED DEFAULT NULL,"
" FOREIGN KEY (received_packet) REFERENCES received_packets(id)"
") ENGINE = InnoDB;"
)
self.cursor.execute(query)
# See if we need to expand the old diagnostic_boot table with
# the current_seq.
query = "SHOW COLUMNS FROM diagnostic_boot;"
self.cursor.execute(query)
| |
i_save = 0
n = N_BASE_STATES
n1 = self.gyro_model.n_states
n2 = self.accel_model.n_states
if self.gyro_model.scale is not None:
gyro = theta / self.dt
gyro = np.vstack((gyro, 2 * gyro[-1] - gyro[-2]))
else:
gyro = None
if self.accel_model.scale is not None:
accel = dv / self.dt
accel = np.vstack((accel, 2 * accel[-1] - accel[-2]))
else:
accel = None
H_gyro = np.atleast_2d(self.gyro_model.output_matrix(gyro))
H_accel = np.atleast_2d(self.accel_model.output_matrix(accel))
F = np.zeros((self.n_states, self.n_states))
F[n: n + n1, n: n + n1] = self.gyro_model.F
F[n + n1:n + n1 + n2, n + n1: n + n1 + n2] = self.accel_model.F
F1 = F
F2 = F.copy()
s = 0
s1 = self.gyro_model.n_noises
s2 = self.accel_model.n_noises
if self.gyro_model.noise is not None:
s += 3
if self.accel_model.noise is not None:
s += 3
G = np.zeros((self.n_states, self.n_noises))
G[n: n + n1, s: s + s1] = self.gyro_model.G
G[n + n1: n + n1 + n2, s + s1: s + s1 + s2] = self.accel_model.G
G1 = G
G2 = G.copy()
obs_stamps = [[] for _ in range(len(observations))]
obs_residuals = [[] for _ in range(len(observations))]
n_readings = theta.shape[0]
while i_reading < n_readings:
theta_b = theta[i_reading: i_reading + feedback_period]
dv_b = dv[i_reading: i_reading + feedback_period]
traj_b = integrator.integrate(theta_b, dv_b)
Fi, Fig, Fia = _error_model_matrices(traj_b)
i = 0
while i < theta_b.shape[0]:
stamp = stamps[i_stamp]
stamp_next = stamps[i_stamp + 1]
delta_i = stamp_next - stamp
i_next = i + delta_i
if data_for_backward and record_stamps[i_save] == stamp:
xa[i_save] = xc
Pa[i_save] = Pc
for i_obs, obs in enumerate(observations):
ret = obs.compute_obs(stamp, traj_b.iloc[i])
if ret is not None:
z, H, R = ret
H_max[:H.shape[0], :N_BASE_STATES] = H
res = _kalman_correct(xc, Pc, z,
H_max[:H.shape[0]], R,
gain_factor, obs.gain_curve)
obs_stamps[i_obs].append(stamp)
obs_residuals[i_obs].append(res)
if record_stamps[i_save] == stamp:
x[i_save] = xc
P[i_save] = Pc
i_save += 1
dt = self.dt * delta_i
F1[:n, :n] = Fi[i]
F2[:n, :n] = Fi[i_next]
if H_gyro.ndim == 2:
H_gyro_i = H_gyro
H_gyro_i_next = H_gyro
else:
H_gyro_i = H_gyro[stamp - start]
H_gyro_i_next = H_gyro[stamp_next - start]
if H_accel.ndim == 2:
H_accel_i = H_accel
H_accel_i_next = H_accel
else:
H_accel_i = H_accel[stamp - start]
H_accel_i_next = H_accel[stamp_next - start]
F1[:n, n: n + n1] = Fig[i].dot(H_gyro_i)
F2[:n, n: n + n1] = Fig[i_next].dot(H_gyro_i_next)
F1[:n, n + n1: n + n1 + n2] = Fia[i].dot(H_accel_i)
F2[:n, n + n1: n + n1 + n2] = Fia[i_next].dot(H_accel_i_next)
s = 0
if self.gyro_model.noise is not None:
G1[:n, :3] = Fig[i]
G2[:n, :3] = Fig[i_next]
s += 3
if self.accel_model.noise is not None:
G1[:n, s: s + 3] = Fia[i]
G2[:n, s: s + 3] = Fia[i_next]
Phi = 0.5 * (F1 + F2) * dt
Phi[np.diag_indices_from(Phi)] += 1
Qd = 0.5 * (G1 + G2)
Qd *= self.q
Qd = np.dot(Qd, Qd.T) * dt
xc = Phi.dot(xc)
Pc = Phi.dot(Pc).dot(Phi.T) + Qd
if data_for_backward:
Phi_arr[i_save - 1] = Phi
i = i_next
i_stamp += 1
i_reading += feedback_period
integrator._correct(xc[:N_BASE_STATES])
xc[:N_BASE_STATES] = 0
if record_stamps[i_save] == stamps[i_stamp]:
x[i_save] = xc
P[i_save] = Pc
if data_for_backward:
xa[i_save] = xc
Pa[i_save] = Pc
residuals = []
for s, r in zip(obs_stamps, obs_residuals):
residuals.append(pd.DataFrame(index=s, data=np.asarray(r)))
return x, P, xa, Pa, Phi_arr, residuals
def run(self, integrator, theta, dv, observations=[], gain_factor=None,
max_step=1, feedback_period=500, record_stamps=None):
"""Run the filter.
Parameters
----------
integrator : `pyins.integrate.Integrator` instance
Integrator to use for INS state propagation. It will be reset
before the filter start.
theta, dv : ndarray, shape (n_readings, 3)
Rotation vectors and velocity increments computed from gyro and
accelerometer readings after applying coning and sculling
corrections.
observations : list of `Observation`
Measurements which will be processed. Empty by default.
gain_factor : array_like with shape (n_states,) or None, optional
Factor for Kalman gain for each filter's state. It might be
beneficial in some practical situations to set factors less than 1
in order to decrease influence of measurements on some states.
Setting values higher than 1 is unlikely to be reasonable. If None
(default), use standard optimal Kalman gain.
max_step : float, optional
Maximum allowed time step. Default is 1 second. Set to 0 if you
desire the smallest possible step.
feedback_period : float
Time after which INS state will be corrected by the estimated
errors. Default is 500 seconds.
record_stamps : array_like or None
At which stamps record estimated errors. If None (default), errors
will be saved at each stamp used internally in the filter.
Returns
-------
Bunch object with the fields listed below. Note that all data frames
contain stamps only presented in `record_stamps`.
traj : DataFrame
Trajectory corrected by estimated errors. It will only contain
stamps presented in `record_stamps`.
sd : DataFrame
Estimated standard deviations of trajectory errors.
gyro_err, gyro_sd : DataFrame
Estimated gyro error states and their standard deviations.
accel_err, accel_sd : DataFrame
Estimated accelerometer error states and their standard deviations.
P : ndarray, shape (n_points, n_states, n_states)
History of the filter covariance.
residuals : list of DataFrame
Each DataFrame corresponds to an observation from `observations`.
Its index is observation time stamps and columns contain normalized
observations residuals for each component of the observation
vector `z`.
Notes
-----
Estimated trajectory errors and a history of the filter states are not
returned because they are computed relative to partially corrected
trajectory and are not useful for interpretation.
"""
(theta, dv, observations, stamps, record_stamps,
gain_factor, feedback_period) = \
self._validate_parameters(integrator, theta, dv, observations,
gain_factor, max_step, record_stamps,
feedback_period)
x, P, _, _, _, residuals = \
self._forward_pass(integrator, theta, dv, observations,
gain_factor, stamps, record_stamps,
feedback_period)
traj = integrator.traj.loc[record_stamps]
err, sd, accel_err, accel_sd, gyro_err, gyro_sd = \
_compute_output_errors(traj, x, P, record_stamps, self.gyro_model,
self.accel_model)
traj_corr = correct_traj(integrator.traj, err)
return FiltResult(traj=traj_corr, sd=sd, gyro_err=gyro_err,
gyro_sd=gyro_sd, accel_err=accel_err,
accel_sd=accel_sd, P=P, residuals=residuals)
def run_smoother(self, integrator, theta, dv, observations=[],
gain_factor=None, max_step=1, feedback_period=500,
record_stamps=None):
"""Run the smoother.
It means that observations during the whole time is used to estimate
the errors at each moment of time (i.e. it is not real time). The
Rauch-Tung-Striebel two pass recursion is used [1]_.
Parameters
----------
integrator : `pyins.integrate.Integrator` instance
Integrator to use for INS state propagation. It will be reset
before the filter start.
theta, dv : ndarray, shape (n_readings, 3)
Rotation vectors and velocity increments computed from gyro and
accelerometer readings after applying coning and sculling
corrections.
observations : list of `Observation`
Measurements which will be processed. Empty by default.
gain_factor : array_like with shape (n_states,) or None, optional
Factor for Kalman gain for each filter's state. It might be
beneficial in some practical situations to set factors less than 1
in order to decrease influence of measurements on some states.
Setting values higher than 1 is unlikely to be reasonable. If None
(default), use standard optimal Kalman gain.
max_step : float, optional
Maximum allowed time step. Default is 1 second. Set to 0 if you
desire the smallest possible step.
feedback_period : float
Time after which INS state will be corrected by the estimated
errors. Default is 500 seconds.
record_stamps : array_like or None
At which stamps record estimated errors. If None (default), errors
will be saved at each stamp used internally in the filter.
Returns
-------
Bunch object with the fields listed below. Note that all data frames
contain stamps only presented in `record_stamps`.
traj : DataFrame
Trajectory corrected by estimated errors. It will only contain
stamps presented in `record_stamps`.
sd : DataFrame
Estimated trajectory errors and their standard deviations.
gyro_err, gyro_sd : DataFrame
Estimated gyro error states and their standard deviations.
accel_err, accel_sd : DataFrame
Estimated accelerometer error states and their standard deviations.
P : ndarray, shape (n_points, n_states, n_states)
History of the filter covariance.
residuals : list of DataFrame
Each DataFrame corresponds to an observation from `observations`.
Its index is observation time stamps and columns contain normalized
observations residuals for each component of the observation
vector `z`.
Notes
-----
Estimated trajectory errors and a history of the filter states are not
returned because they are computed relative to | |
<reponame>arpitgogia/mars_city<filename>MOCC/src/planning/PlexilPlanStorage/PlexilPlanStorage.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
##############################################################################
## license :
##============================================================================
##
## File : PlexilPlanStorage.py
##
## Project : Plexil Plan Storage
##
## This file is part of Tango device class.
##
## Tango is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Tango is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Tango. If not, see <http://www.gnu.org/licenses/>.
##
##
## $Author : dipankar1995$
##
## $Revision : $
##
## $Date : $
##
## $HeadUrl : $
##============================================================================
## This file is generated by POGO
## (Program Obviously used to Generate tango Object)
##
## (c) - Software Engineering Group - ESRF
##############################################################################
"""A Tango interface to store and retrieve Plexil plans."""
__all__ = ["PlexilPlanStorage", "PlexilPlanStorageClass", "main"]
__docformat__ = 'restructuredtext'
import PyTango
import sys
# Add additional import
#----- PROTECTED REGION ID(PlexilPlanStorage.additionnal_import) ENABLED START -----#
import os, subprocess
from PyTango import DevState
from sqlalchemy import create_engine, exists
from sqlalchemy.orm import sessionmaker
from datetime import datetime
from PlexilDB_declarative import Category, Plan, Config, Script, Base
session = None
from MongoDBhelper import addToCollection, removeFromCollection
#----- PROTECTED REGION END -----# // PlexilPlanStorage.additionnal_import
## Device States Description
## No states for this device
class PlexilPlanStorage (PyTango.Device_4Impl):
#--------- Add you global variables here --------------------------
#----- PROTECTED REGION ID(PlexilPlanStorage.global_variables) ENABLED START -----#
#----- PROTECTED REGION END -----# // PlexilPlanStorage.global_variables
    def __init__(self, cl, name):
        """Construct the Tango device and run its initialisation.

        :param cl: device class object supplied by the Tango runtime
        :param name: device name supplied by the Tango runtime
        """
        PyTango.Device_4Impl.__init__(self, cl, name)
        self.debug_stream("In __init__()")
        # POGO-generated pattern: delegate all setup to init_device() so the
        # device can also be re-initialised later via the Init command.
        PlexilPlanStorage.init_device(self)
        #----- PROTECTED REGION ID(PlexilPlanStorage.__init__) ENABLED START -----#
        #----- PROTECTED REGION END -----#	//	PlexilPlanStorage.__init__
    def delete_device(self):
        """Tango hook called when the device is destroyed; no cleanup needed here."""
        self.debug_stream("In delete_device()")
        #----- PROTECTED REGION ID(PlexilPlanStorage.delete_device) ENABLED START -----#
        #----- PROTECTED REGION END -----#	//	PlexilPlanStorage.delete_device
    def init_device(self):
        """Initialise the device: set state/status and open the SQLAlchemy session.

        NOTE(review): connects as MySQL ``root`` with an empty password on
        localhost — credentials should come from device properties or
        configuration, not be hard-coded.
        """
        self.debug_stream("In init_device()")
        self.get_device_properties(self.get_device_class())
        self.attr_IsStorageDirEmpty_read = False
        #----- PROTECTED REGION ID(PlexilPlanStorage.init_device) ENABLED START -----#
        self.set_state(DevState.ON);
        self.set_status("Ready to accept queries.");
        engine = create_engine('mysql+pymysql://root:@localhost:3306/PlexilDatabase')
        Base.metadata.bind = engine
        DBSession = sessionmaker(bind=engine)
        # Single module-level session shared by all command handlers below.
        global session
        session = DBSession()
        #----- PROTECTED REGION END -----#	//	PlexilPlanStorage.init_device
    def always_executed_hook(self):
        """Tango hook run before every command/attribute access; nothing to do."""
        self.debug_stream("In always_excuted_hook()")
        #----- PROTECTED REGION ID(PlexilPlanStorage.always_executed_hook) ENABLED START -----#
        #----- PROTECTED REGION END -----#	//	PlexilPlanStorage.always_executed_hook
#-----------------------------------------------------------------------------
# PlexilPlanStorage read/write attribute methods
#-----------------------------------------------------------------------------
def read_IsStorageDirEmpty(self, attr):
self.debug_stream("In read_IsStorageDirEmpty()")
#----- PROTECTED REGION ID(PlexilPlanStorage.IsStorageDirEmpty_read) ENABLED START -----#
attr.set_value(False)
path = PyTango.Database().get_class_property(sys.argv[0], "StorageDirPath")["StorageDirPath"][0]
if not os.listdir(path):
attr.set_value(True)
#----- PROTECTED REGION END -----# // PlexilPlanStorage.IsStorageDirEmpty_read
#----- PROTECTED REGION ID(PlexilPlanStorage.initialize_dynamic_attributes) ENABLED START -----#
#----- PROTECTED REGION END -----# // PlexilPlanStorage.initialize_dynamic_attributes
def read_attr_hardware(self, data):
self.debug_stream("In read_attr_hardware()")
#----- PROTECTED REGION ID(PlexilPlanStorage.read_attr_hardware) ENABLED START -----#
#----- PROTECTED REGION END -----# // PlexilPlanStorage.read_attr_hardware
#-----------------------------------------------------------------------------
# PlexilPlanStorage command methods
#-----------------------------------------------------------------------------
def AddPlan(self, argin):
""" Adds a Plexil Plan to storage and tries to compile it.
:param argin: sourcedestination
:type: PyTango.DevString
:return: result
:rtype: PyTango.DevBoolean """
self.debug_stream("In AddPlan()")
argout = False
#----- PROTECTED REGION ID(PlexilPlanStorage.AddPlan) ENABLED START -----#
try:
# Parse the input string
path = PyTango.Database().get_class_property(sys.argv[0], "StorageDirPath")["StorageDirPath"][0]
argin = argin.split(";")
source, dest, valid = argin[0], argin[1], argin[2]
categ = dest.split("/")[0]
# Create Category directory if it doesn't exist
val1 = 0
if not os.path.isdir(path + categ):
command = 'mkdir ' + path + categ
val1 = subprocess.check_call(command, shell=True)
# Get a few params and set extension flag
filename = dest
dest = path + dest
extension = filename.split(".")[1]
if extension == 'ple':
extension = 1
else:
extension = 0
# Copy the file to the directory
command = 'cp ' + source + ' ' + dest
val2 = subprocess.check_call(command, shell=True)
# Parsing and storing into MongoDB's PlexilDatabase's Plans' collection only if .plx file
if extension == 0:
uniqueID = addToCollection(source)
# Create Database entry
new_plan = Plan(Name=filename, Path=dest, MongoDBid=str(uniqueID), Validity=valid, Is_ple=extension)
category = None
if not session.query(exists().where(Category.Name==categ)).scalar():
category = Category(Name=categ)
else:
category = session.query(Category).filter(Category.Name==categ).one()
category.plans.append(new_plan)
# Placing the session.commit() inside ensures atomicity of Copying and Database Entry
if val1 == 0 and val2 == 0:
session.add(new_plan)
session.commit()
argout = True
except Exception as e:
if uniqueID:
removeFromCollection(uniqueID)
session.rollback()
argout = False
#----- PROTECTED REGION END -----# // PlexilPlanStorage.AddPlan
return argout
def AddConfigFile(self, argin):
""" Adds a Plexil Debug Config File or a Plexil Interface Config File to the storage
:param argin: sourcedestination
:type: PyTango.DevString
:return: result
:rtype: PyTango.DevBoolean """
self.debug_stream("In AddConfigFile()")
argout = False
#----- PROTECTED REGION ID(PlexilPlanStorage.AddConfigFile) ENABLED START -----#
try:
# Parse the input string
path = PyTango.Database().get_class_property(sys.argv[0], "StorageDirPath")["StorageDirPath"][0]
argin = argin.split(";")
source, dest, valid = argin[0], argin[1], argin[2]
categ = dest.split("/")[0]
# Create Category directory if it doesn't exist
val1 = 0
if not os.path.isdir(path + categ):
command = 'mkdir ' + path + categ
val1 = subprocess.check_call(command, shell=True)
# Get a few params and set extension flag
filename = dest
dest = path + dest
# Copy the file to the directory
command = 'cp ' + source + ' ' + dest
val2 = subprocess.check_call(command, shell=True)
# Create Database entry
new_config = Config(Name=filename, Path=dest, Validity=valid)
category = None
if not session.query(exists().where(Category.Name==categ)).scalar():
category = Category(Name=categ)
else:
category = session.query(Category).filter(Category.Name==categ).one()
category.configs.append(new_config)
# Placing the session.commit() inside ensures atomicity of Copying and Database Entry
if val1 == 0 and val2 == 0:
session.add(new_config)
session.commit()
argout = True
except Exception as e:
session.rollback()
argout = False
#----- PROTECTED REGION END -----# // PlexilPlanStorage.AddConfigFile
return argout
def AddScript(self, argin):
""" Adds a Plexil script file and tries to compile it
:param argin: sourcedestination
:type: PyTango.DevString
:return: result
:rtype: PyTango.DevBoolean """
self.debug_stream("In AddScript()")
argout = False
#----- PROTECTED REGION ID(PlexilPlanStorage.AddScript) ENABLED START -----#
try:
# Parse the input string
path = PyTango.Database().get_class_property(sys.argv[0], "StorageDirPath")["StorageDirPath"][0]
argin = argin.split(";")
source, dest, valid = argin[0], argin[1], argin[2]
categ = dest.split("/")[0]
# Create Category directory if it doesn't exist
val1 = 0
if not os.path.isdir(path + categ):
command = 'mkdir ' + path + categ
val1 = subprocess.check_call(command, shell=True)
# Get a few params and set extension flag
filename = dest
dest = path + dest
extension = filename.split(".")[1]
if extension == 'pst':
extension = 1
else:
extension = 0
# Copy the file to the directory
command = 'cp ' + source + ' ' + dest
val2 = subprocess.check_call(command, shell=True)
# Create Database entry
new_script = Script(Name=filename, Path=dest, Validity=valid, Is_pst=extension)
category = None
if not session.query(exists().where(Category.Name==categ)).scalar():
category = Category(Name=categ)
else:
category = session.query(Category).filter(Category.Name==categ).one()
category.scripts.append(new_script)
# Placing the session.commit() inside ensures atomicity of Copying and Database Entry
if val1 == 0 and val2 == 0:
session.add(new_script)
session.commit()
argout = True
except Exception as e:
session.rollback()
argout = False
#----- PROTECTED REGION END -----# // PlexilPlanStorage.AddScript
return argout
def DeleteFile(self, argin):
""" Deletes the given file from storage - can be a plan, a script, a debug-config or a interface config
:param argin: pathtofile
:type: PyTango.DevString
:return: result
:rtype: PyTango.DevBoolean """
self.debug_stream("In DeleteFile()")
argout = False
#----- PROTECTED REGION ID(PlexilPlanStorage.DeleteFile) ENABLED START -----#
try:
path = PyTango.Database().get_class_property(sys.argv[0], "StorageDirPath")["StorageDirPath"][0]
argin = argin.split(';')
internalpath, filetype = argin[0], int(argin[1])
# Check for and delete Database Entry
categ = None
if filetype == 0:
if session.query(exists().where(Plan.Name==internalpath)).scalar():
plan = session.query(Plan).filter(Plan.Name==internalpath).one()
categ = plan.Categories
uniqueID = plan.MongoDBid
removeFromCollection(uniqueID)
session.query(Plan).filter(Plan.Name==internalpath).delete()
elif filetype == 1:
if session.query(exists().where(Config.Name==internalpath)).scalar():
config = session.query(Config).filter(Config.Name==internalpath).one()
categ = config.Categories
session.query(Config).filter(Config.Name==internalpath).delete()
elif filetype == 2:
if session.query(exists().where(Script.Name==internalpath)).scalar():
script = session.query(Script).filter(Script.Name==internalpath).one()
categ = script.Categories
session.query(Script).filter(Script.Name==internalpath).delete()
emptydirflag = False
# Ensure empty Category not left over
if categ != None and not categ.plans and not categ.configs and not categ.scripts:
session.query(Category).filter(Category.Name==categ.Name).delete()
emptydirflag = True
if emptydirflag:
command = 'rm -rf ' + path + internalpath.split('/')[0]
else:
command = 'rm ' + path + internalpath
val = subprocess.check_call(command, shell=True)
# Ensure atomicity
if val == 0:
session.commit()
argout = True
except Exception as e:
session.rollback()
argout = False
#----- PROTECTED REGION END -----# // PlexilPlanStorage.DeleteFile
return argout
def RetrieveFile(self, argin):
""" Retreive the specified Plexil File to the specified location
:param argin: sourcedestination
:type: PyTango.DevString
:return: result
:rtype: PyTango.DevBoolean """
self.debug_stream("In RetrieveFile()")
argout = False
#----- PROTECTED REGION ID(PlexilPlanStorage.RetrieveFile) ENABLED | |
<gh_stars>10-100
import tensorflow as tf
import numpy as np
from groupy.gconv.tensorflow_gconv.splitgconv2d import gconv2d, gconv2d_util
import cnn_params
from keras_gcnn.layers import GBatchNorm
from keras_gcnn.layers.pooling import GroupPool
# Exponential-moving-average decay for the batch-norm running statistics.
MOVING_AVERAGE_DECAY = 0.98
BN_DECAY = MOVING_AVERAGE_DECAY
# Small constant added to the variance to avoid division by zero in BN.
BN_EPSILON = 0.001
def weight_variable(shape, name, initial=None, std=None):
    '''Create a weight variable for a layer.

    The variable is either drawn from a normal distribution (stddev ``std``,
    default 0.1) or filled with the constant ``initial`` when given. In both
    cases an L2 weight-decay regularizer (scale 0.0005) is attached.
    '''
    stddev = 0.1 if std is None else std
    if initial is None:
        initializer = tf.random_normal_initializer(stddev=stddev)
    else:
        initializer = tf.constant_initializer(initial)
    return tf.get_variable(
        name, shape,
        initializer=initializer,
        regularizer=tf.contrib.layers.l2_regularizer(scale=0.0005))
def weight_variable_with_rotations(shape, n_rotations, name):
    '''Create a weight variable and stack ``n_rotations`` rotated copies of it.

    Used for the DFT transition layer, not for convolutional layers (those
    apply rotations inside the rotational_conv2d functions). The base filter
    is created in output-major layout, rotated by multiples of
    ``2*pi/n_rotations`` with bilinear interpolation, concatenated, and then
    transposed back to HWC-out layout.
    '''
    # Interpolation method for tf.contrib.image.rotate ('BILINEAR' or 'NEAREST').
    interpolation = 'BILINEAR'
    rot_angle = 2*np.pi / n_rotations
    base = weight_variable([shape[3], shape[0], shape[1], shape[2]], name)
    rotated = [
        tf.contrib.image.rotate(base, r*rot_angle, interpolation=interpolation)
        for r in range(1, n_rotations)
    ]
    stacked = tf.concat([base] + rotated, axis=0)
    return tf.transpose(stacked, [1, 2, 3, 0])
def bias_variable(shape, name, initial=None):
    '''Create a bias variable for a layer.

    Zero-initialised by default; when ``initial`` is given, its values (and
    shape) define the variable instead.
    '''
    if initial is not None:
        return tf.get_variable(name,
                               initializer=tf.convert_to_tensor(initial))
    return tf.get_variable(name, shape,
                           initializer=tf.constant_initializer(0.0))
def variable_summaries(var, name):
    '''Record TensorBoard summary statistics (mean/stddev/max/min/histogram)
    for a layer variable under the given name.'''
    mean = tf.reduce_mean(var)
    stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
    scalars = {
        'mean': mean,
        'stddev': stddev,
        'max': tf.reduce_max(var),
        'min': tf.reduce_min(var),
    }
    for stat, tensor in scalars.items():
        tf.summary.scalar(stat + '/' + name, tensor)
    tf.summary.histogram(name, var)
def conv2d(x, W, s, padding='VALID'):
    '''Simple wrapper around TF convolution function.

    Applies filter ``W`` to NHWC input ``x`` with stride ``s`` in both
    spatial dimensions (batch/channel strides fixed at 1).
    '''
    return tf.nn.conv2d(x, W, strides=[1, s, s, 1], padding=padding)
def conic_conv2d(x, W, x_shape, W_shape, n_rotations, stride, padding='VALID'):
    '''Conic convolution: each angular sector of the square input is
    convolved with a correspondingly rotated copy of the filter ``W``,
    and the sector results are stitched back together.

    x : NHWC input tensor with square spatial size ``x_shape[1]``
    W : filter with odd spatial size ``W_shape[0]``
    n_rotations : number of sectors, must be 4 or 8
    NOTE(review): ``stride`` and ``padding`` are currently unused — the
    sector convolutions are always stride-1 / VALID; confirm with callers.
    '''
    interpolation = 'NEAREST'
    assert(W_shape[0] % 2 == 1)  # check that filter size is odd
    # Bug fix: use floor division. Under Python 3 the original "/" produced
    # floats, which are invalid slice indices below (TypeError) and an
    # invalid shape for np.zeros in the 8-rotation branch. "//" is identical
    # for the Python 2 ints this code was written for.
    W_radius = W_shape[0] // 2  # radius excludes center pixel
    x_s = x_shape[1]  # could be odd or even
    mid = x_s // 2
    mid2 = (x_s - 2*W_radius) // 2
    assert(n_rotations in [4, 8])
    if n_rotations == 4:
        # Rotate W by 0/90/180/270 degrees (rot90 needs a 3-D tensor)
        W_rot = []
        W_3D = tf.reshape(W, W_shape[:2] + [-1])  # Required by tf.image.rot90
        W_rot.append(W)
        W_rot.append(tf.reshape(tf.image.rot90(W_3D, -1), W_shape))
        W_rot.append(tf.reshape(tf.image.rot90(W_3D, -2), W_shape))
        W_rot.append(tf.reshape(tf.image.rot90(W_3D, -3), W_shape))
        # Calculate convolution on the `quarter' only
        tmp = [
            tf.nn.conv2d(x[:, :-mid+W_radius , :-mid+W_radius , :], W_rot[0], strides=[1,]*4, padding='VALID'),
            tf.nn.conv2d(x[:, :-mid+W_radius , mid-W_radius:, :], W_rot[1], strides=[1,]*4, padding='VALID'),
            tf.nn.conv2d(x[:, mid-W_radius:, mid-W_radius:, :], W_rot[2], strides=[1,]*4, padding='VALID'),
            tf.nn.conv2d(x[:, mid-W_radius:, :-mid+W_radius , :], W_rot[3], strides=[1,]*4, padding='VALID'),
        ]
        # Merge the quarter convolutions into the desired result
        if x_s % 2 == 0:
            x_out = [[
                tmp[0],
                tmp[1],
            ], [
                tmp[3],
                tmp[2],
            ]]
        else:
            # Odd input: the central row/column is shared between sectors,
            # so overlapping 1-pixel strips are taken from the neighbours
            # and the exact center pixel is averaged.
            # NOTE(review): the fourth term of the center average reuses
            # tmp[0]; by symmetry tmp[3] would be expected — confirm.
            x_out = [[
                tmp[0][:, :-1, :-1, :],
                tmp[1][:, :-1, : 1, :],
                tmp[1][:, :-1, 1: , :],
            ], [
                tmp[0][:, -1: , :-1, :],
                (tmp[0][:, -1:, -1:, :] + tmp[1][:, -1:, :1, :] + tmp[2][:, :1, :1, :] + tmp[0][:, :1, -1:, :])/4,
                tmp[2][:, :1 , 1: , :],
            ], [
                tmp[3][:, 1: , :-1, :],
                tmp[3][:, 1: , -1: , :],
                tmp[2][:, 1: , 1: , :],
            ]]
        x_out = [tf.concat(_, axis=2) for _ in x_out]
        x_out = tf.concat(x_out, axis=1)
        x_out = tf.squeeze(x_out)
    elif n_rotations == 8:
        # rotating X once and W 3+1 times is more expensive than rotating W 6+1 times
        W_rot = []
        W_3D = tf.reshape(W, W_shape[:2] + [-1])
        W_45 = tf.contrib.image.rotate(W_3D, np.pi/4, interpolation=interpolation)
        W_rot.append(W)
        W_rot.append(tf.reshape(tf.image.rot90(W_45, -1), W_shape))
        W_rot.append(tf.reshape(tf.image.rot90(W_3D, -1), W_shape))
        W_rot.append(tf.reshape(tf.image.rot90(W_45, -2), W_shape))
        W_rot.append(tf.reshape(tf.image.rot90(W_3D, -2), W_shape))
        W_rot.append(tf.reshape(tf.image.rot90(W_45, -3), W_shape))
        W_rot.append(tf.reshape(tf.image.rot90(W_3D, -3), W_shape))
        W_rot.append(tf.reshape(W_45, W_shape))
        # Sector layout (index of W_rot used per octant):
        #  \ 0 | 1 /
        # 7 \ | / 2
        #    \ | /
        #  -----------
        #    / | \
        # 6 / | \ 3
        #  / 5 | 4 \
        del W_3D, W_45
        # Calculate convolution on the `quarter' only
        tmp = [
            tf.nn.conv2d(x[:, :-mid+W_radius , :-mid+W_radius , :], W_rot[0], strides=[1,]*4, padding='VALID'),
            tf.nn.conv2d(x[:, :-mid+W_radius , mid-W_radius:, :], W_rot[1], strides=[1,]*4, padding='VALID'),
            tf.nn.conv2d(x[:, :-mid+W_radius , mid-W_radius:, :], W_rot[2], strides=[1,]*4, padding='VALID'),
            tf.nn.conv2d(x[:, mid-W_radius:, mid-W_radius:, :], W_rot[3], strides=[1,]*4, padding='VALID'),
            tf.nn.conv2d(x[:, mid-W_radius:, mid-W_radius:, :], W_rot[4], strides=[1,]*4, padding='VALID'),
            tf.nn.conv2d(x[:, mid-W_radius:, :-mid+W_radius , :], W_rot[5], strides=[1,]*4, padding='VALID'),
            tf.nn.conv2d(x[:, mid-W_radius:, :-mid+W_radius , :], W_rot[6], strides=[1,]*4, padding='VALID'),
            tf.nn.conv2d(x[:, :-mid+W_radius , :-mid+W_radius , :], W_rot[7], strides=[1,]*4, padding='VALID'),
        ]
        # Triangular masks merge the two octants sharing each quarter;
        # the diagonal gets weight 0.5 (shared by both octants).
        mask_np = np.zeros([mid2, ]*2, dtype=np.float32)
        mask_np[np.tril_indices(mid2, -1)] = 1
        mask_np[np.diag_indices(mid2 )] = .5
        maskll = tf.constant(mask_np[:: , :: ][None, :, :, None])  # lower-left
        maskul = tf.constant(mask_np[::-1, :: ][None, :, :, None])  # upper-left
        maskur = tf.constant(mask_np[::-1, ::-1][None, :, :, None])  # upper-right
        masklr = tf.constant(mask_np[:: , ::-1][None, :, :, None])  # lower-right
        # Merge the octant convolutions into the desired result
        if x_s % 2 == 0:
            x_out = [[
                tmp[7]*maskll + tmp[0]*maskur,
                tmp[1]*maskul + tmp[2]*masklr,
            ], [
                tmp[5]*masklr + tmp[6]*maskul,
                tmp[3]*maskur + tmp[4]*maskll,
            ]]
        else:
            # Odd input: central row/column averaged over the two adjacent
            # octants; the exact center pixel averaged over all eight.
            x_out = [[
                tmp[7][:, :-1, :-1, :]*maskll + tmp[0][:, :-1, :-1, :]*maskur,
                (tmp[0][:, :-1, -1:, :] + tmp[1][:, :-1, :1, :]) / 2,
                tmp[1][:, :-1, 1:, :]*maskul + tmp[2][:, :-1, 1:, :]*masklr,
            ], [
                (tmp[6][:, :1, :-1, :] + tmp[7][:, -1:, :-1, :]) / 2,
                sum([
                    tmp[0][:, -1:, -1:, :],
                    tmp[1][:, -1:, :1 , :],
                    tmp[2][:, -1:, :1 , :],
                    tmp[3][:, :1, :1 , :],
                    tmp[4][:, :1, :1 , :],
                    tmp[5][:, :1, -1:, :],
                    tmp[6][:, :1, -1:, :],
                    tmp[7][:, -1:, -1:, :],
                ], 0) / 8,
                (tmp[2][:, -1:, 1:, :] + tmp[3][:, :1, 1:, :]) / 2,
            ], [
                tmp[5][:, 1:, :-1, :]*masklr + tmp[6][:, 1:, :-1, :]*maskul,
                (tmp[4][:, 1:, :1, :] + tmp[5][:, 1:, -1:, :]) / 2,
                tmp[3][:, 1:, 1:, :]*maskur + tmp[4][:, 1:, 1:, :]*maskll,
            ]]
        x_out = [tf.concat(_, axis=2) for _ in x_out]
        x_out = tf.concat(x_out, axis=1)
        # NOTE(review): tf.squeeze with no axis also drops a batch dim of 1.
        x_out = tf.squeeze(x_out)
    else:
        assert False
    return x_out
class GBatchNorm_TF():
    """Group-equivariant batch normalization implemented directly in TF.

    For group inputs ('C4'/'D4') the per-channel moments are additionally
    averaged over the group's orientation copies so that all rotated copies
    of a feature map share the same normalization statistics.
    """
    def __init__(self):
        pass
    def make_var(self, name, shape, trainable_param=True, initializer_param=tf.keras.initializers.he_normal()):
        '''Creates a new TensorFlow variable.

        NOTE(review): the he_normal() default is evaluated once at class
        definition time (mutable-default pattern); harmless here since the
        initializer object is stateless, but worth confirming.
        '''
        return tf.get_variable(name, shape, trainable=trainable_param, initializer=initializer_param)
    def run(self, x, x_size, x_depth, group_input, is_training):
        """Apply batch normalization to ``x``.

        group_input : 'Z2' for plain feature maps, 'C4'/'D4' for group
        feature maps whose channels hold 4 (resp. 8) orientation copies.
        is_training : Python bool — batch statistics (with moving-average
        updates) when True, stored moving statistics when False.
        """
        x_shape = [1, x_size, x_size, x_depth]
        params_shape = x_shape[-1:]
        # Reduce over batch and both spatial axes, keeping channels.
        axis = list(range(len(x_shape) - 1))
        one_init = tf.constant_initializer(value=1.0)
        zero_init = tf.constant_initializer(value=0.0)
        # Learnable shift/scale plus non-trainable running statistics.
        beta = self.make_var('beta', params_shape, initializer_param=zero_init)
        gamma = self.make_var('gamma', params_shape, initializer_param=one_init)
        moving_mean = self.make_var('moving_mean', params_shape,
                                    trainable_param=False, initializer_param=zero_init)
        moving_variance = self.make_var('moving_variance', params_shape,
                                        trainable_param=False, initializer_param=one_init)
        if is_training:
            mean, variance = tf.nn.moments(x, axis)
            if group_input != 'Z2':
                if group_input == 'C4':
                    num_repeat = 4
                else:
                    num_repeat = 8
                # Average the moments across the num_repeat orientation
                # copies of each channel, then tile the result back so the
                # stats vector keeps its per-channel shape.
                mean = tf.reshape(mean, [-1, num_repeat])
                mean = tf.reduce_mean(mean, 1, keep_dims=False)
                mean = tf.reshape(tf.tile(tf.expand_dims(mean, -1), [1, num_repeat]), [-1])
                variance = tf.reshape(variance, [-1, num_repeat])
                variance = tf.reduce_mean(variance, 1, keep_dims=False)
                variance = tf.reshape(tf.tile(tf.expand_dims(variance, -1), [1, num_repeat]), [-1])
            # update_moving_mean = moving_averages.assign_moving_average(moving_mean,
            #                                                            mean, BN_DECAY)
            # update_moving_variance = moving_averages.assign_moving_average(
            #     moving_variance, variance, BN_DECAY)
            # tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
            # tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
            # Exponential moving average updates, forced to run before the
            # normalization op via the control dependency below.
            train_mean = tf.assign(moving_mean,
                                   moving_mean * BN_DECAY + mean * (1 - BN_DECAY))
            train_var = tf.assign(moving_variance,
                                  moving_variance * BN_DECAY + variance * (1 - BN_DECAY))
            with tf.control_dependencies([train_mean, train_var]):
                return tf.nn.batch_normalization(x,
                                                 mean, variance, beta, gamma, BN_EPSILON)
        else:
            # Inference: normalize with the stored running statistics.
            return tf.nn.batch_normalization(x, moving_mean, moving_variance, beta, gamma, BN_EPSILON)
def conic_conv_layer(x, x_size, x_depth, layer_params, is_training,
                     activation=True, layer_name='conic_conv'):
    """Build one conic-convolution layer.

    Pipeline: conic conv + bias, then (per layer_params) batch norm,
    max/avg pooling, and ReLU. Returns the output tensor and its spatial
    size as computed by ``layer_params.calculate_output_size``.
    """
    w_shape = [layer_params.filter_size,
               layer_params.filter_size,
               x_depth,
               layer_params.n_filters]
    x_shape = [1, x_size, x_size, x_depth]
    # Create and log the layer parameters.
    biases = bias_variable([layer_params.n_filters], layer_name + '/biases')
    variable_summaries(biases, layer_name + '/biases')
    weights = weight_variable(w_shape, layer_name + '/weights')
    variable_summaries(weights, layer_name + '/weights')
    out = conic_conv2d(x, weights, x_shape, w_shape,
                       layer_params.n_rotations,
                       layer_params.convolution_stride) + biases
    # Batch normalization
    if layer_params.batch_norm:
        with tf.variable_scope(layer_name + '_bn'):
            out = tf.contrib.layers.batch_norm(
                out, decay=0.95, center=True, scale=True,
                is_training=is_training)
    # Pooling. Preserving rotation equivariance requires handling odd and
    # even sized inputs differently (2x2 vs 3x3 window).
    if layer_params.pooling:
        pool_sup = 2 if x_size % 2 == 0 else 3
        window = [1, pool_sup, pool_sup, 1]
        strides = [1, layer_params.pooling_stride,
                   layer_params.pooling_stride, 1]
        if layer_params.pooling == 'max':
            out = tf.nn.max_pool(out, window, strides, padding='VALID')
        elif layer_params.pooling == 'avg':
            out = tf.nn.avg_pool(out, window, strides, padding='VALID')
    # Activation
    if activation:
        out = tf.nn.relu(out)
    out_size = layer_params.calculate_output_size(x_size)
    return out, out_size
def g_conv_layer(x, x_size, x_depth, group_input, layer_params, is_training_bool,
activation=True, layer_name='g-conv'):
assert(layer_params.ge_type in ['C4', 'D4'])
group_output = layer_params.ge_type
gconv_indices, gconv_shape_info, w_shape =\
gconv2d_util(h_input=group_input, h_output=group_output,
in_channels=x_depth,
out_channels=layer_params.n_filters,
ksize=layer_params.filter_size)
weights = weight_variable(w_shape, layer_name + '/weights')
variable_summaries(weights, layer_name + '/weights')
h = gconv2d(input=x, filter=weights,
strides=[1, layer_params.convolution_stride,
layer_params.convolution_stride, 1],
padding=layer_params.padding,
gconv_indices=gconv_indices,
gconv_shape_info=gconv_shape_info)
# Batch normalization
if layer_params.batch_norm:
with tf.variable_scope(layer_name + '_bn'):
#h = GBatchNorm(layer_params.ge_type)(h)
| |
<filename>test/python/test_identifiers.py
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name,missing-docstring,broad-except
# Copyright 2018 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Non-string identifiers for circuit and record identifiers test"""
import unittest
from qiskit import (ClassicalRegister, QISKitError, QuantumCircuit,
QuantumRegister, QuantumProgram)
from qiskit.backends.local.qasm_simulator_cpp import QasmSimulatorCpp
from qiskit import wrapper
from .common import QiskitTestCase
# Cpp backend required
# Probe for the compiled C++ simulator once at import time; the _skip_cpp
# flag lets tests that need the executable be skipped when it is absent.
try:
    cpp_backend = QasmSimulatorCpp()
except FileNotFoundError:
    _skip_cpp = True
else:
    _skip_cpp = False
class TestAnonymousIdsInQuantumProgram(QiskitTestCase):
"""Circuits and records can have no name"""
    def setUp(self):
        # Program specification with no names anywhere: a single circuit
        # with one 3-qubit quantum register and one 3-bit classical register.
        self.QPS_SPECS_NONAMES = {
            "circuits": [{
                "quantum_registers": [{
                    "size": 3}],
                "classical_registers": [{
                    "size": 3}]
            }]
        }
###############################################################
# Tests to initiate an build a quantum program with anonymous ids
###############################################################
def test_create_program_with_specsnonames(self):
"""Test Quantum Object Factory creation using Specs definition
object with no names for circuit nor records.
"""
result = QuantumProgram(specs=self.QPS_SPECS_NONAMES)
self.assertIsInstance(result, QuantumProgram)
def test_create_anonymous_classical_register(self):
"""Test create_classical_register with no name.
"""
q_program = QuantumProgram()
cr = q_program.create_classical_register(size=3)
self.assertIsInstance(cr, ClassicalRegister)
def test_create_anonymous_quantum_register(self):
"""Test create_quantum_register with no name.
"""
q_program = QuantumProgram()
qr = q_program.create_quantum_register(size=3)
self.assertIsInstance(qr, QuantumRegister)
def test_create_classical_registers_noname(self):
"""Test create_classical_registers with no name
"""
q_program = QuantumProgram()
classical_registers = [{"size": 4},
{"size": 2}]
crs = q_program.create_classical_registers(classical_registers)
for i in crs:
self.assertIsInstance(i, ClassicalRegister)
def test_create_quantum_registers_noname(self):
"""Test create_quantum_registers with no name.
"""
q_program = QuantumProgram()
quantum_registers = [{"size": 4},
{"size": 2}]
qrs = q_program.create_quantum_registers(quantum_registers)
for i in qrs:
self.assertIsInstance(i, QuantumRegister)
def test_create_circuit_noname(self):
"""Test create_circuit with no name
"""
q_program = QuantumProgram()
qr = q_program.create_quantum_register(size=3)
cr = q_program.create_classical_register(size=3)
qc = q_program.create_circuit(qregisters=[qr], cregisters=[cr])
self.assertIsInstance(qc, QuantumCircuit)
def test_create_several_circuits_noname(self):
"""Test create_circuit with several inputs and without names.
"""
q_program = QuantumProgram()
qr1 = q_program.create_quantum_register(size=3)
cr1 = q_program.create_classical_register(size=3)
qr2 = q_program.create_quantum_register(size=3)
cr2 = q_program.create_classical_register(size=3)
qc1 = q_program.create_circuit(qregisters=[qr1], cregisters=[cr1])
qc2 = q_program.create_circuit(qregisters=[qr2], cregisters=[cr2])
qc3 = q_program.create_circuit(qregisters=[qr1, qr2], cregisters=[cr1, cr2])
self.assertIsInstance(qc1, QuantumCircuit)
self.assertIsInstance(qc2, QuantumCircuit)
self.assertIsInstance(qc3, QuantumCircuit)
def test_get_register_and_circuit_names_nonames(self):
"""Get the names of the circuits and registers after create them without a name
"""
q_program = QuantumProgram()
qr1 = q_program.create_quantum_register(size=3)
cr1 = q_program.create_classical_register(size=3)
qr2 = q_program.create_quantum_register(size=3)
cr2 = q_program.create_classical_register(size=3)
q_program.create_circuit(qregisters=[qr1], cregisters=[cr1])
q_program.create_circuit(qregisters=[qr2], cregisters=[cr2])
q_program.create_circuit(qregisters=[qr1, qr2], cregisters=[cr1, cr2])
qrn = q_program.get_quantum_register_names()
crn = q_program.get_classical_register_names()
qcn = q_program.get_circuit_names()
self.assertEqual(len(qrn), 2)
self.assertEqual(len(crn), 2)
self.assertEqual(len(qcn), 3)
def test_get_circuit_noname(self):
q_program = QuantumProgram(specs=self.QPS_SPECS_NONAMES)
qc = q_program.get_circuit()
self.assertIsInstance(qc, QuantumCircuit)
def test_get_quantum_register_noname(self):
q_program = QuantumProgram(specs=self.QPS_SPECS_NONAMES)
qr = q_program.get_quantum_register()
self.assertIsInstance(qr, QuantumRegister)
def test_get_classical_register_noname(self):
q_program = QuantumProgram(specs=self.QPS_SPECS_NONAMES)
cr = q_program.get_classical_register()
self.assertIsInstance(cr, ClassicalRegister)
def test_get_qasm_noname(self):
    """Test the get_qasm using a specification without names.

    Builds a 3-qubit GHZ-style circuit on auto-named registers and checks
    the length of the generated QASM text.
    """
    q_program = QuantumProgram(specs=self.QPS_SPECS_NONAMES)
    qc = q_program.get_circuit()
    # The spec declares exactly one quantum and one classical register.
    qrn = list(q_program.get_quantum_register_names())
    self.assertEqual(len(qrn), 1)
    qr = q_program.get_quantum_register(qrn[0])
    crn = list(q_program.get_classical_register_names())
    self.assertEqual(len(crn), 1)
    cr = q_program.get_classical_register(crn[0])
    qc.h(qr[0])
    qc.cx(qr[0], qr[1])
    qc.cx(qr[1], qr[2])
    qc.measure(qr[0], cr[0])
    qc.measure(qr[1], cr[1])
    qc.measure(qr[2], cr[2])
    result = q_program.get_qasm()
    # NOTE(review): magic formula — QASM length is linear in the register
    # name lengths (the qr name appears 9 times, the cr name 4 times) plus
    # 147 fixed characters; this breaks if the QASM serializer changes.
    self.assertEqual(len(result), len(qrn[0]) * 9 + len(crn[0]) * 4 + 147)
def test_get_qasms_noname(self):
    """QASM is generated for every anonymous circuit in the program; the
    text length scales with the auto-generated register name lengths."""
    program = QuantumProgram()
    qreg = program.create_quantum_register(size=3)
    creg = program.create_classical_register(size=3)
    ghz = program.create_circuit(qregisters=[qreg], cregisters=[creg])
    superpos = program.create_circuit(qregisters=[qreg], cregisters=[creg])
    # First circuit: GHZ-style entangler; second: Hadamard on all qubits.
    ghz.h(qreg[0])
    ghz.cx(qreg[0], qreg[1])
    ghz.cx(qreg[1], qreg[2])
    superpos.h(qreg)
    for idx in range(3):
        ghz.measure(qreg[idx], creg[idx])
    for idx in range(3):
        superpos.measure(qreg[idx], creg[idx])
    qasms = dict(zip(program.get_circuit_names(), program.get_qasms()))
    len_q = len(qreg.name)
    len_c = len(creg.name)
    self.assertEqual(len(qasms[ghz.name]), len_q * 9 + len_c * 4 + 147)
    self.assertEqual(len(qasms[superpos.name]), len_q * 7 + len_c * 4 + 137)
def test_get_qasm_all_gates(self):
    """Test the get_qasm for more gates, using a specification without names.

    Exercises parameterized single-qubit gates, inverse, barriers and
    classically-conditioned gates, then checks the QASM text length.
    """
    q_program = QuantumProgram(specs=self.QPS_SPECS_NONAMES)
    qc = q_program.get_circuit()
    qr = q_program.get_quantum_register()
    cr = q_program.get_classical_register()
    qc.u1(0.3, qr[0])
    qc.u2(0.2, 0.1, qr[1])
    qc.u3(0.3, 0.2, 0.1, qr[2])
    qc.s(qr[1])
    # NOTE(review): .inverse() is called on the object returned by s();
    # presumably it inverts the just-appended gate in place — confirm.
    qc.s(qr[2]).inverse()
    qc.cx(qr[1], qr[2])
    qc.barrier()
    qc.cx(qr[0], qr[1])
    qc.h(qr[0])
    # Gates conditioned on the value of the classical register.
    qc.x(qr[2]).c_if(cr, 0)
    qc.y(qr[2]).c_if(cr, 1)
    qc.z(qr[2]).c_if(cr, 2)
    qc.barrier(qr)
    qc.measure(qr[0], cr[0])
    qc.measure(qr[1], cr[1])
    qc.measure(qr[2], cr[2])
    result = q_program.get_qasm()
    # NOTE(review): magic formula — the qr name appears 23 times and the
    # cr name 7 times in the serialized QASM, plus 385 fixed characters.
    self.assertEqual(len(result), (len(qr.name) * 23 +
                                   len(cr.name) * 7 +
                                   385))
###############################################################
# Test for compile
###############################################################
def test_compile_program_noname(self):
    """Compiling a program built from an anonymous spec yields a qobj
    with the expected number of top-level entries."""
    program = QuantumProgram(specs=self.QPS_SPECS_NONAMES)
    circuit = program.get_circuit()
    qreg = program.get_quantum_register()
    creg = program.get_classical_register()
    # Simple Bell-pair circuit with measurement.
    circuit.h(qreg[0])
    circuit.cx(qreg[0], qreg[1])
    for idx in (0, 1):
        circuit.measure(qreg[idx], creg[idx])
    compiled = program.compile()
    self.log.info(compiled)
    self.assertEqual(len(compiled), 3)
def test_get_execution_list_noname(self):
    """get_execution_list reports exactly one entry for a single
    anonymous circuit."""
    program = QuantumProgram(specs=self.QPS_SPECS_NONAMES)
    circuit = program.get_circuit()
    qreg = program.get_quantum_register()
    creg = program.get_classical_register()
    circuit.h(qreg[0])
    circuit.cx(qreg[0], qreg[1])
    for idx in (0, 1):
        circuit.measure(qreg[idx], creg[idx])
    compiled = program.compile()
    execution_list = program.get_execution_list(compiled,
                                                print_func=self.log.info)
    self.assertEqual(len(execution_list), 1)
def test_change_circuit_qobj_after_compile_noname(self):
    """Compiled qobjs must be decoupled from the config dict and from
    each other: mutating qobj1 or the shared ``config`` after the first
    compile must not leak into the second compile's output.
    """
    q_program = QuantumProgram(specs=self.QPS_SPECS_NONAMES)
    qr = q_program.get_quantum_register()
    cr = q_program.get_classical_register()
    qc2 = q_program.create_circuit(qregisters=[qr], cregisters=[cr])
    qc3 = q_program.create_circuit(qregisters=[qr], cregisters=[cr])
    qc2.h(qr[0])
    qc2.cx(qr[0], qr[1])
    qc2.cx(qr[0], qr[2])
    qc3.h(qr)
    qc2.measure(qr, cr)
    qc3.measure(qr, cr)
    circuits = [qc2.name, qc3.name]
    shots = 1024
    backend = 'local_qasm_simulator'
    config = {'seed': 10, 'shots': 1, 'xvals': [1, 2, 3, 4]}
    qobj1 = q_program.compile(circuits, backend=backend, shots=shots, seed=88, config=config)
    # Mutate the first compiled qobj directly...
    qobj1['circuits'][0]['config']['shots'] = 50
    qobj1['circuits'][0]['config']['xvals'] = [1, 1, 1]
    # ...and mutate the shared config dict before the second compile.
    config['shots'] = 1000
    config['xvals'][0] = 'only for qobj2'
    qobj2 = q_program.compile(circuits, backend=backend, shots=shots, seed=88, config=config)
    # qobj1 keeps the direct edits; its untouched circuit keeps the
    # values from the original config (compile presumably deep-copied it).
    self.assertTrue(qobj1['circuits'][0]['config']['shots'] == 50)
    self.assertTrue(qobj1['circuits'][1]['config']['shots'] == 1)
    self.assertTrue(qobj1['circuits'][0]['config']['xvals'] == [1, 1, 1])
    self.assertTrue(qobj1['circuits'][1]['config']['xvals'] == [1, 2, 3, 4])
    self.assertTrue(qobj1['config']['shots'] == 1024)
    # qobj2 reflects the mutated config; qobj1 is unaffected by it.
    self.assertTrue(qobj2['circuits'][0]['config']['shots'] == 1000)
    self.assertTrue(qobj2['circuits'][1]['config']['shots'] == 1000)
    self.assertTrue(qobj2['circuits'][0]['config']['xvals'] == [
        'only for qobj2', 2, 3, 4])
    self.assertTrue(qobj2['circuits'][1]['config']['xvals'] == [
        'only for qobj2', 2, 3, 4])
def test_add_circuit_noname(self):
    """Test add two circuits without names. Also tests get_counts without circuit name."""
    program = QuantumProgram()
    qreg = program.create_quantum_register(size=2)
    creg = program.create_classical_register(size=2)
    first = program.create_circuit(qregisters=[qreg], cregisters=[creg])
    second = program.create_circuit(qregisters=[qreg], cregisters=[creg])
    first.h(qreg[0])
    first.measure(qreg[0], creg[0])
    second.measure(qreg[1], creg[1])
    # Concatenate the two anonymous circuits and register the result.
    combined = first + second
    program.add_circuit(quantum_circuit=combined)
    backend = 'local_qasm_simulator_py'  # cpp simulator rejects non string IDs (FIXME)
    shots = 1024
    result = program.execute(backend=backend, shots=shots, seed=78)
    counts = result.get_counts(combined.name)
    # H on qubit 0 -> 50/50 split between '00' and '01'.
    expected = {'00': shots / 2, '01': shots / 2}
    self.assertDictAlmostEqual(counts, expected, 0.04 * shots)
    # Without a circuit name, get_counts cannot disambiguate and raises.
    self.assertRaises(QISKitError, result.get_counts)
class TestQobj(QiskitTestCase):
    """Check the objects compiled for different backends create names properly"""

    def setUp(self):
        # One small circuit: explicitly named quantum register and circuit,
        # auto-named classical register.
        qr = QuantumRegister(2, name="qr2")
        cr = ClassicalRegister(2, name=None)
        qc = QuantumCircuit(qr, cr, name="qc10")
        qc.h(qr[0])
        qc.measure(qr[0], cr[0])
        self.qr_name = qr.name
        self.cr_name = cr.name
        self.circuits = [qc]

    def _compile_and_check_names(self, backend_name):
        """Compile ``self.circuits`` for *backend_name* and assert that both
        register names appear in the compiled circuit header labels and in
        the compiled QASM text."""
        backend = wrapper.get_backend(backend_name)
        qobj = wrapper.compile(self.circuits, backend=backend)
        cc = qobj['circuits'][0]['compiled_circuit']
        ccq = qobj['circuits'][0]['compiled_circuit_qasm']
        # header labels are (name, index) pairs; compare against the names.
        self.assertIn(self.qr_name, map(lambda x: x[0], cc['header']['qubit_labels']))
        self.assertIn(self.qr_name, ccq)
        self.assertIn(self.cr_name, map(lambda x: x[0], cc['header']['clbit_labels']))
        self.assertIn(self.cr_name, ccq)

    def test_local_qasm_simulator_py(self):
        self._compile_and_check_names('local_qasm_simulator_py')

    @unittest.skipIf(_skip_cpp, "no c++ simulator found.")
    def test_local_clifford_simulator_cpp(self):
        self._compile_and_check_names('local_clifford_simulator_cpp')

    @unittest.skipIf(_skip_cpp, "no c++ simulator found.")
    def test_local_qasm_simulator_cpp(self):
        self._compile_and_check_names('local_qasm_simulator_cpp')

    def test_local_statevector_simulator_sympy(self):
        self._compile_and_check_names('local_statevector_simulator_sympy')

    def test_local_unitary_simulator_sympy(self):
        self._compile_and_check_names('local_unitary_simulator_sympy')

    def test_local_unitary_simulator(self):
        self._compile_and_check_names('local_unitary_simulator_py')
class TestAnonymousIds(QiskitTestCase):
    """Test the anonymous use of registers.
    """

    def test_create_anonymous_classical_register(self):
        """Test creating a ClassicalRegister with no name.
        """
        cr = ClassicalRegister(size=3)
        self.assertIsInstance(cr, ClassicalRegister)

    def test_create_anonymous_quantum_register(self):
        """Test creating a QuantumRegister with no name.
        """
        qr = QuantumRegister(size=3)
        self.assertIsInstance(qr, QuantumRegister)

    def test_create_anonymous_classical_registers(self):
        """Test creating several ClassicalRegister with no name.

        Auto-generated names must be unique.
        """
        cr1 = ClassicalRegister(size=3)
        cr2 = ClassicalRegister(size=3)
        self.assertNotEqual(cr1.name, cr2.name)

    def test_create_anonymous_quantum_registers(self):
        """Test creating several QuantumRegister with no name.

        Auto-generated names must be unique.
        """
        qr1 = QuantumRegister(size=3)
        qr2 = QuantumRegister(size=3)
        self.assertNotEqual(qr1.name, qr2.name)

    def test_create_anonymous_mixed_registers(self):
        """Test creating several Registers with no name.
        """
        cr0 = ClassicalRegister(size=3)
        qr0 = QuantumRegister(size=3)
        # Get the current index counter of the registers.
        # NOTE(review): assumes auto-generated names are a one-character
        # prefix followed by an integer counter (name[1:]) — confirm.
        cr_index = int(cr0.name[1:])
        qr_index = int(qr0.name[1:])
        cr1 = ClassicalRegister(size=3)
        _ = QuantumRegister(size=3)
        qr2 = QuantumRegister(size=3)
        # Check that the counters for each kind are incremented separately:
        # one more classical register was created, two more quantum ones.
        cr_current = int(cr1.name[1:])
        qr_current = int(qr2.name[1:])
        self.assertEqual(cr_current, cr_index + 1)
        self.assertEqual(qr_current, qr_index + 2)
| |
'usecs': 0},
351: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 18,
'one_min_cpu': 0.0,
'pid': 355,
'process': 'VTEMPLATE IPC fl',
'runtime': 0,
'tty': 0,
'usecs': 0},
352: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 187,
'one_min_cpu': 0.0,
'pid': 356,
'process': 'CEM PROC',
'runtime': 1,
'tty': 0,
'usecs': 5},
353: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 357,
'process': 'CEM HA',
'runtime': 0,
'tty': 0,
'usecs': 0},
354: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 358,
'process': 'CEM HA AC',
'runtime': 0,
'tty': 0,
'usecs': 0},
355: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 359,
'process': 'L2X Switching Ev',
'runtime': 0,
'tty': 0,
'usecs': 0},
356: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 360,
'process': 'Probe Input',
'runtime': 0,
'tty': 0,
'usecs': 0},
357: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 361,
'process': 'IP Inband Sessio',
'runtime': 1,
'tty': 0,
'usecs': 500},
358: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 362,
'process': 'DHCP SIP',
'runtime': 0,
'tty': 0,
'usecs': 0},
359: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 8223,
'one_min_cpu': 0.0,
'pid': 363,
'process': 'FRR Manager',
'runtime': 77,
'tty': 0,
'usecs': 9},
360: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 364,
'process': 'MFI Comm RP Proc',
'runtime': 0,
'tty': 0,
'usecs': 0},
361: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 365,
'process': 'Path set broker',
'runtime': 0,
'tty': 0,
'usecs': 0},
362: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 366,
'process': 'LFD Label Block',
'runtime': 0,
'tty': 0,
'usecs': 0},
363: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 5273,
'one_min_cpu': 0.0,
'pid': 367,
'process': 'LDP HA',
'runtime': 439,
'tty': 0,
'usecs': 83},
364: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 3,
'one_min_cpu': 0.0,
'pid': 368,
'process': 'MPLS VPN HA Clie',
'runtime': 0,
'tty': 0,
'usecs': 0},
365: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 7,
'one_min_cpu': 0.0,
'pid': 369,
'process': 'TSPTUN HA',
'runtime': 0,
'tty': 0,
'usecs': 0},
366: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 370,
'process': 'RSVP HA Services',
'runtime': 0,
'tty': 0,
'usecs': 0},
367: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 371,
'process': 'TE NSR OOS DB Pr',
'runtime': 0,
'tty': 0,
'usecs': 0},
368: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 17,
'one_min_cpu': 0.0,
'pid': 372,
'process': 'MPLS TP HA',
'runtime': 0,
'tty': 0,
'usecs': 0},
369: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 5,
'one_min_cpu': 0.0,
'pid': 373,
'process': 'AToM HA Bulk Syn',
'runtime': 0,
'tty': 0,
'usecs': 0},
370: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 17,
'one_min_cpu': 0.0,
'pid': 374,
'process': 'AToM MGR HA IPC',
'runtime': 0,
'tty': 0,
'usecs': 0},
371: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 375,
'process': 'LFDp Input Proc',
'runtime': 2,
'tty': 0,
'usecs': 1000},
372: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 376,
'process': 'AAA Cached Serve',
'runtime': 0,
'tty': 0,
'usecs': 0},
373: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 6,
'one_min_cpu': 0.0,
'pid': 377,
'process': 'ENABLE AAA',
'runtime': 0,
'tty': 0,
'usecs': 0},
374: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 378,
'process': 'EM Background Pr',
'runtime': 0,
'tty': 0,
'usecs': 0},
375: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 3,
'one_min_cpu': 0.0,
'pid': 379,
'process': 'LDAP process',
'runtime': 0,
'tty': 0,
'usecs': 0},
376: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 380,
'process': 'Opaque Database',
'runtime': 0,
'tty': 0,
'usecs': 0},
377: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 381,
'process': 'Key chain liveke',
'runtime': 0,
'tty': 0,
'usecs': 0},
378: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 382,
'process': 'LINE AAA',
'runtime': 0,
'tty': 0,
'usecs': 0},
379: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 17,
'one_min_cpu': 0.0,
'pid': 383,
'process': 'LOCAL AAA',
'runtime': 0,
'tty': 0,
'usecs': 0},
380: {'five_min_cpu': 0.64,
'five_sec_cpu': 0.0,
'invoked': 6202,
'one_min_cpu': 0.44,
'pid': 384,
'process': 'BGP Scanner',
'runtime': 278040,
'tty': 0,
'usecs': 44830},
381: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 472,
'one_min_cpu': 0.0,
'pid': 385,
'process': 'TPLUS',
'runtime': 20,
'tty': 0,
'usecs': 42},
382: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 319,
'one_min_cpu': 0.0,
'pid': 386,
'process': 'DynCmd Package P',
'runtime': 6,
'tty': 0,
'usecs': 18},
383: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 510125,
'one_min_cpu': 0.01,
'pid': 387,
'process': 'MMA DB TIMER',
'runtime': 4924,
'tty': 0,
'usecs': 9},
384: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 388,
'process': 'FLEX DSPRM MAIN',
'runtime': 0,
'tty': 0,
'usecs': 0},
385: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 389,
'process': 'VSP_MGR',
'runtime': 0,
'tty': 0,
'usecs': 0},
386: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 390,
'process': 'STUN_APP',
'runtime': 1,
'tty': 0,
'usecs': 500},
387: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 391,
'process': 'STUN_TEST',
'runtime': 0,
'tty': 0,
'usecs': 0},
388: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 392,
'process': 'Manet Infra Back',
'runtime': 0,
'tty': 0,
'usecs': 0},
389: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 393,
'process': 'IDMGR CORE',
'runtime': 0,
'tty': 0,
'usecs': 0},
390: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 18101,
'one_min_cpu': 0.0,
'pid': 394,
'process': 'MPLS Auto Mesh P',
'runtime': 188,
'tty': 0,
'usecs': 10},
391: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 32875,
'one_min_cpu': 0.0,
'pid': 395,
'process': 'RSCMSM VOLUME MO',
'runtime': 678,
'tty': 0,
'usecs': 20},
392: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 396,
'process': 'CCSIP_EVENT_TRAC',
'runtime': 0,
'tty': 0,
'usecs': 0},
393: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 397,
'process': 'Sip MPA',
'runtime': 0,
'tty': 0,
'usecs': 0},
394: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 398,
'process': 'QOS_MODULE_MAIN',
'runtime': 1,
'tty': 0,
'usecs': 1000},
395: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 399,
'process': 'IP TRUST Registe',
'runtime': 0,
'tty': 0,
'usecs': 0},
396: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 400,
'process': 'VoIP AAA',
'runtime': 0,
'tty': 0,
'usecs': 0},
397: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 18,
'one_min_cpu': 0.0,
'pid': 401,
'process': 'COND_DEBUG HA IP',
'runtime': 0,
'tty': 0,
'usecs': 0},
398: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 23,
'one_min_cpu': 0.0,
'pid': 402,
'process': 'PIM HA',
'runtime': 2,
'tty': 0,
'usecs': 86},
399: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 403,
'process': 'MMON PROCESS',
'runtime': 0,
'tty': 0,
'usecs': 0},
400: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 404,
'process': 'QOS PERUSER',
'runtime': 0,
'tty': 0,
'usecs': 0},
401: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 405,
'process': 'RPMS_PROC_MAIN',
'runtime': 0,
'tty': 0,
'usecs': 0},
402: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 406,
'process': 'http client proc',
'runtime': 0,
'tty': 0,
'usecs': 0},
403: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 65763,
'one_min_cpu': 0.0,
'pid': 407,
'process': 'OSPF-65109 Router',
'runtime': 914,
'tty': 0,
'usecs': 13},
404: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 408,
'process': 'SEGMENT ROUTING',
'runtime': 0,
'tty': 0,
'usecs': 0},
405: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 44,
'one_min_cpu': 0.0,
'pid': 409,
'process': 'AAA SEND STOP EV',
'runtime': 1,
'tty': 0,
'usecs': 22},
406: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 410,
'process': 'Test AAA Client',
'runtime': 0,
'tty': 0,
'usecs': 0},
407: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 411,
'process': 'dcm_cli_engine',
'runtime': 0,
'tty': 0,
'usecs': 0},
408: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 3,
'one_min_cpu': 0.0,
'pid': 412,
'process': 'dcm_cli_provider',
'runtime': 1,
'tty': 0,
'usecs': 333},
409: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 5,
'one_min_cpu': 0.0,
'pid': 413,
'process': 'DCM Core Thread',
'runtime': 0,
'tty': 0,
'usecs': 0},
410: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 580,
'one_min_cpu': 0.0,
'pid': 414,
'process': 'EEM ED Syslog',
'runtime': 14,
'tty': 0,
'usecs': 24},
411: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 4,
'one_min_cpu': 0.0,
'pid': 415,
'process': 'EEM ED Generic',
'runtime': 1,
'tty': 0,
'usecs': 250},
412: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 4,
'one_min_cpu': 0.0,
'pid': 416,
'process': 'EEM ED Track',
'runtime': 1,
'tty': 0,
'usecs': 250},
413: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 4,
| |
2.371743E+01, 5.973090E+01, 1.070476E+02, 1.631783E+02, 2.267778E+02,
2.970405E+02, 3.734061E+02, 4.554693E+02, 5.429478E+02, 6.356653E+02, 7.335395E+02,
8.365694E+02, 9.448250E+02, 1.058437E+03, 1.177588E+03, 1.302505E+03, 1.433451E+03,
1.570721E+03, 1.714634E+03, 1.865531E+03, 2.023770E+03, 2.189723E+03, 2.363775E+03,
2.546320E+03, 2.737759E+03, 2.938503E+03, 3.148970E+03, 3.369585E+03, 3.600778E+03,
3.842989E+03, 4.096664E+03, 4.362256E+03, 4.640230E+03, 4.931055E+03, 5.235214E+03,
5.553197E+03, 5.885507E+03, 6.232655E+03, 6.595167E+03, 6.973580E+03, 7.368444E+03,
7.780322E+03, 8.209792E+03, 8.657444E+03, 9.123885E+03, 9.609736E+03, 1.011563E+04,
1.064223E+04, 1.119020E+04, 1.176022E+04, 1.235300E+04, 1.296927E+04, 1.360974E+04,
1.427520E+04, 1.496640E+04, 1.568415E+04, 1.642926E+04, 1.720255E+04, 1.800489E+04,
1.883715E+04, 1.970021E+04, 2.059500E+04, 2.152244E+04, 2.248350E+04, 2.347915E+04,
2.451040E+04, 2.557826E+04, 2.668378E+04, 2.782802E+04, 2.901208E+04, 3.023707E+04,
3.150413E+04, 3.281442E+04, 3.416912E+04, 3.556945E+04, 3.701663E+04, 3.851194E+04,
4.005664E+04, 4.165205E+04, 4.329951E+04, 4.500037E+04, 4.675603E+04, 4.856789E+04,
5.043739E+04, 5.236600E+04, 5.435520E+04, 5.640652E+04, 5.852151E+04, 6.070172E+04,
6.294876E+04, 6.526426E+04, 6.764987E+04, 7.010727E+04, 7.263815E+04, 7.524427E+04,
7.792737E+04, 8.068925E+04, 8.353171E+04, 8.645661E+04, 8.946581E+04, 9.256120E+04,
9.574471E+04, 9.901829E+04, 1.023839E+05, 1.058436E+05, 1.093993E+05, 1.130531E+05,
1.168072E+05, 1.206635E+05, 1.246243E+05, 1.286917E+05, 1.328678E+05, 1.371549E+05,
1.415551E+05, 1.460708E+05, 1.507042E+05, 1.554576E+05, 1.603333E+05, 1.653337E+05,
1.704610E+05, 1.757178E+05, 1.811063E+05, 1.866290E+05, 1.922883E+05, 1.980868E+05,
2.040268E+05, 2.101108E+05, 2.163414E+05, 2.227210E+05, 2.292523E+05, 2.359378E+05,
2.427799E+05, 2.497814E+05, 2.569448E+05, 2.642727E+05, 2.717677E+05, 2.794324E+05,
2.872695E+05, 2.952817E+05, 3.034715E+05, 3.118417E+05, 3.203948E+05, 3.291337E+05,
3.380608E+05, 3.471791E+05, 3.564910E+05, 3.659994E+05, 3.757069E+05, 3.856162E+05,
3.957300E+05, 4.060510E+05, 4.165819E+05, 4.273253E+05, 4.382841E+05, 4.494608E+05,
4.608582E+05, 4.724789E+05, 4.843256E+05, 4.964010E+05, 5.087078E+05, 5.212486E+05,
5.340260E+05, 5.470427E+05, 5.603014E+05, 5.738046E+05, 5.875549E+05, 6.015551E+05,
6.158075E+05, 6.303150E+05, 6.450799E+05, 6.601048E+05, 6.753923E+05, 6.909449E+05,
7.067651E+05, 7.228554E+05, 7.392182E+05, 7.558561E+05, 7.727714E+05, 7.899665E+05,
8.074439E+05, 8.252060E+05, 8.432551E+05, 8.615934E+05, 8.802235E+05, 8.991475E+05,
9.183677E+05, 9.378864E+05, 9.577058E+05, 9.778282E+05, 9.982556E+05, 1.018990E+06,
1.040034E+06, 1.061390E+06, 1.083059E+06, 1.105044E+06, 1.127346E+06, 1.149969E+06,
1.172913E+06, 1.196180E+06, 1.219773E+06, 1.243694E+06, 1.267944E+06, 1.292525E+06,
1.317439E+06, 1.342688E+06, 1.368273E+06, 1.394197E+06, 1.420461E+06, 1.447066E+06,
1.474015E+06, 1.501309E+06, 1.528949E+06, 1.556938E+06, 1.585276E+06, 1.613965E+06,
1.643007E+06, 1.672403E+06, 1.702154E+06, 1.732262E+06, 1.762729E+06, 1.793555E+06,
1.824742E+06, 1.856291E+06, 1.888203E+06, 1.920480E+06, 1.953123E+06, 1.986132E+06,
2.019510E+06, 2.053256E+06, 2.087373E+06, 2.121861E+06, 2.156721E+06, 2.191954E+06,
2.227562E+06, 2.263544E+06, 2.299902E+06, 2.336637E+06, 2.373749E+06, 2.411240E+06,
2.449110E+06, 2.487359E+06, 2.525989E+06, 2.565001E+06, 2.604394E+06, 2.644169E+06,
2.684328E+06, 2.724871E+06, 2.765797E+06, 2.807108E+06, 2.848805E+06, 2.890886E+06,
2.933354E+06, 2.976209E+06, 3.019450E+06, 3.063078E+06, 3.107094E+06, 3.151498E+06,
3.196289E+06, 3.241469E+06, 3.287037E+06, 3.332994E+06, 3.379340E+06, 3.426074E+06,
3.473198E+06, 3.520711E+06, 3.568613E+06, 3.616905E+06, 3.665585E+06, 3.714655E+06,
3.764114E+06, 3.813962E+06, 3.864200E+06, 3.914826E+06, 3.965841E+06, 4.017245E+06,
4.069037E+06, 4.121218E+06, 4.173787E+06, 4.226743E+06, 4.280088E+06, 4.333819E+06,
4.387938E+06, 4.442443E+06, 4.497335E+06, 4.552613E+06, 4.608276E+06, 4.664325E+06,
4.720758E+06, 4.777576E+06, 4.834778E+06, 4.892363E+06, 4.950330E+06, 5.008681E+06,
5.067413E+06, 5.126526E+06, 5.186021E+06, 5.245895E+06, 5.306149E+06, 5.366782E+06,
5.427793E+06,
])
# Auto-generated partition-sum lookup data.
# NOTE(review): (M, I) look like HITRAN molecule/isotopologue indices and
# TIPS_2017_ISOQ_HASH maps them to tabulated TIPS-2017 partition sums over
# the temperature grid TIPS_2017_ISOT[2] (defined earlier) — confirm.
# ---------------------- M = 12, I = 1 ---------------------------
M = 12
I = 1
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
3.573524E+01, 2.896824E+03, 8.174539E+03, 1.500830E+04, 2.310835E+04, 3.233901E+04,
4.267884E+04, 5.420733E+04, 6.708747E+04, 8.154826E+04, 9.787188E+04, 1.163871E+05,
1.374678E+05, 1.615349E+05, 1.890610E+05, 2.205764E+05, 2.566761E+05, 2.980283E+05,
3.453830E+05, 3.995815E+05, 4.615675E+05, 5.323980E+05, 6.132560E+05, 7.054636E+05,
8.104973E+05, 9.300032E+05, 1.065814E+06, 1.219969E+06, 1.394732E+06, 1.592616E+06,
1.816403E+06, 2.069175E+06, 2.354336E+06, 2.675644E+06, 3.037246E+06, 3.443706E+06,
3.900048E+06, 4.411790E+06, 4.984997E+06, 5.626315E+06, 6.343026E+06, 7.143101E+06,
8.035255E+06, 9.029012E+06, 1.013476E+07, 1.136382E+07, 1.272855E+07, 1.424236E+07,
1.591986E+07, 1.777692E+07, 1.983076E+07, 2.210007E+07, 2.460508E+07, 2.736772E+07,
3.041171E+07, 3.376270E+07, 3.744839E+07, 4.149872E+07, 4.594593E+07, 5.082484E+07,
5.617291E+07, 6.203049E+07, 6.844098E+07, 7.545107E+07, 8.311086E+07, 9.147421E+07,
1.005989E+08, 1.105468E+08, 1.213843E+08, 1.331825E+08, 1.460176E+08, 1.599709E+08,
1.751294E+08, 1.915862E+08, 2.094406E+08, 2.287987E+08, 2.497734E+08, 2.724856E+08,
2.970635E+08, 3.236443E+08, 3.523735E+08, 3.834064E+08, 4.169078E+08, 4.530531E+08,
4.920286E+08, 5.340321E+08, 5.792739E+08, 6.279767E+08, 6.803770E+08, 7.367252E+08,
7.972871E+08, 8.623440E+08, 9.321937E+08, 1.007151E+09, 1.087551E+09, 1.173744E+09,
1.266104E+09, 1.365025E+09, 1.470923E+09, 1.584237E+09, 1.705430E+09, 1.834993E+09,
1.973439E+09, 2.121315E+09, 2.279192E+09, 2.447675E+09, 2.627399E+09, 2.819035E+09,
3.023286E+09, 3.240894E+09, 3.472638E+09, 3.719338E+09, 3.981857E+09, 4.261099E+09,
4.558015E+09, 4.873603E+09, 5.208913E+09, 5.565043E+09, 5.943148E+09, 6.344438E+09,
6.770183E+09, 7.221711E+09, 7.700418E+09, 8.207763E+09, 8.745275E+09, 9.314557E+09,
9.917283E+09, 1.055521E+10, 1.123017E+10, 1.194408E+10, 1.269895E+10, 1.349688E+10,
1.434006E+10, 1.523078E+10, 1.617143E+10, 1.716451E+10, 1.821263E+10, 1.931852E+10,
2.048501E+10, 2.171506E+10, 2.301179E+10, 2.437840E+10, 2.581826E+10, 2.733487E+10,
2.893189E+10, 3.061313E+10, 3.238254E+10, 3.424425E+10, 3.620258E+10, 3.826198E+10,
4.042712E+10, 4.270285E+10, 4.509420E+10, 4.760643E+10, 5.024498E+10, 5.301553E+10,
5.592397E+10, 5.897644E+10, 6.217929E+10, 6.553915E+10, 6.906289E+10, 7.275761E+10,
7.663079E+10, 8.069007E+10, 8.494343E+10, 8.939920E+10, 9.406595E+10, 9.895261E+10,
1.040684E+11, 1.094230E+11, 1.150262E+11, 1.208886E+11, 1.270205E+11, 1.334333E+11,
1.401384E+11, 1.471476E+11,
])
# ---------------------- M = 12, I = 2 ---------------------------
M = 12
I = 2
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
2.382603E+01, 1.931437E+03, 5.450319E+03, 1.000669E+04, 1.540736E+04, 2.156205E+04,
2.845720E+04, 3.614739E+04, 4.474377E+04, 5.440219E+04, 6.531440E+04, 7.770355E+04,
9.182310E+04, 1.079584E+05, 1.264298E+05, 1.475967E+05, 1.718627E+05, 1.996812E+05,
2.315613E+05, 2.680746E+05, 3.098624E+05, 3.576437E+05, 4.122233E+05, 4.745014E+05,
5.454834E+05, 6.262912E+05, 7.181744E+05, 8.225232E+05, 9.408828E+05, 1.074968E+06,
1.226680E+06, 1.398121E+06, 1.591620E+06, 1.809745E+06, 2.055330E+06, 2.331499E+06,
2.641689E+06, 2.989677E+06, 3.379614E+06, 3.816053E+06, 4.303982E+06, 4.848864E+06,
5.456671E+06, 6.133931E+06, 6.887769E+06, 7.725955E+06, 8.656957E+06, 9.689991E+06,
1.083508E+07, 1.210311E+07, 1.350592E+07, 1.505634E+07, 1.676827E+07, 1.865678E+07,
2.073817E+07, 2.303008E+07, 2.555154E+07, 2.832314E+07, 3.136707E+07, 3.470727E+07,
3.836952E+07, 4.238158E+07, 4.677333E+07, 5.157688E+07, 5.682677E+07, 6.256005E+07,
6.881650E+07, 7.563878E+07, 8.307263E+07, 9.116702E+07, 9.997437E+07, 1.095508E+08,
1.199563E+08, 1.312550E+08, 1.435154E+08, 1.568106E+08, 1.712185E+08, 1.868225E+08,
2.037110E+08, 2.219786E+08, 2.417258E+08, 2.630596E+08, 2.860939E+08, 3.109496E+08,
3.377555E+08, 3.666480E+08, 3.977722E+08, 4.312820E+08, 4.673407E+08, 5.061213E+08,
5.478072E+08, 5.925928E+08, 6.406837E+08, 6.922978E+08, 7.476655E+08, 8.070303E+08,
8.706501E+08, 9.387968E+08, 1.011758E+09, 1.089838E+09, 1.173356E+09, 1.262651E+09,
1.358080E+09, 1.460018E+09, 1.568862E+09, 1.685029E+09, 1.808960E+09, 1.941117E+09,
2.081987E+09, 2.232084E+09, 2.391947E+09, 2.562142E+09, 2.743268E+09, 2.935948E+09,
3.140842E+09, 3.358640E+09, 3.590069E+09, 3.835888E+09, 4.096897E+09, 4.373935E+09,
4.667879E+09, 4.979648E+09, 5.310211E+09, 5.660576E+09, 6.031804E+09, 6.425001E+09,
6.841331E+09, 7.282006E+09, 7.748297E+09, 8.241535E+09, 8.763108E+09, 9.314471E+09,
9.897139E+09, 1.051270E+10, 1.116282E+10, 1.184921E+10, 1.257370E+10, 1.333817E+10,
1.414458E+10, 1.499499E+10, 1.589154E+10, 1.683648E+10, 1.783212E+10, 1.888090E+10,
1.998535E+10, 2.114810E+10, 2.237192E+10, 2.365965E+10, 2.501428E+10, 2.643891E+10,
2.793676E+10, 2.951121E+10, 3.116574E+10, 3.290399E+10, 3.472976E+10, 3.664695E+10,
3.865967E+10, 4.077217E+10, 4.298886E+10, 4.531432E+10, 4.775333E+10, 5.031083E+10,
5.299197E+10, 5.580208E+10, 5.874669E+10, 6.183156E+10, 6.506265E+10, 6.844617E+10,
7.198850E+10, 7.569632E+10, 7.957652E+10, 8.363629E+10, 8.788300E+10, 9.232438E+10,
9.696836E+10, 1.018233E+11,
])
# ---------------------- M = 13, I = 1 ---------------------------
M = 13
I = 1
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[3]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.538496E+01, 1.602673E+01, 1.728778E+01, 2.005293E+01, 2.374009E+01, 2.800797E+01,
3.266623E+01, 3.759941E+01, 4.273326E+01, 4.801797E+01, 5.341894E+01, 5.891142E+01,
6.447729E+01, 7.010295E+01, 7.577806E+01, 8.149456E+01, 8.724613E+01, 9.302771E+01,
9.883525E+01, 1.046654E+02, 1.105156E+02, 1.163834E+02, 1.222671E+02, 1.281652E+02,
1.340764E+02, 1.399998E+02, 1.459345E+02, 1.518800E+02, 1.578359E+02, 1.638019E+02,
1.697777E+02, 1.757636E+02, 1.817594E+02, 1.877655E+02, 1.937821E+02, 1.998096E+02,
2.058484E+02, 2.118992E+02, 2.179624E+02, 2.240388E+02, 2.301290E+02, 2.362337E+02,
2.423537E+02, 2.484898E+02, 2.546428E+02, 2.608134E+02, 2.670027E+02, 2.732114E+02,
2.794403E+02, 2.856903E+02, 2.919624E+02, 2.982572E+02, 3.045757E+02, 3.109188E+02,
3.172871E+02, 3.236817E+02, 3.301032E+02, 3.365525E+02, 3.430303E+02, 3.495374E+02,
3.560745E+02, 3.626425E+02, 3.692419E+02, 3.758735E+02, 3.825380E+02, 3.892360E+02,
3.959682E+02, 4.027353E+02, 4.095377E+02, 4.163762E+02, 4.232513E+02, 4.301635E+02,
4.371135E+02, 4.441017E+02, 4.511287E+02, 4.581950E+02, 4.653011E+02, 4.724475E+02,
4.796345E+02, 4.868628E+02, 4.941326E+02, 5.014445E+02, 5.087989E+02, 5.161961E+02,
5.236366E+02, 5.311207E+02, 5.386489E+02, 5.462214E+02, 5.538387E+02, 5.615011E+02,
5.692089E+02, 5.769624E+02, 5.847620E+02, 5.926080E+02, 6.005006E+02, 6.084403E+02,
6.164272E+02, 6.244617E+02, 6.325440E+02, 6.406744E+02, 6.488532E+02, 6.570806E+02,
6.653569E+02, 6.736823E+02, 6.820571E+02, 6.904815E+02, 6.989557E+02, 7.074801E+02,
7.160547E+02, 7.246798E+02, 7.333557E+02, 7.420826E+02, 7.508606E+02, 7.596900E+02,
7.685709E+02, 7.775036E+02, 7.864883E+02, 7.955252E+02, 8.046144E+02, 8.137561E+02,
8.229506E+02, 8.321980E+02, 8.414985E+02, 8.508522E+02, 8.602594E+02, 8.697202E+02,
8.792348E+02, 8.888034E+02, 8.984261E+02, 9.081031E+02, 9.178345E+02, 9.276206E+02,
9.374615E+02, 9.473573E+02, 9.573083E+02, 9.673144E+02, 9.773761E+02, 9.874933E+02,
9.976662E+02, 1.007895E+03, 1.018180E+03, 1.028521E+03, 1.038918E+03, 1.049372E+03,
1.059883E+03, 1.070450E+03, 1.081075E+03, 1.091756E+03, 1.102495E+03, 1.113291E+03,
1.124144E+03, 1.135055E+03, 1.146024E+03, 1.157051E+03, 1.168136E+03, 1.179280E+03,
1.190482E+03, 1.201742E+03, 1.213061E+03, 1.224439E+03, 1.235875E+03, 1.247371E+03,
1.258926E+03, 1.270541E+03, 1.282215E+03, 1.293948E+03, 1.305742E+03, 1.317595E+03,
1.329509E+03, 1.341483E+03, 1.353517E+03, 1.365612E+03, 1.377767E+03, 1.389983E+03,
1.402260E+03, 1.414599E+03, 1.426998E+03, 1.439459E+03, 1.451981E+03, 1.464566E+03,
1.477211E+03, 1.489919E+03, 1.502689E+03, 1.515521E+03, 1.528416E+03, 1.541373E+03,
1.554393E+03, 1.567476E+03, 1.580621E+03, 1.593830E+03, 1.607102E+03, 1.620437E+03,
1.633836E+03, 1.647299E+03, 1.660825E+03, 1.674416E+03, 1.688070E+03, 1.701789E+03,
1.715573E+03, 1.729421E+03, 1.743334E+03, 1.757311E+03, 1.771354E+03, 1.785462E+03,
1.799635E+03, 1.813874E+03, 1.828179E+03, 1.842549E+03, 1.856985E+03, 1.871488E+03,
1.886056E+03, 1.900691E+03, 1.915393E+03, 1.930162E+03, 1.944997E+03, 1.959899E+03,
1.974869E+03, 1.989906E+03, 2.005010E+03, 2.020183E+03, 2.035423E+03, 2.050731E+03,
2.066107E+03, 2.081552E+03, 2.097065E+03, 2.112647E+03, 2.128297E+03, 2.144017E+03,
2.159805E+03, 2.175663E+03, 2.191591E+03, 2.207588E+03, 2.223654E+03, 2.239791E+03,
2.255998E+03, 2.272275E+03, 2.288623E+03, 2.305041E+03, 2.321530E+03, 2.338090E+03,
2.354721E+03, 2.371423E+03, 2.388197E+03, 2.405042E+03, 2.421959E+03, 2.438948E+03,
2.456009E+03, 2.473143E+03, 2.490349E+03, 2.507627E+03, 2.524978E+03, 2.542402E+03,
2.559899E+03, 2.577470E+03, 2.595114E+03, 2.612831E+03, 2.630622E+03, 2.648488E+03,
2.666427E+03, 2.684441E+03, 2.702529E+03, 2.720691E+03, 2.738929E+03, 2.757241E+03,
2.775629E+03, 2.794091E+03, 2.812630E+03, 2.831243E+03, 2.849933E+03, 2.868698E+03,
2.887540E+03, 2.906458E+03, 2.925453E+03, 2.944524E+03, 2.963671E+03, 2.982896E+03,
3.002198E+03, 3.021577E+03, 3.041034E+03, 3.060568E+03, 3.080180E+03, 3.099870E+03,
3.119638E+03, 3.139484E+03, 3.159409E+03, 3.179412E+03, 3.199494E+03, 3.219655E+03,
3.239895E+03, 3.260214E+03, 3.280613E+03, 3.301091E+03, 3.321649E+03, 3.342287E+03,
3.363005E+03, 3.383803E+03, 3.404681E+03, 3.425640E+03, 3.446680E+03, 3.467800E+03,
3.489001E+03, 3.510284E+03, 3.531647E+03, 3.553093E+03, 3.574619E+03, 3.596228E+03,
3.617918E+03, 3.639691E+03, 3.661545E+03, 3.683482E+03, 3.705502E+03, 3.727604E+03,
3.749788E+03, 3.772056E+03, 3.794407E+03, 3.816841E+03, 3.839358E+03, 3.861959E+03,
3.884644E+03, 3.907412E+03, 3.930264E+03, 3.953201E+03, 3.976221E+03, 3.999326E+03,
4.022515E+03, 4.045789E+03, 4.069147E+03, 4.092591E+03, 4.116119E+03, 4.139733E+03,
4.163432E+03, 4.187216E+03, 4.211086E+03, 4.235041E+03, 4.259082E+03, 4.283209E+03,
4.307422E+03, 4.331721E+03, 4.356107E+03, 4.380578E+03, 4.405137E+03, 4.429781E+03,
4.454513E+03, 4.479331E+03, 4.504237E+03, 4.529229E+03, 4.554309E+03, 4.579476E+03,
4.604730E+03, 4.630072E+03, 4.655502E+03, 4.681019E+03, 4.706624E+03, 4.732317E+03,
4.758098E+03, 4.783967E+03, 4.809924E+03, 4.835970E+03, 4.862104E+03, 4.888326E+03,
4.914637E+03, 4.941037E+03, 4.967526E+03, 4.994104E+03, 5.020770E+03, 5.047526E+03,
5.074371E+03, 5.101305E+03, 5.128328E+03, 5.155441E+03, 5.182643E+03, 5.209935E+03,
5.237316E+03, 5.264787E+03, 5.292348E+03, 5.319999E+03, 5.347739E+03, 5.375570E+03,
5.403491E+03, 5.431502E+03, 5.459603E+03, 5.487794E+03, 5.516076E+03, 5.544448E+03,
5.572910E+03, 5.601464E+03, 5.630107E+03, 5.658842E+03, 5.687667E+03, 5.716582E+03,
5.745589E+03, 5.774686E+03, 5.803874E+03, 5.833154E+03, 5.862524E+03, 5.891985E+03,
5.921537E+03, 5.951181E+03, 5.980916E+03, 6.010741E+03, 6.040658E+03, 6.070667E+03,
6.100766E+03, 6.130957E+03, 6.161240E+03, 6.191614E+03, 6.222079E+03, 6.252635E+03,
6.283284E+03, 6.314023E+03, 6.344855E+03, 6.375777E+03, 6.406792E+03, 6.437898E+03,
6.469095E+03, 6.500385E+03, 6.531765E+03, 6.563238E+03, 6.594802E+03, 6.626458E+03,
6.658206E+03, 6.690045E+03, 6.721976E+03, 6.753999E+03, 6.786113E+03, 6.818319E+03,
6.850617E+03, 6.883007E+03, 6.915488E+03, 6.948061E+03, 6.980726E+03, 7.013482E+03,
7.046330E+03, 7.079270E+03, 7.112302E+03, 7.145425E+03, 7.178640E+03, 7.211946E+03,
7.245344E+03, 7.278834E+03, 7.312415E+03, 7.346088E+03, 7.379852E+03, 7.413708E+03,
7.447655E+03, 7.481694E+03, 7.515824E+03, 7.550045E+03, 7.584358E+03, 7.618763E+03,
7.653258E+03,
])
# ---------------------- M = 13, I = 2 ---------------------------
M = 13
I = 2
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.597389E+01, 1.605971E+01, 1.732986E+01, 2.012387E+01, 2.384501E+01, 2.814934E+01,
3.284552E+01, 3.781754E+01, 4.299087E+01, 4.831550E+01, 5.375673E+01, 5.928972E+01,
6.489630E+01, 7.056284E+01, 7.627897E+01, 8.203662E+01, 8.783033E+01, 9.365333E+01,
9.950433E+01, 1.053783E+02, 1.112735E+02, 1.171880E+02, 1.231211E+02, 1.290729E+02,
1.350413E+02, 1.410255E+02, 1.470291E+02, 1.530492E+02, 1.590887E+02, 1.651462E+02,
1.712253E+02, 1.773248E+02, 1.834470E+02, 1.895945E+02, 1.957642E+02, 2.019629E+02,
2.081875E+02, 2.144411E+02, 2.207268E+02, 2.270436E+02, 2.333928E+02, 2.397777E+02,
2.461972E+02, 2.526550E+02, 2.591499E+02, 2.656857E+02, 2.722610E+02, 2.788770E+02,
2.855378E+02, 2.922419E+02, 2.989905E+02, 3.057878E+02, 3.126291E+02, 3.195186E+02,
3.264576E+02, 3.334441E+02, 3.404823E+02, 3.475737E+02, 3.547126E+02, 3.619069E+02,
3.691508E+02, 3.764523E+02, 3.838055E+02, 3.912149E+02, 3.986779E+02, 4.061993E+02,
4.137762E+02, 4.214096E+02, 4.290963E+02, 4.368453E+02, 4.446536E+02, 4.525179E+02,
4.604431E+02, 4.684258E+02, 4.764670E+02, 4.845716E+02, 4.927317E+02, 5.009570E+02,
5.092437E+02, 5.175879E+02, 5.259950E+02, 5.344656E+02, 5.429958E+02, 5.515910E+02,
5.602469E+02, 5.689641E+02, 5.777484E+02, 5.865952E+02, 5.955001E+02, 6.044738E+02,
6.135065E+02, 6.226095E+02, 6.317725E+02, 6.410014E+02, 6.502913E+02, 6.596482E+02,
6.690726E+02, 6.785594E+02, 6.881090E+02, 6.977277E+02, 7.074100E+02, 7.171565E+02,
7.269735E+02, 7.368494E+02, 7.467968E+02, 7.568101E+02, 7.668831E+02, 7.770291E+02,
7.872422E+02, 7.975161E+02, 8.078577E+02, 8.182674E+02, 8.287457E+02, | |
<filename>wrapper/source_hosts.py
""" Source hosts
Some migration sources require significant setup before they can export block
devices with nbdkit. This module holds the code used to set up nbdkit exports
on such sources.
For example, the current OpenStack export strategy is to shut down the source
VM, attach its volumes to a source conversion host, and export the volumes from
inside the conversion host via nbdkit. The OpenStackSourceHost class will take
care of the process up to this point, and some other class in the regular hosts
module will take care of copying the data from those exports to their final
migration destination. There is an exception for KVM-to-KVM migrations though,
because those aren't supposed to use virt-v2v - in this case, this module will
transfer the data itself instead of calling the main wrapper function.
"""
import errno
import fcntl
import functools
import json
import logging
import os
import subprocess
import time
from collections import namedtuple

from .common import RUN_DIR, LOG_DIR, VDDK_LIBDIR, disable_interrupt
from .hosts import OpenstackHost
from .pre_copy import PreCopy
from .state import STATE, Disk
NBD_READY_SENTINEL = 'nbdready' # Created when nbdkit exports are ready
DEFAULT_TIMEOUT = 600 # Maximum wait for openstacksdk operations
# Lock to serialize volume attachments. This helps prevent device path
# mismatches between the OpenStack SDK and /dev in the VM.
ATTACH_LOCK_FILE_SOURCE = '/var/lock/v2v-source-volume-lock'
ATTACH_LOCK_FILE_DESTINATION = '/var/lock/v2v-destination-volume-lock'
# Local directory to copy logs from source conversion host
SOURCE_LOGS_DIR = '/data/source_logs'
def detect_source_host(data, agent_sock):
    """ Instantiate the matching source host object for the given input.

    Currently only OpenStack sources are recognized; returns None when the
    input data does not describe a known migration source.
    """
    if 'osp_source_environment' not in data:
        return None
    return OpenStackSourceHost(data, agent_sock)
def avoid_wrapper(source_host, host):
    """
    Check if this combination of source and destination host should avoid
    running virt-v2v. Delegates to the source host when one exists; a
    missing source host never avoids the wrapper.
    """
    if not source_host:
        return source_host
    return source_host.avoid_wrapper(host)
def migrate_instance(source_host, host):
    """ Run all the pieces of a source_host migration.

    On a RuntimeError from any step, exports are closed before the error
    is re-raised so the source cloud is left in a usable state.
    """
    if not source_host:
        logging.info('Ignoring migration request for empty source_host.')
        return
    try:
        source_host.prepare_exports()
        source_host.transfer_exports(host)
        source_host.close_exports()
    except RuntimeError:
        logging.error('Got error migrating instance, attempting cleanup.')
        source_host.close_exports()
        raise
def _use_lock(lock_file):
""" Boilerplate for functions that need to take a lock. """
def _decorate_lock(function):
def wait_for_lock(self):
with open(lock_file, 'wb+') as lock:
for second in range(DEFAULT_TIMEOUT):
try:
logging.info('Waiting for lock %s...', lock_file)
fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except OSError:
logging.info('Another conversion has the lock.')
time.sleep(1)
else:
raise RuntimeError(
'Unable to acquire lock {}!'.format(lock_file))
try:
function(self)
finally:
fcntl.flock(lock, fcntl.LOCK_UN)
return wait_for_lock
return _decorate_lock
class _BaseSourceHost(object):
""" Interface for source hosts. """
def prepare_exports(self):
""" Creates the nbdkit exports from the migration source. """
logging.info('No preparation needed for this migration source.')
def close_exports(self):
""" Stops the nbdkit exports on the migration source. """
logging.info('No cleanup needed for this migration source.')
def transfer_exports(self, host):
""" Performs a data copy to a destination host. """
logging.info('No transfer ability for this migration source.')
def avoid_wrapper(self, host):
""" Decide whether or not to avoid running virt-v2v. """
logging.info('No reason to avoid virt-v2v from this migration source.')
return True
# Bookkeeping for a single volume as it moves through the migration; one
# VolumeMapping is stored per original device path in volume_map. Fields
# start as None and are filled in as each migration step completes.
VolumeMapping = namedtuple('VolumeMapping', [
    'source_dev',  # Device path (like /dev/vdb) on source conversion host
    'source_id',   # Volume ID on source conversion host
    'dest_dev',    # Device path on destination conversion host
    'dest_id',     # Volume ID on destination conversion host
    'snap_id',     # Root volumes need snapshot+new volume
    'image_id',    # Direct-from-image VMs create temporary snapshot image
    'name',        # Save volume name to set on destination
    'size',        # Volume size reported by OpenStack, in GB
    'url',         # Final NBD export address from source conversion host
    'state'        # STATE.Disk object for tracking progress
])
class OpenStackSourceHost(_BaseSourceHost):
""" Export volumes from an OpenStack instance. """
    def __init__(self, data, agent_sock):
        """ Connect to both clouds and validate both conversion hosts.

        Args:
            data: wrapper input JSON (cloud credentials, VM and host IDs,
                optional disk list and UCI container image).
            agent_sock: path to the SSH agent socket used for all SSH/SCP
                commands to the source conversion host.

        Raises:
            RuntimeError: if the OpenStack SDK is missing, or if either
                conversion host instance cannot be found.
        """
        try:
            import openstack
        except ImportError as e:
            raise RuntimeError('OpenStack SDK is not installed on this '
                               'conversion host!') from e
        # Create a connection to the source cloud.
        # Keys are stripped of their first three characters and lowercased,
        # e.g. 'OS_AUTH_URL' -> 'auth_url' -- assumes an 'OS_' prefix on
        # every key; TODO confirm against the input schema.
        osp_env = data['osp_source_environment']
        osp_args = {arg[3:].lower(): osp_env[arg] for arg in osp_env}
        osp_args['verify'] = not data.get('insecure_connection', False)
        self.source_converter = data['osp_source_conversion_vm_id']
        self.source_instance = data['osp_source_vm_id']
        self.conn = openstack.connect(**osp_args)
        # Create a connection to the destination cloud (same key-stripping
        # convention as above)
        osp_env = data['osp_environment']
        osp_args = {arg[3:].lower(): osp_env[arg] for arg in osp_env}
        osp_args['verify'] = not data.get('insecure_connection', False)
        self.dest_converter = data['osp_server_id']
        self.dest_conn = openstack.connect(**osp_args)
        self.agent_sock = agent_sock
        # Silence the SDK's default logging
        openstack.enable_logging(debug=False, http_debug=False, stream=None)
        # Fail early if either conversion host cannot be looked up
        if self._converter() is None:
            raise RuntimeError('Cannot find source instance {}'.format(
                self.source_converter))
        if self._destination() is None:
            raise RuntimeError('Cannot find destination instance {}'.format(
                self.dest_converter))
        # Build up a list of VolumeMappings keyed by the original device path
        self.volume_map = {}
        # Temporary directory for logs on source conversion host
        self.tmpdir = None
        # SSH tunnel process
        self.forwarding_process = None
        # If there is a specific list of disks to transfer, remember them so
        # only those disks get transferred.
        self.source_disks = None
        if 'source_disks' in data:
            self.source_disks = data['source_disks']
        # Allow UCI container ID (or name) to be passed in input JSON
        self.uci_container = data.get('uci_container_image',
                                      'v2v-conversion-host')
    def prepare_exports(self):
        """ Attach the source VM's volumes to the source conversion host.

        Order matters: connectivity is checked first, the source VM must be
        confirmed SHUTOFF, and its volumes must be mapped and detached
        before they can be attached to and exported from the converter.
        """
        self._test_ssh_connection()
        self._test_source_vm_shutdown()
        self._get_root_and_data_volumes()
        self._detach_data_volumes_from_source()
        self._attach_volumes_to_converter()
        self._export_volumes_from_converter()
    @disable_interrupt
    def close_exports(self):
        """ Put the source VM's volumes back where they were.

        Runs the reverse of prepare_exports' attachment steps. The
        disable_interrupt decorator presumably keeps signals from leaving
        this cleanup half-done -- see .common for its exact semantics.
        """
        self._converter_close_exports()
        self._detach_volumes_from_converter()
        self._attach_data_volumes_to_source()
    def transfer_exports(self, host):
        """ Copy data from the source exports to new destination volumes.

        NOTE(review): `host` is part of the source-host interface but is
        not used by this implementation -- confirm this is intentional.
        """
        self._create_destination_volumes()
        self._attach_destination_volumes()
        self._convert_destination_volumes()
        self._detach_destination_volumes()
def avoid_wrapper(self, host):
""" Assume OpenStack to OpenStack migrations are always KVM to KVM. """
if isinstance(host, OpenstackHost):
logging.info('OpenStack->OpenStack migration, skipping virt-v2v.')
return True
return False
def _source_vm(self):
"""
Changes to the VM returned by get_server_by_id are not necessarily
reflected in existing objects, so just get a new one every time.
"""
return self.conn.get_server_by_id(self.source_instance)
def _converter(self):
""" Same idea as _source_vm, for source conversion host. """
return self.conn.get_server_by_id(self.source_converter)
def _destination(self):
""" Same idea as _source_vm, for destination conversion host. """
return self.dest_conn.get_server_by_id(self.dest_converter)
def _ssh_args(self):
""" Provide default set of SSH options. """
return [
'-o', 'BatchMode=yes',
'-o', 'StrictHostKeyChecking=no',
'-o', 'ConnectTimeout=10',
]
def _ssh_cmd(self, address, args):
""" Build an SSH command and environment using the running agent. """
environment = os.environ.copy()
environment['SSH_AUTH_SOCK'] = self.agent_sock
command = ['ssh']
command.extend(self._ssh_args())
command.extend(['cloud-user@'+address])
command.extend(args)
return command, environment
def _destination_out(self, args):
""" Run a command on the dest conversion host and get the output. """
address = self._destination().accessIPv4
command, environment = self._ssh_cmd(address, args)
output = subprocess.check_output(command, env=environment)
return output.decode('utf-8').strip()
def _converter_out(self, args):
""" Run a command on the source conversion host and get the output. """
address = self._converter().accessIPv4
command, environment = self._ssh_cmd(address, args)
output = subprocess.check_output(command, env=environment)
return output.decode('utf-8').strip()
def _converter_val(self, args):
""" Run a command on the source conversion host and get return code """
address = self._converter().accessIPv4
command, environment = self._ssh_cmd(address, args)
return subprocess.call(command, env=environment)
def _converter_sub(self, args):
""" Run a long-running command on the source conversion host. """
address = self._converter().accessIPv4
command, environment = self._ssh_cmd(address, args)
return subprocess.Popen(command, env=environment)
def _converter_scp(self, source, dest):
""" Copy a file to the source conversion host. """
environment = os.environ.copy()
environment['SSH_AUTH_SOCK'] = self.agent_sock
address = self._converter().accessIPv4
command = ['scp']
command.extend(self._ssh_args())
command.extend([source, 'cloud-user@'+address+':'+dest])
return subprocess.call(command, env=environment)
def _converter_scp_from(self, source, dest, recursive=False):
""" Copy a file from the source conversion host. """
environment = os.environ.copy()
environment['SSH_AUTH_SOCK'] = self.agent_sock
address = self._converter().accessIPv4
command = ['scp']
command.extend(self._ssh_args())
if recursive:
command.extend(['-r'])
command.extend(['cloud-user@'+address+':'+source, dest])
return subprocess.call(command, env=environment)
def _test_ssh_connection(self):
""" Quick SSH connectivity check for source conversion host. """
out = self._converter_out(['echo connected'])
if out != 'connected':
raise RuntimeError('Unable to SSH to source conversion host!')
def _test_source_vm_shutdown(self):
""" Make sure the source VM is shutdown, and fail if it isn't. """
server = self.conn.compute.get_server(self._source_vm().id)
if server.status != 'SHUTOFF':
raise RuntimeError('Source VM is not shut down!')
def _get_attachment(self, volume, vm):
"""
Get the attachment object from the volume with the matching server ID.
Convenience method for use only when the attachment is already certain.
"""
for attachment in volume.attachments:
if attachment.server_id == vm.id:
return attachment
raise RuntimeError('Volume is not attached to the specified instance!')
def _get_root_and_data_volumes(self):
"""
Volume mapping step one: get the IDs and sizes of all volumes on the
source VM. Key off the original device path to eventually preserve this
order on the destination.
"""
sourcevm = self._source_vm()
for server_volume in sourcevm.volumes:
volume = self.conn.get_volume_by_id(server_volume['id'])
logging.info('Inspecting volume: %s', volume.id)
if self.source_disks and volume.id not in self.source_disks:
logging.info('Volume is not in specified disk list, ignoring.')
continue
dev_path = self._get_attachment(volume, sourcevm).device
disk = Disk(dev_path, 0)
self.volume_map[dev_path] = VolumeMapping(
source_dev=None, source_id=volume.id, dest_dev=None,
dest_id=None, snap_id=None, image_id=None, name=volume.name,
size=volume.size, url=None, state=disk)
STATE.disks.append(disk)
logging.debug('STATE.disks is now %s', STATE.disks)
| |
Represents a match between a query and a saved artifact. `level` is a string
describing the match level, ranging from "functional" to "exact".
"""
metadata_url = attr.ib()
level = attr.ib()
# TODO Should we merge this with InventoryEntry?
@attr.s(frozen=True)
class ExternalCacheItem:
    """
    Represents an inventory entry, but contains data intended to be exposed to users
    via the Cache class.
    """
    # The Inventory instance this item was listed from.
    inventory = attr.ib()
    # Absolute URL of the cached artifact file/blob.
    abs_artifact_url = attr.ib()
    # Absolute URL of the metadata record describing the artifact.
    abs_metadata_url = attr.ib()
    # Descriptor carried over from the metadata record; presumably a
    # human-readable description of the cached entity -- confirm against
    # ArtifactMetadataRecord.
    descriptor = attr.ib()
class Inventory:
    """
    Maintains a persistent mapping from Queries to artifact URLs. An Inventory
    is backed by a "file system", which could correspond to either a local disk
    or a cloud storage service. This file system is used to store
    metadata records, each of which describes a Query and an artifact URL that
    satisfies it. Metadata records are stored using a hierarchical naming
    scheme whose levels correspond to the different levels of Provenance
    matching.
    """

    def __init__(self, name, tier, filesystem):
        self.name = name
        self.tier = tier
        self._fs = filesystem
        self.root_url = filesystem.root_url

    def register_url(self, query, url, value_hash):
        """
        Records metadata indicating that the provided Query is satisfied
        by the provided URL, and returns a corresponding InventoryEntry.
        """
        logger.debug(
            "In %s inventory for %r, saving artifact URL %s ...",
            self.tier,
            query,
            url,
        )
        expected_metadata_url = self._exact_metadata_url_for_query(query)
        metadata_record = None
        if self._fs.exists(expected_metadata_url):
            # This shouldn't happen, because the CacheAccessor shouldn't write
            # to this inventory if we already have an exact match.
            # (warning() -- Logger.warn is a deprecated alias.)
            logger.warning(
                "In %s cache, attempted to create duplicate entry mapping %r "
                "to %s",
                self.tier,
                query,
                url,
            )
            metadata_record = self._load_metadata_if_valid_else_delete(
                expected_metadata_url,
            )
        if metadata_record is None:
            metadata_url, metadata_record = self._create_and_write_metadata(
                query, url, value_hash,
            )
            assert metadata_url == expected_metadata_url
        # BUGFIX: log expected_metadata_url rather than metadata_url -- the
        # latter is unbound (NameError) when a valid duplicate record was
        # found above; the assert guarantees they are equal otherwise.
        logger.debug(
            "... in %s inventory for %r, created metadata record at %s",
            self.tier,
            query,
            expected_metadata_url,
        )
        return InventoryEntry(
            tier=self.tier,
            has_artifact=True,
            artifact_url=url,
            provenance=metadata_record.provenance,
            exactly_matches_query=True,
            value_hash=metadata_record.value_hash,
        )

    def find_entry(self, query):
        """
        Returns an InventoryEntry describing the closest match to the provided
        Query.
        """
        logger.debug("In %s inventory for %r, searching ...", self.tier, query)
        n_prior_attempts = 0
        while True:
            # Invalid records are deleted as they are found, so each retry
            # sees a smaller candidate set; warn at escalating thresholds
            # and give up entirely after a million bad records.
            if n_prior_attempts in (10, 100, 1000, 10000, 100000, 1000000):
                message = f"""
                While searching in the {self.tier} cache for an entry matching
                {query!r}, found {n_prior_attempts} invalid metadata files;
                either a lot of artifact files were manually deleted,
                or there's a bug in the cache code
                """
                if n_prior_attempts == 1000000:
                    raise AssertionError("Giving up: " + oneline(message))
                else:
                    # warning() -- Logger.warn is a deprecated alias.
                    logger.warning(oneline(message))
            n_prior_attempts += 1
            match = self._find_best_match(query)
            if not match:
                logger.debug(
                    "... in %s inventory for %r, found no match", self.tier, query
                )
                return InventoryEntry(
                    tier=self.tier,
                    has_artifact=False,
                    artifact_url=None,
                    provenance=None,
                    exactly_matches_query=False,
                    value_hash=None,
                )
            metadata_record = self._load_metadata_if_valid_else_delete(
                match.metadata_url
            )
            if metadata_record is None:
                # The record pointed at a missing artifact and was deleted;
                # search again.
                continue
            logger.debug(
                "... in %s inventory for %r, found %s match at %s",
                self.tier,
                query,
                match.level,
                match.metadata_url,
            )
            return InventoryEntry(
                tier=self.tier,
                has_artifact=True,
                artifact_url=metadata_record.artifact_url,
                provenance=metadata_record.provenance,
                exactly_matches_query=(match.level == "exact"),
                value_hash=metadata_record.value_hash,
            )

    def list_items(self):
        """
        Yields an ExternalCacheItem for every valid metadata record in this
        inventory, deleting invalid records as they are encountered.
        """
        metadata_urls = [
            url for url in self._fs.search(self.root_url) if url.endswith(".yaml")
        ]
        for metadata_url in metadata_urls:
            metadata_record = self._load_metadata_if_valid_else_delete(metadata_url)
            if metadata_record is None:
                continue
            artifact_url = metadata_record.artifact_url
            yield ExternalCacheItem(
                inventory=self,
                abs_artifact_url=derelativize_url(artifact_url, metadata_url),
                abs_metadata_url=metadata_url,
                descriptor=metadata_record.descriptor,
            )

    def delete_url(self, url):
        """Deletes the file at ``url``; returns the file system's result."""
        return self._fs.delete(url)

    def _find_best_match(self, query):
        """
        Returns a MetadataMatch for the closest record to ``query`` (levels,
        best first: exact, samecode, nominal, equivalent), or None when no
        record shares the query's functional hash.
        """
        equivalent_url_prefix = self._equivalent_metadata_url_prefix_for_query(query)
        possible_urls = self._fs.search(equivalent_url_prefix)
        equivalent_urls = [url for url in possible_urls if url.endswith(".yaml")]
        if len(equivalent_urls) == 0:
            return None
        exact_url = self._exact_metadata_url_for_query(query)
        if exact_url in equivalent_urls:
            return MetadataMatch(metadata_url=exact_url, level="exact")
        samecode_url_prefix = self._samecode_metadata_url_prefix_for_query(query)
        samecode_urls = [
            url for url in equivalent_urls if url.startswith(samecode_url_prefix)
        ]
        if len(samecode_urls) > 0:
            return MetadataMatch(metadata_url=samecode_urls[0], level="samecode")
        nominal_url_prefix = self._nominal_metadata_url_prefix_for_query(query)
        nominal_urls = [
            url for url in equivalent_urls if url.startswith(nominal_url_prefix)
        ]
        if len(nominal_urls) > 0:
            return MetadataMatch(metadata_url=nominal_urls[0], level="nominal")
        return MetadataMatch(metadata_url=equivalent_urls[0], level="equivalent")

    def _equivalent_metadata_url_prefix_for_query(self, query):
        """URL prefix shared by all records with this functional hash."""
        return (
            self._fs.root_url
            + "/"
            + valid_filename_from_query(query)
            + "/"
            + query.provenance.functional_hash
        )

    def _nominal_metadata_url_prefix_for_query(self, query):
        """Narrows the equivalent prefix by minor code version."""
        minor_version_token = tokenize(query.provenance.code_version_minor)
        return (
            self._equivalent_metadata_url_prefix_for_query(query)
            + "/"
            + "mv_"
            + minor_version_token
        )

    def _samecode_metadata_url_prefix_for_query(self, query):
        """Narrows the nominal prefix by bytecode hash."""
        return (
            self._nominal_metadata_url_prefix_for_query(query)
            + "/"
            + "bc_"
            + query.provenance.bytecode_hash
        )

    def _exact_metadata_url_for_query(self, query):
        """Full URL of the (unique) record exactly matching the query."""
        filename = f"metadata_{query.provenance.exact_hash}.yaml"
        return self._nominal_metadata_url_prefix_for_query(query) + "/" + filename

    def _load_metadata_if_valid_else_delete(self, url):
        """
        Loads and returns the metadata record at ``url``; if the record
        refers to a nonexistent artifact, deletes it and returns None.

        Raises:
            InternalCacheStateError: if the record cannot be read or parsed.
        """
        try:
            metadata_yaml = self._fs.read_bytes(url).decode("utf8")
            metadata_record = ArtifactMetadataRecord.from_yaml(metadata_yaml, url)
        except Exception as e:
            raise InternalCacheStateError.from_failure("metadata record", url, e)
        if not self._fs.exists(metadata_record.artifact_url):
            logger.info(
                "Found invalid metadata record at %s, "
                "referring to nonexistent artifact at %s; "
                "deleting metadata record",
                url,
                metadata_record.artifact_url,
            )
            self.delete_url(url)
            return None
        else:
            return metadata_record

    def _create_and_write_metadata(self, query, artifact_url, value_hash):
        """Builds, persists and returns (url, record) for an exact match."""
        metadata_url = self._exact_metadata_url_for_query(query)
        metadata_record = ArtifactMetadataRecord.from_content(
            dnode=query.dnode,
            artifact_url=artifact_url,
            provenance=query.provenance,
            metadata_url=metadata_url,
            value_hash=value_hash,
        )
        self._fs.write_bytes(metadata_record.to_yaml().encode("utf8"), metadata_url)
        return metadata_url, metadata_record
class LocalStore:
    """
    Represents the local disk cache. Provides both an Inventory that manages
    artifact (file) URLs, and a method to generate those URLs (for creating
    new files).
    """

    def __init__(self, root_path_str):
        base = Path(root_path_str).absolute()
        self._artifact_root_path = base / "artifacts"
        filesystem = LocalFilesystem(base / "inventory", base / "tmp")
        self.inventory = Inventory("local disk", "local", filesystem)

    def generate_unique_dir_path(self, query):
        """Returns a not-yet-existing directory path for a new artifact."""
        failures = 0
        while True:
            # TODO This path can be anything as long as it's unique, so we
            # could make it more human-readable.
            path = (
                self._artifact_root_path
                / valid_filename_from_query(query)
                / str(uuid4())
            )
            if not path.exists():
                return path
            failures += 1
            if failures > 3:
                raise AssertionError(
                    oneline(
                        f"""
                        Repeatedly failed to randomly generate a novel
                        directory name; {path} already exists"""
                    )
                )
class GcsCloudStore:
    """
    Represents the GCS cloud cache. Provides both an Inventory that manages
    artifact (blob) URLs, and a method to generate those URLs (for creating
    those blobs).
    """

    def __init__(self, url):
        self._tool = GcsTool(url)
        self.inventory = Inventory(
            "GCS", "cloud", GcsFilesystem(self._tool, "/inventory")
        )
        self._artifact_root_url_prefix = url + "/artifacts"

    def generate_unique_url_prefix(self, query):
        """Returns a blob URL prefix no existing blob matches."""
        failures = 0
        while True:
            # TODO This path can be anything as long as it's unique, so we
            # could make it more human-readable.
            url_prefix = "/".join(
                [
                    str(self._artifact_root_url_prefix),
                    valid_filename_from_query(query),
                    str(uuid4()),
                ]
            )
            if not list(self._tool.blobs_matching_url_prefix(url_prefix)):
                return url_prefix
            failures += 1
            if failures > 3:
                raise AssertionError(
                    oneline(
                        f"""
                        Repeatedly failed to randomly generate a novel
                        blob name; {self._artifact_root_url_prefix}
                        already exists"""
                    )
                )

    def upload(self, path, url):
        """Uploads a local file or directory to the given blob URL."""
        # TODO For large individual files, we may still want to use gsutil.
        if not path.is_dir():
            assert path.is_file()
            self._tool.blob_from_url(url).upload_from_filename(str(path))
        else:
            self._tool.gsutil_cp(str(path), url)

    def download(self, path, url):
        """Downloads the blob(s) at ``url`` to the local ``path``."""
        blob = self._tool.blob_from_url(url)
        # TODO For large individual files, we may still want to use gsutil.
        if blob.exists():
            blob.download_to_filename(str(path))
        else:
            # `gsutil cp -r gs://A/B X/Y` doesn't work when B contains
            # multiple files and Y doesn't exist yet. However, if B == Y, we
            # can run `gsutil cp -r gs://A/B X`, which will create Y for us.
            assert path.name == blob.name.rsplit("/", 1)[1]
            self._tool.gsutil_cp(url, str(path.parent))
class FakeCloudStore(LocalStore):
    """
    A mock version of the GcsCloudStore that's actually backed by local files.
    Useful for running tests without setting up a GCS connection, which is
    slow and requires some configuration.

    Note: the redundant Python-2-style ``__init__`` that only forwarded to
    ``super`` was removed; ``LocalStore.__init__`` is inherited unchanged.
    """

    def generate_unique_url_prefix(self, query):
        """Mirror GcsCloudStore's URL-prefix API on top of a local path."""
        return url_from_path(self.generate_unique_dir_path(query))

    def upload(self, path, url):
        """"Upload" by recursively copying into the fake-cloud directory."""
        src_path = path
        dst_path = path_from_url(url)
        recursively_copy_path(src_path, dst_path)

    def download(self, path, url):
        """"Download" by recursively copying out of the fake-cloud dir."""
        src_path = path_from_url(url)
        dst_path = path
        recursively_copy_path(src_path, dst_path)
class LocalFilesystem:
    """
    Implements a generic "FileSystem" interface for reading/writing small files
    to local disk.
    """

    def __init__(self, inventory_dir, tmp_dir):
        self.root_url = url_from_path(inventory_dir)
        self.tmp_root_path = tmp_dir

    def exists(self, url):
        """True if a file or directory exists at ``url``."""
        return path_from_url(url).exists()

    def search(self, url_prefix):
        """Returns URLs of everything under ``url_prefix``, recursively."""
        root = path_from_url(url_prefix)
        if not root.is_dir():
            return []
        return [url_from_path(root / sub) for sub in root.glob("**/*")]

    def delete(self, url):
        """Removes the file at ``url``; returns False if it was absent."""
        target = path_from_url(url)
        if not target.exists():
            return False
        target.unlink()
        return True

    def write_bytes(self, content_bytes, url):
        """Writes ``content_bytes`` to ``url`` via a staged temp file."""
        dst = path_from_url(url)
        ensure_parent_dir_exists(dst)
        ensure_dir_exists(self.tmp_root_path)
        scratch = Path(tempfile.mkdtemp(dir=str(self.tmp_root_path)))
        try:
            # Stage the write and rename into place so readers never see a
            # partially written record -- assumes tmp_root_path and the
            # destination share a filesystem (rename is atomic then); TODO
            # confirm.
            staged = scratch / "tmp_file"
            staged.write_bytes(content_bytes)
            staged.rename(dst)
        finally:
            shutil.rmtree(str(scratch))

    def read_bytes(self, url):
        """Returns the raw bytes of the file at ``url``."""
        return path_from_url(url).read_bytes()
class GcsFilesystem:
"""
Implements a generic "FileSystem" interface for reading/writing small files
to GCS.
"""
def __init__(self, gcs_tool, object_prefix_extension):
self._tool = gcs_tool
self.root_url = self._tool.url + object_prefix_extension
def exists(self, url):
# Checking for "existence" on GCS is slightly complicated. If the URL in
# question corresponds to a single file, we should find an object with a
# matching name. If it corresponds to directory of files, we should find one or
# more objects with a matching prefix (the expected name followed by a | |
import collections
import os
import tempfile
import typing
import audbackend
import audeer
import audformat
import audiofile
from audb.core import define
from audb.core.api import dependencies
from audb.core.dependencies import Dependencies
from audb.core.repository import Repository
def _check_for_duplicates(
        db: audformat.Database,
        num_workers: int,
        verbose: bool,
):
    r"""Ensures tables do not contain duplicated index entries."""

    def check(table):
        # Raises when the table's underlying index holds duplicates.
        audformat.assert_no_duplicates(table._df)

    audeer.run_tasks(
        check,
        params=[([table], {}) for table in db.tables.values()],
        num_workers=num_workers,
        progress_bar=verbose,
        task_description='Check tables for duplicates',
    )
def _find_tables(
        db: audformat.Database,
        db_root: str,
        version: str,
        deps: Dependencies,
        verbose: bool,
) -> typing.List[str]:
    r"""Update table dependencies, returning names of new/changed tables."""
    # release dependencies to removed tables
    current_files = {f'db.{table}.csv' for table in db.tables}
    for stale_file in set(deps.tables) - current_files:
        deps._drop(stale_file)
    # register tables whose CSV content is new or differs from the
    # recorded checksum
    changed = []
    for table in audeer.progress_bar(
            db.tables,
            desc='Find tables',
            disable=not verbose,
    ):
        file = f'db.{table}.csv'
        checksum = audbackend.md5(os.path.join(db_root, file))
        if file not in deps or checksum != deps.checksum(file):
            deps._add_meta(file, version, table, checksum)
            changed.append(table)
    return changed
def _find_media(
        db: audformat.Database,
        db_root: str,
        version: str,
        deps: Dependencies,
        archives: typing.Mapping[str, str],
        num_workers: int,
        verbose: bool,
) -> typing.Set[str]:
    r"""Update media dependencies in place.

    Returns the set of archive IDs that contained files removed from the
    database; ``deps`` is updated with new and altered media entries.
    """
    # release dependencies to removed media
    # and select according archives for upload
    media = set()
    db_media = db.files
    for file in set(deps.media) - set(db_media):
        media.add(deps.archive(file))
        deps._drop(file)
    # update version of altered media and insert new ones
    def job(file):
        # NOTE: appends to add_media / update_media, which are created just
        # below before run_tasks() invokes this closure. Appending from
        # worker tasks relies on list.append being safe under CPython's
        # GIL -- presumably intentional; confirm if workers are processes.
        path = os.path.join(db_root, file)
        if file not in deps:
            # new file: hash it and pick (or derive) its archive ID
            checksum = audbackend.md5(path)
            if file in archives:
                archive = archives[file]
            else:
                # archive ID derived deterministically from the POSIX-style
                # file path
                archive = audeer.uid(from_string=file.replace('\\', '/'))
            values = _media_values(
                db_root,
                file,
                version,
                archive,
                checksum,
            )
            add_media.append(values)
        elif not deps.removed(file):
            # existing file: re-hash and record an update when it changed
            checksum = audbackend.md5(path)
            if checksum != deps.checksum(file):
                archive = deps.archive(file)
                values = _media_values(
                    db_root,
                    file,
                    version,
                    archive,
                    checksum,
                )
                update_media.append(values)
    add_media = []
    update_media = []
    audeer.run_tasks(
        job,
        params=[([file], {}) for file in db_media],
        num_workers=num_workers,
        progress_bar=verbose,
        task_description='Find media',
    )
    if update_media:
        deps._update_media(update_media)
    if add_media:
        deps._add_media(add_media)
    return media
def _media_values(
    root: str,
    file: str,
    version: str,
    archive: str,
    checksum: str,
) -> typing.Tuple[str, str, int, int, str, float, str, int, float, int, str]:
    r"""Assemble the dependency-table row for a single media file."""
    fmt = audeer.file_extension(file).lower()
    path = os.path.join(root, file)
    try:
        # non SND files report a bit depth of None -> store 0 instead
        bit_depth = audiofile.bit_depth(path) or 0
        channels = audiofile.channels(path)
        duration = audiofile.duration(path, sloppy=True)
        sampling_rate = audiofile.sampling_rate(path)
    except FileNotFoundError:  # pragma: nocover
        # audiofile shells out to sox/mediainfo for non-standard formats;
        # a missing binary surfaces as FileNotFoundError
        raise RuntimeError(
            f"sox and mediainfo have to be installed "
            f"to publish '{fmt}' media files."
        )
    # column order must match the dependency table schema
    return (
        file,
        archive,
        bit_depth,
        channels,
        checksum,
        duration,
        fmt,
        0,  # removed flag
        sampling_rate,
        define.DependType.MEDIA,
        version,
    )
def _put_media(
    media: typing.Set[str],
    db_root: str,
    db_name: str,
    version: str,
    deps: Dependencies,
    backend: audbackend.Backend,
    num_workers: typing.Optional[int],
    verbose: bool,
):
    r"""Upload media archives that contain new or altered files.

    ``media`` (archives selected so far, e.g. those of removed files)
    is extended in place with archives holding files whose dependency
    version equals the version being published.
    """
    # create a mapping from archives to media and
    # select archives with new or altered files for upload
    map_media_to_files = collections.defaultdict(list)
    for file in deps.media:
        if not deps.removed(file):
            map_media_to_files[deps.archive(file)].append(file)
            if deps.version(file) == version:
                media.add(deps.archive(file))

    # upload new and altered archives if it contains at least one file
    if media:
        def job(archive):
            # archives whose every file was removed have no entry here
            if archive in map_media_to_files:
                # remember files whose dependency version must be bumped
                for file in map_media_to_files[archive]:
                    update_media.append(file)
                archive_file = backend.join(
                    db_name,
                    define.DEPEND_TYPE_NAMES[define.DependType.MEDIA],
                    archive,
                )
                backend.put_archive(
                    db_root,
                    map_media_to_files[archive],
                    archive_file,
                    version,
                )

        # job() closes over this list; filled from workers in run_tasks()
        update_media = []
        audeer.run_tasks(
            job,
            params=[([archive], {}) for archive in media],
            num_workers=num_workers,
            progress_bar=verbose,
            task_description='Put media',
        )
        deps._update_media_version(update_media, version)
def _put_tables(
    tables: typing.List[str],
    db_root: str,
    db_name: str,
    version: str,
    backend: audbackend.Backend,
    num_workers: typing.Optional[int],
    verbose: bool,
):
    r"""Upload the CSV file of every listed table as its own archive."""
    def upload(table: str):
        # tables live under <db_name>/<meta folder>/<table> on the backend
        target = backend.join(
            db_name,
            define.DEPEND_TYPE_NAMES[define.DependType.META],
            table,
        )
        backend.put_archive(db_root, f'db.{table}.csv', target, version)

    audeer.run_tasks(
        upload,
        params=[([table], {}) for table in tables],
        num_workers=num_workers,
        progress_bar=verbose,
        task_description='Put tables',
    )
def publish(
db_root: str,
version: str,
repository: Repository,
*,
archives: typing.Mapping[str, str] = None,
previous_version: typing.Optional[str] = 'latest',
cache_root: str = None,
num_workers: typing.Optional[int] = 1,
verbose: bool = True,
) -> Dependencies:
r"""Publish database.
A database can have dependencies
to files of an older version of itself.
E.g. you might add a few new files to an existing database
and publish as a new version.
:func:`audb.publish` will upload then only the new files
and store dependencies on the already published files.
To allow for dependencies
you first have to load the version of the database
that the new version should depend on
with :func:`audb.load_to` to ``db_root``.
Afterwards you make your changes to that folder
and run :func:`audb.publish`.
Setting ``previous_version=None`` allows you
to start from scratch and upload all files
even if an older versions exist.
In this case you don't call :func:`audb.load_to`
before running :func:`audb.publish`.
Args:
db_root: root directory of database
version: version string
repository: name of repository
archives: dictionary mapping files to archive names.
Can be used to bundle files into archives,
which will speed up communication with the server
if the database contains many small files.
Archive name must not include an extension
previous_version: specifies the version
this publication should be based on.
If ``'latest'``
it will use automatically the latest published version
or ``None``
if no version was published.
If ``None`` it assumes you start from scratch
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used.
Only used to read the dependencies of the previous version
num_workers: number of parallel jobs or 1 for sequential
processing. If ``None`` will be set to the number of
processors on the machine multiplied by 5
verbose: show debug messages
Returns:
dependency object
Raises:
RuntimeError: if version already exists
RuntimeError: if database tables reference non-existing files
RuntimeError: if database in ``db_root`` depends on other version
as indicated by ``previous_version``
RuntimeError: if database is not portable,
see :meth:`audformat.Database.is_portable`
RuntimeError: if non-standard formats like MP3 and MP4 are published,
but sox and/or mediafile is not installed
"""
db = audformat.Database.load(db_root, load_data=False)
backend = audbackend.create(
repository.backend,
repository.host,
repository.name,
)
remote_header = backend.join(db.name, define.HEADER_FILE)
versions = backend.versions(remote_header)
if version in versions:
raise RuntimeError(
'A version '
f"'{version}' "
'already exists for database '
f"'{db.name}'."
)
if previous_version == 'latest':
if len(versions) > 0:
previous_version = versions[-1]
else:
previous_version = None
# load database and dependencies
deps_path = os.path.join(db_root, define.DEPENDENCIES_FILE)
deps = Dependencies()
if os.path.exists(deps_path):
deps.load(deps_path)
# check if database folder depends on the right version
# dependencies shouldn't be there
if previous_version is None and len(deps) > 0:
raise RuntimeError(
f"You did not set a dependency to a previous version, "
f"but you have a '{define.DEPENDENCIES_FILE}' file present "
f"in {db_root}."
)
# dependencies missing
if previous_version is not None and len(deps) == 0:
raise RuntimeError(
f"You want to depend on '{previous_version}' "
f"of {db.name}, "
f"but you don't have a '{define.DEPENDENCIES_FILE}' file present "
f"in {db_root}. "
f"Did you forgot to call "
f"'audb.load_to({db_root}, {db.name}, "
f"version={previous_version}?"
)
# dependencies do not match version
if previous_version is not None and len(deps) > 0:
with tempfile.TemporaryDirectory() as tmp_dir:
previous_deps_path = os.path.join(
tmp_dir,
define.DEPENDENCIES_FILE,
)
previous_deps = dependencies(
db.name,
version=previous_version,
cache_root=cache_root,
)
previous_deps.save(previous_deps_path)
if audbackend.md5(deps_path) != audbackend.md5(previous_deps_path):
raise RuntimeError(
f"You want to depend on '{previous_version}' "
f"of {db.name}, "
f"but the MD5 sum of your "
f"'{define.DEPENDENCIES_FILE}' file "
f"in {db_root} "
f"does not match the MD5 sum of the corresponding file "
f"for the requested version in the repository. "
f"Did you forgot to call "
f"'audb.load_to({db_root}, {db.name}, "
f"version='{previous_version}') "
f"or modified the file manually?"
)
# load database from folder
db = audformat.Database.load(db_root, load_data=True)
# check all tables are conform with audformat
if not db.is_portable:
raise RuntimeError(
"Some files in the tables have absolute paths "
"or use '.' or '..' to address a folder. "
"Please replace those paths by relative paths "
"and use folder names instead of dots."
)
_check_for_duplicates(db, num_workers, verbose)
# check all files referenced in a table exists
missing_files = [
f for f in db.files
if not os.path.exists(os.path.join(db_root, f))
]
if len(missing_files) > 0:
number_of_presented_files = 20
error_msg = (
f'{len(missing_files)} files are referenced in tables '
'that cannot be found. '
f"Missing files are: '{missing_files[:number_of_presented_files]}"
)
if len(missing_files) <= number_of_presented_files:
error_msg += "'."
else:
error_msg += ", ...'."
raise RuntimeError(error_msg)
# make sure all tables are stored in CSV format
for table_id, table in db.tables.items():
table_path = os.path.join(db_root, f'db.{table_id}')
table_ext = audformat.define.TableStorageFormat.CSV
if | |
_ida_hexrays.user_iflags_find(*args)
# SWIG-generated module-level wrappers around the C++ ``user_iflags_t`` map.
# Generated code: do not modify by hand; all work happens in _ida_hexrays.

def user_iflags_insert(*args):
    """
    user_iflags_insert(map, key, val) -> user_iflags_iterator_t
    Insert new ( 'citem_locator_t' , int32) pair into user_iflags_t.
    @param map (C++: user_iflags_t *)
    @param key (C++: const citem_locator_t &)
    @param val (C++: const int32 &)
    """
    return _ida_hexrays.user_iflags_insert(*args)

def user_iflags_erase(*args):
    """
    user_iflags_erase(map, p)
    Erase current element from user_iflags_t.
    @param map (C++: user_iflags_t *)
    @param p (C++: user_iflags_iterator_t)
    """
    return _ida_hexrays.user_iflags_erase(*args)

def user_iflags_clear(*args):
    """
    user_iflags_clear(map)
    Clear user_iflags_t.
    @param map (C++: user_iflags_t *)
    """
    return _ida_hexrays.user_iflags_clear(*args)

def user_iflags_size(*args):
    """
    user_iflags_size(map) -> size_t
    Get size of user_iflags_t.
    @param map (C++: user_iflags_t *)
    """
    return _ida_hexrays.user_iflags_size(*args)

def user_iflags_free(*args):
    """
    user_iflags_free(map)
    Delete user_iflags_t instance.
    @param map (C++: user_iflags_t *)
    """
    return _ida_hexrays.user_iflags_free(*args)

def user_iflags_new(*args):
    """
    user_iflags_new() -> user_iflags_t
    Create a new user_iflags_t instance.
    """
    return _ida_hexrays.user_iflags_new(*args)
class user_unions_iterator_t(object):
    """
    Proxy of C++ user_unions_iterator_t class
    (SWIG-generated wrapper; do not edit by hand.)
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    x = _swig_property(_ida_hexrays.user_unions_iterator_t_x_get, _ida_hexrays.user_unions_iterator_t_x_set)

    def __eq__(self, *args):
        """
        __eq__(self, p) -> bool
        """
        return _ida_hexrays.user_unions_iterator_t___eq__(self, *args)

    def __ne__(self, *args):
        """
        __ne__(self, p) -> bool
        """
        return _ida_hexrays.user_unions_iterator_t___ne__(self, *args)

    def __init__(self, *args):
        """
        __init__(self) -> user_unions_iterator_t
        """
        this = _ida_hexrays.new_user_unions_iterator_t(*args)
        # SWIG memory-ownership handshake with the C extension
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_user_unions_iterator_t
    __del__ = lambda self : None;

# register the proxy class with the C extension (SWIG boilerplate)
user_unions_iterator_t_swigregister = _ida_hexrays.user_unions_iterator_t_swigregister
user_unions_iterator_t_swigregister(user_unions_iterator_t)
# SWIG-generated module-level accessors for the C++ ``user_unions_t`` map.
# Generated code: do not modify by hand.

def user_unions_begin(*args):
    """
    user_unions_begin(map) -> user_unions_iterator_t
    Get iterator pointing to the beginning of user_unions_t.
    @param map (C++: const user_unions_t *)
    """
    return _ida_hexrays.user_unions_begin(*args)

def user_unions_end(*args):
    """
    user_unions_end(map) -> user_unions_iterator_t
    Get iterator pointing to the end of user_unions_t.
    @param map (C++: const user_unions_t *)
    """
    return _ida_hexrays.user_unions_end(*args)

def user_unions_next(*args):
    """
    user_unions_next(p) -> user_unions_iterator_t
    Move to the next element.
    @param p (C++: user_unions_iterator_t)
    """
    return _ida_hexrays.user_unions_next(*args)

def user_unions_prev(*args):
    """
    user_unions_prev(p) -> user_unions_iterator_t
    Move to the previous element.
    @param p (C++: user_unions_iterator_t)
    """
    return _ida_hexrays.user_unions_prev(*args)

def user_unions_first(*args):
    """
    user_unions_first(p) -> ea_t const &
    Get reference to the current map key.
    @param p (C++: user_unions_iterator_t)
    """
    return _ida_hexrays.user_unions_first(*args)

def user_unions_second(*args):
    """
    user_unions_second(p) -> intvec_t
    Get reference to the current map value.
    @param p (C++: user_unions_iterator_t)
    """
    return _ida_hexrays.user_unions_second(*args)

def user_unions_find(*args):
    """
    user_unions_find(map, key) -> user_unions_iterator_t
    Find the specified key in user_unions_t.
    @param map (C++: const user_unions_t *)
    @param key (C++: const ea_t &)
    """
    return _ida_hexrays.user_unions_find(*args)

def user_unions_insert(*args):
    """
    user_unions_insert(map, key, val) -> user_unions_iterator_t
    Insert new (ea_t, intvec_t) pair into user_unions_t.
    @param map (C++: user_unions_t *)
    @param key (C++: const ea_t &)
    @param val (C++: const intvec_t &)
    """
    return _ida_hexrays.user_unions_insert(*args)

def user_unions_erase(*args):
    """
    user_unions_erase(map, p)
    Erase current element from user_unions_t.
    @param map (C++: user_unions_t *)
    @param p (C++: user_unions_iterator_t)
    """
    return _ida_hexrays.user_unions_erase(*args)

def user_unions_clear(*args):
    """
    user_unions_clear(map)
    Clear user_unions_t.
    @param map (C++: user_unions_t *)
    """
    return _ida_hexrays.user_unions_clear(*args)

def user_unions_size(*args):
    """
    user_unions_size(map) -> size_t
    Get size of user_unions_t.
    @param map (C++: user_unions_t *)
    """
    return _ida_hexrays.user_unions_size(*args)

def user_unions_free(*args):
    """
    user_unions_free(map)
    Delete user_unions_t instance.
    @param map (C++: user_unions_t *)
    """
    return _ida_hexrays.user_unions_free(*args)

def user_unions_new(*args):
    """
    user_unions_new() -> user_unions_t
    Create a new user_unions_t instance.
    """
    return _ida_hexrays.user_unions_new(*args)
class user_labels_iterator_t(object):
    """
    Proxy of C++ user_labels_iterator_t class
    (SWIG-generated wrapper; do not edit by hand.)
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    x = _swig_property(_ida_hexrays.user_labels_iterator_t_x_get, _ida_hexrays.user_labels_iterator_t_x_set)

    def __eq__(self, *args):
        """
        __eq__(self, p) -> bool
        """
        return _ida_hexrays.user_labels_iterator_t___eq__(self, *args)

    def __ne__(self, *args):
        """
        __ne__(self, p) -> bool
        """
        return _ida_hexrays.user_labels_iterator_t___ne__(self, *args)

    def __init__(self, *args):
        """
        __init__(self) -> user_labels_iterator_t
        """
        this = _ida_hexrays.new_user_labels_iterator_t(*args)
        # SWIG memory-ownership handshake with the C extension
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_user_labels_iterator_t
    __del__ = lambda self : None;

# register the proxy class with the C extension (SWIG boilerplate)
user_labels_iterator_t_swigregister = _ida_hexrays.user_labels_iterator_t_swigregister
user_labels_iterator_t_swigregister(user_labels_iterator_t)
# SWIG-generated module-level accessors for the C++ ``user_labels_t`` map.
# Generated code: do not modify by hand.

def user_labels_begin(*args):
    """
    user_labels_begin(map) -> user_labels_iterator_t
    Get iterator pointing to the beginning of user_labels_t.
    @param map (C++: const user_labels_t *)
    """
    return _ida_hexrays.user_labels_begin(*args)

def user_labels_end(*args):
    """
    user_labels_end(map) -> user_labels_iterator_t
    Get iterator pointing to the end of user_labels_t.
    @param map (C++: const user_labels_t *)
    """
    return _ida_hexrays.user_labels_end(*args)

def user_labels_next(*args):
    """
    user_labels_next(p) -> user_labels_iterator_t
    Move to the next element.
    @param p (C++: user_labels_iterator_t)
    """
    return _ida_hexrays.user_labels_next(*args)

def user_labels_prev(*args):
    """
    user_labels_prev(p) -> user_labels_iterator_t
    Move to the previous element.
    @param p (C++: user_labels_iterator_t)
    """
    return _ida_hexrays.user_labels_prev(*args)

def user_labels_first(*args):
    """
    user_labels_first(p) -> int const &
    Get reference to the current map key.
    @param p (C++: user_labels_iterator_t)
    """
    return _ida_hexrays.user_labels_first(*args)

def user_labels_second(*args):
    """
    user_labels_second(p) -> qstring &
    Get reference to the current map value.
    @param p (C++: user_labels_iterator_t)
    """
    return _ida_hexrays.user_labels_second(*args)

def user_labels_find(*args):
    """
    user_labels_find(map, key) -> user_labels_iterator_t
    Find the specified key in user_labels_t.
    @param map (C++: const user_labels_t *)
    @param key (C++: const int &)
    """
    return _ida_hexrays.user_labels_find(*args)

def user_labels_insert(*args):
    """
    user_labels_insert(map, key, val) -> user_labels_iterator_t
    Insert new (int, qstring) pair into user_labels_t.
    @param map (C++: user_labels_t *)
    @param key (C++: const int &)
    @param val (C++: const qstring &)
    """
    return _ida_hexrays.user_labels_insert(*args)

def user_labels_erase(*args):
    """
    user_labels_erase(map, p)
    Erase current element from user_labels_t.
    @param map (C++: user_labels_t *)
    @param p (C++: user_labels_iterator_t)
    """
    return _ida_hexrays.user_labels_erase(*args)

def user_labels_clear(*args):
    """
    user_labels_clear(map)
    Clear user_labels_t.
    @param map (C++: user_labels_t *)
    """
    return _ida_hexrays.user_labels_clear(*args)

def user_labels_size(*args):
    """
    user_labels_size(map) -> size_t
    Get size of user_labels_t.
    @param map (C++: user_labels_t *)
    """
    return _ida_hexrays.user_labels_size(*args)

def user_labels_free(*args):
    """
    user_labels_free(map)
    Delete user_labels_t instance.
    @param map (C++: user_labels_t *)
    """
    return _ida_hexrays.user_labels_free(*args)

def user_labels_new(*args):
    """
    user_labels_new() -> user_labels_t
    Create a new user_labels_t instance.
    """
    return _ida_hexrays.user_labels_new(*args)
class eamap_iterator_t(object):
    """
    Proxy of C++ eamap_iterator_t class
    (SWIG-generated wrapper; do not edit by hand.)
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    x = _swig_property(_ida_hexrays.eamap_iterator_t_x_get, _ida_hexrays.eamap_iterator_t_x_set)

    def __eq__(self, *args):
        """
        __eq__(self, p) -> bool
        """
        return _ida_hexrays.eamap_iterator_t___eq__(self, *args)

    def __ne__(self, *args):
        """
        __ne__(self, p) -> bool
        """
        return _ida_hexrays.eamap_iterator_t___ne__(self, *args)

    def __init__(self, *args):
        """
        __init__(self) -> eamap_iterator_t
        """
        this = _ida_hexrays.new_eamap_iterator_t(*args)
        # SWIG memory-ownership handshake with the C extension
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_eamap_iterator_t
    __del__ = lambda self : None;

# register the proxy class with the C extension (SWIG boilerplate)
eamap_iterator_t_swigregister = _ida_hexrays.eamap_iterator_t_swigregister
eamap_iterator_t_swigregister(eamap_iterator_t)
# SWIG-generated module-level accessors for the C++ ``eamap_t`` map.
# Generated code: do not modify by hand.

def eamap_begin(*args):
    """
    eamap_begin(map) -> eamap_iterator_t
    Get iterator pointing to the beginning of eamap_t.
    @param map (C++: const eamap_t *)
    """
    return _ida_hexrays.eamap_begin(*args)

def eamap_end(*args):
    """
    eamap_end(map) -> eamap_iterator_t
    Get iterator pointing to the end of eamap_t.
    @param map (C++: const eamap_t *)
    """
    return _ida_hexrays.eamap_end(*args)

def eamap_next(*args):
    """
    eamap_next(p) -> eamap_iterator_t
    Move to the next element.
    @param p (C++: eamap_iterator_t)
    """
    return _ida_hexrays.eamap_next(*args)

def eamap_prev(*args):
    """
    eamap_prev(p) -> eamap_iterator_t
    Move to the previous element.
    @param p (C++: eamap_iterator_t)
    """
    return _ida_hexrays.eamap_prev(*args)

def eamap_first(*args):
    """
    eamap_first(p) -> ea_t const &
    Get reference to the current map key.
    @param p (C++: eamap_iterator_t)
    """
    return _ida_hexrays.eamap_first(*args)

def eamap_second(*args):
    """
    eamap_second(p) -> cinsnptrvec_t
    Get reference to the current map value.
    @param p (C++: eamap_iterator_t)
    """
    return _ida_hexrays.eamap_second(*args)

def eamap_find(*args):
    """
    eamap_find(map, key) -> eamap_iterator_t
    Find the specified key in eamap_t.
    @param map (C++: const eamap_t *)
    @param key (C++: const ea_t &)
    """
    return _ida_hexrays.eamap_find(*args)

def eamap_insert(*args):
    """
    eamap_insert(map, key, val) -> eamap_iterator_t
    Insert new (ea_t, cinsnptrvec_t) pair into eamap_t.
    @param map (C++: eamap_t *)
    @param key (C++: const ea_t &)
    @param val (C++: const cinsnptrvec_t &)
    """
    return _ida_hexrays.eamap_insert(*args)

def eamap_erase(*args):
    """
    eamap_erase(map, p)
    Erase current element from eamap_t.
    @param map (C++: eamap_t *)
    @param p (C++: eamap_iterator_t)
    """
    return _ida_hexrays.eamap_erase(*args)

def eamap_clear(*args):
    """
    eamap_clear(map)
    Clear eamap_t.
    @param map (C++: eamap_t *)
    """
    return _ida_hexrays.eamap_clear(*args)

def eamap_size(*args):
    """
    eamap_size(map) -> size_t
    Get size of eamap_t.
    @param map (C++: eamap_t *)
    """
    return _ida_hexrays.eamap_size(*args)

def eamap_free(*args):
    """
    eamap_free(map)
    Delete eamap_t instance.
    @param map (C++: eamap_t *)
    """
    return _ida_hexrays.eamap_free(*args)

def eamap_new(*args):
    """
    eamap_new() -> eamap_t
    Create a new eamap_t instance.
    """
    return _ida_hexrays.eamap_new(*args)
class boundaries_iterator_t(object):
"""
Proxy of C++ boundaries_iterator_t class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
x = _swig_property(_ida_hexrays.boundaries_iterator_t_x_get, _ida_hexrays.boundaries_iterator_t_x_set)
def __eq__(self, *args):
"""
__eq__(self, p) -> bool
"""
return _ida_hexrays.boundaries_iterator_t___eq__(self, *args)
def __ne__(self, *args):
"""
__ne__(self, p) -> bool
"""
return _ida_hexrays.boundaries_iterator_t___ne__(self, *args)
def __init__(self, *args):
"""
__init__(self) -> boundaries_iterator_t
"""
this = _ida_hexrays.new_boundaries_iterator_t(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ida_hexrays.delete_boundaries_iterator_t
__del__ = lambda | |
<reponame>gitter-badger/BoAT-X-Framework<filename>tools/eth2c.py
#!/usr/bin/python
# Copyright (C) 2018-2021 aitos.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This python script generates Ethereum's C language interface function from contract ABI (solidity).
# Not all contract ABI can be converted to C interface because C is lack of object-oriented programming
# capability. If the tool fails to generate the interface, you may have to organize the contract call
# manually.
# The generated C API is named "<ContractName><ContractApiName>", with a transaction pointer argument
# followed by contract arguments.
#
# For state-less contract call, the generated C API retuns a HEX string representing what is received
# from the blockchain network. If the call is successful, the string is the return value of the contract
# function. The return value string has to be parsed manually as per the contract prototype. If the call
# fails, it returns NULL.
#
# For value transfer or state-ful contract call, i.e. a transaction, the generated C API returns a HEX
# string representing the transaction hash. If the transaction fails, it returns NULL.
import sys
import json
import os.path
import string
# C source fragments emitted verbatim at the top of every generated file.
generated_declaration_block_str = '''/******************************************************************************
This file is generated from contract ABI. DO NOT modify it by hand.
******************************************************************************/
'''

generated_include_block_str = '''
// Generated C function interface from smart contract ABI
#include "boatiotsdk.h"
#include "sha3.h"
'''

# Types specially for Solidity
generated_types_for_solidity_str = '''
// Types specially defined for mapping from Solidity
typedef BUINT8 Bbytes1[1];
typedef BUINT8 Bbytes2[2];
typedef BUINT8 Bbytes3[3];
typedef BUINT8 Bbytes4[4];
typedef BUINT8 Bbytes5[5];
typedef BUINT8 Bbytes6[6];
typedef BUINT8 Bbytes7[7];
typedef BUINT8 Bbytes8[8];
typedef BUINT8 Bbytes9[9];
typedef BUINT8 Bbytes10[10];
typedef BUINT8 Bbytes11[11];
typedef BUINT8 Bbytes12[12];
typedef BUINT8 Bbytes13[13];
typedef BUINT8 Bbytes14[14];
typedef BUINT8 Bbytes15[15];
typedef BUINT8 Bbytes16[16];
typedef BUINT8 Bbytes17[17];
typedef BUINT8 Bbytes18[18];
typedef BUINT8 Bbytes19[19];
typedef BUINT8 Bbytes20[20];
typedef BUINT8 Bbytes21[21];
typedef BUINT8 Bbytes22[22];
typedef BUINT8 Bbytes23[23];
typedef BUINT8 Bbytes24[24];
typedef BUINT8 Bbytes25[25];
typedef BUINT8 Bbytes26[26];
typedef BUINT8 Bbytes27[27];
typedef BUINT8 Bbytes28[28];
typedef BUINT8 Bbytes29[29];
typedef BUINT8 Bbytes30[30];
typedef BUINT8 Bbytes31[31];
typedef BUINT8 Bbytes32[32];
typedef Bbytes16 BUINT128;
typedef Bbytes16 SUINT128;
typedef Bbytes32 BUINT256;
typedef Bbytes32 SUINT256;
'''

# Map from type used in smart contract to type in C
# Types not listed are not supported
type_mapping = {
    # Variable-length types not supported yet
    'string': 'BCHAR*',
    'bytes': 'BUINT8*',
    # Fixed-length types
    'address': 'BoatAddress',
    'bool': 'BUINT8',
    'uint8': 'BUINT8',
    'uint16': 'BUINT16',
    'uint32': 'BUINT32',
    'uint64': 'BUINT64',
    'uint128': 'BUINT128',
    'uint256': 'BUINT256',
    'int8': 'BSINT8',
    'int16': 'BSINT16',
    'int32': 'BSINT32',
    'int64': 'BSINT64',
    'int128': 'BSINT128',
    'int256': 'BSINT256',
    'bytes1': 'Bbytes1',
    'bytes2': 'Bbytes2',
    'bytes3': 'Bbytes3',
    'bytes4': 'Bbytes4',
    'bytes5': 'Bbytes5',
    'bytes6': 'Bbytes6',
    'bytes7': 'Bbytes7',
    'bytes8': 'Bbytes8',
    'bytes9': 'Bbytes9',
    'bytes10': 'Bbytes10',
    'bytes11': 'Bbytes11',
    'bytes12': 'Bbytes12',
    'bytes13': 'Bbytes13',
    'bytes14': 'Bbytes14',
    'bytes15': 'Bbytes15',
    'bytes16': 'Bbytes16',
    'bytes17': 'Bbytes17',
    'bytes18': 'Bbytes18',
    'bytes19': 'Bbytes19',
    'bytes20': 'Bbytes20',
    'bytes21': 'Bbytes21',
    'bytes22': 'Bbytes22',
    'bytes23': 'Bbytes23',
    'bytes24': 'Bbytes24',
    'bytes25': 'Bbytes25',
    'bytes26': 'Bbytes26',
    'bytes27': 'Bbytes27',
    'bytes28': 'Bbytes28',
    'bytes29': 'Bbytes29',
    'bytes30': 'Bbytes30',
    'bytes31': 'Bbytes31',
    'bytes32': 'Bbytes32'
}
class CFunctionGen():
def __init__(self, abi_file_name, output_path):
self.abi_object = None
self.c_file_content = ''
self.h_file_content = ''
with open(abi_file_name) as file_handle:
self.abi_object = json.load(file_handle)
self.abi_file_name = os.path.basename(abi_file_name)
#print(self.abi_object);
self.output_path = output_path
def require_endian_change(self, abitype):
    """Return True if *abitype* is an integer-like Solidity type whose
    byte order differs between the host and the ABI encoding.

    Fixed-size ``bytesN`` values and variable-length types keep their
    byte order and therefore return False.
    """
    # set membership instead of dict.keys() scan; the mapped C type
    # names were never used by this predicate
    endian_sensitive_types = {
        'bool',
        'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
        'int8', 'int16', 'int32', 'int64', 'int128', 'int256',
    }
    return abitype in endian_sensitive_types
def is_array_type(self, abitype):
    """Return True if *abitype* maps to a C array typedef
    (``address``, 128/256-bit integers, and ``bytes1``..``bytes32``),
    i.e. values that are passed by pointer in the generated C code.
    """
    # generate the 32 fixed bytesN names instead of listing them;
    # the mapped C type names were never used by this predicate
    array_types = {
        'address',
        'uint128', 'int128',
        'uint256', 'int256',
    } | {'bytes' + str(n) for n in range(1, 33)}
    return abitype in array_types
def is_nonFixedSize_type(self, abitype):
    """Return True if *abitype* has no fixed encoded size
    (Solidity ``string`` or ``bytes``)."""
    return abitype in ('string', 'bytes')
def is_needFlagInputLen_type(self, abitype):
    """Return True if *abitype* needs an extra C length parameter
    (``bytes`` or any ``[]`` array type), because the element count
    cannot be derived from the value itself."""
    return abitype == 'bytes' or '[]' in abitype
#check function has non-fixed input type or not
def is_has_nonFixed_type(self, abi_item):
inputs = abi_item['inputs']
inputs_len = len(inputs)
i = 0
while i < inputs_len:
inputType = inputs[i]['type']
if self.is_nonFixedSize_type(inputType) == True:
return True
i += 1
return False
def gen_input_name(self, abi_item):
    """Return the C parameter name for one ABI input entry.

    Unnamed inputs (allowed by the ABI spec) get a placeholder name
    derived from their mapped C type, e.g. ``BUINT32Value``.
    """
    if abi_item['name']:
        return abi_item['name']
    return type_mapping[abi_item['type']] + 'Value'
def gen_nonFixed_mallocSize_exp(self, abi_item, spaceNum = 38):
    """Build a C expression summing the encoded size of every
    non-fixed-size input (string/bytes) of *abi_item*.

    Each term is ``BOAT_ROUNDUP(<len>, 32) + 32`` (payload rounded up to
    a 32-byte slot plus the 32-byte length word), joined by ``+`` with a
    line continuation; *spaceNum* spaces align continuation lines in the
    generated C source.  Returns '' if the function has no such inputs.
    """
    inputs = abi_item['inputs']
    inputs_len = len(inputs)
    nonFixedSize_type_malloc = ''
    i = 0
    while i < inputs_len:
        inputType = inputs[i]['type']
        inputName = self.gen_input_name(inputs[i])
        if self.is_nonFixedSize_type(inputType) == True:
            # indent every term after the first to align in the C output
            if len(nonFixedSize_type_malloc) != 0:
                nonFixedSize_type_malloc += ' ' * spaceNum
            if self.is_needFlagInputLen_type(inputType) == True:
                # length comes from the extra <name>Len C parameter
                nonFixedSize_type_malloc += 'BOAT_ROUNDUP(' + inputName + 'Len , 32) + 32' + ' + \\\n'
            else:
                # strings measure their own length at runtime
                nonFixedSize_type_malloc += 'BOAT_ROUNDUP(' + 'strlen(' + inputName + '), 32) + 32' + ' + \\\n'
        else:
            pass
        i += 1
    # trailing padding so the caller can append the fixed-size part
    if len(nonFixedSize_type_malloc) != 0:
        nonFixedSize_type_malloc += ' ' * spaceNum
    return nonFixedSize_type_malloc
# gen non-fixed type offset location
# string rtn
def get_nonFixed_offset(self, abi_item, index):
    """Build a C expression for the ABI data offset of input *index*.

    The offset is the size of the head block (32 bytes per input) plus
    the encoded sizes of all non-fixed-size inputs before *index*; each
    size term is ``BOAT_ROUNDUP(<len>, 32) + 32``.  Continuation lines
    are aligned with 27 spaces for the generated C source.
    """
    inputs = abi_item['inputs']
    inputs_len = len(inputs)
    # head section: one 32-byte slot per input
    offset_int = inputs_len * 32
    offset_str = ''
    i = 0
    while i < index:
        inputType = inputs[i]['type']
        inputName = self.gen_input_name(inputs[i])
        if self.is_nonFixedSize_type(inputType) == True:
            if len(offset_str) != 0:
                offset_str += ' ' * 27
            if self.is_needFlagInputLen_type(inputType) == True:
                offset_str += 'BOAT_ROUNDUP(' + inputName + 'Len , 32) + 32 ' + '+ \\\n'
            else:
                offset_str += 'BOAT_ROUNDUP(' + 'strlen(' + inputName + '), 32) + 32 ' + '+ \\\n'
        else:
            pass
        i += 1
    # drop the trailing '\\\n' of the last term and append the head size
    return offset_str[0:len(offset_str) - 2] + str(offset_int)
# generate non-fixed data length
def get_nonFixed_length(self, abi_item, input_nonFixed_type):
    """Return a C length expression for the first input of *abi_item*
    whose type equals *input_nonFixed_type* and is non-fixed-size;
    '' if no such input exists.
    """
    inputs = abi_item['inputs']
    inputs_len = len(inputs)
    length_str = ''
    i = 0
    while i < inputs_len:
        inputType = inputs[i]['type']
        inputName = self.gen_input_name(inputs[i])
        if self.is_nonFixedSize_type(inputType) == True:
            if input_nonFixed_type == inputType:
                if self.is_needFlagInputLen_type(inputType) == True:
                    # length supplied via the extra <name>Len C parameter
                    length_str += 'BOAT_ROUNDUP(' + inputName + 'Len , 32)'
                else:
                    length_str += 'BOAT_ROUNDUP(' + 'strlen(' + inputName + '), 32)'
                # only the first matching input is reported
                break
            else:
                pass
        i += 1
    return length_str
def generate_c_funcs(self):
    """Emit the fixed preamble and one C interface per ABI function
    into the in-memory .c and .h buffers."""
    if self.abi_object is None:
        return
    preamble = (
        generated_declaration_block_str
        + generated_include_block_str
        + generated_types_for_solidity_str
    )
    self.c_file_content += preamble
    self.h_file_content += preamble
    # events/constructors etc. are skipped; only functions get wrappers
    for item in self.abi_object['abi']:
        if item['type'] == 'function':
            self.generate_func_prototype(item)
            self.generate_func_body(item)
def generate_func_prototype(self, abi_item):
inputName_str = ''
# Extract type of return value
if len(abi_item['outputs']) == 0:
retval_str = 'BCHAR *' #'void'
else:
#retval_str = type_mapping[abi_item['outputs'][0]['type']]
# For stateful transaction, returns Tx Hash;
# For state-less function call, returns a string representing the return value
retval_str = 'BCHAR *'
# Extract function name (Prefix with ABI file name because multiple contracts may have same function names)
func_name_str = self.abi_file_name.replace('.json','')
func_name_str = func_name_str.replace('.','_') + '_' + abi_item['name']
# Extract function arguments
inputs = abi_item['inputs']
inputs_len = len(inputs)
input_str = '('
input_str += 'BoatEthTx *tx_ptr'
if inputs_len != 0:
input_str += ', '
i = 0
while i < inputs_len:
input = inputs[i]
inputName_str = self.gen_input_name(input)
try:
input_str += type_mapping[input['type']] + ' ' + inputName_str
#for type 'bytes', <type>[], add a input param to indicate the input lengths
if self.is_needFlagInputLen_type(input['type']) == True:
input_str += ', BUINT32 ' + inputName_str + 'Len'
except:
print(abi_item['name'] + '(): Solidity type (' + input['type'] + ') is incompatible with C interface auto generator.')
print('You may have to manually construct the transaction.')
quit(-1)
if i != inputs_len -1:
input_str += ', '
i = i+1
input_str += ')'
# Generate function prototype
self.c_file_content | |
import glob, os, shutil, sys, json
from pathlib import Path
import pylab as plt
import trimesh
import open3d
from easydict import EasyDict
import numpy as np
from tqdm import tqdm
import utils
from features import MeshFPFH
# When truthy, re-label a known-bad patch of faces on the human_seg shrec test
# mesh #15 (applied inside get_sig17_seg_bm_labels).
FIX_BAD_ANNOTATION_HUMAN_15 = 0
# Labels for all datasets
# -----------------------
# SIGGRAPH'17 human-segmentation part names; index 0 ('---') is an unused slot
# so that real labels start at 1.
sigg17_part_labels = ['---', 'head', 'hand', 'lower-arm', 'upper-arm', 'body', 'upper-lag', 'lower-leg', 'foot']
sigg17_shape2label = {v: k for k, v in enumerate(sigg17_part_labels)}
# ModelNet40 class names; the position in this list is the numeric class label.
model_net_labels = [
    'bathtub', 'bed', 'chair', 'desk', 'dresser', 'monitor', 'night_stand', 'sofa', 'table', 'toilet',
    'wardrobe', 'bookshelf', 'laptop', 'door', 'lamp', 'person', 'curtain', 'piano', 'airplane', 'cup',
    'cone', 'tent', 'radio', 'stool', 'range_hood', 'car', 'sink', 'guitar', 'tv_stand', 'stairs',
    'mantel', 'bench', 'plant', 'bottle', 'bowl', 'flower_pot', 'keyboard', 'vase', 'xbox', 'glass_box'
]
model_net_shape2label = {v: k for k, v in enumerate(model_net_labels)}
# Per-class weights, same order as model_net_labels (presumably per-class sample
# counts for class balancing — confirm against the dataset before relying on it).
model_net_weights = [265, 1186, 2300, 407, 404, 956, 381, 1645, 755, 919, 145, 1002, 260, 204, 303, 248, 330, 617, 1874,
                     159, 213, 295, 267, 189, 303, 587, 332, 447, 483, 275, 817, 354, 623, 868, 119, 385, 412, 1216,
                     278, 183]
# 3D-FUTURE fine-grained category names (normalized to lower case a few lines below).
future3d_labels = ['Children Cabinet', 'Nightstand', 'Bookcase / jewelry Armoire','Wardrobe', 'Coffee Table', 'Corner/Side Table',
                   'Sideboard / Side Cabinet / Console Table','Wine Cabinet', 'TV Stand', 'Drawer Chest / Corner cabinet',
                   'Shelf', 'Round End Table', 'King-size Bed', 'Bunk Bed', 'Bed Frame', 'Single bed', 'Kids Bed', 'Dining Chair',
                   'Lounge Chair / Cafe Chair / Office Chair', 'Dressing Chair', 'Classic Chinese Chair','Barstool',
                   'Dressing Table', 'Dining Table', 'Desk', 'Three-Seat / Multi-seat Sofa', 'armchair', 'Loveseat Sofa',
                   'L-shaped Sofa', 'Lazy Sofa', 'Chaise Longue Sofa', 'Footstool / Sofastool / Bed End Stool / Stool',
                   'Pendant Lamp', 'Ceiling Lamp']
# Categories excluded elsewhere (kept in original casing to match the raw metadata).
future3d_excluded_labels = ['Dressing Chair', 'Chaise Longue Sofa']
# Labels are matched case-insensitively, so normalize once here.
future3d_labels = [x.lower() for x in future3d_labels]
# Coarse "super" categories, also lower-cased.
future3d_super_labels = [x.lower() for x in ['Cabinet/Shelf/Desk', 'Bed', 'Chair', 'Table', 'Sofa', 'Pier/Stool', 'Lighting']]
# NOTE(review): presumably the number of fine labels per super category
# (12+5+5+3+6+1+2 == 34 == len(future3d_labels)) — confirm against usage.
future_3d_labels_to_super = [12, 5, 5, 3, 6, 1, 2]
future3d_shape2label = {v: k for k, v in enumerate(future3d_labels)}
# Per-class weights, same order as future3d_labels (presumably sample counts — confirm).
future3d_weights = [259, 1045, 262, 724, 1644, 1171, 1046, 169, 581, 643, 187, 168, 1260, 140, 395, 482, 139, 1139,
                    1411, 24, 32, 365, 291, 736, 198, 2479, 1741, 1169, 385, 204, 4, 885, 1915, 611]
# 'cubes' dataset class names; the position in the list is the numeric label.
cubes_labels = [
    'apple', 'bat', 'bell', 'brick', 'camel',
    'car', 'carriage', 'chopper', 'elephant', 'fork',
    'guitar', 'hammer', 'heart', 'horseshoe', 'key',
    'lmfish', 'octopus', 'shoe', 'spoon', 'tree',
    'turtle', 'watch'
]
cubes_shape2label = {v: k for k, v in enumerate(cubes_labels)}
# SHREC'11 class names; the position in the list is the numeric label.
shrec11_labels = [
    'armadillo', 'man', 'centaur', 'dinosaur', 'dog2',
    'ants', 'rabbit', 'dog1', 'snake', 'bird2',
    'shark', 'dino_ske', 'laptop', 'santa', 'flamingo',
    'horse', 'hand', 'lamp', 'two_balls', 'gorilla',
    'alien', 'octopus', 'cat', 'woman', 'spiders',
    'camel', 'pliers', 'myScissor', 'glasses', 'bird1'
]
shrec11_shape2label = {v: k for k, v in enumerate(shrec11_labels)}
# COSEG segmentation part labels (generic single-character names).
coseg_labels = [
    '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c',
]
coseg_shape2label = {v: k for k, v in enumerate(coseg_labels)}
# ShapenetCore-55
# WordNet synset ids of the 55 ShapeNetCore categories; list order defines the numeric label.
shapenet_synsetIds = ['02691156', '02747177', '02773838', '02801938', '02808440', '02818832', '02828884', '02843684',
                      '02871439', '02876657', '02880940', '02924116', '02933112', '02942699', '02946921', '02954340',
                      '02958343', '02992529', '03001627', '03046257', '03085013', '03207941', '03211117', '03261776',
                      '03325088', '03337140', '03467517', '03513137', '03593526', '03624134', '03636649', '03642806',
                      '03691459', '03710193', '03759954', '03761084', '03790512', '03797390', '03928116', '03938244',
                      '03948459', '03991062', '04004475', '04074963', '04090263', '04099429', '04225987', '04256520',
                      '04330267', '04379243', '04401088', '04460130', '04468005', '04530566', '04554684']
# Human-readable name (comma-separated WordNet synonyms) for every synset id above.
shapenet_synset_to_label = {'02691156': 'airplane,aeroplane,plane',
                            '02747177': 'ashcan,trash can,garbage can,wastebin,ash bin,ash-bin,ashbin,dustbin,trash barrel,trash bin',
                            '02773838': 'bag,traveling bag,travelling bag,grip,suitcase',
                            '02801938': 'basket,handbasket',
                            '02808440': 'bathtub,bathing tub,bath,tub',
                            '02818832': 'bed',
                            '02828884': 'bench',
                            '02843684': 'birdhouse',
                            '02871439': 'bookshelf',
                            '02876657': 'bottle',
                            '02880940': 'bowl',
                            '02924116': 'bus,autobus,coach,charabanc,double-decker,jitney,motorbus,motorcoach,omnibus,passenger vehi',
                            '02933112': 'cabinet',
                            '02942699': 'camera,photographic camera',
                            '02946921': 'can,tin,tin can',
                            '02954340': 'cap',
                            '02958343': 'car,auto,automobile,machine,motorcar',
                            '03001627': 'chair',
                            '03046257': 'clock',
                            '03085013': 'computer keyboard,keypad',
                            '03207941': 'dishwasher,dish washer,dishwashing machine',
                            '03211117': 'display,video display',
                            '03261776': 'earphone,earpiece,headphone,phone',
                            '03325088': 'faucet,spigot',
                            '03337140': 'file,file cabinet,filing cabinet',
                            '03467517': 'guitar',
                            '03513137': 'helmet',
                            '03593526': 'jar',
                            '03624134': 'knife',
                            '03636649': 'lamp',
                            '03642806': 'laptop,laptop computer',
                            '03691459': 'loudspeaker,speaker,speaker unit,loudspeaker system,speaker system',
                            '03710193': 'mailbox,letter box',
                            '03759954': 'microphone,mike',
                            '03761084': 'microwave,microwave oven',
                            '03790512': 'motorcycle,bike',
                            '03797390': 'mug',
                            '03928116': 'piano,pianoforte,forte-piano',
                            '03938244': 'pillow',
                            '03948459': 'pistol,handgun,side arm,shooting iron',
                            '03991062': 'pot,flowerpot',
                            '04004475': 'printer,printing machine',
                            '04074963': 'remote control,remote',
                            '04090263': 'rifle',
                            '04099429': 'rocket,projectile',
                            '04225987': 'skateboard',
                            '04256520': 'sofa,couch,lounge',
                            '04330267': 'stove',
                            '04379243': 'table',
                            '04401088': 'telephone,phone,telephone set',
                            '02992529': 'cellular telephone,cellular phone,cellphone,cell,mobile phone',
                            '04460130': 'tower',
                            '04468005': 'train,railroad train',
                            '04530566': 'vessel,watercraft',
                            '04554684': 'washer,automatic washer,washing machine'}
# Names in label order, and the reverse map from synset id to numeric label.
shapenet_labels = [shapenet_synset_to_label[x] for x in shapenet_synsetIds]
shapenet_shapeid2label = {v: k for k, v in enumerate(shapenet_synsetIds)}
def rotate_vertices(vertices, angle):
    """Rotate `vertices` in place by `angle` degrees around the Z axis.

    Args:
        vertices (np.ndarray): (n, 3) array of xyz coordinates; modified in place.
        angle (float): rotation angle in degrees.
    """
    y = angle * np.pi / 180
    R = np.array(((np.cos(y), -np.sin(y), 0),
                  (np.sin(y),  np.cos(y), 0),
                  (0,          0,         1)),
                 dtype=vertices.dtype)
    # Bug fix: np.dot(vertices, R, out=vertices) aliases the input with the
    # output buffer, which NumPy/BLAS does not guarantee to be safe and can
    # silently corrupt results. Compute into a temporary, then copy back in place.
    vertices[:] = np.dot(vertices, R)
def calc_mesh_area(mesh):
    """Attach surface areas to `mesh` (in place).

    'area_faces' is the per-face area computed by trimesh; 'area_vertices'
    distributes each face's area equally among that face's vertices.
    """
    tri = trimesh.Trimesh(vertices=mesh['vertices'], faces=mesh['faces'], process=False)
    mesh['area_faces'] = tri.area_faces
    mesh['area_vertices'] = np.zeros((mesh['vertices'].shape[0]))
    for face_idx, face in enumerate(mesh['faces']):
        share = mesh['area_faces'][face_idx] / face.size
        for vertex in face:
            mesh['area_vertices'][vertex] += share
def calc_vertex_labels_from_face_labels(mesh, face_labels):
    """Derive per-vertex labels from per-face labels.

    Every vertex receives the majority label among its incident faces, plus a
    fuzzy per-class distribution (fraction of incident faces per class).

    Args:
        mesh (dict): with 'vertices' (n, 3) and 'faces' (m, 3) arrays.
        face_labels (array-like): per-face labels; labels are 1-based and the
            minimum label must be exactly 1 (human_seg convention).

    Returns:
        tuple: (vertex_labels, v_labels_fuzzy) where vertex_labels is an (n,)
        int array of majority labels and v_labels_fuzzy is an (n, n_classes)
        array whose row i sums to 1 over the classes incident to vertex i.
    """
    vertices = mesh['vertices']
    faces = mesh['faces']
    all_vetrex_labels = [[] for _ in range(vertices.shape[0])]
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented drop-in replacement.
    vertex_labels = -np.ones((vertices.shape[0],), dtype=int)
    n_classes = int(np.max(face_labels))
    assert np.min(face_labels) == 1  # min label is 1, for compatibility to human_seg labels representation
    v_labels_fuzzy = -np.ones((vertices.shape[0], n_classes))
    # Gather, per vertex, the labels of all faces touching it.
    for i in range(faces.shape[0]):
        label = face_labels[i]
        for f in faces[i]:
            all_vetrex_labels[f].append(label)
    for i in range(vertices.shape[0]):
        counts = np.bincount(all_vetrex_labels[i])
        vertex_labels[i] = np.argmax(counts)
        v_labels_fuzzy[i] = np.zeros((1, n_classes))
        for j in all_vetrex_labels[i]:
            v_labels_fuzzy[i, int(j) - 1] += 1 / len(all_vetrex_labels[i])
    return vertex_labels, v_labels_fuzzy
def prepare_edges_and_kdtree(mesh):
    """Add 'edges' and 'kdtree_query' neighborhood fields to `mesh` (in place).

    'edges' becomes an (n_vertices, max_degree) int32 array: per vertex, the
    indices of vertices that share a face with it, padded with -1 up to the
    maximum vertex degree. 'kdtree_query' becomes an (n_vertices, n_nbrs - 1)
    int32 array of nearest-neighbor vertex indices (the vertex itself excluded).
    """
    vertices = mesh['vertices']
    faces = mesh['faces']
    # Collect, per vertex, the set of vertices co-occurring with it in any face.
    mesh['edges'] = [set() for _ in range(vertices.shape[0])]
    for i in range(faces.shape[0]):
        for v in faces[i]:
            mesh['edges'][v] |= set(faces[i])
    # A vertex is in its own face set; drop it, then freeze the set into a list.
    for i in range(vertices.shape[0]):
        if i in mesh['edges'][i]:
            mesh['edges'][i].remove(i)
        mesh['edges'][i] = list(mesh['edges'][i])
    # Pad every adjacency list with -1 so the result is rectangular.
    max_vertex_degree = np.max([len(e) for e in mesh['edges']])
    for i in range(vertices.shape[0]):
        if len(mesh['edges'][i]) < max_vertex_degree:
            mesh['edges'][i] += [-1] * (max_vertex_degree - len(mesh['edges'][i]))
    mesh['edges'] = np.array(mesh['edges'], dtype=np.int32)
    # Nearest neighbors via trimesh's KD-tree; query n_nbrs and then drop the
    # query vertex itself (and any out-of-range index), keeping n_nbrs - 1.
    mesh['kdtree_query'] = []
    t_mesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False)
    n_nbrs = min(10, vertices.shape[0] - 2)
    for n in range(vertices.shape[0]):
        d, i_nbrs = t_mesh.kdtree.query(vertices[n], n_nbrs)
        i_nbrs_cleared = [inbr for inbr in i_nbrs if inbr != n and inbr < vertices.shape[0]]
        if len(i_nbrs_cleared) > n_nbrs - 1:
            i_nbrs_cleared = i_nbrs_cleared[:n_nbrs - 1]
        mesh['kdtree_query'].append(np.array(i_nbrs_cleared, dtype=np.int32))
    mesh['kdtree_query'] = np.array(mesh['kdtree_query'])
    assert mesh['kdtree_query'].shape[1] == (n_nbrs - 1), 'Number of kdtree_query is wrong: ' + str(mesh['kdtree_query'].shape[1])
def prepare_face_edges(mesh):
    """Store trimesh's face-adjacency pairs and their angles on `mesh` (in place)."""
    adjacency_source = trimesh.Trimesh(mesh['vertices'], mesh['faces'])
    mesh['faces_edges'] = adjacency_source.face_adjacency
    mesh['faces_edges_angles'] = adjacency_source.face_adjacency_angles
def add_fields_and_dump_model(mesh_data, fileds_needed, out_fn, dataset_name, dump_model=True):
    """Assemble a model dict containing `fileds_needed` and optionally dump it.

    Fields already present in `mesh_data` are copied over; every requested
    field still missing afterwards is computed (or defaulted) here. When
    `dump_model` is True the result is saved to `out_fn` as an .npz archive.

    Returns the assembled dict.
    """
    model = {key: value for key, value in mesh_data.items() if key in fileds_needed}
    for field in fileds_needed:
        if field in model.keys():
            continue
        if field == 'labels':
            model[field] = np.zeros((0,))
        elif field == 'dataset_name':
            model[field] = dataset_name
        elif field == 'walk_cache':
            model[field] = np.zeros((0,))
        elif field == 'kdtree_query' or field == 'edges':
            # Fills both 'edges' and 'kdtree_query' in one call.
            prepare_edges_and_kdtree(model)
        elif field == 'tri_centers':
            tri = trimesh.Trimesh(vertices=mesh_data.vertices, faces=mesh_data.faces, process=False)
            model[field] = tri.triangles_center
        elif field == 'tri_edges':
            prepare_face_edges(model)
        elif field == 'vertex_normals':
            tri = trimesh.Trimesh(vertices=mesh_data.vertices, faces=mesh_data.faces, process=False)
            model[field] = tri.vertex_normals
        elif field == 'mfpfh':
            fpfh = MeshFPFH(EasyDict(model), 2)
            model[field] = fpfh.calc_fpfh()
    if dump_model:
        np.savez(out_fn, **model)
    return model
def get_sig17_seg_bm_labels(mesh, file, seg_path):
    """Load per-face segmentation labels for a SIGGRAPH'17 human-seg benchmark mesh.

    Maps the mesh file path to the matching ground-truth .txt file under
    `seg_path` (handling the MIT_animation / scape / faust naming conventions),
    then loads the face labels from it.

    Args:
        mesh: mesh with .faces / .vertices (only read for the optional fix below).
        file (str): path of the mesh (.obj/.off) file whose labels are wanted.
        seg_path (str): root directory searched recursively for label .txt files.

    Returns:
        np.ndarray: per-face labels as loaded by np.loadtxt.

    Raises:
        Exception: when zero or more than one candidate label file matches.
    """
    # Finding the best match file name .. :
    in_to_check = file.replace('obj', 'txt')
    in_to_check = in_to_check.replace('off', 'txt')
    in_to_check = in_to_check.replace('_fix_orientation', '')
    if in_to_check.find('MIT_animation') != -1 and in_to_check.split('/')[-1].startswith('mesh_'):
        in_to_check = '/'.join(in_to_check.split('/')[:-2])
        in_to_check = in_to_check.replace('MIT_animation/meshes_', 'mit/mit_')
        in_to_check += '.txt'
    elif in_to_check.find('/scape/') != -1:
        in_to_check = '/'.join(in_to_check.split('/')[:-1])
        in_to_check += '/scape.txt'
    elif in_to_check.find('/faust/') != -1:
        in_to_check = '/'.join(in_to_check.split('/')[:-1])
        in_to_check += '/faust.txt'
    seg_full_fn = []
    # Bug fix: keep `tmp` defined so the diagnostic below cannot raise
    # NameError when seg_path contains no .txt files at all.
    tmp = ''
    for fn in Path(seg_path).rglob('*.txt'):
        tmp = str(fn)
        tmp = tmp.replace('/segs/', '/meshes/')
        tmp = tmp.replace('_full', '')
        tmp = tmp.replace('shrec_', '')
        tmp = tmp.replace('_corrected', '')
        if tmp == in_to_check:
            seg_full_fn.append(str(fn))
    if len(seg_full_fn) == 1:
        seg_full_fn = seg_full_fn[0]
    else:
        print('\nin_to_check', in_to_check)
        print('tmp', tmp)
        raise Exception('!!')
    face_labels = np.loadtxt(seg_full_fn)
    if FIX_BAD_ANNOTATION_HUMAN_15 and file.endswith('test/shrec/15.off'):
        # Re-label a known-bad patch (label 6 -> 7) in a fixed spatial region.
        face_center = []
        for f in mesh.faces:
            face_center.append(np.mean(mesh.vertices[f, :], axis=0))
        face_center = np.array(face_center)
        idxs = (face_labels == 6) * (face_center[:, 0] < 0) * (face_center[:, 1] < -0.4)
        face_labels[idxs] = 7
        # Bug fix: np.int was removed in NumPy 1.24; builtin int is the replacement.
        np.savetxt(seg_full_fn + '.fixed.txt', face_labels.astype(int))
    return face_labels
def get_labels(dataset_name, mesh, file, fn2labels_map=None):
v_labels_fuzzy = np.zeros((0,))
if dataset_name == 'faust':
face_labels = np.load('faust_labels/faust_part_segmentation.npy').astype(np.int)
vertex_labels, v_labels_fuzzy = calc_vertex_labels_from_face_labels(mesh, face_labels)
model_label = np.zeros((0,))
return model_label, vertex_labels, v_labels_fuzzy
elif dataset_name.startswith('coseg') or dataset_name == 'human_seg_from_meshcnn':
labels_fn = '/'.join(file.split('/')[:-2]) + '/seg/' + file.split('/')[-1].split('.')[-2] + '.eseg'
e_labels = np.loadtxt(labels_fn)
v_labels = [[] for _ in range(mesh['vertices'].shape[0])]
faces = mesh['faces']
fuzzy_labels_fn = '/'.join(file.split('/')[:-2]) + '/sseg/' + file.split('/')[-1].split('.')[-2] + '.seseg'
seseg_labels = np.loadtxt(fuzzy_labels_fn)
v_labels_fuzzy = np.zeros((mesh['vertices'].shape[0], seseg_labels.shape[1]))
edge2key = dict()
edges = []
edges_count = 0
for face_id, face in enumerate(faces):
faces_edges = []
for i in range(3):
cur_edge = (face[i], face[(i + 1) % 3])
faces_edges.append(cur_edge)
for idx, edge in enumerate(faces_edges):
edge = tuple(sorted(list(edge)))
faces_edges[idx] = edge
if edge not in edge2key:
v_labels_fuzzy[edge[0]] += seseg_labels[edges_count]
v_labels_fuzzy[edge[1]] += seseg_labels[edges_count]
edge2key[edge] = edges_count
edges.append(list(edge))
v_labels[edge[0]].append(e_labels[edges_count])
v_labels[edge[1]].append(e_labels[edges_count])
edges_count += 1
assert np.max(np.sum(v_labels_fuzzy != 0, axis=1)) | |
# OpenPharmacophore
from openpharmacophore._private_tools.exceptions import InvalidFeatureError, InvalidFileFormat
from openpharmacophore.io import (from_pharmer, from_moe, from_ligandscout,
to_ligandscout, to_moe, to_pharmagist, to_pharmer)
from openpharmacophore import PharmacophoricPoint
from openpharmacophore.algorithms.discretize import discretize
from openpharmacophore.pharmacophore.pharmacophoric_point import distance_bewteen_pharmacophoric_points
from openpharmacophore.pharmacophore.color_palettes import get_color_from_palette_for_feature
# Third party
import networkx as nx
import nglview as nv
import numpy as np
import pyunitwizard as puw
from rdkit import Geometry, RDLogger
from rdkit.Chem import ChemicalFeatures
from rdkit.Chem.Pharm3D import Pharmacophore as rdkitPharmacophore
RDLogger.DisableLog('rdApp.*') # Disable rdkit warnings
# Standard library
import copy
import itertools
class Pharmacophore():
""" Native object for pharmacophores.
Openpharmacophore native class to store pharmacophoric models. A pharmacophore can be constructed
from a list of elements or from a file.
Parameters
----------
elements : list openpharmacophore.PharmacophoricPoint
List of pharmacophoric elements
Attributes
----------
elements : list openpharmacophore.PharmacophoricPoint
List of pharmacophoric elements
n_elements : int
Number of pharmacophoric elements
"""
def __init__(self, elements=[]):
self.elements = elements
self.n_elements = len(elements)
@classmethod
def from_file(cls, file_name, **kwargs):
"""
Class method to load a pharmacophore from a file.
Parameters
---------
file_name : str
Name of the file containing the pharmacophore
"""
fextension = file_name.split(".")[-1]
if fextension == "json":
points, _ , _ = from_pharmer(file_name, False)
elif fextension == "ph4":
points = from_moe(file_name)
elif fextension == "pml":
points = from_ligandscout(file_name)
else:
raise InvalidFileFormat(f"Invalid file format, \"{file_name}\" is not a supported file format")
return cls(elements=points)
    def add_to_NGLView(self, view, palette='openpharmacophore'):
        """Add the pharmacophore representation to a view (NGLWidget) from NGLView.

        Each pharmacophoric element is added to the NGLWidget as a new component:
        a colored sphere, plus an arrow for elements that carry a direction.

        Parameters
        ----------
        view : nglview.NGLWidget
            View as NGLView widget where the representation of the pharmacophore is going to be
            added.
        palette : str or dict
            Color palette name or dictionary. (Default: 'openpharmacophore')

        Note
        ----
        Nothing is returned. The `view` object is modified in place.
        """
        # Components appended below start at this index; remembered so that
        # only the newly added shapes get their opacity adjusted at the end.
        first_element_index = len(view._ngl_component_ids)
        for ii, element in enumerate(self.elements):
            # Add Spheres
            center = puw.get_value(element.center, to_unit="angstroms").tolist()
            radius = puw.get_value(element.radius, to_unit="angstroms")
            feature_color = get_color_from_palette_for_feature(element.feature_name, color_palette=palette)
            label = f"{element.feature_name}_{ii}"
            view.shape.add_sphere(center, feature_color, radius, label)
            # Add vectors
            if element.has_direction:
                label = f"{element.feature_name}_vector"
                if element.feature_name == "hb acceptor":
                    # Acceptor arrows are drawn pointing INTO the sphere center
                    # (tail offset along -direction); all others point outward.
                    end_arrow = puw.get_value(element.center - 2 * radius * puw.quantity(element.direction, "angstroms"), to_unit='angstroms').tolist()
                    view.shape.add_arrow(end_arrow, center, feature_color, 0.2, label)
                else:
                    end_arrow = puw.get_value(element.center + 2 * radius * puw.quantity(element.direction, "angstroms"), to_unit='angstroms').tolist()
                    view.shape.add_arrow(center, end_arrow, feature_color, 0.2, label)
        # Add opacity to spheres
        last_element_index = len(view._ngl_component_ids)
        for jj in range(first_element_index, last_element_index):
            view.update_representation(component=jj, opacity=0.8)
def show(self, palette='openpharmacophore'):
""" Show the pharmacophore model.
Parameters
----------
palette : str or dict.
Color palette name or dictionary. (Default: 'openpharmacophore')
Returns
-------
nglview.NGLWidget
An nglview.NGLWidget is returned with the 'view' of the pharmacophoric model and the
molecular system used to elucidate it.
"""
view = nv.NGLWidget()
self.add_to_NGLView(view, palette=palette)
return view
def add_element(self, pharmacophoric_element):
"""Add a new element to the pharmacophore.
Parameters
----------
pharmacophoric_element : openpharmacophore.PharmacophricPoint
The pharmacophoric point that will be added.
Note
------
The pharmacophoric element given as input argument is added to the pharmacophore
as a new entry of the list `elements`.
"""
self.elements.append(pharmacophoric_element)
self.n_elements +=1
def remove_elements(self, element_indices):
""" Remove elements from the pharmacophore.
Parameters
----------
element_indices : int or list of int
Indices of the elements to be removed. Can be a list of integers if multiple elements will be
removed or a single integer to remove one element.
Note
-----
The pharmacophoric element given as input argument is removed from the pharmacophore.
"""
if isinstance(element_indices, int):
self.elements.pop(element_indices)
self.n_elements -=1
elif isinstance(element_indices, list):
new_elements = [element for i, element in enumerate(self.elements) if i not in element_indices]
self.elements = new_elements
self.n_elements = len(self.elements)
def remove_feature(self, feat_type):
""" Remove an especific feature type from the pharmacophore elements list
Parameters
----------
feat_type : str
Name or type of the feature to be removed.
Note
-----
The pharmacophoric elements of the feature type given as input argument
are removed from the pharmacophore.
"""
feats = PharmacophoricPoint.get_valid_features()
if feat_type not in feats:
raise InvalidFeatureError(f"Cannot remove feature. \"{feat_type}\" is not a valid feature type")
temp_elements = [element for element in self.elements if element.feature_name != feat_type]
if len(temp_elements) == self.n_elements: # No element was removed
raise InvalidFeatureError(f"Cannot remove feature. The pharmacophore does not contain any {feat_type}")
self.elements = temp_elements
self.n_elements = len(self.elements)
def _reset(self):
"""Private method to reset all attributes to default values.
Note
----
Nothing is returned. All attributes are set to default values.
"""
self.elements.clear()
self.n_elements = 0
self.extractor = None
self.molecular_system = None
    def to_ligandscout(self, file_name):
        """Export the pharmacophore to the LigandScout compatible format.

        Delegates to openpharmacophore.io.to_ligandscout.

        Parameters
        ----------
        file_name : str
            Name of file to be written with the ligandscout format of the pharmacophore.

        Note
        ----
        A new file is written; the return value is whatever io.to_ligandscout returns.
        """
        return to_ligandscout(self, file_name=file_name)
    def to_pharmer(self, file_name):
        """Export the pharmacophore to the pharmer compatible format.

        Delegates to openpharmacophore.io.to_pharmer.

        Parameters
        ----------
        file_name : str
            Name of file to be written with the pharmer format of the pharmacophore.

        Note
        ----
        A new file is written; the return value is whatever io.to_pharmer returns.
        """
        return to_pharmer(self, file_name=file_name)
    def to_pharmagist(self, file_name):
        """Export the pharmacophore to the pharmagist compatible format.

        Delegates to openpharmacophore.io.to_pharmagist.

        Parameters
        ----------
        file_name : str
            Name of file to be written with the pharmagist format of the pharmacophore.

        Note
        ----
        A new file is written; the return value is whatever io.to_pharmagist returns.
        """
        return to_pharmagist(self, file_name=file_name)
    def to_moe(self, file_name):
        """Export the pharmacophore to the MOE compatible format.

        Delegates to openpharmacophore.io.to_moe.

        Parameters
        ----------
        file_name: str
            Name of file to be written with the MOE format of the pharmacophore.

        Note
        ----
        A new file is written; the return value is whatever io.to_moe returns.
        """
        return to_moe(self, file_name=file_name)
def to_rdkit(self):
""" Returns an rdkit pharmacophore with the elements from the original pharmacophore.
rdkit pharmacophores do not store the elements radii, so they are returned as well.
Returns
-------
rdkit_pharmacophore : rdkit.Chem.Pharm3D.Pharmacophore
The rdkit pharmacophore.
radii : list of float
List with the radius in angstroms of each pharmacophoric point.
"""
rdkit_element_name = { # dictionary to map openpharmacophore feature names to rdkit feature names
"aromatic ring": "Aromatic",
"hydrophobicity": "Hydrophobe",
"hb acceptor": "Acceptor",
"hb donor": "Donor",
"positive charge": "PosIonizable",
"negative charge": "NegIonizable",
}
points = []
radii = []
for element in self.elements:
feat_name = rdkit_element_name[element.feature_name]
center = puw.get_value(element.center, to_unit="angstroms")
center = Geometry.Point3D(center[0], center[1], center[2])
points.append(ChemicalFeatures.FreeChemicalFeature(
feat_name,
center
))
radius = puw.get_value(element.radius, to_unit="angstroms")
radii.append(radius)
rdkit_pharmacophore = rdkitPharmacophore.Pharmacophore(points)
return rdkit_pharmacophore, radii
def to_nx_graph(self, dmin=2.0, dmax=13.0, bin_size=1.0):
""" Obtain a networkx graph representation of the pharmacophore.
The pharmacophore graph is a graph whose nodes are pharmacophoric features and
its edges are the euclidean distance between those features. The distance is
discretized into bins so more molecules can match the pharmacophore.
Parameters
----------
dmin : float
The minimun distance in angstroms from which two pharmacophoric points are considered different.
dmax : flaot
The maximum distance in angstroms between pharmacohoric points.
bin_size : float
The size of the bins that will be used to bin the distances.
Returns
-------
pharmacophore_graph : networkx.Graph
The pharmacophore graph
"""
pharmacophore_graph = nx.Graph()
bins = np.arange(dmin, dmax, bin_size)
# We keep track of feature counts to avoid repeated nodes
feat_count = {
"A" : 0,
"D" : 0,
"R" : 0,
"H" : 0,
"P" : 0,
"N" : 0,
"E" : 0,
"I" : 0,
}
# First update the names with the count of each feature
elements = copy.deepcopy(self.elements)
for element in elements:
feat_count[element.short_name] += 1
element.short_name += str(feat_count[element.short_name])
# Now we can add edges without repeating the nodes
for points in itertools.combinations(elements, 2):
distance = distance_bewteen_pharmacophoric_points(points[0], points[1])
binned_distance = discretize(distance, bins)
pharmacophore_graph.add_edge(points[0].short_name,
points[1].short_name,
dis=binned_distance)
return pharmacophore_graph
def distance_matrix(self):
""" Compute the distance matrix of the pharmacophore.
Returns
-------
dis_matrix : np.ndarray of shape(n_elements, n_elements)
The distance matrix.
"""
n_elements = self.n_elements
dis_matrix = np.zeros((n_elements, n_elements))
for ii in range(n_elements):
for jj in range(ii, n_elements):
if ii == jj:
dis_matrix[ii, jj] = 0
else:
distance = distance_bewteen_pharmacophoric_points(
self.elements[ii],
self.elements[jj])
dis_matrix[ii, jj] = distance
dis_matrix[jj, ii] = distance
return dis_matrix
def feature_count(self):
""" Count the number | |
# Repository: jq-shell/python-jqsh
import decimal
import enum
import jqsh.context
import jqsh.filter
import jqsh.values
import string
import unicodedata
class Incomplete(Exception):
    """Raised when input cannot be parsed because it appears to be incomplete
    (e.g. more opening parens than closing ones, or empty JSON)."""
    pass
# All lexical token kinds produced by the tokenizer, built with the functional
# Enum API; module=__name__ makes the members picklable.
TokenType = enum.Enum('TokenType', [
    'assign',
    'close_array',
    'close_object',
    'close_paren',
    'colon',
    'comma',
    'command',
    'comment',
    'dot',
    'format_string',
    'global_variable',
    'illegal',
    'minus',
    'modulo',
    'multiply',
    'name',
    'number',
    'open_array',
    'open_object',
    'open_paren',
    'pipe',
    'plus',
    'semicolon',
    'string',
    'string_end',
    'string_end_incomplete',
    'string_incomplete',
    'string_middle',
    'string_start',
    'trailing_whitespace'
], module=__name__)
class Token:
    """A single lexical token.

    Attributes:
        type: the TokenType member classifying the token.
        string: the exact source text including whitespace, such that
            ''.join(token.string for token in tokenize(s)) == s.
        text: metadata like the name of a name token or the digits of a
            number literal; None for simple tokens.
        line / column: source position, when known.
    """
    def __eq__(self, other):
        # Bug fix: return NotImplemented for non-Token operands (per the
        # Python data model) instead of raising AttributeError when `other`
        # has no `.type`/`.text`; comparisons against foreign objects now
        # fall back to identity and evaluate to False.
        if not isinstance(other, Token):
            return NotImplemented
        return self.type is other.type and self.text == other.text

    def __init__(self, token_type, token_string=None, text=None, line=None, column=None):
        self.type = token_type
        self.string = token_string  # ''.join(token.string for token in tokenize(jqsh_string)) == jqsh_string
        self.text = text  # metadata like the name of a name token or the digits of a number literal. None for simple tokens
        self.line = line
        self.column = column

    def __repr__(self):
        return 'jqsh.parser.Token(' + repr(self.type) + ('' if self.string is None else ', token_string=' + repr(self.string)) + ('' if self.text is None else ', text=' + repr(self.text)) + ')'

    def __str__(self):
        if self.string is None:
            return "'" + repr(self) + "'"
        else:
            return self.string
# Token types that map one-to-one onto an AST filter class; the token's `text`
# is passed to the filter constructor (see the "atomic filters" pass in parse).
atomic_tokens = {
    TokenType.name: jqsh.filter.Name,
    TokenType.number: jqsh.filter.NumberLiteral,
    TokenType.string: jqsh.filter.StringLiteral
}
escapes = { # string literal escape sequences, sans \u and \(
    '"': '"',
    '/': '/',
    '\\': '\\',
    'b': '\b',
    'f': '\f',
    'n': '\n',
    'r': '\r',
    't': '\t'
}
json_tokens = [ # token types that are allowed in pure JSON
    TokenType.close_array,
    TokenType.close_object,
    TokenType.colon,
    TokenType.comma,
    TokenType.name,
    TokenType.number,
    TokenType.open_array,
    TokenType.open_object,
    TokenType.string
]
# Starting keyword of a keyword-paren construct -> AST filter class built for it
# (see make_keyword_paren_filter inside parse).
keyword_paren_filters = {
    'if': jqsh.filter.Conditional,
    'try': jqsh.filter.Try
}
keyword_parens = { # a dictionary that maps starting keywords of keyword parens to the possible inner keywords. All keyword parens end with the “end” keyword.
    'if': {'then', 'elif', 'elseIf', 'else'},
    'try': {'catch', 'then', 'except', 'else'}
}
matching_parens = { # a dictionary that maps opening parenthesis-like tokens (parens) to the associated closing parens
    TokenType.open_array: TokenType.close_array,
    TokenType.open_object: TokenType.close_object,
    TokenType.open_paren: TokenType.close_paren
}
# Operator table for parse(): one entry per precedence group, processed in list
# order (earlier groups are reduced first, i.e. bind tighter). The 'variadic
# apply' sentinel marks the pass that folds adjacent filters into a single
# jqsh.filter.Apply. Within a group dict, 'binary': False marks prefix
# operators (the filter follows the token) and 'rtl': True marks
# right-to-left associativity; all other groups are binary and left-to-right.
operators = [
    {
        'binary': False,
        TokenType.command: jqsh.filter.Command,
        TokenType.global_variable: jqsh.filter.GlobalVariable
    },
    {
        TokenType.dot: jqsh.filter.Apply
    },
    'variadic apply',
    {
        TokenType.multiply: jqsh.filter.Multiply
    },
    {
        TokenType.plus: jqsh.filter.Add
    },
    {
        TokenType.colon: jqsh.filter.Pair
    },
    {
        TokenType.comma: jqsh.filter.Comma
    },
    {
        TokenType.assign: jqsh.filter.Assign
    },
    {
        'rtl': True,
        TokenType.pipe: jqsh.filter.Pipe
    },
    {
        TokenType.semicolon: jqsh.filter.Semicolon
    }
]
# Opening paren token -> AST filter class wrapping the parenthesized contents.
paren_filters = {
    TokenType.open_array: jqsh.filter.Array,
    TokenType.open_object: jqsh.filter.Object,
    TokenType.open_paren: jqsh.filter.Parens
}
# Single-character symbols recognized by the tokenizer, mapped to token types.
symbols = {
    '!': TokenType.command,
    '$': TokenType.global_variable,
    '%': TokenType.modulo,
    '(': TokenType.open_paren,
    ')': TokenType.close_paren,
    '*': TokenType.multiply,
    '+': TokenType.plus,
    ',': TokenType.comma,
    '-': TokenType.minus,
    '.': TokenType.dot,
    ':': TokenType.colon,
    ';': TokenType.semicolon,
    '=': TokenType.assign,
    '@': TokenType.format_string,
    '[': TokenType.open_array,
    ']': TokenType.close_array,
    '{': TokenType.open_object,
    '|': TokenType.pipe,
    '}': TokenType.close_object
}
def illegal_token_exception(token, position=None, expected=None, line_numbers=False):
    """Build (but do not raise) a SyntaxError describing an illegal or unexpected token.

    Args:
        token: the offending Token.
        position: optional index reported instead of the token's line number.
        expected: optional iterable of TokenType members that would have been legal.
        line_numbers: when True (and position is None) include the token's line number.
    """
    if token.type is TokenType.illegal and token.text:
        # Illegal character: report it with its Unicode code point and name.
        return SyntaxError('illegal character' + ((' in line ' + str(token.line) if line_numbers and token.line is not None else '') if position is None else ' at position ' + repr(position)) + ': ' + repr(token.text[0]) + ' (U+' + format(ord(token.text[0]), 'x').upper() + ' ' + unicodedata.name(token.text[0], 'unknown character') + ')')
    else:
        # Otherwise: an unexpected (but lexically valid) token, optionally with what was expected.
        return SyntaxError('illegal ' + ('' if token.type is TokenType.illegal else token.type.name + ' ') + 'token' + ((' in line ' + str(token.line) if line_numbers and token.line is not None else '') if position is None else ' at position ' + repr(position)) + ('' if expected is None else ' (expected ' + ' or '.join(sorted(expected_token_type.name for expected_token_type in expected)) + ')'))
def parse(tokens, *, line_numbers=False, allowed_filters={'default': True}, context=jqsh.context.FilterContext()):
    """Parse a token list (or jqsh source string) into a single filter AST.

    Works by repeated in-place reduction of the token list: parenthesis-like
    constructs first, then atomic tokens, then each operator precedence group
    in turn, until exactly one jqsh.filter.Filter remains.

    Args:
        tokens: list of Token / Filter objects, or a string to tokenize first.
        line_numbers: include line numbers in error messages.
        allowed_filters: dict (or collection) deciding which filter classes may
            appear; dict values may be bools or predicates, with 'default' as
            the fallback policy.
        context: filter context forwarded to nested parses of keyword parens.

    Raises:
        SyntaxError: on malformed input.
        Incomplete: when there are more opening parens than closing ones.
        jqsh.filter.NotAllowed: when a disallowed filter is produced.

    NOTE(review): the allowed_filters/context defaults are mutable objects
    evaluated once at import time; they appear to be only read here — confirm
    before mutating them anywhere.
    """
    def filter_is_allowed(the_filter):
        # Resolve the allowed_filters policy for one filter instance.
        if isinstance(allowed_filters, dict):
            if the_filter.__class__ in allowed_filters:
                if isinstance(allowed_filters[the_filter.__class__], bool):
                    return allowed_filters[the_filter.__class__]
                else:
                    return allowed_filters[the_filter.__class__](the_filter)
            else:
                if isinstance(allowed_filters.get('default', False), bool):
                    return allowed_filters.get('default', False)
                else:
                    return allowed_filters['default'](the_filter)
        elif the_filter.__class__ in allowed_filters:
            return True
        else:
            return False
    def make_keyword_paren_filter(attributes):
        # attributes: iterable of (keyword, token sublist) pairs; each sublist is parsed recursively.
        attributes = list(attributes)
        return keyword_paren_filters[attributes[0][0]]((attribute_name, parse(attribute_tokens, line_numbers=line_numbers, allowed_filters=allowed_filters, context=context)) for attribute_name, attribute_tokens in attributes)
    def raise_for_filter(the_filter):
        # Gatekeeper: every constructed filter passes through here.
        if filter_is_allowed(the_filter):
            return the_filter
        else:
            raise jqsh.filter.NotAllowed('disallowed filter: ' + str(the_filter))
    # Accept raw source strings; drop comment tokens before reduction begins.
    if isinstance(tokens, str):
        tokens = list(tokenize(tokens))
    tokens = [token for token in tokens if isinstance(token, jqsh.filter.Filter) or token.type is not TokenType.comment]
    if not len(tokens):
        return raise_for_filter(jqsh.filter.Filter()) # token list is empty, return an empty filter
    for token in tokens:
        if token.type is TokenType.illegal:
            raise illegal_token_exception(token, line_numbers=line_numbers)
    if isinstance(tokens[-1], Token) and tokens[-1].type is TokenType.trailing_whitespace:
        if len(tokens) == 1:
            return raise_for_filter(jqsh.filter.Filter()) # token list consists entirely of whitespace, return an empty filter
        else:
            tokens[-2].string += tokens[-1].string # merge the trailing whitespace into the second-to-last token
            tokens.pop() # remove the trailing_whitespace token
    # parenthesis-like filters
    # Scans right-to-left, tracking nesting depth; each completed paren (or
    # keyword-paren such as if..end / try..end) is replaced by a single filter.
    paren_balance = 0
    paren_start = None
    middle_keywords = []
    for i, token in reversed(list(enumerate(tokens))): # iterating over the token list in reverse because we modify it in the process
        if not isinstance(token, Token):
            continue
        elif token.type in matching_parens.values() or token == Token(TokenType.name, text='end'):
            if paren_balance == 0:
                paren_start = i
                if token == Token(TokenType.name, text='end'):
                    middle_keywords = []
            paren_balance += 1
        elif token.type in matching_parens.keys() or token.type is TokenType.name and token.text in keyword_parens.keys():
            paren_balance -= 1
            if paren_balance < 0:
                raise Incomplete('too many opening parens of type ' + repr(token.text if token.type is TokenType.name else token.type))
            elif paren_balance == 0:
                if token.type is TokenType.name:
                    # Keyword paren: split the span at the inner keywords and build the filter from the pieces.
                    middle_keywords = [index for index in middle_keywords if tokens[index].text in keyword_parens[token.text]]
                    attributes = []
                    last_index = paren_start
                    for index in middle_keywords:
                        attributes.append((tokens[index].text, tokens[index + 1:last_index]))
                        last_index = index
                    attributes.append((token.text, tokens[i + 1:last_index]))
                    tokens[i:paren_start + 1] = [raise_for_filter(make_keyword_paren_filter(reversed(attributes)))]
                else:
                    if matching_parens[token.type] is tokens[paren_start].type:
                        tokens[i:paren_start + 1] = [raise_for_filter(paren_filters[token.type](attribute=parse(tokens[i + 1:paren_start], line_numbers=line_numbers, allowed_filters=allowed_filters)))] # parse the inside of the parens
                    else:
                        raise SyntaxError('opening paren of type ' + repr(token.type) + ' does not match closing paren of type ' + repr(tokens[paren_start].type))
                paren_start = None
        elif paren_balance == 1 and token.type is TokenType.name:
            middle_keywords.append(i)
    if paren_balance != 0:
        raise SyntaxError('mismatched parens')
    # atomic filters
    for i, token in reversed(list(enumerate(tokens))):
        if isinstance(token, Token) and token.type in atomic_tokens:
            tokens[i] = raise_for_filter(atomic_tokens[token.type](token.text))
    # operators
    # Groups are reduced in table order (see `operators`); each pass rewrites
    # the token list in place until only filters remain.
    for precedence_group in operators:
        if precedence_group == 'variadic apply':
            # Fold maximal runs of adjacent filters into a single Apply filter.
            start = None
            for i, token in reversed(list(enumerate(tokens))):
                if isinstance(token, jqsh.filter.Filter):
                    if start is None:
                        start = i
                else:
                    if start is not None and start > i + 1:
                        tokens[i + 1:start + 1] = [raise_for_filter(jqsh.filter.Apply(*tokens[i + 1:start + 1]))]
                    start = None
            if start is not None and start > 0:
                tokens[:start + 1] = [raise_for_filter(jqsh.filter.Apply(*tokens[:start + 1]))]
            continue
        if not precedence_group.get('binary', True):
            # Prefix operators: consume the single filter that follows the token.
            for i, token in reversed(list(enumerate(tokens))):
                if isinstance(token, Token) and token.type in precedence_group:
                    if len(tokens) == i + 1:
                        raise SyntaxError('expected a filter after ' + repr(token) + ', nothing found')
                    elif isinstance(tokens[i + 1], Token):
                        raise SyntaxError('expected a filter after ' + repr(token) + ', found ' + repr(tokens[i + 1]) + ' instead')
                    tokens[i:i + 2] = [raise_for_filter(precedence_group[token.type](attribute=tokens[i + 1]))]
            continue
        # Binary operators: left-to-right groups are handled by reversing the
        # list, running the right-to-left reduction, and reversing back.
        ltr = not precedence_group.get('rtl', False)
        if ltr:
            tokens.reverse()
        left_operand = None
        right_operand = None
        has_previous_operand = False
        has_next_operand = False
        for i, token in reversed(list(enumerate(tokens))):
            if isinstance(token, jqsh.filter.Filter) and has_next_operand:
                tokens[i:i + (3 if has_previous_operand else 2)] = [precedence_group[tokens[i + 1].type](left=left_operand, right=right_operand)]
                has_next_operand = False
            elif isinstance(token, Token) and token.type in precedence_group:
                # Missing operands are replaced by empty filters.
                left_operand, has_left_operand = (tokens[i - 1], True) if i > 0 and isinstance(tokens[i - 1], jqsh.filter.Filter) else (raise_for_filter(jqsh.filter.Filter()), False)
                right_operand, has_right_operand = (tokens[i + 1], True) if i + 1 < len(tokens) and isinstance(tokens[i + 1], jqsh.filter.Filter) else (raise_for_filter(jqsh.filter.Filter()), False)
                has_previous_operand = has_right_operand
                has_next_operand = has_left_operand
                if ltr:
                    # The list is reversed, so swap the operand roles back.
                    left_operand, right_operand = right_operand, left_operand
                    has_left_operand, has_right_operand = has_right_operand, has_left_operand
                if not has_next_operand:
                    tokens[i:i + (2 if has_previous_operand else 1)] = [precedence_group[token.type](left=left_operand, right=right_operand)]
            else:
                has_next_operand = False
        if ltr:
            tokens.reverse()
    if len(tokens) == 1 and isinstance(tokens[0], jqsh.filter.Filter):
        return tokens[0] # finished parsing
    else:
        raise SyntaxError('Could not parse token list: ' + repr(tokens))
def parse_json(tokens, allow_extension_types=False):
if isinstance(tokens, str):
tokens = list(tokenize(tokens))
if len(tokens) == 0 or len(tokens) == 1 and isinstance(tokens[0], Token) and tokens[0].type is TokenType.trailing_whitespace:
raise Incomplete('JSON is empty')
if isinstance(tokens[-1], Token) and tokens[-1].type is TokenType.trailing_whitespace:
tokens.pop()
ret_path = []
key = None
token_index = 0
while token_index < len(tokens):
token = tokens[token_index]
if allow_extension_types and isinstance(token, jqsh.values.Value):
ret_path = set_value_at_ret_path(ret_path, key, token)
token_index += 1
elif token.type is TokenType.name:
if token.text == 'false':
ret_path = set_value_at_ret_path(ret_path, key, jqsh.values.Boolean(False))
token_index += 1
elif token.text == 'null':
ret_path = set_value_at_ret_path(ret_path, key, jqsh.values.Null())
token_index += 1
elif token.text == 'true':
ret_path = set_value_at_ret_path(ret_path, key, jqsh.values.Boolean(True))
token_index += 1
else:
raise SyntaxError('Illegal name token | |
== None or cursor.kind.is_invalid() or cursor.spelling != var:
cursor = self.find_type(data, template[0])
else:
pointer = 0 # get the pointer level from the cursor instead
if cursor != None and not cursor.kind.is_invalid() and \
cursor.spelling == typename and \
cursor.kind == cindex.CursorKind.VAR_DECL:
# We're trying to use a variable as a type.. This isn't valid
cursor = None
ret = []
if cursor != None and not cursor.kind.is_invalid():
# It's going to be a declaration of some kind, so
# get the returned cursor
pointer += cursor.get_returned_pointer_level()
cursor = cursor.get_returned_cursor()
if cursor == None:
ret = []
else:
# Probably a member of the current class
clazz = extract_class_from_function(data)
if clazz == None:
clazz = extract_class(data)
if clazz != None:
cursor = self.find_type(data, clazz)
if cursor != None and not cursor.kind.is_invalid():
func = False
if typename.endswith("()"):
func = True
typename = typename[:-2]
member = cursor.get_member(typename, func)
cursor, template, pointer = self.solve_member(data, cursor, member, template)
if member != None and (cursor == None or cursor.kind.is_invalid()):
ret = []
if cursor == None or cursor.kind.is_invalid():
# Is it by any chance a struct variable or an ObjC class?
cursor = self.find_type(data, template[0])
if cursor == None or cursor.kind.is_invalid() or \
cursor.spelling != typename or \
(not tocomplete.startswith("::") and \
cursor.kind != cindex.CursorKind.VAR_DECL and \
cursor.kind != cindex.CursorKind.OBJC_INTERFACE_DECL) or \
(tocomplete.startswith("::") and \
not (cursor.kind == cindex.CursorKind.CLASS_DECL or \
cursor.kind == cindex.CursorKind.STRUCT_DECL or \
cursor.kind == cindex.CursorKind.OBJC_INTERFACE_DECL or \
cursor.kind == cindex.CursorKind.CLASS_TEMPLATE)):
cursor = None
if cursor != None and not cursor.kind.is_invalid():
# It's going to be a declaration of some kind, so
# get the returned cursor
pointer = cursor.get_returned_pointer_level()
cursor = cursor.get_returned_cursor()
if cursor == None:
ret = []
if cursor == None or cursor.kind.is_invalid():
# Is it a non-member function?
func = False
if typename.endswith("()"):
func = True
typename = typename[:-2]
cached_results = cache_complete_startswith(self.cache, typename)
if cached_results:
for x in cached_results[0]:
if x.cursor.spelling == typename:
if x.cursor.kind == cindex.CursorKind.VAR_DECL or \
x.cursor.kind == cindex.CursorKind.FUNCTION_DECL:
cursor = x.cursor
pointer = cursor.get_returned_pointer_level()
cursor = cursor.get_returned_cursor()
if cursor == None:
ret = []
break
if cursor != None and not cursor.kind.is_invalid():
r = cursor
m2 = None
count = 0
while len(tocomplete) and count < 10:
if r == None or \
not (r.kind == cindex.CursorKind.CLASS_DECL or \
r.kind == cindex.CursorKind.STRUCT_DECL or \
r.kind == cindex.CursorKind.UNION_DECL or \
r.kind == cindex.CursorKind.OBJC_INTERFACE_DECL or \
r.kind == cindex.CursorKind.CLASS_TEMPLATE):
if r != None and not (r.kind == cindex.CursorKind.TEMPLATE_TYPE_PARAMETER or \
(r.kind == cindex.CursorKind.TYPEDEF_DECL and len(r.get_children()))):
ret = []
r = None
break
count += 1
match = re.search(r"^([^\.\-\(:\[\]]+)?(\[\]|\(|\.|->|::)(.*)", tocomplete)
if match == None:
# probably Objective C code
match = re.search(r"^(\S+)?(\s+)(.*)", tocomplete)
if match == None:
break
if r.kind == cindex.CursorKind.OBJC_INTERFACE_DECL:
pointer = 0
tocomplete = match.group(3)
count = 1
function = False
if match.group(2) == "(":
function = True
tocomplete = tocomplete[1:]
left = re.match(r"(\.|\->|::)?(.*)", tocomplete)
tocomplete = left.group(2)
if left.group(1) != None:
tocomplete = left.group(1) + tocomplete
nextm2 = match.group(2)
if match.group(1) == None and pointer == 0 and r.kind != cindex.CursorKind.OBJC_INTERFACE_DECL:
if match.group(2) == "->":
comp = r.get_member("operator->", True)
r, template, pointer = self.solve_member(data, r, comp, template)
if pointer > 0:
pointer -= 1
if comp == None or comp.kind.is_invalid():
ret = []
elif match.group(2) == "[]":
# TODO: different index types?
comp = r.get_member("operator[]", True)
r, template, pointer = self.solve_member(data, r, comp, template)
if comp == None or comp.kind.is_invalid():
ret = []
elif match.group(1) == None and pointer > 0:
if (nextm2 == "->" or nextm2 == "[]"):
pointer -= 1
elif nextm2 == ".":
# Trying to dot-complete a pointer, this is invalid
# so there can be no completions
ret = []
r = None
break
if match.group(1):
member = match.group(1)
if "[" in member:
member = get_base_type(member)
if "]" in member:
member = member[:member.find("]")]
if m2 == " ":
function = True
member = r.get_member(member, function)
r, template, pointer = self.solve_member(data, r, member, template)
if r == None and member != None:
# This can't be completed as a cursor object isn't returned
# from this member
ret = []
if match.group(2) != "(":
tocomplete = match.group(2) + tocomplete
m2 = nextm2
if r != None and not r.kind.is_invalid() and (pointer == 0 or r.kind == cindex.CursorKind.OBJC_INTERFACE_DECL):
clazz = extract_class_from_function(data)
if clazz == None:
clazz = extract_class(data)
selfcompletion = clazz == r.spelling
comp = cache_completeCursor(self.cache, r)
replaces = []
if template[1] != None:
tempnames = []
for child in r.get_children():
if child.kind == cindex.CursorKind.TEMPLATE_TYPE_PARAMETER:
tempnames.append(child.spelling)
count = min(len(template[1]), len(tempnames))
for i in range(count):
s = template[1][i][0]
if isinstance(s, cindex.Cursor):
s = s.spelling
replaces.append((r"(^|,|\(|\d:|\s+)(%s)($|,|\s+|\))" % tempnames[i], r"\1%s\3" % s))
if comp:
ret = []
if r.kind == cindex.CursorKind.OBJC_INTERFACE_DECL:
isStatic = var == None
if m2 == ".":
for c in comp[0]:
add = True
if c.cursor.kind == cindex.CursorKind.OBJC_IVAR_DECL:
continue
for child in c.cursor.get_children():
if child.kind == cindex.CursorKind.PARM_DECL:
add = False
break
if add:
ret.append((c.display, c.insert))
elif m2 == "->":
for c in comp[0]:
if c.cursor.kind != cindex.CursorKind.OBJC_IVAR_DECL:
continue
ret.append((c.display, c.insert))
else:
for c in comp[0]:
if c.static == isStatic and c.cursor.kind != cindex.CursorKind.OBJC_IVAR_DECL:
ret.append((c.display, c.insert))
else:
for c in comp[0]:
if not c.static and c.cursor.kind != cindex.CursorKind.ENUM_CONSTANT_DECL and \
c.cursor.kind != cindex.CursorKind.ENUM_DECL and \
c.cursor.kind != cindex.CursorKind.TYPEDEF_DECL and \
c.cursor.kind != cindex.CursorKind.CLASS_DECL and \
c.cursor.kind != cindex.CursorKind.STRUCT_DECL and \
c.cursor.kind != cindex.CursorKind.CLASS_TEMPLATE and \
(c.access == cindex.CXXAccessSpecifier.PUBLIC or \
(selfcompletion and not (c.baseclass and c.access == cindex.CXXAccessSpecifier.PRIVATE))):
disp = c.display
ins = c.insert
for r in replaces:
disp = re.sub(r[0], r[1], disp)
ins = re.sub(r[0], r[1], ins)
add = (disp, ins)
ret.append(add)
ret = self.filter(ret)
return remove_duplicates(ret)
else:
constr = re.search(r"(^|\W)new\s+$", before) != None
cached_results = cache_complete_startswith(self.cache, prefix)
if cached_results:
ret = [(x.display, x.insert) for x in cached_results[0]]
variables = extract_variables(data) if not constr else []
var = [("%s\t%s" % (v[1], re.sub(r"(^|\b)\s*static\s+", "", v[0])), v[1]) for v in variables]
if len(var) and ret == None:
ret = []
for v in var:
if v[1].startswith(prefix):
ret.append(v)
clazz = extract_class_from_function(data)
if clazz == None:
clazz = extract_class(data)
if clazz != None:
c = self.find_type(data, clazz)
if c != None and not c.kind.is_invalid():
comp = cache_completeCursor(self.cache, c)
if comp:
for c in comp[0]:
if not c.static and \
not (c.baseclass and c.access == cindex.CXXAccessSpecifier.PRIVATE):
add = (c.display, c.insert)
ret.append(add)
namespaces = extract_used_namespaces(data)
ns = extract_namespace(data)
if ns:
namespaces.append(ns)
for ns in namespaces:
ns = ns.split("::")
add = self.complete_namespace(ns)
if add:
ret.extend(add)
ret = self.filter(ret, constr)
return remove_duplicates(ret)
    def clangcomplete(self, filename, row, col, unsaved_files, membercomp):
        """Run a raw clang code-completion query at (row, col) of filename.

        unsaved_files is a sequence of (name, contents) pairs for buffers
        that have not been written to disk; they are marshalled into the
        ctypes CXUnsavedFile array libclang expects.  membercomp is passed
        through to the cached completion call (presumably toggles
        member-only completion; confirm against cache_clangComplete).
        Returns a list of (display, insert) tuples, or None when the
        completion call produced nothing.
        """
        ret = None
        unsaved = None
        if len(unsaved_files):
            # Allocate one contiguous ctypes array of CXUnsavedFile structs.
            unsaved = (cindex._CXUnsavedFile * len(unsaved_files))()
            for i, (name, value) in enumerate(unsaved_files):
                if not isinstance(value, str):
                    # Non-str (e.g. unicode) buffers are force-encoded to
                    # ASCII, silently dropping unencodable characters.
                    value = value.encode("ascii", "ignore")
                unsaved[i].name = name
                unsaved[i].contents = value
                unsaved[i].length = len(value)
        comp = cache_clangComplete(self.cache, filename, row, col, unsaved, len(unsaved_files), membercomp)
        if comp:
            # comp[0] holds the completion entries.
            ret = [(c.display, c.insert) for c in comp[0]]
        return ret
def format_cursor(cursor):
    """Render a cursor's source location as "file:line:column"."""
    loc = cursor.location
    return "%s:%d:%d" % (loc.file.name, loc.line, loc.column)
def get_cursor_spelling(cursor):
    """Return the cursor's name with any leading "enum ", "class Ns::" or
    "struct Ns::" qualifier stripped; None when cursor is None.

    Falls back to displayname when spelling is empty.
    """
    if cursor is None:
        return None
    name = cursor.spelling or cursor.displayname
    return re.sub(r"^(enum\s+|(class|struct)\s+(\w+::)*)", "", name)
# Module-level cache of previous extensive-search results, consulted by
# ExtensiveSearch.__init__ via self.key() so a repeated identical search
# can reuse what an earlier run found.
searchcache = {}
class ExtensiveSearch:
def quickpanel_extensive_search(self, idx):
if idx == 0:
for cpu in range(get_cpu_count()):
t = threading.Thread(target=self.worker)
t.start()
self.queue.put((0, "*/+"))
elif len(self.options) > 2:
self.found_callback(self.options[idx][1])
def __init__(self, cursor, spelling, found_callback, folders, opts, opts_script, name="", impl=True, search_re=None, file_re=None):
self.name = name
if impl:
self.re = re.compile(r"\w+[\*&\s]+(?:\w+::)?(%s\s*\([^;\{]*\))(?=\s*\{)" % re.escape(spelling))
self.impre = re.compile(r"(\.cpp|\.c|\.cc|\.m|\.mm)$")
else:
self.re = re.compile(r"\w+[\*&\s]+(?:\w+::)?(%s\s*\([^;\{]*\))(?=\s*;)" % re.escape(spelling))
self.impre = re.compile(r"(\.h|\.hpp)$")
if search_re != None:
self.re = search_re
if file_re != None:
self.impre = file_re
self.spelling = spelling
self.folders = folders
self.opts = opts
self.opts_script = opts_script
self.impl = impl
self.target = ""
self.cursor = None
if cursor:
self.cursor = format_cursor(cursor)
self.queue = Queue.PriorityQueue()
self.candidates = Queue.Queue()
self.lock = threading.RLock()
self.timer = None
self.status_count = 0
self.found_callback = found_callback
self.options = [["Yes", "Do extensive search"], ["No", "Don't do extensive search"]]
k = self.key()
if k in searchcache:
| |
"""Convert the object references in each Rule in a Rules_List to the actual objects.
This method assumes that all the rules in the list are from the same device.
:type device_id: int
:param device_id: The ID of the device that contains the rules. If not specified, taken from the first rule.
:type rule_list: Secure_Track.XML_Objects.REST.Rules.Rules_List|XML_List[Rule]
:rtype: Secure_Track.XML_Objects.REST.Rules.Rules_List
"""
if device_id is None:
device_id = rule_list[0].device_id
logger.info("De-referencing rules for device ID %s.", device_id)
rule_to_src_ids = {}
rule_to_dst_ids = {}
rule_to_srv_ids = {}
for rule in rule_list:
rule_to_src_ids[rule] = [src.id for src in rule.src_networks]
rule_to_dst_ids[rule] = [dst.id for dst in rule.dst_networks]
rule_to_srv_ids[rule] = [srv.id for srv in rule.dst_services]
network_objects = self.get_network_objects_by_device_and_object_ids(device_id, set(
itertools.chain(itertools.chain.from_iterable(rule_to_src_ids.values()),
itertools.chain.from_iterable(rule_to_dst_ids.values()))))
network_object_id_to_network_objects = {network_object.id: network_object for network_object in network_objects}
services = self.get_services_by_device_and_object_ids(device_id, set(rule_to_srv_ids.values()))
service_id_to_service = {service.id: service for service in services}
for index, rule in enumerate(rule_list):
rule.src_networks = [network_object_id_to_network_objects[src_id] for src_id in rule_to_src_ids[rule]]
rule.dst_networks = [network_object_id_to_network_objects[dst_id] for dst_id in rule_to_dst_ids[rule]]
rule.dst_services = [service_id_to_service[service_id] for service_id in rule_to_srv_ids[rule]]
rule_list[index] = rule
return rule_list
def get_services_by_device_and_object_ids(self, device_id, service_ids):
"""Get the network objects for a device.
:param device_id: The device ID for which we want to get network objects.
:type device_id: int
:param service_ids: The ID of the service
:type service_ids: int|collections.Iterable[int]
:return: The service for the specified device with the specified ID.
:rtype: Single_Service|Group_Service
:raise ValueError: If a device with the specified ID does not exist.
:raise IOError: If there was a communication problem trying to get the network objects.
"""
if isinstance(service_ids, collections.Iterable):
service_ids = ",".join([str(service_id) for service_id in service_ids])
logger.info("Getting service with ID %s for device %s.", service_ids, device_id)
try:
response_string = self.get_uri("/securetrack/api/devices/{}/services/{}".format(device_id, service_ids),
expected_status_codes=200).response.content
except RequestException:
message = "Failed to get the service with ID {} for device ID {}.".format(service_ids, device_id)
logger.critical(message)
raise IOError(message)
except REST_Not_Found_Error:
message = "Device with ID {} does not exist.".format(device_id)
logger.critical(message)
raise ValueError(message)
else:
return Services_List.from_xml_string(response_string)
def get_services_by_revision_and_object_ids(self, revision_id, service_ids=""):
"""Get the services for a revision.
:param revision_id: The revision ID for which we want to get network objects.
:type revision_id: int
:param service_ids: The ID of the service
:type service_ids: int|collections.Iterable[int]
:return: The service for the specified revision with the specified ID.
:rtype: Services_List
:raise ValueError: If a revision with the specified ID does not exist.
:raise IOError: If there was a communication problem trying to get the services.
"""
if isinstance(service_ids, collections.Iterable):
service_ids = ",".join([str(service_id) for service_id in service_ids])
logger.info("Getting service with ID %s for revision %s.", service_ids, revision_id)
try:
response_string = self.get_uri("/securetrack/api/revisions/{}/services/{}".format(revision_id, service_ids),
expected_status_codes=200).response.content
except RequestException:
message = "Failed to get the service with ID {} for revision ID {}.".format(service_ids, revision_id)
logger.critical(message)
raise IOError(message)
except REST_Not_Found_Error:
message = "Device with ID {} does not exist.".format(revision_id)
logger.critical(message)
raise ValueError(message)
else:
return Services_List.from_xml_string(response_string)
def get_network_objects_by_revision_and_object_ids(self, revision_id, network_object_ids=""):
"""Get the network objects for a device.
:param revision_id: The revision ID for which we want to get network objects.
:type revision_id: int
:param network_object_ids: The ID of the network object to get
:type network_object_ids: int|collections.Iterable[int]
:return: The network objects for the specified revision.
:rtype: Network_Objects_List
:raise ValueError: If a revision with the specified ID does not exist.
:raise IOError: Ifp there was a communication problem trying to get the network objects.
"""
logger.info("Getting network object with ID %s for revision %s.", network_object_ids, revision_id)
if isinstance(network_object_ids, collections.Iterable):
network_object_ids = ",".join([str(network_object_id) for network_object_id in network_object_ids])
try:
response_string = self.get_uri(
"/securetrack/api/revisions/{}/network_objects/{}".format(revision_id, network_object_ids),
expected_status_codes=200).response.content
except RequestException:
message = "Failed to get the list of rules for revision ID {}.".format(revision_id)
logger.critical(message)
raise IOError(message)
except (REST_Not_Found_Error, IndexError):
message = "Revision with ID {} does not exist.".format(revision_id)
logger.critical(message)
raise ValueError(message)
return Network_Objects_List.from_xml_string(response_string)
def get_network_objects_by_device_and_object_ids(self, device_id, network_object_ids):
"""Get the network objects for a device.
:param device_id: The device ID for which we want to get network objects.
:type device_id: int
:param network_object_ids: The ID of the network object to get
:type network_object_ids: int|collections.Iterable[int]
:return: The network objects for the specified device.
:rtype: Network_Objects_List
:raise ValueError: If a device with the specified ID does not exist.
:raise IOError: Ifp there was a communication problem trying to get the network objects.
"""
logger.info("Getting network object with ID %s for device %s.", network_object_ids, device_id)
if isinstance(network_object_ids, collections.Iterable):
network_object_ids = ",".join([str(network_object_id) for network_object_id in network_object_ids])
try:
response_string = self.get_uri(
"/securetrack/api/devices/{}/network_objects/{}".format(device_id, network_object_ids),
expected_status_codes=200).response.content
except RequestException:
message = "Failed to get the list of rules for device ID {}.".format(device_id)
logger.critical(message)
raise IOError(message)
except (REST_Not_Found_Error, IndexError):
message = "Device with ID {} does not exist.".format(device_id)
logger.critical(message)
raise ValueError(message)
return Network_Objects_List.from_xml_string(response_string)
def __get_device_to_id(self, devices):
if devices is None:
device_id_to_device = {device.id: device for device in self.get_devices_list()}
else:
device_id_to_device = {device.id: device for device in devices}
return device_id_to_device
def get_latest_ready_revision_for_device_id(self, device_id):
"""Get the latest ready for the specified device ID.
:param device_id: The device ID for which to get the latest specified revision.
:type device_id: int|str
:return: The latest ready revision for the device.
:rtype: Device_Revision
:raise ValueError: If the device has not ready revisions.
"""
revisions = self.get_device_revisions_by_id(device_id)
revisions.sort()
for revision in revisions:
if revision.is_ready():
return revision
raise ValueError("No ready revisions for device with ID {}.".format(device_id))
def get_latest_revision_for_device_id(self, device_id):
"""Get the latest revision for device
:param device_id: The device ID for which we want to get the latest revision
:type device_id: str|int
return: The latest revision for the device
:rtype: Device_Revision
:raise ValueError: If no revision was found
:raise IOError: API call failed
"""
logger.info("Getting SecureTrack latest revision for device with ID %s.", device_id)
try:
response_string = self.get_uri("/securetrack/api/devices/{}/latest_revision/".format(device_id),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Device with ID {} does not exist or has no revisions.".format(device_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to GET latest revision for the device with ID {}.".format(device_id)
logger.critical(message)
raise IOError(message)
return Device_Revision.from_xml_string(response_string)
def get_device_routes(self, device_id, is_generic=None, start=None, count=None):
"""Get the list of device routes from SecureTrack.
:return: list of available routes
:rtype: RoutesList
"""
logger.info("Getting device routes from SecureTrack")
device_route_uri_suffix = ""
if is_generic:
device_route_uri_suffix += "&is_generic={}".format(is_generic)
if start and count:
device_route_uri_suffix += "&start={}&count={}".format(start, count)
try:
response_string = self.get_uri(
"/securetrack/api/devices/topology_routes?mgmtId={}{}".format(device_id, device_route_uri_suffix),
expected_status_codes=200).response.content
except REST_Not_Found_Error:
message = "Device with ID {} does not exist.".format(device_id)
logger.critical(message)
raise ValueError(message)
except RequestException:
message = "Failed to get routes for device id '{}'".format(device_id)
logger.critical(message)
raise IOError(message)
return RoutesList.from_xml_string(response_string)
def get_internet_referral_object_for_device_id(self, device_id):
"""Get the internet referral object for StoneSoft (except master engine) or Check Point SMC/CMA devices.
:rtype: InternetReferralObject
:type device_id: int
"""
logger.info("Getting internet referral object for SecureTrack device with ID %s", device_id)
uri = "/securetrack/api/internet_referral/{}".format(device_id)
try:
response = self.get_uri(uri, 200).response.content
except RequestException as error:
message = "Failed to get internet referral object for device. Error: {}".format(error)
logger.critical(message)
raise IOError(message)
return InternetReferralObject.from_xml_string(response)
def set_internet_referral_object_for_device_id(self, device_id, internet_referral_object):
"""Set the internet referral object for StoneSoft (except master engine) or Check Point SMC/CMA devices.
:type internet_referral_object: InternetReferralObject
:type device_id: int
"""
logger.info("Setting internet referral object for SecureTrack device with ID %s", device_id)
uri = "/securetrack/api/internet_referral/{}".format(device_id)
try:
self.put_uri(uri, internet_referral_object.to_xml_string(), expected_status_codes=200).response.content
except RequestException as error:
message = "Failed to set internet referral object for device. Error: {}".format(error)
logger.critical(message)
raise IOError(message)
def get_topology_path(self, sources, destinations, services, url_params=None):
"""
TODO: docstring
:param sources:
:param destinations:
:param services:
:param url_params:
:return:
"""
if not url_params:
url_params=""
else:
param_builder = URLParamBuilderDict(url_params)
url_params = param_builder.build(prepend_question_mark=False)
src = ",".join(sources) if sources else '0.0.0.0'
dst = ",".join(destinations) if destinations else '0.0.0.0'
srv = ",".join(services) if services else 'ANY'
try:
uri = "/securetrack/api/topology/path?src={}&dst={}&service={}{}".format(src, dst, srv, url_params)
path_cal_results = self.get_uri(uri, expected_status_codes=200).response.content
return PathCalculationResults.from_xml_string(path_cal_results)
except RequestException as error:
message = "Failed to get SecureTrack topology path. Error: {}".format(error)
raise IOError(message)
def get_nat_rules_by_device_id(self, device_id, input_interface="Any", **kwargs):
""" Get NAT rules based on device id
:param device_id: SecureTrack device ID
:param input_interface: Input interface for NAT rules, "any" for all interfaces
:param kwargs:
output_interface: output interface for NAT rules, "any" for all interfaces
nat_stage: NAT stage for NAT rules allowable values: "pre_policy" , "post_policy" or "both"
nat_type: NAT stage for NAT rules allowable values: "vip" or "reverse_vip"
:return:
"""
logger.info("Getting NAT rules for device {}, input_interface {}".format(device_id, input_interface))
| |
ids_trip = self.times_depart.get_ids_sorted()
ids_vtype = self.ids_vtype[ids_trip]
#ids_vtypes_exclude = self.ids_vtype.get_ids_from_indices(vtypes_exclude)
inds_selected = np.ones(len(ids_vtype), np.bool)
for id_vtype in ids_vtype_exclude:
inds_selected[ids_vtype == id_vtype] = False
ids_trip_selected = ids_trip[inds_selected]
ids_vtype_selected = set(ids_vtype[inds_selected])
#ids_vtypes_selected = set(ids_vtypes).difference(ids_vtypes_exclude)
self.parent.vtypes.write_xml(fd, indent=indent,
ids=ids_vtype_selected,
is_print_begin_end=False)
self.write_xml(fd, indent=indent,
ids=ids_trip_selected,
attrconfigs_excluded=[self.routes, self.ids_routes],
is_print_begin_end=False)
fd.write(xm.end(xmltag))
fd.close()
return filepath
    def export_routes_xml(self, filepath=None, method_routechoice=None, encoding='UTF-8'):
        """
        Export routes to SUMO xml file.
        Method takes care of sorting trips by departure time.

        filepath -- output file; defaults to self.get_routefilepath().
        method_routechoice -- callable id_trip -> route id (>=0 valid, <0 no
            route); defaults to self.get_route_first.
        encoding -- XML encoding written in the declaration.
        Returns the file path on success, False if the file could not be opened.
        """
        if method_routechoice is None:
            method_routechoice = self.get_route_first
        if filepath is None:
            filepath = self.get_routefilepath()
        print 'export_routes_xml', filepath
        try:
            fd = open(filepath, 'w')
        except:
            # NOTE(review): bare except deliberately turns any open() failure
            # into a warning + False return (best-effort export).
            print 'WARNING in write_obj_to_xml: could not open', filepath
            return False
        xmltag_routes, xmltag_veh, attrname_id = ("routes", "vehicle", "ids_sumo")
        xmltag_trip = "trip"
        xmltag_rou = "route"
        fd.write('<?xml version="1.0" encoding="%s"?>\n' % encoding)
        fd.write(xm.begin(xmltag_routes))
        indent = 2
        #ids_modes_used = set(self.parent.vtypes.ids_mode[self.ids_vtype.get_value()])
        # Emit vehicle type definitions for every vtype referenced by a trip.
        self.parent.vtypes.write_xml(fd, indent=indent,
                                     ids=set(self.ids_vtype.get_value()),
                                     is_print_begin_end=False
                                     )
        ids_mode = self.parent.vtypes.ids_mode
        id_pedestrian = MODES['pedestrian']
        routes = self.routes.get_value()
        # here we could write the route info
        # but we do write it inside each trip so that it can be parsed
        # in the same way as duarouter output
        # routes.write_xml( fd, indent=indent,
        #                  attrconfigs_excluded = [routes.costs, routes.probabilities],
        #                  is_print_begin_end = False)
        # let's write trip info manually
        # Attribute configs emitted on each <trip>/<vehicle> element.
        tripconfigs = [self.ids_vtype,
                       self.times_depart,
                       self.ids_edge_depart,
                       self.ids_edge_arrival,
                       self.inds_lane_depart,
                       self.positions_depart,
                       self.speeds_depart,
                       self.inds_lane_arrival,
                       self.positions_arrival,
                       self.speeds_arrival,
                       ]
        # Attribute configs emitted on each nested <route> element.
        routeconfigs = [routes.ids_edges,
                        routes.colors,
                        ]
        attrconfig_id = getattr(self.get_attrsman(), attrname_id)
        xmltag_id = attrconfig_id.xmltag
        # Trips are written in departure-time order, as SUMO requires.
        for id_trip in self.times_depart.get_ids_sorted():
            if ids_mode[self.ids_vtype[id_trip]] == id_pedestrian:
                # Pedestrians become <person> elements with a <walk>.
                self.write_persontrip_xml(fd, id_trip,
                                          method_routechoice=method_routechoice,
                                          indent=indent+2)
            else:
                id_route = method_routechoice(id_trip)
                if id_route >= 0:  # a valid route has been found
                    # init vehicle route only if valid route exists
                    fd.write(xm.start(xmltag_veh, indent+2))
                else:
                    # init trip instead of route
                    fd.write(xm.start(xmltag_trip, indent+2))
                # print ' make tag and id',_id
                fd.write(xm.num(xmltag_id, attrconfig_id[id_trip]))
                # print ' write columns',len(scalarcolconfigs)>0,len(idcolconfig_include_tab)>0,len(objcolconfigs)>0
                for attrconfig in tripconfigs:
                    # print '    attrconfig',attrconfig.attrname
                    attrconfig.write_xml(fd, id_trip)
                if id_route >= 0:  # a valid route has been found
                    # write route id
                    #fd.write(xm.num('route', id_route ))
                    # instead of route id we write entire route here
                    fd.write(xm.stop())
                    fd.write(xm.start(xmltag_rou, indent+4))
                    for attrconfig in routeconfigs:
                        # print '    attrconfig',attrconfig.attrname
                        attrconfig.write_xml(fd, id_route)
                    # end route and vehicle
                    fd.write(xm.stopit())
                    fd.write(xm.end(xmltag_veh, indent+2))
                else:
                    # end trip without route
                    fd.write(xm.stopit())
        fd.write(xm.end(xmltag_routes))
        fd.close()
        return filepath
def write_persontrip_xml(self, fd, id_trip, indent=2, method_routechoice=None):
# currently no routes are exported, only origin and destination edges
# if method_routechoice is None:
# method_routechoice = self.get_route_first
xmltag_person = 'person'
id_route = method_routechoice(id_trip)
fd.write(xm.start(xmltag_person, indent))
self.ids_sumo.write_xml(fd, id_trip)
self.times_depart.write_xml(fd, id_trip)
self.ids_vtype.write_xml(fd, id_trip)
fd.write(xm.stop())
fd.write(xm.start('walk', indent=indent+2))
# print 'write walk',id_trip,self.positions_depart[id_trip],self.positions_arrival[id_trip]
self.ids_edge_depart.write_xml(fd, id_trip)
if self.positions_depart[id_trip] > 0:
self.positions_depart.write_xml(fd, id_trip)
self.ids_edge_arrival.write_xml(fd, id_trip)
if self.positions_arrival[id_trip] > 0:
self.positions_arrival.write_xml(fd, id_trip)
fd.write(xm.stopit()) # ends walk
fd.write(xm.end(xmltag_person, indent=indent))
def get_route_first(self, id_trip):
ids_route = self.ids_routes[id_trip]
if ids_route is None:
return -1
elif len(ids_route) > 0:
return ids_route[0]
else:
return -1 # no route found
    def import_routes_xml(self, filepath, is_clear_trips=False,
                          is_generate_ids=True, is_add=False):
        """Import vehicles and routes from a SUMO route XML file.

        Two SAX passes: RouteCounter first counts elements so RouteReader can
        pre-allocate, then RouteReader parses and inserts into this table.

        filepath -- SUMO route XML file to read.
        is_clear_trips -- when True, wipe existing trips first.
        is_generate_ids -- forwarded to insert_routes (generate own trip IDs).
        is_add -- forwarded to insert_routes.
        """
        print 'import_routes_xml from %s generate own trip ' % (filepath)
        if is_clear_trips:
            self.clear_trips()
        counter = RouteCounter()
        parse(filepath, counter)
        reader = RouteReader(self, counter)
        try:
            parse(filepath, reader)
            # print '  call make_routes',is_generate_ids,is_add
            reader.insert_routes(is_generate_ids=is_generate_ids,
                                 is_add=is_add)
        except KeyError:
            # Unknown vtype/edge references surface as KeyError; report and
            # re-raise so the caller sees the failure.
            print >> sys.stderr, "Error: Problems with reading routes!"
            raise
    def import_trips_xml(self, filepath, is_clear_trips=False, is_generate_ids=True):
        """Import trips from a SUMO trip XML file.

        Two SAX passes: TripCounter counts <trip> elements so TripReader can
        pre-allocate its arrays, then TripReader parses and inserts them.

        filepath -- SUMO trip XML file to read.
        is_clear_trips -- when True, wipe existing trips first.
        is_generate_ids -- forwarded to insert_trips (generate own trip IDs).
        """
        print 'import_trips_xml from %s generate own trip ' % (filepath)
        if is_clear_trips:
            self.clear_trips()
        counter = TripCounter()
        parse(filepath, counter)
        reader = TripReader(self, counter.n_trip)
        print '  n_trip=', counter.n_trip
        try:
            parse(filepath, reader)
            reader.insert_trips(is_generate_ids=is_generate_ids)
        except KeyError:
            # Unknown vtype/edge references surface as KeyError; report and
            # re-raise so the caller sees the failure.
            print >> sys.stderr, "Error: Problems with reading trips!"
            raise
class TripCounter(handler.ContentHandler):
    """SAX handler that counts <trip> elements in a SUMO route XML file."""

    def __init__(self):
        self.n_trip = 0  # number of <trip> start tags seen so far

    def startElement(self, name, attrs):
        """Count every <trip> start tag; ignore all other elements."""
        if name == 'trip':
            self.n_trip += 1
class TripReader(handler.ContentHandler):
"""Reads trips from trip or route file into trip table"""
    def __init__(self, trips, n_trip):
        """Prepare pre-allocated per-trip arrays for reading n_trip trips
        from a SUMO trip/route file into the given trips table."""
        # print 'RouteReader.__init__',demand.ident
        self._trips = trips
        demand = trips.parent
        net = demand.get_scenario().net
        # Lookup tables translating SUMO string IDs to internal IDs.
        self._ids_vtype_sumo = demand.vtypes.ids_sumo
        self._ids_edge_sumo = net.edges.ids_sumo
        # One slot per expected trip; filled sequentially by startElement.
        self.ids_sumo = np.zeros(n_trip, np.object)
        self.ids_vtype = np.zeros(n_trip, np.int32)
        self.times_depart = np.zeros(n_trip, np.int32)
        self.ids_edge_depart = np.zeros(n_trip, np.int32)
        self.ids_edge_arrival = np.zeros(n_trip, np.int32)
        self.inds_lane_depart = np.zeros(n_trip, np.int32)
        self.positions_depart = np.zeros(n_trip, np.float32)
        self.speeds_depart = np.zeros(n_trip, np.float32)
        self.inds_lane_arrival = np.zeros(n_trip, np.int32)
        self.positions_arrival = np.zeros(n_trip, np.float32)
        self.speeds_arrival = np.zeros(n_trip, np.float32)
        self.routes = np.zeros(n_trip, np.object)
        # Parser state: index of the trip currently being filled (-1 = none).
        self._ind_trip = -1
        self._has_routes = False
        self._ids_sumoedge_current = []
        self._id_sumoveh_current = None
        #self._time_depart = 0
        #self._attrs = {}
        #self._is_generate_ids = is_generate_ids
        self._intervals_current = ''
def startElement(self, name, attrs):
# <vehicle id="3_21" type="bus" depart="2520.00">
# <route edges="bottom1to1/0 1/0to0/0 0/0tobottom0"/>
# </vehicle>
# print 'startElement',name
if name == 'trip':
# print ' startElement',attrs['id'],attrs['depart']
self._ind_trip += 1
self._id_sumoveh_current = attrs['id']
self.ids_sumo[self._ind_trip] = self._id_sumoveh_current
print 'startElement ids_vtype', attrs['type'], self._ids_vtype_sumo.get_id_from_index(str(attrs['type']))
self.ids_vtype[self._ind_trip] = self._ids_vtype_sumo.get_id_from_index(str(attrs['type']))
self.times_depart[self._ind_trip] = int(float(attrs['depart']))
if attrs.has_key('from'):
self.ids_edge_depart[self._ind_trip] = self._ids_edge_sumo.get_id_from_index(str(attrs['from']))
if attrs.has_key('to'):
self.ids_edge_arrival[self._ind_trip] = self._ids_edge_sumo.get_id_from_index(str(attrs['to']))
ind_lane_depart_raw = attrs.get('departLane', 'free')
if OPTIONMAP_LANE_DEPART.has_key(ind_lane_depart_raw):
self.inds_lane_depart[self._ind_trip] = OPTIONMAP_LANE_DEPART[ind_lane_depart_raw]
else:
self.inds_lane_depart[self._ind_trip] = int(ind_lane_depart_raw)
positions_depart_raw = attrs.get('departPos', 'base')
if OPTIONMAP_POS_DEPARTURE.has_key(positions_depart_raw):
self.positions_depart[self._ind_trip] = OPTIONMAP_POS_DEPARTURE[positions_depart_raw]
else:
self.positions_depart[self._ind_trip] = float(positions_depart_raw)
self.speeds_depart[self._ind_trip] = attrs.get('departSpeed', 0.0)
ind_lane_arrival_raw = attrs.get('arrivalLane', 'current')
if OPTIONMAP_LANE_ARRIVAL.has_key(ind_lane_arrival_raw):
self.inds_lane_arrival[self._ind_trip] = OPTIONMAP_LANE_ARRIVAL[ind_lane_arrival_raw]
else:
self.inds_lane_arrival[self._ind_trip] = int(ind_lane_arrival_raw)
positions_arrival_raw = attrs.get('arrivalPos', 'max')
if OPTIONMAP_POS_ARRIVAL.has_key(positions_arrival_raw):
self.positions_arrival[self._ind_trip] = OPTIONMAP_POS_ARRIVAL[positions_arrival_raw]
else:
self.positions_arrival[self._ind_trip] = float(positions_arrival_raw)
self.speeds_arrival[self._ind_trip] = attrs.get('arrivalSpeed', 0.0)
def _get_kwargs(self):
return {'ids_sumo': self.ids_sumo,
'times_depart': self.times_depart,
'ids_edge_depart': self.ids_edge_depart,
'ids_edge_arrival': self.ids_edge_arrival,
'inds_lane_depart': self.inds_lane_depart,
'positions_depart': self.positions_depart,
'speeds_depart': self.speeds_depart,
'inds_lane_arrival': self.inds_lane_arrival,
'positions_arrival': self.positions_arrival,
'speeds_arrival': self.speeds_arrival,
}
def insert_trips(self, is_generate_ids=True):
# print 'TripReader.insert_trips self.ids_vtype',self.ids_vtype
kwargs = self._get_kwargs()
ids_trips = self._trips.make_trips(self.ids_vtype,
is_generate_ids=is_generate_ids,
**kwargs)
return ids_trips
class RouteCounter(handler.ContentHandler):
    """SAX handler counting <vehicle> and <person> elements of a SUMO route file."""

    # element tag -> name of the counter attribute to increment
    _COUNTED_TAGS = {'vehicle': 'n_veh', 'person': 'n_pers'}

    def __init__(self):
        self.n_veh = 0   # number of <vehicle> elements seen
        self.n_pers = 0  # number of <person> elements seen

    def startElement(self, name, attrs):
        """Increment the counter matching the opened element, if any."""
        counter_attr = self._COUNTED_TAGS.get(name)
        if counter_attr is not None:
            setattr(self, counter_attr, getattr(self, counter_attr) + 1)
class RouteReader(TripReader):
"""Reads trips from trip or route file into trip table"""
    def __init__(self, trips, counter):
        """Prepare per-trip arrays for all vehicles and persons of the file.

        trips -- the trips table the parsed data is inserted into.
        counter -- a RouteCounter already run over the file, providing the
            element counts used to size the arrays.
        """
        self._trips = trips
        n_veh = counter.n_veh
        n_per = counter.n_pers
        # One array slot per vehicle or person element.
        n_trip = n_veh+n_per
        demand = trips.parent
        net = demand.get_scenario().net
        # Index tables translating SUMO string ids into internal ids.
        self._ids_vtype_sumo = demand.vtypes.ids_sumo
        self._ids_edge_sumo = net.edges.ids_sumo
        # Pre-allocated per-trip attribute arrays, filled in startElement.
        self.ids_sumo = np.zeros(n_trip, np.object)
        self.ids_vtype = np.zeros(n_trip, np.int32)
        self.times_depart = np.zeros(n_trip, np.int32)
        self.ids_edge_depart = np.zeros(n_trip, np.int32)
        self.ids_edge_arrival = np.zeros(n_trip, np.int32)
        self.inds_lane_depart = np.zeros(n_trip, np.int32)
        self.positions_depart = np.zeros(n_trip, np.float32)
        self.speeds_depart = np.zeros(n_trip, np.float32)
        self.inds_lane_arrival = np.zeros(n_trip, np.int32)
        self.positions_arrival = np.zeros(n_trip, np.float32)
        self.speeds_arrival = np.zeros(n_trip, np.float32)
        self.routes = np.zeros(n_trip, np.object)
        # Parser state.
        self._ind_trip = -1
        self._has_routes = False
        self._ids_sumoedge_current = []
        self._id_sumoveh_current = None
        #self._time_depart = 0
        #self._attrs = {}
        #self._is_generate_ids = is_generate_ids
        self._intervals_current = ''
    def startElement(self, name, attrs):
        """Record the attributes of each <vehicle>, and stash the raw edge
        list/intervals of each <route> for decoding in endElement.

        Example input:
        <vehicle id="3_21" type="bus" depart="2520.00">
        <route edges="bottom1to1/0 1/0to0/0 0/0tobottom0"/>
        </vehicle>
        """
        if name == 'vehicle':
            self._ind_trip += 1
            self._id_sumoveh_current = attrs['id']
            self.ids_sumo[self._ind_trip] = self._id_sumoveh_current
            # print 'startElement ids_vtype',attrs['type'], self._ids_vtype_sumo.get_id_from_index(str(attrs['type']))
            self.ids_vtype[self._ind_trip] = self._ids_vtype_sumo.get_id_from_index(str(attrs['type']))
            self.times_depart[self._ind_trip] = int(float(attrs['depart']))
            # Departure/arrival edges are optional; they may also be derived
            # from the route edge list in endElement.
            if attrs.has_key('from'):
                self.ids_edge_depart[self._ind_trip] = self._ids_edge_sumo.get_id_from_index(str(attrs['from']))
            if attrs.has_key('to'):
                self.ids_edge_arrival[self._ind_trip] = self._ids_edge_sumo.get_id_from_index(str(attrs['to']))
            # Lane/position attributes may be symbolic ('free', 'base', ...),
            # mapped through the OPTIONMAP_* tables, or plain numbers.
            ind_lane_depart_raw = attrs.get('departLane', 'free')
            if OPTIONMAP_LANE_DEPART.has_key(ind_lane_depart_raw):
                self.inds_lane_depart[self._ind_trip] = OPTIONMAP_LANE_DEPART[ind_lane_depart_raw]
            else:
                self.inds_lane_depart[self._ind_trip] = int(ind_lane_depart_raw)
            positions_depart_raw = attrs.get('departPos', 'base')
            if OPTIONMAP_POS_DEPARTURE.has_key(positions_depart_raw):
                self.positions_depart[self._ind_trip] = OPTIONMAP_POS_DEPARTURE[positions_depart_raw]
            else:
                self.positions_depart[self._ind_trip] = float(positions_depart_raw)
            self.speeds_depart[self._ind_trip] = attrs.get('departSpeed', 0.0)
            ind_lane_arrival_raw = attrs.get('arrivalLane', 'current')
            if OPTIONMAP_LANE_ARRIVAL.has_key(ind_lane_arrival_raw):
                self.inds_lane_arrival[self._ind_trip] = OPTIONMAP_LANE_ARRIVAL[ind_lane_arrival_raw]
            else:
                self.inds_lane_arrival[self._ind_trip] = int(ind_lane_arrival_raw)
            positions_arrival_raw = attrs.get('arrivalPos', 'max')
            if OPTIONMAP_POS_ARRIVAL.has_key(positions_arrival_raw):
                self.positions_arrival[self._ind_trip] = OPTIONMAP_POS_ARRIVAL[positions_arrival_raw]
            else:
                self.positions_arrival[self._ind_trip] = float(positions_arrival_raw)
            self.speeds_arrival[self._ind_trip] = attrs.get('arrivalSpeed', 0.0)
        if name == 'route':
            self._has_routes = True
            # Raw strings; the edge list is decoded in endElement, the
            # intervals in process_intervals().
            self._ids_sumoedge_current = attrs.get('edges', '')
            self._intervals_current = attrs.get('intervals', '')
# def characters(self, content):
# if (len(self._route_current)>0)&(self._intervals_current!=''):
# self._intervals_current = self._intervals_current + content
    def endElement(self, name):
        """On </vehicle>, decode the stashed route edge list for the current
        vehicle and reset the per-vehicle parser state."""
        if name == 'vehicle':
            if (self._id_sumoveh_current is not None):
                # Translate SUMO edge ids into internal ids, skipping
                # separators and edges unknown to the network.
                ids_edge = []
                for id_sumoedge in self._ids_sumoedge_current.split(' '):
                    if not id_sumoedge in ('', ' ', ','):
                        if self._ids_edge_sumo.has_index(id_sumoedge):
                            ids_edge.append(self._ids_edge_sumo.get_id_from_index(id_sumoedge.strip()))
                self.routes[self._ind_trip] = ids_edge
                # Derive departure/arrival edges from the route itself.
                if len(ids_edge) >= 1:
                    self.ids_edge_depart[self._ind_trip] = ids_edge[0]
                    self.ids_edge_arrival[self._ind_trip] = ids_edge[-1]
                # Reset per-vehicle state for the next element.
                self._id_sumoveh_current = None
                #self._attrs = {}
                self._ids_sumoedge_current = []
def process_intervals(self):
interval = []
es = self._intervals_current.rstrip().split(" ")
for e in es:
p = e.split(",")
interval.append((float(p[0]), float(p[1])))
self._intervals_current = ''
return interval
def _get_kwargs(self):
return {'ids_sumo': self.ids_sumo,
'times_depart': self.times_depart,
'ids_edge_depart': self.ids_edge_depart,
'ids_edge_arrival': self.ids_edge_arrival,
'inds_lane_depart': self.inds_lane_depart,
'positions_depart': self.positions_depart,
'speeds_depart': self.speeds_depart,
'inds_lane_arrival': self.inds_lane_arrival,
'positions_arrival': self.positions_arrival,
'speeds_arrival': self.speeds_arrival,
}
def insert_routes(self, is_generate_ids=True, is_add=False):
# print 'TripReader.make_routes',is_generate_ids, is_add
ids_trip = None
if is_add:
is_generate_ids = False
# get trip ids from xml file
| |
specific to the script, which are edited in our HTML form, in enter_edition_mode().
# They must have a default value. Maybe we could always have an edition mode when their value
# is not set.
# If the parameter is "cimom", it will extract the host of Uris like these: Wee GetHost()
# https://jdd:test@acme.com:5959/cimv2:CIM_RegisteredProfile.InstanceID="acme:1"
    def get_parameters(self,paramkey):
        """Return the value of the script parameter paramkey.

        Resolution order: the CGI argument if present, otherwise the default
        stored in self.m_parameters. A CGI value is coerced to the type of
        the default. In edition mode, a missing parameter with a default is
        interpreted as an unchecked check box (False), because HTML forms do
        not POST unchecked check boxes.
        """
        # Default value if no CGI argument.
        try:
            dflt_value = self.m_parameters[paramkey]
            has_dflt_val = True
        except KeyError:
            has_dflt_val = False
        # unchecked_hidden
        has_arg_value = True
        try:
            # If the script parameter is passed as a CGI argument.
            # BEWARE !!! An empty argument triggers an exception !!!
            # Same problem if the same argument appears several times: This will be a list.
            param_val = self.m_arguments[paramkey].value
        except KeyError:
            DEBUG("get_parameters paramkey='%s' not as CGI", paramkey )
            has_arg_value = False
        # Now converts it to the type of the default value. Otherwise untouched.
        if has_dflt_val:
            if has_arg_value:
                # Coerce the CGI string to the type of the default value.
                paramTyp = type(dflt_value)
                param_val = paramTyp( param_val )
            else:
                # If the parameters were edited but the value did not appear,
                # it can only be a Boolean with a clear check box.
                # https://stackoverflow.com/questions/1809494/post-the-checkboxes-that-are-unchecked
                # Unchecked check boxes are not POSTed.
                try:
                    # "edimodtype" is only present when the form is in edition mode.
                    self.m_arguments["edimodtype"]
                    param_val = False
                    # Sets the right value of the parameter because HTML form do not POST unchecked check boxes.
                    # Therefore, if in edit mode, a parameter is not returned, it can only be a False boolean.
                    self.m_parameters[paramkey] = param_val
                    DEBUG("get_parameters paramkey='%s' set to FALSE", paramkey )
                except KeyError:
                    # Not in edition mode: fall back to the default value.
                    param_val = dflt_value
                    DEBUG("get_parameters paramkey='%s' set to param_val='%s'", paramkey, param_val )
        else:
            if not has_arg_value:
                # No CGI argument and no default: return an empty string.
                param_val = ""
            else:
                DEBUG("get_parameters nothing for paramkey='%s'", ( paramkey ))
        # TODO: Beware, empty strings are NOT send by the HTML form,
        # TODO: so an empty string must be equal to the default value.
        return param_val
# This is used for compatibility with the legacy scripts, which has a single id.
# Now all parameters must have a key. As a transition, GetId() will return
# the value of a unique key-value pair.
# If this class is not in DMTF, we might need some sort of data dictionary.
    def GetId(self):
        """Compatibility helper for legacy scripts expecting a single id.

        Returns the value of the first ontology key of the entity id if it
        can be determined, otherwise the first value of the moniker, or ""
        for top-level URLs. Falls back to edition mode when nothing usable
        is found.
        """
        DEBUG("GetId m_entity_type=%s m_entity_id=%s", self.m_entity_type, str( self.m_entity_id ) )
        try:
            # If this is a top-level url, no object type, therefore no id.
            if self.m_entity_type == "":
                return ""
            split_kv = lib_util.SplitMoniker(self.m_entity_id)
            DEBUG("GetId split_kv=%s", str( split_kv))
            # If this class is defined in our ontology, then we know the first property.
            ent_onto = lib_util.OntologyClassKeys(self.m_entity_type)
            if ent_onto:
                keyFirst = ent_onto[0]
                # Only if this mandatory key is in the dict.
                try:
                    return split_kv[keyFirst]
                except KeyError:
                    # This is a desperate case...
                    pass
            # Returns the first value but this is not reliable at all.
            for key in split_kv:
                return split_kv[key]
        except KeyError:
            pass
        # If no parameters although one was requested.
        self.enter_edition_mode()
        return ""
    # TODO: This will generally become the means of accessing the data, and will therefore include the cimom,
    # e.g. cimom=http://192.168.1.83:5988, or possibly just a machine name.
    # This is what WMI will use. We could also imagine pointing to an FTP server,
    # or an SNMP server.
    # It is more a server than a host: the host is a property of the object, not an access key.
    # This is what will allow the same file to be accessed through a shared disk and through FTP.
    def GetHost(self):
        """Return the host/server of the entity (self.m_entity_host)."""
        return self.m_entity_host
# TODO: Would probably be faster by searching for the last "/".
# '\\\\MYHOST-HP\\root\\cimv2:Win32_Process.Handle="0"' => "root\\cimv2:Win32_Process"
# https://jdd:<EMAIL>:5959/cimv2:Win32_SoftwareFeature.Name="Havana",ProductName="Havana",Version="1.0" => ""
    def get_namespace_type(self):
        """Delegate to lib_util.parse_namespace_type on m_entity_type
        (presumably splitting off the namespace - see the examples above)."""
        return lib_util.parse_namespace_type(self.m_entity_type)
# When in merge mode, these parameters must be aggregated, and used only during
# the unique generation of graphic data.
# TODO: "OutCgiRdf" should be changed to a more appropriate name, such as "DisplayTripleStore"
def OutCgiRdf(self, dot_layout = "", collapsed_properties=[] ):
global globalCgiEnvList
DEBUG("OutCgiRdf globalMergeMode=%d m_calling_url=%s m_page_title=%s",globalMergeMode,self.m_calling_url, self.m_page_title.replace("\n","<NL>") )
self.m_layoutParams = make_dot_layout( dot_layout, collapsed_properties )
mode = lib_util.GuessDisplayMode()
topUrl = lib_util.TopUrl( self.m_entity_type, self.m_entity_id )
if self.m_page_title is None:
self.m_page_title = "PAGE TITLE SHOULD BE SET"
self.m_page_subtitle = "PAGE SUBTITLE SHOULD BE SET"
# See if this can be used in lib_client.py and merge_scritps.py.
if globalMergeMode:
# At the end, only one call to OutCgiMode() will be made.
globalCgiEnvList.append(self)
else:
OutCgiMode( self, topUrl, mode )
# Example: cgiEnv.add_parameterized_links( "Next", { paramkeyStartIndex : startIndex + maxInstances } )
def add_parameterized_links(self, urlLabel, paramsMap):
"""This adds the parameters of an URL which points to the same page,
but with different CGI parameters. This URLS will displays basically
the same things, from the same script."""
# We want to display links associated to the parameters.
# The use case is "Prev/Next" when paging between many values.
# This calculates the URLS and returns a map of { "label":"urls" }
# Copy the existing parameters of the script. This will be updated.
prmsCopy = dict()
for argK in cgi.FieldStorage():
argV = cgi.FieldStorage()[argK].value
# sys.stderr.write("add_parameterized_links argK=%s argV=%s\n"%(argK,argV))
prmsCopy[argK] = lib_util.urllib_quote(argV)
# Update these parameters with the values specific for this label.
for paramKey in paramsMap:
# Check that it is a valid parameter.
try:
self.m_parameters[paramKey]
except KeyError:
ErrorMessageHtml("Parameter %s should be defined for a link"%paramKey)
prmsCopy[paramKey] = paramsMap[paramKey]
DEBUG("prmsCopy=%s",str(prmsCopy))
# Now create an URL with these updated params.
idxCgi = self.m_calling_url.find("?")
if idxCgi < 0:
labelledUrl = self.m_calling_url
else:
labelledUrl = self.m_calling_url[:idxCgi]
# FIXME: ENCODING PROBLEM HERE.
# OK http://127.0.0.1/Survol/survol/class_wbem.py?Start+index=0&Max+instances=800&xid=http%3A%2F%2Fprimhillcomputers.ddns.net%3A5988%2Froot%2Fcimv2%3APG_UnixProcess.&edimodtype=root%2Fcimv2%3APG_UnixProcess
# OK http://rchateau-hp:8000/survol/class_wbem.py?xid=http%3A%2F%2F192.168.0.17%3A5988%2Froot%2Fcimv2%3APG_UnixProcess.
# KO http://rchateau-hp:8000/survol/class_wbem.py?xid=http%3A//192.168.0.17%3A5988/root/cimv2%3APG_UnixProcess.
# Conversion to str() because of integer parameters.
kvPairsConcat = "&amp;".join( "%s=%s" % ( paramKey,str(prmsCopy[paramKey]).replace("/","%2F")) for paramKey in prmsCopy )
labelledUrl += "?" + kvPairsConcat
DEBUG("labelledUrl=%s",labelledUrl)
self.m_parameterized_links[urlLabel] = labelledUrl
# Graphs might contain the same entities calculated by different servers.
# This can happen here, when several URLs are merged.
# This can happen also in the JavaScript client, where several URLs
# are dragged and dropped in the same browser session.
# The same object will have different URLs depending on the server where it is detected.
# For example, a remote database might be seen from different machines.
# These different nodes, representing the same object, must be associated.
# For this, we calculated for each node, its universal alias.
# This is done, basically, by taking the URL, replacing the host name of where
# the object sits, by an IP address.
    # Nodes with the same universal alias are then linked together with a special property.
def _bind_identical_nodes(self):
# This maps each universal alias to the set of nodes which have it.
# At the end, all nodes with the same universal alias are
# linked with a special property.
dict_uni_to_objs = dict()
def _has_univ_alias(an_object):
if lib_kbase.IsLiteral(an_object):
return False
if (an_object.find("entity.py") >= 0) or (an_object.find("entity_wbem.py") >= 0) or(an_object.find("entity_wmi.py") >= 0):
return True
return False
# This calculates the universal alias for each node representing an object.
def _prepare_binding(an_object):
if not _has_univ_alias(an_object):
return
uni_descr = lib_exports.NodeToUniversalAlias(an_object)
try:
dict_uni_to_objs[uni_descr].add(an_object)
except KeyError:
dict_uni_to_objs[uni_descr] = {an_object}
for aSubj, aPred, anObj in self.m_graph:
_prepare_binding(aSubj)
_prepare_binding(anObj)
for an_uni_descr in dict_uni_to_objs:
related_nodes = dict_uni_to_objs[an_uni_descr]
if len(related_nodes) < 2:
continue
node_previous = None
# These specific links must be very visible and short.
# They should be displayed identically in SVG and D3.
# Ideally, all objects with the same alias should be a single graphic shape,
# with all scripts of each object.
for other_node in related_nodes:
if node_previous:
self.m_graph.add((node_previous, lib_properties.pc.property_alias, other_node))
node_previous = other_node
################################################################################

# Global switch consulted by the error reporting code; toggled through
# enable_error_message() below.
globalErrorMessageEnabled = True

# Used when merging several scripts, otherwise there is no way to find
# which scripts produced an error.
def enable_error_message(flag):
global globalErrorMessageEnabled
globalErrorMessageEnabled | |
> 475 or head.ycor() < -273 or head.xcor() < -478 or head.ycor() > 231 :
time.sleep(1)
head.goto(0,0)
head.direction = "stop"
count += 1
for segment in segments:
segment.goto(10000,10000)
segments.clear()
score = 0
delay = 0.03
if count > 2 :
break
else :
pen.clear()
pen.write("Score: {} High Score: {}".format(score, h_score), align = "center", font = ("Terminal", 18, "normal"))
#snake eats food
if head.distance(food) < 20 :
x = randint(-470, 470)
y = randint(-260, 225)
food.goto(x, y)
new_segment = turtle.Turtle()
new_segment.speed(0)
new_segment.shape("square")
new_segment.color("light green")
new_segment.penup()
segments.append(new_segment)
delay -= 0.0005
#increase score
score = score + 10
if score > h_score:
h_score = score
pen.clear()
pen.write("Score: {} High Score: {}".format(score, h_score), align = "center", font = ("Terminal", 18, "normal"))
# move segments
for index in range(len(segments)-1, 0, -1) :
x = segments[index-1].xcor()
y = segments[index-1].ycor()
segments[index].goto(x, y)
#move segment to where head is
if len(segments) > 0 :
x = head.xcor()
y = head.ycor()
segments[0].goto(x, y)
move()
#collision with body
for segment in segments :
if segment.distance(head) < 7 :
time.sleep(1)
head.goto(0, 0)
head.direction = "stop"
for segment in segments :
segment.goto(2000, 20000)
segments.clear()
score = 0
delay = 0.03
count += 1
if count > 2 :
break
else :
pen.clear()
pen.write("Score: {} High Score: {}".format(score, h_score), align = "center", font = ("Terminal", 18, "normal"))
time.sleep(delay)
head.clear()
food.clear()
wall.clear()
head.ht()
food.ht()
pen.clear()
pen.goto(0,80)
window.update()
pen.color("red")
pen.write("G A M E O V E R !",align="center", font=("Terminal",30,"normal"))
pen.goto(0,30)
pen.color("cornflower blue")
pen.write("Your High Score : {}".format(h_score),align="center", font=("Terminal",20,"normal"))
pen.goto(0,-250)
pen.color("red")
pen.write("Press Enter to play again or Esc to go to menu",align= "center", font = ("Terminal", 18, "normal"))
window.listen()
window.onkeypress(Exit, "Escape")
window.onkeypress(Continue, "Return")
#___________________________________________________________________________________________________________________________________________________________________________________________________________________________________________#
def Hangman() :
    """Hangman game: guess a hidden word/name with up to 3 clues and 8 mistakes.

    Uses module-level globals: string (displayed letter pattern), let_used
    (banner of letters tried so far), num (mistake count), clues (remaining
    clue texts). Returns to Main_Menu() on Escape from the end screen.
    """
    global string,let_used,num,clues
    num=0
    # --- Window setup ---
    window=turtle.Screen()
    window.title("Mini ARCADE - Hangman")
    window.setup(width=1000, height=600)
    window.bgcolor('black')
    window.tracer(0)
    window.delay(0.5)
    # --- Welcome banner (shown for 3 seconds) ---
    pen = turtle.Turtle()
    pen.speed(0)
    pen.penup()
    pen.ht()
    pen.goto(0,20)
    pen.color("red")
    pen.write("WELCOME TO HANGMAN",align="center", font=("Terminal",30,"normal"))
    pen.goto(0,-20)
    pen.color("cornflower blue")
    pen.write("- You have 3 clues for guessing the word or phrase or name", align= "center", font = ("Terminal",12, "normal"))
    pen.goto(0,-60)
    pen.write("- You can commit a maximum of 8 mistakes", align= "center", font = ("Terminal", 12, "normal"))
    time.sleep(3)
    pen.clear()
    pen.color("red")
    # --- One turtle per hangman body part; each is drawn on a mistake ---
    face=turtle.Turtle()
    face.hideturtle()
    face.pu()
    face.goto(-50,0)
    torso=turtle.Turtle()
    torso.hideturtle()
    torso.pu()
    leg1=turtle.Turtle()
    leg1.hideturtle()
    leg1.pu()
    vstand=turtle.Turtle()
    vstand.hideturtle()
    vstand.pu()
    extra=turtle.Turtle()
    extra.hideturtle()
    extra.pu()
    extra.goto(-50,50)
    extra.setheading(180)
    torso.goto(-50,-57.284)
    torso.setheading(270)
    leg1.goto(-50,-157.284)
    leg2=turtle.Turtle()
    leg2.hideturtle()
    leg2.pu()
    leg2.goto(-50,-157.284)
    leg1.right(60)
    leg2.right(120)
    hand1=turtle.Turtle()
    hand1.hideturtle()
    hand1.pu()
    hand1.right(60)
    hand2=turtle.Turtle()
    hand2.hideturtle()
    hand2.pu()
    hand2.right(120)
    hand1.goto(-50,-77.284)
    hand2.goto(-50,-77.284)
    vstand.goto(100,-250)
    vstand.setheading(90)
    # --- Turtles for text areas: pattern, used letters, clues, category ---
    letter=turtle.Turtle()
    letter.hideturtle()
    letter.pu()
    letter.goto(0,150)
    used=turtle.Turtle()
    used.ht()
    used.pu()
    used.goto(0,-290)
    let_used='LETTERS USED: '
    used.color('white')
    used.write(let_used,align='center',font=('Arial','16','bold'))
    writeclue=turtle.Turtle()
    writeclue.hideturtle()
    writeclue.pu()
    writeclue.goto(300,-50)
    writeclue.pencolor('red')
    writeclue.write('CLUES (Press space)',align='center',font=('Arial','14','bold'))
    tc=turtle.Turtle()
    tc.hideturtle()
    tc.pu()
    tc.goto(-400,-200)
    tc.pencolor('blue')
    tc.write('CATEGORY', align = 'center', font = ('Arial', '20', 'bold'))
    tc.goto(-400,-250)
    window.update()
    # Give every turtle a white pen for drawing the figure.
    for i in window.turtles() :
        i.pd()
        i.pensize(3)
        i.pencolor('white')
    # --- Secret words, their three clues each, and their categories ---
    word=['PEWDIEPIE','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>']
    clue1=['The king of YouTube','Late basketball player','Retired Cricketer','Performs expensive stunts and philanthropy','Real-life Tony Stark.','He is color blind','A pioneer of the personal computer revolution of 1970-1980s','The greatest batsman of all time','An English actor, comedian and writer','Canadian Actor']
    clue2=['Leader of the Floor Gang','Got an award named after him in 2020','Played in the Ashes','Took an initiative to plant 20 million trees','The coolest billionaire','Internet entrepreneur, and philanthropist','American business magnate, investor','Was an Australian International cricketer','Suffered from a speech impediment as a child ','Down-to-earth and charismatic']
    clue3=['Has over a 100 million subscribers on YouTube','Played for the Los Angeles Lakers',"Was the Australian cricket team's captain",'Real name is <NAME>','CEO of Tesla,Inc and SpaceX','Co-founder and CEO of Facebook','Co-founder and late CEO of Apple','His Test batting average is 99.94','Mr.Bean','<NAME>']
    category=['YouTuber','Sports','Sports','YouTuber','Personality','Personality','Personality','Sports','Celebrity','Celebrity']
    window.update()
    # Pick the secret word for this round.
    choice=randint(0,9)
    ender=turtle.Turtle()
    ender.hideturtle()
    ender.pencolor('red')
    string=''
    tc.color("yellow")
    clues=[clue1[choice],clue2[choice],clue3[choice],'ALL CLUES REVEALED']
    window.tracer(True)
    def Dummy() :
        # No-op key handler used to unbind Enter/Escape.
        return
    def dummy (x,y) :
        # No-op click handler.
        return
    def Continue() :
        # Enter on the end screen: start a fresh round.
        ender.clear()
        window.turtles().clear()
        window.onkeypress(Dummy, "Escape")
        window.onkeypress(Dummy, "Return")
        window.onclick(dummy)
        Hangman()
    def Exit() :
        # Escape on the end screen: back to the main menu.
        ender.clear()
        window.turtles().clear()
        used.clear()
        window.onkeypress(Dummy, "Escape")
        window.onkeypress(Dummy, "Return")
        window.onclick(dummy)
        Main_Menu()
    def head() :
        # Draw the head as a circle (600 small forward/turn steps).
        window.tracer(2)
        for i in range(600) :
            face.fd(0.3)
            face.right(0.6)
        face.ht()
        face.pu()
    def line (a,x) :
        # Draw a straight line of length x with turtle a.
        window.tracer(2)
        for i in range(x):
            a.fd(1)
    def draw (i) :
        # Draw the i-th body part; the 9th mistake (i == 8) ends the game.
        global let_used
        if i == 0 :
            head()
        elif i == 1 :
            line(torso,100)
        elif i == 2 :
            line(hand1,50)
        elif i == 3 :
            line(hand2,50)
        elif i == 4 :
            line(leg1,50)
        elif i == 5 :
            line(leg2,50)
        elif i == 6 :
            line(vstand,300)
        elif i == 7 :
            vstand.left(90)
            line(vstand,150)
            line(extra,20)
        elif i == 8 :
            vstand.left(90)
            line(vstand,50)
            # Final mistake: wipe the board and show the game-over screen.
            for i in window.turtles()[0:-1] :
                i.clear()
                i.reset()
                i.ht()
            time.sleep(0.5)
            ender.clear()
            ender.pu()
            used.clear()
            ender.goto(0,80)
            ender.write("G A M E O V E R !",align="center", font=("Terminal",30,"normal"))
            ender.goto(0,30)
            ender.fd(0)
            ender.color("cornflower blue")
            ender.write("YOU LOST! The word is {}.".format(word[choice]), align = "center", font=("Terminal",25,"normal"))
            ender.goto(0,-250)
            ender.color("red")
            ender.write("Press Enter to play again or Esc to go to menu", align = "center", font = ("Terminal", 18, "normal"))
            window.listen()
            window.onkeypress(Exit,"Escape")
            window.onkeypress(Continue,"Return")
    def reset (x) :
        # Handle a guessed (uppercase) letter x.
        window.tracer(0)
        global string, let_used, num
        # Rebuild the per-letter list from the displayed pattern: each cell
        # of `string` is 3 characters wide with the letter in the middle.
        no_gap = []
        for i in range(1, len(string)-1, 3) :
            no_gap.append(string[i])
        # Reveal every occurrence of the guessed letter.
        for i in range(len(word[choice])) :
            if word[choice][i] == x :
                no_gap[i] = x
        string = ''
        check = ''
        for i in no_gap :
            string += ' ' + i + ' '
            check += i
        letter.clear()
        letter.write(string, align = 'center', font = ('Arial', 32, 'bold'))
        window.update()
        window.tracer(True)
        # Wrong letter, not already tried: draw the next body part.
        # (The slice skips over the 'LETTERS USED: ' banner prefix.)
        if x not in word[choice] and x not in let_used[12::] :
            draw(num)
            num += 1
        # Record the letter in the "LETTERS USED" banner.
        if x not in let_used[12::] :
            let_used += x + ' '
            used.clear()
            used.write(let_used, align = 'center', font = ('Arial','16','bold'))
        # All letters revealed: the player wins.
        if check==word[choice] :
            for i in window.turtles()[0:-1] :
                i.clear()
                i.reset()
                i.ht()
            ender.clear()
            time.sleep(0.5)
            ender.pu()
            ender.goto(0,80)
            ender.write("CONGRATULATIONS!", align = "center", font = ("Terminal", 30, "normal"))
            ender.goto(0,30)
            ender.color("cornflower blue")
            ender.write("YOU WON!", align = "center", font = ("Terminal", 25, "normal"))
            ender.goto(0,-250)
            ender.color("red")
            ender.goto(0,-250)
            ender.write("Press Enter to play again or Esc to go to menu", align = "center", font = ("Terminal", 18, "normal"))
            window.listen()
            window.onkeypress(Exit, "Escape")
            window.onkeypress(Continue,"Return")
    # One zero-argument callback per letter, because turtle's onkeypress
    # handlers take no arguments.
    def reset_a() :
        reset('A')
    def reset_b() :
        reset("B")
    def reset_c() :
        reset('C')
    def reset_d() :
        reset('D')
    def reset_e() :
        reset('E')
    def reset_f() :
        reset('F')
    def reset_g() :
        reset('G')
    def reset_h() :
        reset('H')
    def reset_i() :
        reset('I')
    def reset_j() :
        reset('J')
    def reset_k() :
        reset('K')
    def reset_l() :
        reset('L')
    def reset_m() :
        reset('M')
    def reset_n() :
        reset('N')
    def reset_o() :
        reset('O')
    def reset_p() :
        reset('P')
    def reset_q() :
        reset('Q')
    def reset_r() :
        reset('R')
    def reset_s() :
        reset('S')
    def reset_t() :
        reset('T')
    def reset_u() :
        reset('U')
    def reset_v() :
        reset('V')
    def reset_w() :
        reset('W')
    def reset_x() :
        reset('X')
    def reset_y() :
        reset('Y')
    def reset_z() :
        reset('Z')
    def initiate (x) :
        # Write the category and the initial pattern of underscores.
        # Cells are 3 characters wide, matching the stride-3 parsing in reset().
        global string
        under = ' _ '
        space = '   '
        for i in word[choice] :
            if i.isspace() :
                string += space
            else :
                string += under
        tc.write(category[x].upper(), align = 'center', font = ('Arial', '12', 'bold'))
        letter.write(string, align = 'center', font = ('Arial','32','bold'))
    def clue() :
        # Space key: reveal the next clue (last entry is the final notice).
        global clues
        if len(clues) != 0 :
            writeclue.pu()
            writeclue.goto(300,writeclue.ycor()-50)
            writeclue.write(clues[0], align = 'center', font = ('Arial', 10, 'bold'))
            clues=clues[1::]
    initiate(choice)
    # Bind one callback per letter key plus Space for clues.
    window.listen()
    window.onkeypress(reset_a, 'a')
    window.onkeypress(reset_b, 'b')
    window.onkeypress(reset_c, 'c')
    window.onkeypress(reset_d, 'd')
    window.onkeypress(reset_e, 'e')
    window.onkeypress(reset_f, 'f')
    window.onkeypress(reset_g, 'g')
    window.onkeypress(reset_h, 'h')
    window.onkeypress(reset_i, 'i')
    window.onkeypress(reset_j, 'j')
    window.onkeypress(reset_k, 'k')
    window.onkeypress(reset_l, 'l')
    window.onkeypress(reset_m, 'm')
    window.onkeypress(reset_n, 'n')
    window.onkeypress(reset_o, 'o')
    window.onkeypress(reset_p, 'p')
    window.onkeypress(reset_q, 'q')
    window.onkeypress(reset_r, 'r')
    window.onkeypress(reset_s, 's')
    window.onkeypress(reset_t, 't')
    window.onkeypress(reset_u, 'u')
    window.onkeypress(reset_v, 'v')
    window.onkeypress(reset_w, 'w')
    window.onkeypress(reset_x, 'x')
    window.onkeypress(reset_y, 'y')
    window.onkeypress(reset_z, 'z')
    window.onkeypress(clue, 'space')
#___________________________________________________________________________________________________________________________________________________________________________________________________________________________________________#
def Obstacle_Course() :
window=turtle.Screen()
window.title("Mini ARCADE - Obstacle Course")
window.setup(width=1000, height=600)
window.bgcolor('black')
window.tracer(0)
Block_color=['red', 'cyan', 'blue', 'purple']
t_color=['cyan', 'orange', 'yellow', 'lightgreen']
col=randint(0,3)
block_color=Block_color[col]
t=turtle.Turtle()
t.hideturtle()
t.color(t_color[col])
t.pu()
t.setpos(-500,0)
t.showturtle()
block1=turtle.Turtle()
block1.pu()
block1.color(block_color)
block1.shape("square")
block2=turtle.Turtle()
block2.pu()
block2.color(block_color)
block2.shape("square")
block3=turtle.Turtle()
block3.pu()
block3.color(block_color)
block3.shape("square")
block4=turtle.Turtle()
block4.pu()
block4.color(block_color)
block4.shape("square")
block5=turtle.Turtle()
block5.pu()
block5.color(block_color)
block5.shape("square")
block6=turtle.Turtle()
block6.pu()
block6.color(block_color)
block6.shape("square")
block7=turtle.Turtle()
block7.pu()
block7.color(block_color)
block7.shape("square")
block8=turtle.Turtle()
block8.pu()
block8.color(block_color)
block8.shape("square")
block9=turtle.Turtle()
block9.pu()
block9.color(block_color)
block9.shape("square")
block1.setpos(-400,-120)
block2.setpos(-300,50)
block3.setpos(-200,-200)
block4.setpos(-100,50)
block5.setpos(100,150)
block6.setpos(200,260)
block7.setpos(300,-50)
block8.setpos(400,-250)
block9.setpos(0,10)
ender = turtle.Turtle()
ender.penup()
ender.ht()
ender.goto(0,20)
ender.color("red")
ender.write("WELCOME TO OBSTACLE COURSE", align ="center", font = ("Terminal", 30, "normal") )
ender.goto(0,-20)
ender.color("cornflower blue")
ender.write("- Reach the end by avoiding the obstacles", align = "center", font = ("Terminal", 12, "normal"))
ender.goto(0,-60)
ender.write("- Make sure you don't go out of the screen", align = "center", font = ("Terminal", 12, "normal"))
time.sleep(3)
ender.clear()
ender.color("red")
ender.goto(0,80)
ender.pd()
window.update()
window.tracer(1)
window.delay(0.5)
def up() :
| |
<reponame>lcremer/Maya-Rigging
import pymel.core as pc
import re
from Maya_Rigging.Utils import AttrUtil as atu
from ...Utils import *
from Maya_Rigging.Utils.CharUtilsLib import *
from Maya_Rigging.Core import JointStretchNetworkLib as js
def buildTwistJointSetup(name, side, joint, stretchType, ikFkType, rotConnect, wristJoint, controller, stretch, volume, scale, controlColor = ''):
    """Build a twist-joint network along *joint*.

    Splits *joint* into the number of segments stored in its 'twistJoints'
    attribute, distributes stretch (translate or scale) and twist rotation
    across the new segment joints, and creates a "twist fix" control the
    animator can use to nullify the automatic twist.

    name, side    -- naming prefixes for every node created here.
    joint         -- joint to split; must carry the 'twistJoints' attribute.
    stretchType   -- 'translate' or 'scale'; how stretch reaches the segments.
    ikFkType      -- 'utilNode' or 'constrain'; what kind of node drives rotation.
    rotConnect    -- 'parent' (twist read from *joint*'s own driver) or
                     'child' (twist read from a helper joint constrained to
                     the ik/fk wrist joints).
    wristJoint    -- wrist joint, used only in 'child' mode.
    controller    -- control carrying the FK_IK switch; gains 'twistFixVis'.
    stretch       -- when True, wire stretch into the segments.
    volume        -- when True, add a joint-volume setup on the new chain.
    scale         -- control-curve size.
    controlColor  -- colour passed to curveControl().

    Returns the list of newly created segment joints (empty when the joint
    had no twist setup requested).
    """
    transNode = []
    transNodePath = ''
    rotNode = []
    rotNodePath = ''
    childJoint = ''
    transConnection = []
    rotConnection = []
    jointCount = 0
    newJoints = []
    twistAxis = []
    stretchAxis = []
    tempJoint = []
    axis = ''
    color = ''
    rotAdd = ''
    jointRotAdd = ''
    fullchain = []
    transMd = ''
    # Read the requested segment count off the joint and work out the axes.
    if pc.attributeQuery('twistJoints', n=joint, ex=True):
        jointCount = (pc.getAttr(joint + '.twistJoints')+1)
        if jointCount == 1:
            # TODO: clean this up, shouldn't have to be resetting value to 0
            jointCount = 0
        stretchAxis = getStretchAxis(joint, stretchType)
        twistAxis = getTwistAxis(joint)
        # determine twist axis for further info distribution:
        # axis letter for multiplyDivide plugs, colour letter for utilNode outputs
        if twistAxis[0] == 'rx':
            axis = 'X'
            color = 'R'
        elif twistAxis[0] == 'ry':
            axis = 'Y'
            color = 'G'
        elif twistAxis[0] == 'rz':
            axis = 'Z'
            color = 'B'
        pc.select(joint, r=True)
        # The attribute has been consumed; remove it so the setup is not rebuilt.
        atu.removeTwistJointsAttr('twistJoints')
        pc.select(cl=True)
    if jointCount > 0:
        childJoint = getChildJoint(joint)
        # Controller attribute that toggles visibility of the twist-fix control.
        if not pc.attributeQuery('twistFixVis', n=controller,ex=True):
            pc.addAttr(controller, ln='twistFixVis', at='bool', keyable=True)
            pc.setAttr((controller + '.twistFixVis'), 1)
        if stretch:
            # transMd divides the incoming stretch evenly among the segments.
            transMd = pc.createNode('multiplyDivide', n=(name + side + joint + '_posSplit_md'))
            pc.setAttr((transMd + '.operation'), 2)
            if stretchType == 'translate':
                # Find the node currently driving the child joint's stretch axis.
                transConnection = pc.listConnections((childJoint + '.' + stretchAxis[0]), d=False, s=True, plugs=True, skipConversionNodes=True)
                transNodePath = transConnection[0]
                match = re.search('[^.]*', str(transNodePath))
                if match:
                    transNode.append(match.group())  # node name without the plug
                elif transNodePath:
                    transNode.append(transNodePath)
                pc.setAttr((transMd + '.input2X'), jointCount)
                pc.setAttr((transMd + '.input2Y'), jointCount)
                pc.setAttr((transMd + '.input2Z'), jointCount)
            elif stretchType == 'scale':
                # Scale stretch is read from the joint itself; no division set.
                transConnection = pc.listConnections((joint + '.' + stretchAxis[0]), d=False, s=True, plugs=True, skipConversionNodes=True)
                transNodePath = transConnection[0]
                match = re.search('[^.]*', str(transNodePath))
                if match:
                    transNode.append(match.group())
                elif transNodePath:
                    transNode.append(transNodePath)
            pc.connectAttr((transNode[0] + '.outputR'), (transMd + '.input1X'))
            pc.connectAttr((transNode[0] + '.outputG'), (transMd + '.input1Y'))
            pc.connectAttr((transNode[0] + '.outputB'), (transMd + '.input1Z'))
        # rotMd divides the driving rotation evenly among the segments.
        rotMd = pc.createNode('multiplyDivide', n=(name + side + joint + '_rotSplit_md'))
        pc.setAttr((rotMd + '.operation'), 2)
        if rotConnect == 'parent':
            # Twist is read from whatever currently drives the joint's twist axis.
            rotConnection = pc.listConnections((joint + '.' + twistAxis[0]), d=False, s=True, plugs=True, skipConversionNodes=True)
            rotNodePath = rotConnection[0]
            match = re.search('[^.]*', str(rotNodePath))
            if match:
                rotNode.append(match.group())
            elif rotNodePath:
                rotNode.append(rotNodePath)
            if ikFkType == 'utilNode':
                pc.setAttr((rotMd + '.input2X'),jointCount)
                pc.setAttr((rotMd + '.input2Y'),jointCount)
                pc.setAttr((rotMd + '.input2Z'),jointCount)
                pc.connectAttr((rotNode[0] + '.outputR'), (rotMd + '.input1X'))
                pc.connectAttr((rotNode[0] + '.outputG'), (rotMd + '.input1Y'))
                pc.connectAttr((rotNode[0] + '.outputB'), (rotMd + '.input1Z'))
            elif ikFkType == 'constrain':
                # NOTE(review): this branch tests 'constrain' but the blend
                # wiring further down tests 'constraint' -- one of the two
                # spellings never matches; confirm the intended value.
                pc.setAttr((rotMd + '.input2X'), jointCount)
                pc.setAttr((rotMd + '.input2Y'), jointCount)
                pc.setAttr((rotMd + '.input2Z'), jointCount)
                pc.connectAttr((rotNode[0] + '.constraintRotate.constraintRotateX'), (rotMd + '.input1X'))
                pc.connectAttr((rotNode[0] + '.constraintRotate.constraintRotateY'), (rotMd + '.input1Y'))
                pc.connectAttr((rotNode[0] + '.constraintRotate.constraintRotateZ'), (rotMd + '.input1Z'))
            # Twist-fix control: a plus-shaped curve parented under the part
            # group and constrained to the joint.
            pc.select(cl=True)
            twistCtrl = curveControl('plus', 'curve', controlColor)
            resizeCurves(None, 1, 1, 1, scale)
            twistCtrl[0] = pc.rename(twistCtrl[0], name + side + joint + '_twist_ctrl')
            Snap(joint, twistCtrl[0])
            zeroGrp = quickZeroOut(twistCtrl[0])
            pc.parentConstraint(joint, zeroGrp[0], weight=1)
            partGrp = pc.listRelatives(controller, parent=True)
            pc.parent(zeroGrp[0], partGrp[0])
            pc.connectAttr((controller + '.twistFixVis'), (twistCtrl[0] + '.visibility'))
            # NOTE(review): bare name `locknHide` is not defined in this file
            # chunk; the sibling branch passes the string 'locknHide' --
            # this looks like missing quotes and would raise NameError.
            lockAndHide(twistCtrl[0], locknHide, 'trans scale vis')
            # Lock the two non-twist rotation channels on the control.
            pc.setAttr((twistCtrl[0] + '.' + twistAxis[1]),lock=True, keyable=False)
            pc.setAttr((twistCtrl[0] + '.' + twistAxis[2]), lock=True, keyable=False)
            # Nodes blending the automatic split twist with the manual control
            # twist; 'nullifyTwist' fades the automatic part out.
            rotMulti = pc.createNode('multiplyDivide', n=(name + side + joint + '_addTwist_md'))
            pc.setAttr((rotMulti + '.operation'), 2)
            pc.setAttr((rotMulti + '.input2X'), jointCount)
            pc.setAttr((rotMulti + '.input2Y'), jointCount)
            pc.setAttr((rotMulti + '.input2Z'), jointCount)
            rotMultiDouble = pc.createNode('multDoubleLinear', n=(name + side + joint + '_addTwist_mdl'))
            rotAdd = pc.createNode('addDoubleLinear', n=(name + side + joint + '_addTwist_adl'))
            rotBlend = pc.createNode('blendTwoAttr', n=(name + side + joint + '_addTwist_bta'))
            jointRotAdd = pc.createNode('addDoubleLinear', n=(name + side + joint + '_addRot_adl'))
            jointRotBlend = pc.createNode('blendTwoAttr', n=(name + side + joint + '_addRot_bta'))
            pc.addAttr(twistCtrl[0], ln='nullifyTwist', at='double', min=0, max=1, keyable=True)
            pc.connectAttr((twistCtrl[0] + '.' + twistAxis[0]), (rotMulti + '.input1X'))
            pc.connectAttr((rotMulti + '.outputX'), (rotMultiDouble + '.input1'))
            pc.setAttr((rotMultiDouble + '.input2'), -1)  # negate manual twist for the segments
            pc.connectAttr((twistCtrl[0] + '.nullifyTwist'), (rotBlend + '.attributesBlender'))
            pc.connectAttr((rotMd + '.output' + axis), (rotBlend + '.input[0]'))
            pc.setAttr((rotBlend + '.input[1]'), 0)
            pc.connectAttr((rotBlend + '.output'), (rotAdd + '.input1'))
            pc.connectAttr((rotMultiDouble + '.output'), (rotAdd + '.input2'))
            pc.connectAttr((twistCtrl[0] + '.nullifyTwist'), (jointRotBlend + '.attributesBlender'))
            pc.connectAttr((rotMd + '.output' + axis), (jointRotBlend + '.input[0]'))
            if ikFkType == 'utilNode':
                pc.connectAttr((rotNode[0] + '.output' + color), (jointRotBlend + '.input[1]'))
            elif ikFkType == 'constraint':
                # NOTE(review): see the 'constrain' spelling note above.
                pc.connectAttr((rotNode[0] + '.constraintRotate.constraintRotate' + axis), (jointRotBlend + '.input[1]'))
            pc.connectAttr((jointRotBlend + '.output'), (jointRotAdd + '.input1'))
            pc.connectAttr((twistCtrl[0] + '.' + twistAxis[0]), (jointRotAdd + '.input2'))
        elif rotConnect == 'child':
            # create new joint same position as wrist joint and constraint to the ikfk joints
            pc.select(cl=True)
            tempJoint = curveControl('joint', 'curve', controlColor)
            tempJoint[0] = pc.rename(tempJoint[0], name + side + joint + 'Rot_jnt')
            Snap(wristJoint, tempJoint[0])
            rad = pc.getAttr(wristJoint + '.radius')
            pc.setAttr((tempJoint[0] + '.radius'), rad)
            pc.select(cl=True)
            # Freeze rotation only so the helper starts with clean channels.
            pc.makeIdentity(tempJoint[0], apply=True, t=0, r=1, s=0, n=0)
            ikFkCon = pc.parentConstraint(('fk_' + wristJoint), ('ik_' + wristJoint), tempJoint[0], skipTranslate=['x', 'y', 'z'], weight=1)
            # FK_IK drives the constraint weights (reversed for the FK target).
            rev = pc.createNode('reverse', n=(tempJoint[0] + '_rot_rev'))
            pc.connectAttr((controller + '.FK_IK'), (rev + '.ix'), f=True)
            pc.connectAttr((rev + '.ox'), (ikFkCon + '.w0'), f=True)
            pc.connectAttr((controller + '.FK_IK'), (ikFkCon + '.w1'), f=True)
            pc.setAttr((rotMd + '.input2X'), jointCount)
            pc.setAttr((rotMd + '.input2Y'), jointCount)
            pc.setAttr((rotMd + '.input2Z'), jointCount)
            pc.connectAttr((tempJoint[0] + '.rotateX'), (rotMd + '.input1X'))
            pc.connectAttr((tempJoint[0] + '.rotateY'), (rotMd + '.input1Y'))
            pc.connectAttr((tempJoint[0] + '.rotateZ'), (rotMd + '.input1Z'))
            # Twist-fix control, placed at the child joint in this mode.
            pc.select(cl=True)
            twistCtrl = curveControl('plus', 'curve', controlColor)
            resizeCurves(None, 1, 1, 1, scale)
            twistCtrl[0] = pc.rename(twistCtrl[0], name + side + childJoint + '_twist_ctrl')
            Snap(childJoint,twistCtrl[0])
            zeroGrp = quickZeroOut(twistCtrl[0])
            pc.parentConstraint(childJoint, zeroGrp[0], weight=1)
            partGrp = pc.listRelatives(controller, parent=True)
            pc.parent(zeroGrp[0], partGrp[0])
            pc.connectAttr((controller + '.twistFixVis'), (twistCtrl[0] + '.visibility'))
            lockAndHide( twistCtrl[0], 'locknHide', 'trans scale vis')
            pc.setAttr((twistCtrl[0] + '.' + twistAxis[1]), lock=True, keyable=False)
            pc.setAttr((twistCtrl[0] + '.' + twistAxis[2]), lock=True, keyable=False)
            # NOTE(review): suffix '_addTwist_mdl' on a multiplyDivide node;
            # the parent branch uses '_addTwist_md' -- probably a typo.
            rotMulti = pc.createNode('multiplyDivide', n=(name + side + joint + '_addTwist_mdl'))
            pc.setAttr((rotMulti + '.operation'), 2)
            pc.setAttr((rotMulti + '.input2X'), jointCount)
            pc.setAttr((rotMulti + '.input2Y'), jointCount)
            pc.setAttr((rotMulti + '.input2Z'), jointCount)
            rotAdd = pc.createNode('addDoubleLinear', n=(name + side + joint + '_addTwist_adl'))
            rotBlend = pc.createNode('blendTwoAttr', n=(name + side + joint + '_addTwist_bta'))
            pc.addAttr(twistCtrl[0], ln='nullifyTwist', at='double', min=0, max=1, keyable=True)
            pc.connectAttr((twistCtrl[0] + '.nullifyTwist'), (rotBlend + '.attributesBlender'))
            pc.connectAttr((rotMd + '.output' + axis), (rotBlend + '.input[0]'))
            pc.setAttr((rotBlend + '.input[1]'), 0)
            pc.connectAttr((twistCtrl[0] + '.' + twistAxis[0]), (rotMulti + '.input1X'))
            pc.connectAttr((rotBlend + '.output'), (rotAdd + '.input1'))
            pc.connectAttr((rotMulti + '.outputX'), (rotAdd + '.input2'))
        # Split the joint and wire stretch + twist into each new segment.
        pc.select(joint, r=True)
        newJoints = splitSelJoint(jointCount)
        for j in newJoints:
            if stretch:
                if stretchType == 'translate':
                    pc.connectAttr((transMd + '.outputX'), (j + '.translateX'))
                    pc.connectAttr((transMd + '.outputY'), (j + '.translateY'))
                    pc.connectAttr((transMd + '.outputZ'), (j + '.translateZ'))
                elif stretchType == 'scale':
                    pc.connectAttr((transMd + '.output' + axis), (j + '.scale' + axis))
            pc.connectAttr((rotAdd + '.output'), (j + '.' + twistAxis[0]))
        if rotConnect == 'parent':
            if stretch:
                if stretchType == 'translate':
                    pc.connectAttr((transMd + '.outputX'), (childJoint + '.translateX'), f=True)
                    pc.connectAttr((transMd + '.outputY'), (childJoint + '.translateY'), f=True)
                    pc.connectAttr((transMd + '.outputZ'), (childJoint + '.translateZ'), f=True)
            pc.connectAttr((jointRotAdd + '.output'), (joint + '.' + twistAxis[0]), f=True)
        elif rotConnect == 'child':
            if stretch:
                if stretchType == 'translate':
                    pc.connectAttr((transMd + '.outputX'), (childJoint + '.translateX'), f=True)
                    pc.connectAttr((transMd + '.outputY'), (childJoint + '.translateY'), f=True)
                    pc.connectAttr((transMd + '.outputZ'), (childJoint + '.translateZ'), f=True)
            # connecting temp joint translate
            pc.connectAttr((transNode[0] + '.outputR'), (tempJoint[0] + '.translateX'))
            pc.connectAttr((transNode[0] + '.outputG'), (tempJoint[0] + '.translateY'))
            pc.connectAttr((transNode[0] + '.outputB'), (tempJoint[0] + '.translateZ'))
            # parent new twist help joint to elbow
            pc.parent(tempJoint[0], joint)
        # insert joint into split joint array
        if volume:
            newJoints.insert(0, joint)
            # NOTE(review): pc.refresh is only referenced, never called --
            # add () if a viewport refresh is actually intended here.
            pc.refresh
            js.make_joint_volume_setup(name, side, controller, stretchType, newJoints)
    return newJoints
# based of <NAME>'s code
def splitSelJoint(numSegments):
if numSegments < 2:
pc.error('The number of segments has to be more than 1.. ')
joints = []
joint = ''
newJoints = []
newChildJoints = []
count = 0
joints = pc.ls(sl=True, type='joint')
for joint in joints:
child = getFirstChildJoint(joint)
if not child:
pc.error('Joint: ' + joint + ' has no children joints.\n')
else:
axis = ''
rotationOrder = ''
firstChar = ''
radius = pc.getAttr(joint + '.radius')
axis = getJointAxis(child)
rotOrderIndex = pc.getAttr(joint + '.rotateOrder')
rotationOrder = getRotOrder(joint)
childT = 0.0
tVal = 0.0
attr = ('t'+axis)
childT = pc.getAttr(child + '.' + attr)
space = childT/numSegments
locators = []
for x in range(numSegments-1):
tmp = pc.spaceLocator()
locators.append(tmp)
pc.parent(locators[x], joint)
pc.setAttr((locators[x] + '.t'), 0, 0, 0)
pc.setAttr((locators[x] + '.' + attr), (space * (x+1)))
prevJoint = joint
for x in range(len(locators)):
newJoint = pc.insertJoint(prevJoint)
pos = pc.xform(locators[x], q=True, ws=True, rp=True)
pc.move(pos[0], pos[1], pos[2], (newJoint + '.scalePivot'), (newJoint + '.rotatePivot'), a=True, ws=True)
newJoint = pc.rename((newJoint), (joint + '_seg_'+str(x+1)+'_joint'))
pc.setAttr((newJoint | |
<filename>bin/getCmdbData.py
#!/usr/bin/python -u
#######################################################
#
# Remedy REST mediator for topology inclusion into ASM
#
# 02/09/21 - <NAME> (<EMAIL>)
#
#######################################################
import sys
from httplib import IncompleteRead
import time
import datetime
import gc
import random
import base64
import json
import re
from pprint import pprint
import os
import ssl
import urllib2
import urllib
from collections import defaultdict
from multiprocessing import Process
def keyExists(d, myKey):
    """Return True if *myKey* is a key of *d* or of any nested dict value.

    Fixes two defects in the original:
    - the recursive call referenced an undefined name ``myhaskey`` and
      dropped the key argument; it now recurses via ``keyExists``.
    - ``dict.has_key`` (removed in Python 3) is replaced by the ``in``
      operator, which behaves identically on Python 2 and 3.
    """
    return myKey in d or any(
        keyExists(dd, myKey) for dd in d.values() if isinstance(dd, dict)
    )
def loadProperties(filepath, sep='=', comment_char='#'):
    """Parse *filepath* as a java-style properties file.

    Blank lines and lines starting with *comment_char* are skipped.  Each
    remaining line is split on *sep*; the key is the first field, and the
    value is everything after the first separator (later separators are
    preserved), stripped of whitespace and of one layer of double quotes.

    Returns a dict mapping key strings to value strings.
    """
    props = {}
    with open(filepath, "rt") as handle:
        for raw in handle:
            stripped = raw.strip()
            if not stripped or stripped.startswith(comment_char):
                continue
            fields = stripped.split(sep)
            props[fields[0].strip()] = sep.join(fields[1:]).strip().strip('"')
    return props
def loadClassList(filepath, comment_char='#'):
    """Read one CI class name per line from *filepath*.

    Blank lines and lines beginning with *comment_char* are ignored; the
    remaining lines are stripped of surrounding whitespace and returned as
    a list in file order.
    """
    with open(filepath, "rt") as handle:
        stripped_lines = (line.strip() for line in handle)
        return [entry for entry in stripped_lines
                if entry and not entry.startswith(comment_char)]
# ciClassList = { "cmdb_ci_cluster", "cmdb_ci_cluster_vip", "cmdb_ci_cluster_resource", "cmdb_ci_cluster_node", "cmdb_ci_vm", "cmdb_ci_server", "cmdb_ci_ip_router", "cmdb_ci_ip_switch", "cmdb_ci_appl", "cmdb_ci_db_instance", "cmdb_ci_service" }
def loadCmdbServer(filepath, sep=',', comment_char='#'):
    ##########################################################################################
    #
    # Reads the ServiceNow/Remedy server configuration file and returns a dictionary
    # with keys "server", "user" and "password".  Each non-comment line must carry at
    # least three sep-separated fields; the last valid line wins.  An empty dict is
    # returned when the file holds no valid entry.
    #
    # Fixes over the original:
    # - the result dict was re-created on every line, so a trailing comment or
    #   blank line wiped an already-parsed entry (and an all-comment file raised
    #   NameError on return); the dict is now created once before the loop.
    # - the line counter only advanced on non-comment lines; it now counts every
    #   physical line, so the error message reports the real (1-based) line number.
    #
    ##########################################################################################
    cmdbServerDict = {}
    lineNum = 0
    with open(filepath, "rt") as f:
        for line in f:
            lineNum += 1
            l = line.strip()
            if l and not l.startswith(comment_char):
                values = l.split(sep)
                if len(values) < 3:
                    # print(...) is valid on both Python 2 and 3 for one argument
                    print("Malformed server configuration entry on line number: " + str(lineNum))
                else:
                    cmdbServerDict["server"] = values[0]
                    cmdbServerDict["user"] = values[1]
                    cmdbServerDict["password"] = values[2]
    return(cmdbServerDict)
def verifyAsmConnectivity(asmDict):
    ##################################################################
    #
    # Verifies that the ASM server credentials in asmDict are valid.
    # ---+++ CURRENTLY UNIMPLEMENTED +++---
    # Always reports success; kept so callers already have the hook.
    #
    ##################################################################
    return True
def loadEntityTypeMapping(filepath, sep=",", comment_char='#'):
    ################################################################################
    #
    # Reads the entityType map configuration file and populates the module-level
    # dict ``entityTypeMappingDict`` (quoted source class -> quoted ASM entityType).
    # NOTE(review): despite the original comment, nothing is returned -- callers
    # must read the global after calling this.
    #
    ################################################################################
    # NOTE(review): lineNum is never incremented, so the malformed-line message
    # below always reports "line 0".
    lineNum = 0
    with open(filepath, "rt") as f:
        for line in f:
            l = line.strip()
            # Skip blank lines and comments.
            if l and not l.startswith(comment_char):
                values = l.split(sep)
                # Exactly two fields expected per line.
                if(len(values) < 2 or len(values) > 2):
                    print "Malformed entityType map config line on line " + str(lineNum)
                else:
                    # Strip the surrounding double quotes from both fields.
                    entityTypeMappingDict[values[0].replace('"', '')] = values[1].replace('"', '')
def loadAssetLifecycleStatusFilter(filepath, sep=",", comment_char='#'):
    ################################################################################
    #
    # Reads the AssetLifecycleStatus filter file and appends every non-comment,
    # non-blank line to the module-level list ``assetLifecycleStatusFilterArray``.
    # Nothing is returned; callers read the global.
    #
    ################################################################################
    print "opening AssetLifecycleStatus filter file at " + filepath
    # NOTE(review): lineNum and sep are unused in this function.
    lineNum = 0
    with open(filepath, "rt") as f:
        for line in f:
            l = line.strip()
            if l and not l.startswith(comment_char):
                assetLifecycleStatusFilterArray.append(l)
    #for l in assetLifecycleStatusFilterArray:
    #    print l
def loadPrimaryCapabilityFilter(filepath, sep=",", comment_char='#'):
    ################################################################################
    #
    # Reads the Primary Capability filter file and appends every non-comment,
    # non-blank line to the module-level list ``primaryCapabilityFilterArray``.
    # Nothing is returned; callers read the global.
    #
    ################################################################################
    #print "opening Primary Capability filter file at " + filepath
    # NOTE(review): lineNum and sep are unused in this function.
    lineNum = 0
    with open(filepath, "rt") as f:
        for line in f:
            l = line.strip()
            if l and not l.startswith(comment_char):
                primaryCapabilityFilterArray.append(l)
def loadPrimaryCapabilityMapping(filepath, sep=",", comment_char='#'):
    ################################################################################
    #
    # Reads the PrimaryCapability mapping file and populates the module-level
    # dict ``primaryCapabilityMappingDict`` (quoted source value -> quoted target).
    # Nothing is returned; callers read the global.
    #
    ################################################################################
    #print "opening PrimaryCapability mapping file at " + filepath
    # NOTE(review): lineNum is never incremented, so the malformed-line message
    # below always reports "line 0".
    lineNum = 0
    with open(filepath, "rt") as f:
        for line in f:
            l = line.strip()
            if l and not l.startswith(comment_char):
                values = l.split(sep)
                # Exactly two fields expected per line.
                if(len(values) < 2 or len(values) > 2):
                    print "Malformed entityType map config line on line " + str(lineNum)
                else:
                    primaryCapabilityMappingDict[values[0].replace('"', '')] = values[1].replace('"', '')
    #print primaryCapabilityMappingDict
def loadRelationshipMapping(filepath, sep=",", comment_char='#'):
    ################################################################################
    #
    # Reads the relationship map configuration file and populates the
    # module-level dict ``relationshipMappingDict``.  Each line carries three
    # quoted fields; field 1 is the key and field 3 the value.
    # NOTE(review): the middle field (values[1]) is silently ignored -- confirm
    # that is intentional.  Nothing is returned; callers read the global.
    #
    ################################################################################
    # NOTE(review): lineNum is never incremented, so the malformed-line message
    # below always reports "line 0".
    lineNum = 0
    #relationshipMappingDict = {}
    with open(filepath, "rt") as f:
        for line in f:
            l = line.strip()
            if l and not l.startswith(comment_char):
                values = l.split(sep)
                # Exactly three fields expected per line.
                if(len(values) < 3 or len(values) > 3):
                    print "Malformed mapping config line on line " + str(lineNum)
                else:
                    relationshipMappingDict[values[0].replace('"', '')] = values[2].replace('"', '')
def loadAsmServer(filepath, sep=",", comment_char='#'):
    ################################################################################
    #
    # Reads the ASM server configuration file and returns a dictionary for the
    # FIRST valid line (fields: server,port,user,password,tenantid) whose
    # connectivity check passes.
    # NOTE(review): when no line validates (or connectivity fails for all),
    # the function falls off the end and implicitly returns None -- callers
    # must handle that.
    #
    ################################################################################
    # NOTE(review): lineNum is never incremented, so the malformed-line message
    # below always reports "line 0".
    lineNum = 0
    with open(filepath, "rt") as f:
        for line in f:
            asmDict = {}
            l = line.strip()
            if l and not l.startswith(comment_char):
                values = l.split(sep)
                # At least five fields expected per line.
                if(len(values) < 5):
                    print "Malformed ASM server config line on line " + str(lineNum)
                else:
                    asmDict["server"] = values[0]
                    asmDict["port"] = values[1]
                    asmDict["user"] = values[2]
                    asmDict["password"] = values[3]
                    asmDict["tenantid"] = values[4]
                    # verifyAsmConnectivity is currently a stub that always
                    # returns True, so the first well-formed line wins.
                    if(verifyAsmConnectivity(asmDict)):
                        return(asmDict)
                    else:
                        # NOTE(review): mediatorHome is a module-level global.
                        print "Unable to connect to ASM server " + asmDict["server"] + " on port " + asmDict["port"] + ", please verify server, username, password, and tenant id in " + mediatorHome + "config/asmserver.conf"
def listArsCmdbClasses(token):
    #####################################
    #
    # Lists all CMDB classes in the BMC.CORE namespace via the Remedy
    # CMDB REST API (GET /api/cmdb/v1.0/classes/BMC.CORE).
    #
    # input:  token -- AR-JWT token string from getArsToken()
    # output: raw response body as str (also printed), or False on IOError
    #
    #####################################
    method = "GET"
    # Server host comes from the module-level cmdbServerDict global.
    requestUrl = 'https://' + cmdbServerDict["server"] + '/api/cmdb/v1.0/classes/BMC.CORE'
    try:
        request = urllib2.Request(requestUrl)
        request.add_header("Content-Type",'application/json')
        request.add_header("Authorization","AR-JWT " + token)
        # urllib2 has no native verb override; force the HTTP method.
        request.get_method = lambda: method
        response = urllib2.urlopen(request)
        xmlout = response.read()
        print str(xmlout)
        return str(xmlout)
    except IOError, e:
        print 'Failed to open "%s".' % requestUrl
        if hasattr(e, 'code'):
            print 'We failed with error code - %s.' % e.code
        elif hasattr(e, 'reason'):
            print "The error object has the following 'reason' attribute :"
            print e.reason
            print "This usually means the server doesn't exist,",
            print "is down, or we don't have an internet connection."
        return False
def listArsCmdbNamespaces(token):
    #####################################
    #
    # Lists all CMDB namespaces via the Remedy CMDB REST API
    # (GET /api/cmdb/v1.0/namespaces).
    #
    # input:  token -- AR-JWT token string from getArsToken()
    # output: raw response body as str (also printed), or False on IOError
    #
    #####################################
    method = "GET"
    # Server host comes from the module-level cmdbServerDict global.
    requestUrl = 'https://' + cmdbServerDict["server"] + '/api/cmdb/v1.0/namespaces'
    try:
        request = urllib2.Request(requestUrl)
        request.add_header("Content-Type",'application/json')
        request.add_header("Authorization","AR-JWT " + token)
        # urllib2 has no native verb override; force the HTTP method.
        request.get_method = lambda: method
        response = urllib2.urlopen(request)
        xmlout = response.read()
        print str(xmlout)
        return str(xmlout)
    except IOError, e:
        print 'Failed to open "%s".' % requestUrl
        if hasattr(e, 'code'):
            print 'We failed with error code - %s.' % e.code
        elif hasattr(e, 'reason'):
            print "The error object has the following 'reason' attribute :"
            print e.reason
            print "This usually means the server doesn't exist,",
            print "is down, or we don't have an internet connection."
        return False
def getArsCmdb(token):
    #####################################
    #
    # Fetches CMDB instances (BMC.CORE / BMC.ASSET / BMC_COMPUTERSYSTEM)
    # from the Remedy CMDB REST API.
    #
    # input:  token -- AR-JWT token string from getArsToken()
    # output: raw response body as str (also printed), or False on IOError
    #
    #####################################
    namespace = "BMC.CORE"
    datasetId = "BMC.ASSET"
    classId = "BMC_COMPUTERSYSTEM"
    method = "GET"
    # NOTE(review): requestUrl is never assigned in this function -- only the
    # commented-out debug print below mentions it -- so calling this raises
    # NameError immediately.  The URL construction (presumably built from
    # namespace/datasetId/classId like the sibling functions) appears lost.
    #print "REQUEST URL: " + requestUrl
    try:
        request = urllib2.Request(requestUrl)
        request.add_header("Content-Type",'application/json')
        request.add_header("Authorization","AR-JWT " + token)
        # urllib2 has no native verb override; force the HTTP method.
        request.get_method = lambda: method
        response = urllib2.urlopen(request)
        xmlout = response.read()
        print str(xmlout)
        return str(xmlout)
    except IOError, e:
        print 'Failed to open "%s".' % requestUrl
        if hasattr(e, 'code'):
            print 'We failed with error code - %s.' % e.code
        elif hasattr(e, 'reason'):
            print "The error object has the following 'reason' attribute :"
            print e.reason
            print "This usually means the server doesn't exist,",
            print "is down, or we don't have an internet connection."
        return False
def getArsToken():
    #####################################
    #
    # Obtains an AR-JWT token from the Remedy /api/jwt/login endpoint and
    # stores it in the module-level global ``arsToken``.
    #
    # inputs: credentials read from the module-level cmdbServerDict global
    # output: none on success (the token is delivered via the global);
    #         the process exits if authentication fails.
    #
    #####################################
    username = cmdbServerDict["user"]
    password = cmdbServerDict["password"]
    print ":"
    # NOTE(review): this logs the cleartext password -- remove before production.
    print "Getting token, username is " + username + " and password is " + password
    global arsToken
    method = "POST"
    requestUrl = 'https://' + cmdbServerDict["server"] + '/api/jwt/login'
    # Credentials are sent form-encoded in the POST body.
    # NOTE(review): values are not URL-encoded; special characters in the
    # password would break the request -- confirm.
    body = "username=" + username + "&password=" + password
    #authHeader = 'Basic ' + base64.b64encode(asmServerDict["user"] + ":" + asmServerDict["password"])
    #print "auth header is: " + str(authHeader)
    #print "pushing the following json to ASM: " + jsonResource
    try:
        request = urllib2.Request(requestUrl, body)
        request.add_header("Content-Type",'application/x-www-form-urlencoded')
        #request.add_header("Content-Length",'32')
        #request.add_header("username",username)
        #request.add_header("password",password)
        request.get_method = lambda: method
        response = urllib2.urlopen(request)
        xmlout = response.read()
        sys.stderr.write("Received token: " + str(xmlout))
        #return str(xmlout)
        arsToken = str(xmlout)
    except IOError, e:
        print 'Failed to open "%s".' % requestUrl
        if hasattr(e, 'code'):
            print 'We failed with error code - %s.' % e.code
        elif hasattr(e, 'reason'):
            print "The error object has the following 'reason' attribute :"
            print e.reason
            print "This usually means the server doesn't exist,",
            print "is down, or we don't have an internet connection."
        print "FATAL: UNABLE TO AUTHENTICATE WITH ARS TO OBTAIN TOKEN USING PROVIDED CREDENTIALS"
        exit()
        # NOTE(review): unreachable -- exit() above raises SystemExit.
        return False
def releaseArsToken(token):
#####################################
#
# This function releases an ARS token
#
# input: token
# output: True/False
#
#####################################
method = "POST"
requestUrl = 'https://' + cmdbServerDict["server"] + '/api/jwt/logout'
#authHeader = 'Basic ' + base64.b64encode(asmServerDict["user"] + ":" + asmServerDict["password"])
#print "auth header is: " + str(authHeader)
#print "pushing the following json to ASM: " + jsonResource
try:
request = urllib2.Request(requestUrl)
request.add_header("Authorization","AR-JWT " + token)
request.get_method = lambda: method
response = urllib2.urlopen(request)
xmlout = response.read()
if hasattr(response, 'code'):
| |
<reponame>lakhlaifi/RedHat-Ansible
#!/usr/bin/python
# Copyright 2014 <NAME>, Hothead Games Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata (pre-2.5 schema): community-supported, preview status.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- "<NAME> (@j-carl), Hothead Games Inc."
module: redshift
version_added: "2.2"
short_description: create, delete, or modify an Amazon Redshift instance
description:
- Creates, deletes, or modifies amazon Redshift cluster instances.
options:
command:
description:
- Specifies the action to take.
required: true
choices: [ 'create', 'facts', 'delete', 'modify' ]
identifier:
description:
- Redshift cluster identifier.
required: true
node_type:
description:
- The node type of the cluster. Must be specified when command=create.
choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge']
username:
description:
- Master database username. Used only when command=create.
password:
description:
- Master database password. Used only when command=create.
cluster_type:
description:
- The type of cluster.
choices: ['multi-node', 'single-node' ]
default: 'single-node'
db_name:
description:
- Name of the database.
default: null
availability_zone:
description:
- availability zone in which to launch cluster
aliases: ['zone', 'aws_zone']
number_of_nodes:
description:
- Number of nodes. Only used when cluster_type=multi-node.
default: null
cluster_subnet_group_name:
description:
- which subnet to place the cluster
aliases: ['subnet']
cluster_security_groups:
description:
- in which security group the cluster belongs
default: null
aliases: ['security_groups']
vpc_security_group_ids:
description:
- VPC security group
aliases: ['vpc_security_groups']
default: null
preferred_maintenance_window:
description:
- maintenance window
aliases: ['maintance_window', 'maint_window']
default: null
cluster_parameter_group_name:
description:
- name of the cluster parameter group
aliases: ['param_group_name']
default: null
automated_snapshot_retention_period:
description:
- period when the snapshot take place
aliases: ['retention_period']
default: null
port:
description:
- which port the cluster is listening on
default: null
cluster_version:
description:
- which version the cluster should have
aliases: ['version']
choices: ['1.0']
default: null
allow_version_upgrade:
description:
- flag to determine if a version upgrade is possible
aliases: ['version_upgrade']
default: true
publicly_accessible:
description:
- if the cluster is accessible publicly or not
default: false
encrypted:
description:
- if the cluster is encrypted or not
default: false
elastic_ip:
description:
- if the cluster has an elastic IP or not
default: null
new_cluster_identifier:
description:
- Only used when command=modify.
aliases: ['new_identifier']
default: null
wait:
description:
- When command=create, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
requirements: [ 'boto' ]
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic cluster provisioning example
- redshift: >
command=create
node_type=ds1.xlarge
identifier=new_cluster
username=cluster_admin
password=<PASSWORD>
'''
RETURN = '''
cluster:
description: dictionary containing all the cluster information
returned: success
type: dictionary
contains:
identifier:
description: Id of the cluster.
returned: success
type: string
sample: "new_redshift_cluster"
create_time:
description: Time of the cluster creation as timestamp.
returned: success
type: float
sample: 1430158536.308
status:
description: Status of the cluster.
returned: success
type: string
sample: "available"
db_name:
description: Name of the database.
returned: success
type: string
sample: "new_db_name"
availability_zone:
description: Amazon availability zone where the cluster is located.
returned: success
type: string
sample: "us-east-1b"
maintenance_window:
description: Time frame when maintenance/upgrade are done.
returned: success
type: string
sample: "sun:09:30-sun:10:00"
private_ip_address:
description: Private IP address of the main node.
returned: success
type: string
sample: "10.10.10.10"
public_ip_address:
description: Public IP address of the main node.
returned: success
type: string
sample: "0.0.0.0"
port:
description: Port of the cluster.
returned: success
type: int
sample: 5439
url:
description: FQDN of the main cluster node.
returned: success
type: string
sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com"
'''
import time
try:
import boto
from boto import redshift
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def _collect_facts(resource):
"""Transfrom cluster information to dict."""
facts = {
'identifier' : resource['ClusterIdentifier'],
'create_time' : resource['ClusterCreateTime'],
'status' : resource['ClusterStatus'],
'username' : resource['MasterUsername'],
'db_name' : resource['DBName'],
'availability_zone' : resource['AvailabilityZone'],
'maintenance_window': resource['PreferredMaintenanceWindow'],
}
for node in resource['ClusterNodes']:
if node['NodeRole'] in ('SHARED', 'LEADER'):
facts['private_ip_address'] = node['PrivateIPAddress']
break
return facts
def create_cluster(module, redshift):
    """
    Create a new cluster (idempotent).

    module: AnsibleModule object
    redshift: authenticated redshift connection object

    Returns:
        (changed, facts) -- changed is False when a cluster with the given
        identifier already exists; facts is the dict from _collect_facts().
    """
    identifier = module.params.get('identifier')
    node_type = module.params.get('node_type')
    username = module.params.get('username')
    password = module.params.get('password')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    changed = True
    # Package up the optional parameters
    params = {}
    for p in ('db_name', 'cluster_type', 'cluster_security_groups',
              'vpc_security_group_ids', 'cluster_subnet_group_name',
              'availability_zone', 'preferred_maintenance_window',
              'cluster_parameter_group_name',
              'automated_snapshot_retention_period', 'port',
              'cluster_version', 'allow_version_upgrade',
              'number_of_nodes', 'publicly_accessible',
              'encrypted', 'elastic_ip'):
        if p in module.params:
            params[ p ] = module.params.get( p )
    try:
        # If the cluster already exists this lookup succeeds and nothing is created.
        redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
        changed = False
    except boto.exception.JSONResponseError as e:
        # Not found -> create it.
        try:
            redshift.create_cluster(identifier, node_type, username, password, **params)
        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))
    try:
        resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))
    if wait:
        try:
            # Poll until the cluster reports 'available' or the deadline lapses.
            wait_timeout = time.time() + wait_timeout
            time.sleep(5)
            while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
                time.sleep(5)
                if wait_timeout <= time.time():
                    # NOTE(review): resource is a plain dict, so resource.id
                    # raises AttributeError here -- should use the cluster
                    # identifier instead; confirm and fix.
                    module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))
    return(changed, _collect_facts(resource))
def describe_cluster(module, redshift):
    """
    Collect data about the cluster.

    module: Ansible module object
    redshift: authenticated redshift connection object

    Returns (True, facts) where facts is the dict from _collect_facts();
    fails the module on any boto JSON error.
    """
    identifier = module.params.get('identifier')
    try:
        described = redshift.describe_clusters(identifier)
        resource = described['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))
    return (True, _collect_facts(resource))
def delete_cluster(module, redshift):
    """
    Delete a cluster, optionally waiting until deletion has started.

    module: Ansible module object
    redshift: authenticated redshift connection object
    """
    identifier = module.params.get('identifier')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    try:
        # Fixed typo: was redshift.delete_custer(), which raised
        # AttributeError instead of actually deleting the cluster.
        redshift.delete_cluster(identifier)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))
    if wait:
        try:
            wait_timeout = time.time() + wait_timeout
            resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
            while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
                time.sleep(5)
                if wait_timeout <= time.time():
                    # resource is a dict (no .id attribute); report the
                    # identifier instead of raising AttributeError here.
                    module.fail_json(msg="Timeout waiting for resource %s" % identifier)
                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))
    return(True, {})
def modify_cluster(module, redshift):
    """
    Modify an existing cluster.
    module: Ansible module object
    redshift: authenticated redshift connection object
    """
    identifier = module.params.get('identifier')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    # Package up the optional parameters
    params = {}
    for p in ('cluster_type', 'cluster_security_groups',
              'vpc_security_group_ids', 'cluster_subnet_group_name',
              'availability_zone', 'preferred_maintenance_window',
              'cluster_parameter_group_name',
              'automated_snapshot_retention_period', 'port', 'cluster_version',
              'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'):
        if p in module.params:
            params[p] = module.params.get(p)
    try:
        redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        # NOTE(review): modify_cluster is only invoked when describe_clusters
        # raises (i.e. the cluster was NOT found). That looks inverted for a
        # "modify" operation -- confirm the intended behaviour (possibly
        # related to new_cluster_identifier renames) before relying on it.
        try:
            redshift.modify_cluster(identifier, **params)
        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))
    try:
        resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))
    if wait:
        try:
            # Poll until the cluster is available or the timeout expires.
            wait_timeout = time.time() + wait_timeout
            time.sleep(5)
            while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
                time.sleep(5)
                if wait_timeout <= time.time():
                    # NOTE(review): resource is a dict, so resource.id will
                    # raise AttributeError here -- the identifier should
                    # probably be used in this message instead.
                    module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
        except boto.exception.JSONResponseError as e:
            # https://github.com/boto/boto/issues/2776 is fixed.
            module.fail_json(msg=str(e))
    return(True, _collect_facts(resource))
def main():
    """Ansible module entry point.

    Builds the argument spec, connects to the Redshift endpoint in the
    requested region, and dispatches to the create/facts/delete/modify
    handler named by the 'command' parameter.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        command = dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
        identifier = dict(required=True),
        node_type = dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'], required=False),
        username = dict(required=False),
        password = dict(no_log=True, required=False),
        # Fixed typo: was require=False, an unknown key silently ignored by
        # AnsibleModule.
        db_name = dict(required=False),
        cluster_type = dict(choices=['multi-node', 'single-node', ], default='single-node'),
        cluster_security_groups = dict(aliases=['security_groups'], type='list'),
        vpc_security_group_ids = dict(aliases=['vpc_security_groups'], type='list'),
        cluster_subnet_group_name = dict(aliases=['subnet']),
        availability_zone = dict(aliases=['aws_zone', 'zone']),
        preferred_maintenance_window = dict(aliases=['maintance_window', 'maint_window']),
        cluster_parameter_group_name = dict(aliases=['param_group_name']),
        automated_snapshot_retention_period = dict(aliases=['retention_period']),
        port = dict(type='int'),
        cluster_version = dict(aliases=['version'], choices=['1.0']),
        allow_version_upgrade = dict(aliases=['version_upgrade'], type='bool', default=True),
        number_of_nodes = dict(type='int'),
        publicly_accessible = dict(type='bool', default=False),
        encrypted = dict(type='bool', default=False),
        elastic_ip = dict(required=False),
        new_cluster_identifier = dict(aliases=['new_identifier']),
        wait = dict(type='bool', default=False),
        # type='int' so string values from playbooks work in the
        # time.time() + wait_timeout arithmetic in the handlers.
        wait_timeout = dict(type='int', default=300),
    )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto v2.9.0+ required for this module')
    command = module.params.get('command')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))
    # connect to the redshift endpoint
    try:
        conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))
    changed = True
    if command == 'create':
        (changed, cluster) = create_cluster(module, conn)
    elif command == 'facts':
        (changed, cluster) = describe_cluster(module, conn)
    elif command == 'delete':
        (changed, cluster) = delete_cluster(module, conn)
    elif command == 'modify':
        (changed, cluster) = modify_cluster(module, conn)
    module.exit_json(changed=changed, cluster=cluster)
# import module snippets
from ansible.module_utils.basic | |
self.assertRaises(exception.NotFound, resg.FnGetAtt,
'resource.2')
self.assertIn("Member '2' not found in group resource 'group1'.",
str(ex))
def test_get_attribute_convg(self):
    """FnGetAtt serves attribute values from convergence cache data."""
    cached = node_data.NodeData.from_dict({
        'uuid': mock.ANY,
        'id': mock.ANY,
        'action': 'CREATE',
        'status': 'COMPLETE',
        'attrs': {'refs': ['rsrc1', 'rsrc2']}
    })
    stack = utils.parse_stack(template, cache_data={'group1': cached})
    rsrc = stack.defn['group1']
    self.assertEqual(['rsrc1', 'rsrc2'], rsrc.FnGetAtt('refs'))
def test_get_attribute_blacklist(self):
    """REMOVED_RSRC_LIST is read back from the 'name_blacklist' data entry."""
    resg = self._create_dummy_stack()
    resg.data = mock.Mock(return_value={'name_blacklist': '3,5'})
    self.assertEqual(['3', '5'], resg.FnGetAtt(resg.REMOVED_RSRC_LIST))
def _create_dummy_stack(self, template_data=template, expect_count=2,
                        expect_attrs=None):
    """Parse a stack and stub its ResourceGroup with fake member data.

    template_data: stack template to parse (defaults to the module template)
    expect_count: number of fake member resources to create
    expect_attrs: optional mapping of member name -> attribute value;
                  members not listed fall back to their own name
    """
    stack = utils.parse_stack(template_data)
    resg = stack['group1']
    resg.resource_id = 'test-test'
    attrs = {}
    refids = {}
    if expect_attrs is None:
        expect_attrs = {}
    for index in range(expect_count):
        res = str(index)
        attrs[index] = expect_attrs.get(res, res)
        refids[index] = 'ID-%s' % res
    # Member names are simply '0', '1', ... up to expect_count - 1.
    names = [str(name) for name in range(expect_count)]
    resg._resource_names = mock.Mock(return_value=names)
    self._stub_get_attr(resg, refids, attrs)
    return resg
def _stub_get_attr(self, resg, refids, attrs):
    """Wire FnGetAtt/get_output on resg to serve the canned test data.

    refids: mapping of member index -> reference id
    attrs: mapping of member index -> attribute value
    """
    def ref_id_fn(res_name):
        return refids[int(res_name)]

    def attr_fn(args):
        # args is the attribute path; the first element is the member name.
        res_name = args[0]
        return attrs[int(res_name)]

    def get_output(output_name):
        # Build the nested output definitions on demand and evaluate the
        # requested one, asserting it was actually generated.
        outputs = resg._nested_output_defns(resg._resource_names(),
                                            attr_fn, ref_id_fn)
        op_defns = {od.name: od for od in outputs}
        self.assertIn(output_name, op_defns)
        return op_defns[output_name].get_value()

    orig_get_attr = resg.FnGetAtt

    def get_attr(attr_name, *path):
        if not path:
            attr = attr_name
        else:
            attr = (attr_name,) + path
        # Mock referenced_attrs() so that _nested_output_definitions()
        # will include the output required for this attribute
        resg.referenced_attrs = mock.Mock(return_value=[attr])
        # Pass through to actual function under test
        return orig_get_attr(attr_name, *path)
    resg.FnGetAtt = mock.Mock(side_effect=get_attr)
    resg.get_output = mock.Mock(side_effect=get_output)
class ResourceGroupAttrFallbackTest(ResourceGroupAttrTest):
    """Re-run the attribute tests with nested-stack outputs unavailable.

    get_output is stubbed to raise NotFound, forcing the resource group
    to fall back to the legacy grouputils code paths.
    """

    def _stub_get_attr(self, resg, refids, attrs):
        # Raise NotFound when getting output, to force fallback to old-school
        # grouputils functions
        resg.get_output = mock.Mock(side_effect=exception.NotFound)

        def make_fake_res(idx):
            # Fake nested resource serving the canned ref-id and attribute.
            fr = mock.Mock()
            fr.stack = resg.stack
            fr.FnGetRefId.return_value = refids[idx]
            fr.FnGetAtt.return_value = attrs[idx]
            return fr
        fake_res = {str(i): make_fake_res(i) for i in refids}
        resg.nested = mock.Mock(return_value=fake_res)

    @mock.patch.object(grouputils, 'get_rsrc_id')
    def test_get_attribute(self, mock_get_rsrc_id):
        # REFS should be assembled via grouputils.get_rsrc_id when
        # nested-stack outputs are unavailable.
        stack = utils.parse_stack(template)
        mock_get_rsrc_id.side_effect = ['0', '1']
        rsrc = stack['group1']
        rsrc.get_output = mock.Mock(side_effect=exception.NotFound)
        self.assertEqual(['0', '1'], rsrc.FnGetAtt(rsrc.REFS))
class ReplaceTest(common.HeatTestCase):
    """Scenario tests for ResourceGroup._replace batch planning.

    Each scenario checks how many update tasks are generated for a given
    combination of target count, existing members, blacklist,
    min_in_service, batch size and pause time.
    """

    # 1. no min_in_service
    # 2. min_in_service > count and existing with no blacklist
    # 3. min_in_service > count and existing with blacklist
    # 4. existing > count and min_in_service with blacklist
    # 5. existing > count and min_in_service with no blacklist
    # 6. all existing blacklisted
    # 7. count > existing and min_in_service with no blacklist
    # 8. count > existing and min_in_service with blacklist
    # 9. count < existing - blacklisted
    # 10. pause_sec > 0
    scenarios = [
        ('1', dict(min_in_service=0, count=2,
                   existing=['0', '1'], black_listed=['0'],
                   batch_size=1, pause_sec=0, tasks=2)),
        ('2', dict(min_in_service=3, count=2,
                   existing=['0', '1'], black_listed=[],
                   batch_size=2, pause_sec=0, tasks=3)),
        ('3', dict(min_in_service=3, count=2,
                   existing=['0', '1'], black_listed=['0'],
                   batch_size=2, pause_sec=0, tasks=3)),
        ('4', dict(min_in_service=3, count=2,
                   existing=['0', '1', '2', '3'], black_listed=['2', '3'],
                   batch_size=1, pause_sec=0, tasks=4)),
        ('5', dict(min_in_service=2, count=2,
                   existing=['0', '1', '2', '3'], black_listed=[],
                   batch_size=2, pause_sec=0, tasks=2)),
        ('6', dict(min_in_service=2, count=3,
                   existing=['0', '1'], black_listed=['0', '1'],
                   batch_size=2, pause_sec=0, tasks=2)),
        ('7', dict(min_in_service=0, count=5,
                   existing=['0', '1'], black_listed=[],
                   batch_size=1, pause_sec=0, tasks=5)),
        ('8', dict(min_in_service=0, count=5,
                   existing=['0', '1'], black_listed=['0'],
                   batch_size=1, pause_sec=0, tasks=5)),
        ('9', dict(min_in_service=0, count=3,
                   existing=['0', '1', '2', '3', '4', '5'],
                   black_listed=['0'],
                   batch_size=2, pause_sec=0, tasks=2)),
        ('10', dict(min_in_service=0, count=3,
                    existing=['0', '1', '2', '3', '4', '5'],
                    black_listed=['0'],
                    batch_size=2, pause_sec=10, tasks=3))]

    def setUp(self):
        super(ReplaceTest, self).setUp()
        templ = copy.deepcopy(template)
        self.stack = utils.parse_stack(templ)
        snip = self.stack.t.resource_definitions(self.stack)['group1']
        self.group = resource_group.ResourceGroup('test', snip, self.stack)
        # Stub out the nested-stack update machinery; only _replace's task
        # planning is under test here.
        self.group.update_with_template = mock.Mock()
        self.group.check_update_complete = mock.Mock()
        inspector = mock.Mock(spec=grouputils.GroupInspector)
        self.patchobject(grouputils.GroupInspector, 'from_parent_resource',
                         return_value=inspector)
        inspector.member_names.return_value = self.existing
        inspector.size.return_value = len(self.existing)

    def test_rolling_updates(self):
        # The number of planned tasks must match the scenario expectation.
        self.group._nested = get_fake_nested_stack(self.existing)
        self.group.get_size = mock.Mock(return_value=self.count)
        self.group._name_blacklist = mock.Mock(
            return_value=set(self.black_listed))
        tasks = self.group._replace(self.min_in_service, self.batch_size,
                                    self.pause_sec)
        self.assertEqual(self.tasks, len(tasks))
def tmpl_with_bad_updt_policy():
    """Return a template copy with an unrecognised update-policy key."""
    bad = copy.deepcopy(template)
    bad['resources']['group1']["update_policy"] = {"foo": {}}
    return bad
def tmpl_with_default_updt_policy():
    """Return a template copy with an empty rolling_update policy."""
    templ = copy.deepcopy(template)
    templ['resources']['group1']["update_policy"] = {"rolling_update": {}}
    return templ
def tmpl_with_updt_policy():
    """Return a template copy with a populated rolling_update policy."""
    templ = copy.deepcopy(template)
    policy = {"rolling_update": {
        "min_in_service": "1",
        "max_batch_size": "2",
        "pause_time": "1"
    }}
    templ['resources']['group1']["update_policy"] = policy
    return templ
def get_fake_nested_stack(names):
    """Build a parsed nested stack with one dummy resource per name.

    names: iterable of resource names; each becomes an
    OverwrittenFnGetRefIdType resource in the generated template.
    """
    nested_t = '''
    heat_template_version: 2015-04-30
    description: Resource Group
    resources:
    '''
    resource_snip = '''
      '%s':
        type: OverwrittenFnGetRefIdType
        properties:
          foo: bar
    '''
    # Concatenate the header plus one snippet per requested member.
    resources = [nested_t]
    for res_name in names:
        resources.extend([resource_snip % res_name])

    nested_t = ''.join(resources)
    return utils.parse_stack(template_format.parse(nested_t))
class RollingUpdatePolicyTest(common.HeatTestCase):
    """Parsing and validation of the rolling_update update policy."""

    def test_parse_without_update_policy(self):
        # No update_policy in the template -> empty rolling_update section.
        stack = utils.parse_stack(template)
        stack.validate()
        grp = stack['group1']
        self.assertFalse(grp.update_policy['rolling_update'])

    def test_parse_with_update_policy(self):
        # Explicit policy values survive parsing and validation.
        tmpl = tmpl_with_updt_policy()
        stack = utils.parse_stack(tmpl)
        stack.validate()
        tmpl_grp = tmpl['resources']['group1']
        tmpl_policy = tmpl_grp['update_policy']['rolling_update']
        tmpl_batch_sz = int(tmpl_policy['max_batch_size'])
        grp = stack['group1']
        self.assertTrue(grp.update_policy)
        self.assertEqual(2, len(grp.update_policy))
        self.assertIn('rolling_update', grp.update_policy)
        policy = grp.update_policy['rolling_update']
        self.assertIsNotNone(policy)
        self.assertGreater(len(policy), 0)
        self.assertEqual(1, int(policy['min_in_service']))
        self.assertEqual(tmpl_batch_sz, int(policy['max_batch_size']))
        self.assertEqual(1, policy['pause_time'])

    def test_parse_with_default_update_policy(self):
        # An empty rolling_update section picks up the documented defaults.
        tmpl = tmpl_with_default_updt_policy()
        stack = utils.parse_stack(tmpl)
        stack.validate()
        grp = stack['group1']
        self.assertTrue(grp.update_policy)
        self.assertEqual(2, len(grp.update_policy))
        self.assertIn('rolling_update', grp.update_policy)
        policy = grp.update_policy['rolling_update']
        self.assertIsNotNone(policy)
        self.assertGreater(len(policy), 0)
        self.assertEqual(0, int(policy['min_in_service']))
        self.assertEqual(1, int(policy['max_batch_size']))
        self.assertEqual(0, policy['pause_time'])

    def test_parse_with_bad_update_policy(self):
        # Unknown policy keys must fail stack validation.
        tmpl = tmpl_with_bad_updt_policy()
        stack = utils.parse_stack(tmpl)
        error = self.assertRaises(
            exception.StackValidationFailed, stack.validate)
        self.assertIn("foo", str(error))
class RollingUpdatePolicyDiffTest(common.HeatTestCase):
    """Detection and application of update-policy changes during update."""

    def validate_update_policy_diff(self, current, updated):
        """Assert a policy diff is detected and applied by handle_update."""
        # load current stack
        current_stack = utils.parse_stack(current)
        current_grp = current_stack['group1']
        current_grp_json = current_grp.frozen_definition()
        # load updated stack
        updated_stack = utils.parse_stack(updated)
        updated_grp = updated_stack['group1']
        updated_grp_json = updated_grp.t.freeze()
        # identify the template difference
        tmpl_diff = updated_grp.update_template_diff(
            updated_grp_json, current_grp_json)
        self.assertTrue(tmpl_diff.update_policy_changed())
        prop_diff = current_grp.update_template_diff_properties(
            updated_grp.properties,
            current_grp.properties)
        # test application of the new update policy in handle_update
        current_grp._try_rolling_update = mock.Mock()
        current_grp._assemble_nested_for_size = mock.Mock()
        self.patchobject(scheduler.TaskRunner, 'start')
        current_grp.handle_update(updated_grp_json, tmpl_diff, prop_diff)
        self.assertEqual(updated_grp_json._update_policy or {},
                         current_grp.update_policy.data)

    def test_update_policy_added(self):
        self.validate_update_policy_diff(template,
                                         tmpl_with_updt_policy())

    def test_update_policy_updated(self):
        updt_template = tmpl_with_updt_policy()
        grp = updt_template['resources']['group1']
        policy = grp['update_policy']['rolling_update']
        policy['min_in_service'] = '2'
        policy['max_batch_size'] = '4'
        policy['pause_time'] = '90'
        self.validate_update_policy_diff(tmpl_with_updt_policy(),
                                         updt_template)

    def test_update_policy_removed(self):
        self.validate_update_policy_diff(tmpl_with_updt_policy(),
                                         template)
class RollingUpdateTest(common.HeatTestCase):
    """handle_update behaviour with and without a rolling-update policy."""

    def check_with_update(self, with_policy=False, with_diff=False):
        """Run handle_update with optional policy and/or property diff."""
        current = copy.deepcopy(template)
        self.current_stack = utils.parse_stack(current)
        self.current_grp = self.current_stack['group1']
        current_grp_json = self.current_grp.frozen_definition()
        prop_diff, tmpl_diff = None, None
        updated = tmpl_with_updt_policy() if (
            with_policy) else copy.deepcopy(template)
        if with_diff:
            # Change a nested resource property to force a prop_diff.
            res_def = updated['resources']['group1'][
                'properties']['resource_def']
            res_def['properties']['Foo'] = 'baz'
            prop_diff = dict(
                {'count': 2,
                 'resource_def': {'properties': {'Foo': 'baz'},
                                  'type': 'OverwrittenFnGetRefIdType'}})
        updated_stack = utils.parse_stack(updated)
        updated_grp = updated_stack['group1']
        updated_grp_json = updated_grp.t.freeze()
        tmpl_diff = updated_grp.update_template_diff(
            updated_grp_json, current_grp_json)

        self.current_grp._replace = mock.Mock(return_value=[])
        self.current_grp._assemble_nested = mock.Mock()
        self.patchobject(scheduler.TaskRunner, 'start')
        self.current_grp.handle_update(updated_grp_json, tmpl_diff, prop_diff)

    def test_update_without_policy_prop_diff(self):
        self.check_with_update(with_diff=True)
        self.assertTrue(self.current_grp._assemble_nested.called)

    def test_update_with_policy_prop_diff(self):
        self.check_with_update(with_policy=True, with_diff=True)
        # _replace must be invoked with the policy's (min_in_service,
        # max_batch_size, pause_time) values.
        self.current_grp._replace.assert_called_once_with(1, 2, 1)
        self.assertTrue(self.current_grp._assemble_nested.called)

    def test_update_time_not_sufficient(self):
        current = copy.deepcopy(template)
        self.stack = utils.parse_stack(current)
        self.current_grp = self.stack['group1']
        self.stack.timeout_secs = mock.Mock(return_value=200)
        err = self.assertRaises(ValueError, self.current_grp._update_timeout,
                                3, 100)
        self.assertIn('The current update policy will result in stack update '
                      'timeout.', str(err))

    def test_update_time_sufficient(self):
        current = copy.deepcopy(template)
        self.stack = utils.parse_stack(current)
        self.current_grp = self.stack['group1']
        self.stack.timeout_secs = mock.Mock(return_value=400)
        self.assertEqual(200, self.current_grp._update_timeout(3, 100))
class TestUtils(common.HeatTestCase):
    """Scenario tests for counting blacklisted existing members."""

    # 1. No existing no blacklist
    # 2. Existing with no blacklist
    # 3. Existing with blacklist
    # 4. Existing with a blacklist entry that is not an existing member
    scenarios = [
        ('1', dict(existing=[], black_listed=[], count=0)),
        ('2', dict(existing=['0', '1'], black_listed=[], count=0)),
        ('3', dict(existing=['0', '1'], black_listed=['0'], count=1)),
        ('4', dict(existing=['0', '1'], black_listed=['1', '2'], count=1))
    ]

    def test_count_black_listed(self):
        # Only blacklist entries that are actual members should be counted.
        inspector = mock.Mock(spec=grouputils.GroupInspector)
        self.patchobject(grouputils.GroupInspector, 'from_parent_resource',
                         return_value=inspector)
        inspector.member_names.return_value = self.existing
        stack = utils.parse_stack(template2)
        snip = stack.t.resource_definitions(stack)['group1']
        resgrp = resource_group.ResourceGroup('test', snip, stack)
        resgrp._name_blacklist = mock.Mock(return_value=set(self.black_listed))
        rcount = resgrp._count_black_listed(self.existing)
        self.assertEqual(self.count, rcount)
class TestGetBatches(common.HeatTestCase):
scenarios = [
('4_4_1_0', dict(targ_cap=4, init_cap=4, bat_size=1, min_serv=0,
batches=[
(4, 1, ['4']),
(4, 1, ['3']),
(4, 1, ['2']),
(4, 1, ['1']),
])),
('4_4_1_4', dict(targ_cap=4, init_cap=4, bat_size=1, min_serv=4,
batches=[
(5, 1, ['5']),
(5, 1, ['4']),
(5, 1, ['3']),
(5, 1, ['2']),
(5, 1, ['1']),
(4, 0, []),
])),
('4_4_1_5', dict(targ_cap=4, init_cap=4, bat_size=1, min_serv=5,
batches=[
(5, 1, ['5']),
(5, 1, ['4']),
(5, 1, ['3']),
(5, 1, ['2']),
(5, 1, ['1']),
(4, 0, []),
])),
('4_4_2_0', dict(targ_cap=4, init_cap=4, bat_size=2, min_serv=0,
batches=[
(4, 2, ['4', '3']),
(4, 2, ['2', '1']),
])),
('4_4_2_4', dict(targ_cap=4, init_cap=4, bat_size=2, min_serv=4,
batches=[
(6, 2, ['6', '5']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
(4, 0, []),
])),
('5_5_2_0', dict(targ_cap=5, init_cap=5, bat_size=2, min_serv=0,
batches=[
(5, 2, ['5', '4']),
(5, 2, ['3', '2']),
(5, 1, ['1']),
])),
('5_5_2_4', dict(targ_cap=5, init_cap=5, bat_size=2, min_serv=4,
batches=[
(6, 2, ['6', '5']),
(6, 2, ['4', '3']),
(6, 2, ['2', '1']),
(5, 0, []),
])),
('3_3_2_0', dict(targ_cap=3, init_cap=3, bat_size=2, min_serv=0,
batches=[
(3, 2, ['3', '2']),
(3, 1, ['1']),
])),
('3_3_2_4', dict(targ_cap=3, init_cap=3, bat_size=2, min_serv=4,
batches=[
(5, 2, ['5', '4']),
(5, 2, ['3', '2']),
(4, 1, ['1']),
(3, 0, []),
])),
('4_4_4_0', dict(targ_cap=4, init_cap=4, bat_size=4, min_serv=0,
batches=[
(4, 4, ['4', '3', '2', '1']),
])),
('4_4_5_0', dict(targ_cap=4, init_cap=4, bat_size=5, min_serv=0,
batches=[
(4, 4, ['4', '3', '2', '1']),
])),
('4_4_4_1', dict(targ_cap=4, init_cap=4, bat_size=4, min_serv=1,
batches=[
(5, 4, ['5', '4', '3', '2']),
(4, 1, ['1']),
])),
('4_4_6_1', dict(targ_cap=4, init_cap=4, bat_size=6, min_serv=1,
batches=[
(5, 4, ['5', '4', '3', '2']),
(4, 1, ['1']),
])),
('4_4_4_2', dict(targ_cap=4, init_cap=4, bat_size=4, min_serv=2,
batches=[
(6, 4, ['6', '5', '4', '3']),
(4, 2, ['2', '1']),
])),
('4_4_4_4', dict(targ_cap=4, init_cap=4, bat_size=4, min_serv=4,
batches=[
(8, 4, ['8', '7', '6', '5']),
(8, 4, ['4', '3', '2', '1']),
(4, 0, []),
])),
('4_4_5_6', dict(targ_cap=4, init_cap=4, | |
<reponame>MaciejSzaflik/tirt-W4-Project
# -*- coding: utf-8 -*-
'''
This module handles RTP payload for MJPEG codec as described in RFC2435.
JPEG header restoration code is taken from that RFC, but adapted to Python.
'''
# The following code can be used to create a quantization table from a
# Q factor:
# Tables with leading underscores are alternative, but not from the
# specification. I took them from a certain JPEG image.
# Alternative 8x8 luma quantization table (row-major). Not from RFC 2435;
# taken from a sample JPEG image. Leading underscore = kept for reference,
# unused by the code below.
_jpeg_luma_quantizer = [
    16, 11, 10, 16, 24, 40, 51, 61,
    12, 12, 14, 19, 26, 58, 60, 55,
    14, 13, 16, 24, 40, 57, 69, 56,
    14, 17, 22, 29, 51, 87, 80, 62,
    18, 22, 37, 56, 68, 109, 103, 77,
    24, 35, 55, 64, 81, 104, 113, 92,
    49, 64, 78, 87, 103, 121, 120, 101,
    72, 92, 95, 98, 112, 100, 103, 99]

# Alternative 8x8 chroma quantization table; see note above.
_jpeg_chroma_quantizer = [
    17, 18, 24, 47, 99, 99, 99, 99,
    18, 21, 26, 66, 99, 99, 99, 99,
    24, 26, 56, 99, 99, 99, 99, 99,
    47, 66, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99]
# Base luma quantization table used by MakeTables() to derive tables for a
# given Q factor (RFC 2435 reference code).
jpeg_luma_quantizer = [
    16, 11, 12, 14, 12, 10, 16, 14,
    13, 14, 18, 17, 16, 19, 24, 40,
    26, 24, 22, 22, 24, 49, 35, 37,
    29, 40, 58, 51, 61, 60, 57, 51,
    56, 55, 64, 72, 92, 78, 64, 68,
    87, 69, 55, 56, 80, 109, 81, 87,
    95, 98, 103, 104, 103, 62, 77, 113,
    121, 112, 100, 120, 92, 101, 103, 99]

# Base chroma quantization table used by MakeTables().
jpeg_chroma_quantizer = [
    17, 18, 18, 24, 21, 24, 47, 26,
    26, 47, 99, 66, 56, 66, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99]
# Call MakeTables with the Q factor and two u_char[64] return arrays
def MakeTables(q, lqt, cqt):
    """Derive luma/chroma quantization tables for Q factor q (RFC 2435).

    q: quality factor; values outside 1..99 are clamped
    lqt, cqt: output lists; the 64 luma / chroma quantizers are appended
    """
    # Clamp Q to the valid range (RFC 2435 reference code); the scale
    # selection below intentionally tests the *original* q, per the RFC.
    factor = min(max(q, 1.0), 99.0)
    if q < 50:
        scale = 5000.0 / factor
    else:
        scale = 200.0 - factor * 2
    for i in range(64):
        lq = int((jpeg_luma_quantizer[i] * scale + 50.0) / 100.0)
        cq = int((jpeg_chroma_quantizer[i] * scale + 50.0) / 100.0)
        # Limit the quantizers to 1 <= q <= 255
        lqt.append(min(max(lq, 1), 255))
        cqt.append(min(max(cq, 1), 255))
# Reconstruct Header
# Fixed Huffman tables used to reconstruct the JPEG scan headers
# (RFC 2435 reference code). codelens = number of codes of each bit length
# (1..16); symbols = the values the codes map to, in code order.
lum_dc_codelens = [0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]

lum_dc_symbols = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]

lum_ac_codelens = [0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d]

lum_ac_symbols = [
    0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
    0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
    0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
    0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
    0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
    0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
    0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
    0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
    0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
    0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
    0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
    0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
    0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
    0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
    0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
    0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
    0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
    0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
    0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
    0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
    0xf9, 0xfa]

# Chrominance DC/AC Huffman tables.
chm_dc_codelens = [0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]

chm_dc_symbols = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]

chm_ac_codelens = [0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77]

chm_ac_symbols = [
    0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
    0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
    0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
    0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
    0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
    0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
    0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
    0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
    0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
    0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
    0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
    0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
    0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
    0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
    0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
    0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
    0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
    0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
    0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
    0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
    0xf9, 0xfa]
def MakeQuantHeader(p, qt, tableNo):
    """Append a DQT (Define Quantization Table) segment to p.

    p: output list of byte values
    qt: the 64 quantizer values
    tableNo: quantization table id
    """
    # Marker (FF DB), 2-byte length (67), table id, then the entries.
    p.extend([0xff, 0xdb, 0, 67, tableNo])
    p.extend(qt)
def MakeHuffmanHeader(p, codelens, ncodes, symbols, nsymbols, tableNo, tableClass):
    """Append a DHT (Define Huffman Table) segment to p.

    p: output list of byte values
    codelens, ncodes: code-length counts and how many there are
    symbols, nsymbols: symbol values and how many there are
    tableNo: table id; tableClass: 0 = DC, 1 = AC
    """
    # Marker (FF C4), 2-byte length, combined class/id byte.
    header = [0xff, 0xc4, 0, 3 + ncodes + nsymbols, (tableClass << 4) | tableNo]
    p.extend(header)
    p.extend(codelens)
    p.extend(symbols)
def MakeDRIHeader(p, dri):
    """Append a DRI (Define Restart Interval) segment to p.

    p: output list of byte values
    dri: restart interval in MCUs, emitted big-endian in two bytes
    """
    p.extend([0xff, 0xdd, 0x0, 4, dri >> 8, dri & 0xff])
#===============================================================================
# Arguments:
# type, width, height: as supplied in RTP/JPEG header
# lqt, cqt: quantization tables as either derived from
# the Q field using MakeTables() or as specified
# in section 4.2.
# dri: restart interval in MCUs, or 0 if no restarts.
#
# p: pointer to return area
#
# Return value:
# The length of the generated headers.
#
# Generate a frame and scan headers that can be prepended to the
# RTP/JPEG data payload to produce a JPEG compressed image in
# interchange format (except for possible trailing garbage and
# absence of an EOI marker to terminate the scan).
#===============================================================================
def MakeHeaders(p, type, w, h, lqt, cqt, dri):
    """Append JPEG frame and scan headers for an RTP/JPEG payload to p.

    type: RTP/JPEG type field (0 selects 2x1 sampling, otherwise 2x2)
    w, h: image width and height in pixels
    lqt, cqt: luma and chroma quantization tables (64 entries each)
    dri: restart interval in MCUs, or 0 to omit the DRI segment
    """
    p.append(0xff)
    p.append(0xd8)           # SOI
    MakeQuantHeader(p, lqt, 0)
    MakeQuantHeader(p, cqt, 1)
    if dri != 0:
        MakeDRIHeader(p, dri)
    p.append(0xff)
    p.append(0xc0)           # SOF
    p.append(0)              # length msb
    p.append(17)             # length lsb
    p.append(8)              # 8-bit precision
    p.append(h >> 8)         # height msb
    p.append(h & 255)        # height lsb
    p.append(w >> 8)         # width msb
    p.append(w & 255)        # width lsb
    p.append(3)              # number of components
    p.append(0)              # comp 0
    if type == 0:
        p.append(0x21)       # hsamp = 2, vsamp = 1
    else:
        p.append(0x22)       # hsamp = 2, vsamp = 2
    p.append(0)              # quant table 0
    p.append(1)              # comp 1
    p.append(0x11)           # hsamp = 1, vsamp = 1
    p.append(1)              # quant table 1
    p.append(2)              # comp 2
    p.append(0x11)           # hsamp = 1, vsamp = 1
    p.append(1)              # quant table 1
    MakeHuffmanHeader(p, lum_dc_codelens,
                      len(lum_dc_codelens),
                      lum_dc_symbols,
                      len(lum_dc_symbols), 0, 0)
    MakeHuffmanHeader(p, lum_ac_codelens,
                      len(lum_ac_codelens),
                      lum_ac_symbols,
                      len(lum_ac_symbols), 0, 1)
    MakeHuffmanHeader(p, chm_dc_codelens,
                      len(chm_dc_codelens),
                      chm_dc_symbols,
                      len(chm_dc_symbols), 1, 0)
    MakeHuffmanHeader(p, chm_ac_codelens,
                      len(chm_ac_codelens),
                      chm_ac_symbols,
                      len(chm_ac_symbols), 1, 1)
    p.append(0xff)
    p.append(0xda)           # SOS
    p.append(0)              # length msb
    p.append(12)             # length lsb
    p.append(3)              # 3 components
    p.append(0)              # comp 0
    p.append(0)              # huffman table 0
    p.append(1)              # comp 1
    p.append(0x11)           # huffman table 1
    p.append(2)              # comp 2
    p.append(0x11)           # huffman table 1
    p.append(0)              # first DCT coeff
    p.append(63)             # last DCT coeff
    p.append(0)              # successive approx.
from struct import unpack
def list2string(l):
    """Convert a list of byte values to the corresponding string.

    Uses ''.join instead of repeated += concatenation, which is
    quadratic in the length of the input.
    """
    return ''.join(chr(c) for c in l)
def string2list(s):
    """Convert a string to the list of its character ordinals."""
    return [ord(c) for c in s]
class RFC2435JPEG(object):
'JPEG image | |
from keras.models import Model
from keras.layers import Input, Activation, Dropout, Merge, TimeDistributed, Masking, Dense
from keras.layers.recurrent import LSTM, GRU
from keras.layers.embeddings import Embedding
from keras.regularizers import l2
from keras.optimizers import Adam
from keras import backend as K
import h5py
import shutil
import logging
import sys
# Set up logger: INFO and above is written to stdout; modules share this
# configuration via the root logger.
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)
class NIC:
def __init__(self, embed_size, hidden_size, vocab_size, dropin, optimiser,
             l2reg, hsn_size=512, weights=None, gru=False,
             clipnorm=-1, batch_size=None, t=None, lr=0.001):
    """Store the captioning-model hyperparameters.

    embed_size: number of units in a word embedding
    hidden_size: number of units in the recurrent layer
    vocab_size: size of the word vocabulary
    dropin: probability of dropping input units
    optimiser: optimisation method name (e.g. 'adam')
    l2reg: weight regularisation penalty
    hsn_size: size of the source-language hidden vector
    weights: directory holding a weights.hdf5 checkpoint, or None
    gru: use a GRU recurrent layer instead of an LSTM
    clipnorm: gradient clipping norm  # presumably -1 disables -- confirm
    batch_size: accepted but not stored here (kept for caller compatibility)
    t: expected number of timesteps, needed to build the graph
    lr: learning rate passed to Adam
    """
    self.max_t = t  # Expected timesteps. Needed to build the Theano graph

    # Model hyperparameters
    self.vocab_size = vocab_size  # size of word vocabulary
    self.embed_size = embed_size  # number of units in a word embedding
    self.hsn_size = hsn_size  # size of the source hidden vector
    self.hidden_size = hidden_size  # number of units in first LSTM
    self.gru = gru  # gru recurrent layer? (false = lstm)
    self.dropin = dropin  # prob. of dropping input units
    self.l2reg = l2reg  # weight regularisation penalty

    # Optimiser hyperparameters
    self.optimiser = optimiser  # optimisation method
    self.lr = lr
    self.beta1 = 0.9
    self.beta2 = 0.999
    self.epsilon = 1e-8
    self.clipnorm = clipnorm

    self.weights = weights  # initialise with checkpointed weights?
def buildKerasModel(self, use_sourcelang=False, use_image=True):
    '''
    Define the exact structure of your model here. We create an image
    description generation model by merging the VGG image features with
    a word embedding model, with an LSTM over the sequences.

    use_sourcelang: merge source-language hidden vectors into the RNN input
    use_image: merge VGG image features into the RNN input
    Returns a compiled Keras Model, optionally initialised from checkpointed
    weights.
    '''
    logger.info('Building Keras model...')
    text_input = Input(shape=(self.max_t, self.vocab_size), name='text')
    text_mask = Masking(mask_value=0., name='text_mask')(text_input)

    # Word embeddings
    wemb = TimeDistributed(Dense(output_dim=self.embed_size,
                                 input_dim=self.vocab_size,
                                 W_regularizer=l2(self.l2reg)),
                           name="w_embed")(text_mask)
    drop_wemb = Dropout(self.dropin, name="wemb_drop")(wemb)

    # Embed -> Hidden
    emb_to_hidden = TimeDistributed(Dense(output_dim=self.hidden_size,
                                          input_dim=self.vocab_size,
                                          W_regularizer=l2(self.l2reg)),
                                    name='wemb_to_hidden')(drop_wemb)

    if use_image:
        # Image 'embedding'
        logger.info('Using image features: %s', use_image)
        img_input = Input(shape=(self.max_t, 4096), name='img')
        img_emb = TimeDistributed(Dense(output_dim=self.hidden_size,
                                        input_dim=4096,
                                        W_regularizer=l2(self.l2reg)),
                                  name='img_emb')(img_input)
        img_drop = Dropout(self.dropin, name='img_embed_drop')(img_emb)

    if use_sourcelang:
        logger.info('Using source features: %s', use_sourcelang)
        logger.info('Size of source feature vectors: %d', self.hsn_size)
        src_input = Input(shape=(self.max_t, self.hsn_size), name='src')
        src_relu = Activation('relu', name='src_relu')(src_input)
        src_embed = TimeDistributed(Dense(output_dim=self.hidden_size,
                                          input_dim=self.hsn_size,
                                          W_regularizer=l2(self.l2reg)),
                                    name="src_embed")(src_relu)
        src_drop = Dropout(self.dropin, name="src_drop")(src_embed)

    # Input nodes for the recurrent layer.
    # NOTE(review): if both use_image and use_sourcelang are False,
    # recurrent_inputs is never bound and Merge below raises -- confirm
    # callers always enable at least one extra input source.
    rnn_input_dim = self.hidden_size
    if use_image and use_sourcelang:
        recurrent_inputs = [emb_to_hidden, img_drop, src_drop]
        recurrent_inputs_names = ['emb_to_hidden', 'img_drop', 'src_drop']
        inputs = [text_input, img_input, src_input]
    elif use_image:
        recurrent_inputs = [emb_to_hidden, img_drop]
        recurrent_inputs_names = ['emb_to_hidden', 'img_drop']
        inputs = [text_input, img_input]
    elif use_sourcelang:
        recurrent_inputs = [emb_to_hidden, src_drop]
        recurrent_inputs_names = ['emb_to_hidden', 'src_drop']
        inputs = [text_input, src_input]
    merged_input = Merge(mode='sum')(recurrent_inputs)

    # Recurrent layer
    if self.gru:
        logger.info("Building a GRU with recurrent inputs %s", recurrent_inputs_names)
        rnn = GRU(output_dim=self.hidden_size,
                  input_dim=rnn_input_dim,
                  return_sequences=True,
                  W_regularizer=l2(self.l2reg),
                  U_regularizer=l2(self.l2reg),
                  name='rnn')(merged_input)
    else:
        logger.info("Building an LSTM with recurrent inputs %s", recurrent_inputs_names)
        rnn = LSTM(output_dim=self.hidden_size,
                   input_dim=rnn_input_dim,
                   return_sequences=True,
                   W_regularizer=l2(self.l2reg),
                   U_regularizer=l2(self.l2reg),
                   name='rnn')(merged_input)

    output = TimeDistributed(Dense(output_dim=self.vocab_size,
                                   input_dim=self.hidden_size,
                                   W_regularizer=l2(self.l2reg),
                                   activation='softmax'),
                             name='output')(rnn)

    # Build the model before choosing an optimiser: previously Model() was
    # created only inside the 'adam' branch, so any other optimiser hit an
    # unbound 'model' (NameError) at compile time.
    model = Model(input=inputs, output=output)
    if self.optimiser == 'adam':
        # allow user-defined hyper-parameters for ADAM because it is
        # our preferred optimiser
        optimiser = Adam(lr=self.lr, beta_1=self.beta1,
                         beta_2=self.beta2, epsilon=self.epsilon,
                         clipnorm=self.clipnorm)
        model.compile(optimiser, {'output': 'categorical_crossentropy'})
    else:
        model.compile(self.optimiser, {'output': 'categorical_crossentropy'})

    if self.weights is not None:
        logger.info("... with weights defined in %s", self.weights)
        # Back up the checkpoint before loading, then initialise from it.
        shutil.copyfile("%s/weights.hdf5" % self.weights,
                        "%s/weights.hdf5.bak" % self.weights)
        model.load_weights("%s/weights.hdf5" % self.weights)

    #plot(model, to_file="model.png")

    return model
def buildHSNActivations(self, use_image=True):
'''
Define the exact structure of your model here. We create an image
description generation model by merging the VGG image features with
a word embedding model, with an LSTM over the sequences.
'''
logger.info('Building Keras model...')
text_input = Input(shape=(self.max_t, self.vocab_size), name='text')
text_mask = Masking(mask_value=0., name='text_mask')(text_input)
# Word embeddings
wemb = TimeDistributed(Dense(output_dim=self.embed_size,
input_dim=self.vocab_size,
W_regularizer=l2(self.l2reg)),
name="w_embed")(text_mask)
drop_wemb = Dropout(self.dropin, name="wemb_drop")(wemb)
# Embed -> Hidden
emb_to_hidden = TimeDistributed(Dense(output_dim=self.hidden_size,
input_dim=self.vocab_size,
W_regularizer=l2(self.l2reg)),
name='wemb_to_hidden')(drop_wemb)
if use_image:
# Image 'embedding'
logger.info('Using image features: %s', use_image)
img_input = Input(shape=(self.max_t, 4096), name='img')
img_emb = TimeDistributed(Dense(output_dim=self.hidden_size,
input_dim=4096,
W_regularizer=l2(self.l2reg)),
name='img_emb')(img_input)
img_drop = Dropout(self.dropin, name='img_embed_drop')(img_emb)
# Input nodes for the recurrent layer
rnn_input_dim = self.hidden_size
if use_image:
recurrent_inputs = [emb_to_hidden, img_drop]
recurrent_inputs_names = ['emb_to_hidden', 'img_drop']
inputs = [text_input, img_input]
merged_input = Merge(mode='sum')(recurrent_inputs)
# Recurrent layer
if self.gru:
logger.info("Building a GRU with recurrent inputs %s", recurrent_inputs_names)
rnn = GRU(output_dim=self.hidden_size,
input_dim=rnn_input_dim,
return_sequences=True,
W_regularizer=l2(self.l2reg),
U_regularizer=l2(self.l2reg),
name='rnn')(merged_input)
else:
logger.info("Building an LSTM with recurrent inputs %s", recurrent_inputs_names)
rnn = LSTM(output_dim=self.hidden_size,
input_dim=rnn_input_dim,
return_sequences=True,
W_regularizer=l2(self.l2reg),
U_regularizer=l2(self.l2reg),
name='rnn')(merged_input)
if self.optimiser == 'adam':
# allow user-defined hyper-parameters for ADAM because it is
# our preferred optimiser
optimiser = Adam(lr=self.lr, beta_1=self.beta1,
beta_2=self.beta2, epsilon=self.epsilon,
clipnorm=self.clipnorm)
model = Model(input=[text_input, img_input], output=rnn)
print(model.get_config())
model.compile(optimiser, {'rnn': 'categorical_crossentropy'})
else:
model.compile(self.optimiser, {'rnn': 'categorical_crossentropy'})
if self.weights is not None:
logger.info("... with weights defined in %s", self.weights)
# Initialise the weights of the model
shutil.copyfile("%s/weights.hdf5" % self.weights,
"%s/weights.hdf5.bak" % self.weights)
f = h5py.File("%s/weights.hdf5" % self.weights)
self.partial_load_weights(model, f)
f.close()
#plot(model, to_file="model.png")
return model
    def partial_load_weights(self, model, f):
        '''
        Keras does not seem to support partially loading weights from one
        model into another model. This function achieves the same purpose so
        we can serialise the final RNN hidden state to disk.

        :param model: Keras model whose weight-bearing layers receive values.
        :param f: open h5py.File handle of a Keras "new format" weights file.

        The last layer recorded in the checkpoint is deliberately skipped
        (see the `[:-1]` slice below); layers are then matched positionally,
        so the remaining saved layers must line up 1:1 with the model's
        weight-bearing layers.

        TODO: find / engineer a more elegant and general approach
        '''
        flattened_layers = model.layers
        # new file format
        # Only layers that actually own weights take part in the matching.
        filtered_layers = []
        for layer in flattened_layers:
            weights = layer.weights
            if weights:
                filtered_layers.append(layer)
        flattened_layers = filtered_layers
        # HDF5 attribute values are bytes; decode before using them as keys.
        layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
        filtered_layer_names = []
        for name in layer_names[:-1]:  # -1 so we clip out the output layer
            g = f[name]
            weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
            if len(weight_names):
                filtered_layer_names.append(name)
        layer_names = filtered_layer_names
        # After filtering, saved layers and model layers must match 1:1,
        # because the assignment below is purely positional.
        if len(layer_names) != len(flattened_layers):
            raise Exception('You are trying to load a weight file '
                            'containing ' + str(len(layer_names)) +
                            ' layers into a model with ' +
                            str(len(flattened_layers)) + ' layers.')
        # we batch weight value assignments in a single backend call
        # which provides a speedup in TensorFlow.
        weight_value_tuples = []
        for k, name in enumerate(layer_names):
            g = f[name]
            weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
            weight_values = [g[weight_name] for weight_name in weight_names]
            layer = flattened_layers[k]
            symbolic_weights = layer.weights
            # Each saved tensor is paired with the symbolic weight at the
            # same index; a count mismatch means the architectures differ.
            if len(weight_values) != len(symbolic_weights):
                raise Exception('Layer #' + str(k) +
                                ' (named "' + layer.name +
                                '" in the current model) was found to '
                                'correspond to layer ' + name +
                                ' in the save file. '
                                'However the new layer ' + layer.name +
                                ' expects ' + str(len(symbolic_weights)) +
                                ' weights, but the saved weights have ' +
                                str(len(weight_values)) +
                                ' elements.')
            weight_value_tuples += zip(symbolic_weights, weight_values)
        K.batch_set_value(weight_value_tuples)
class MRNN:
'''
TODO: port this model architecture to Keras 1.0.7
'''
def __init__(self, embed_size, hidden_size, vocab_size, dropin, optimiser,
l2reg, hsn_size=512, weights=None, gru=False,
clipnorm=-1, batch_size=None, t=None, lr=0.001):
self.max_t = t # Expected timesteps. Needed to build the Theano graph
# Model hyperparameters
self.vocab_size = vocab_size # size of word vocabulary
self.embed_size = embed_size # number of units in a word embedding
self.hsn_size = hsn_size # size of the source hidden vector
self.hidden_size = hidden_size # number of units in first LSTM
self.gru = gru # gru recurrent layer? (false = lstm)
self.dropin = dropin # prob. of dropping input units
self.l2reg = l2reg # weight regularisation penalty
# Optimiser hyperparameters
self.optimiser = optimiser # optimisation method
self.lr = lr
self.beta1 = 0.9
self.beta2 = 0.999
self.epsilon = 1e-8
self.clipnorm = clipnorm
self.weights = weights # initialise with checkpointed weights?
def buildKerasModel(self, use_sourcelang=False, use_image=True):
'''
Define the exact structure of your model here. We create an image
description generation model by merging the VGG image features with
a word embedding model, with an LSTM over the sequences.
The order in which these appear below (text, image) is _IMMUTABLE_.
(Needs to match up with input to model.fit.)
'''
logger.info('Building Keras model...')
logger.info('Using image features: %s', use_image)
logger.info('Using source language features: %s', use_sourcelang)
model = Graph()
model.add_input('text', input_shape=(self.max_t, self.vocab_size))
model.add_node(Masking(mask_value=0.), input='text', name='text_mask')
# Word embeddings
model.add_node(TimeDistributedDense(output_dim=self.embed_size,
input_dim=self.vocab_size,
W_regularizer=l2(self.l2reg)),
name="w_embed", input='text_mask')
model.add_node(Dropout(self.dropin),
name="w_embed_drop",
input="w_embed")
# Embed -> Hidden
model.add_node(TimeDistributedDense(output_dim=self.hidden_size,
input_dim=self.embed_size,
W_regularizer=l2(self.l2reg)),
name='embed_to_hidden', input='w_embed_drop')
recurrent_inputs = 'embed_to_hidden'
# Source language input
if use_sourcelang:
model.add_input('source', input_shape=(self.max_t, self.hsn_size))
model.add_node(Masking(mask_value=0.),
input='source',
name='source_mask')
model.add_node(TimeDistributedDense(output_dim=self.hidden_size,
input_dim=self.hsn_size,
W_regularizer=l2(self.l2reg)),
name="s_embed",
input="source_mask")
model.add_node(Dropout(self.dropin),
name="s_embed_drop",
input="s_embed")
recurrent_inputs = ['embed_to_hidden', 's_embed_drop']
# Recurrent layer
if self.gru:
model.add_node(GRU(output_dim=self.hidden_size,
input_dim=self.hidden_size,
return_sequences=True), name='rnn',
input=recurrent_inputs)
else:
model.add_node(LSTM(output_dim=self.hidden_size,
input_dim=self.hidden_size,
return_sequences=True), name='rnn',
input=recurrent_inputs)
# Image 'embedding'
model.add_input('img', input_shape=(self.max_t, 4096))
model.add_node(Masking(mask_value=0.),
input='img', name='img_mask')
model.add_node(TimeDistributedDense(output_dim=self.hidden_size,
input_dim=4096,
W_regularizer=l2(self.l2reg)),
name='i_embed', input='img_mask')
model.add_node(Dropout(self.dropin), name='i_embed_drop', input='i_embed')
# Multimodal layer outside the recurrent layer
model.add_node(TimeDistributedDense(output_dim=self.hidden_size,
input_dim=self.hidden_size,
W_regularizer=l2(self.l2reg)),
name='m_layer',
inputs=['rnn','i_embed_drop', 'embed_to_hidden'],
merge_mode='sum')
| |
<filename>src/pretix/plugins/banktransfer/payment.py<gh_stars>0
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: <NAME>
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
import json
import textwrap
from collections import OrderedDict
from decimal import Decimal
from django import forms
from django.core.exceptions import ValidationError
from django.http import HttpRequest
from django.template.loader import get_template
from django.utils.translation import gettext, gettext_lazy as _
from i18nfield.fields import I18nFormField, I18nTextarea
from i18nfield.forms import I18nTextInput
from i18nfield.strings import LazyI18nString
from localflavor.generic.forms import BICFormField, IBANFormField
from localflavor.generic.validators import IBANValidator
from pretix.base.models import Order, OrderPayment, OrderRefund
from pretix.base.payment import BasePaymentProvider
from pretix.plugins.banktransfer.templatetags.ibanformat import ibanformat
class BankTransfer(BasePaymentProvider):
identifier = 'banktransfer'
verbose_name = _('Bank transfer')
abort_pending_allowed = True
    @staticmethod
    def form_fields():
        """Plugin-specific settings fields for the bank-transfer provider.

        Returned as an OrderedDict because the order defines the order in
        which the fields are rendered in the settings form.  The SEPA
        fields carry data-display/required dependencies on the 'sepa'
        radio choice, so they are only shown/required for SEPA accounts.
        """
        return OrderedDict([
            # Organizer must acknowledge that payments are matched manually
            # or via imported bank statements.
            ('ack',
             forms.BooleanField(
                 label=_('I have understood that people will pay the ticket price directly to my bank account and '
                         'pretix cannot automatically know what payments arrived. Therefore, I will either mark '
                         'payments as complete manually, or regularly import a digital bank statement in order to '
                         'give pretix the required information.'),
                 required=True,
             )),
            ('bank_details_type', forms.ChoiceField(
                label=_('Bank account type'),
                widget=forms.RadioSelect,
                choices=(
                    ('sepa', _('SEPA bank account')),
                    ('other', _('Other bank account')),
                ),
                initial='sepa'
            )),
            # The four structured SEPA fields below are conditionally
            # displayed/required depending on the radio button above.
            ('bank_details_sepa_name', forms.CharField(
                label=_('Name of account holder'),
                widget=forms.TextInput(
                    attrs={
                        'data-display-dependency': '#id_payment_banktransfer_bank_details_type_0',
                        'data-required-if': '#id_payment_banktransfer_bank_details_type_0'
                    }
                ),
                required=False
            )),
            ('bank_details_sepa_iban', IBANFormField(
                label=_('IBAN'),
                required=False,
                widget=forms.TextInput(
                    attrs={
                        'data-display-dependency': '#id_payment_banktransfer_bank_details_type_0',
                        'data-required-if': '#id_payment_banktransfer_bank_details_type_0'
                    }
                ),
            )),
            ('bank_details_sepa_bic', BICFormField(
                label=_('BIC'),
                widget=forms.TextInput(
                    attrs={
                        'data-display-dependency': '#id_payment_banktransfer_bank_details_type_0',
                        'data-required-if': '#id_payment_banktransfer_bank_details_type_0'
                    }
                ),
                required=False
            )),
            ('bank_details_sepa_bank', forms.CharField(
                label=_('Name of bank'),
                widget=forms.TextInput(
                    attrs={
                        'data-display-dependency': '#id_payment_banktransfer_bank_details_type_0',
                        'data-required-if': '#id_payment_banktransfer_bank_details_type_0'
                    }
                ),
                required=False
            )),
            # Free-form, translatable account details (mandatory for
            # non-SEPA accounts; see settings_form_clean).
            ('bank_details', I18nFormField(
                label=_('Bank account details'),
                widget=I18nTextarea,
                help_text=_(
                    'Include everything else that your customers might need to send you a bank transfer payment. '
                    'If you have lots of international customers, they might need your full address and your '
                    'bank\'s full address.'),
                widget_kwargs={'attrs': {
                    'rows': '4',
                    'placeholder': _(
                        'For SEPA accounts, you can leave this empty. Otherwise, please add everything that '
                        'your customers need to transfer the money, e.g. account numbers, routing numbers, '
                        'addresses, etc.'
                    ),
                }},
                required=False
            )),
            ('invoice_immediately',
             forms.BooleanField(
                 label=_('Create an invoice for orders using bank transfer immediately if the event is otherwise '
                         'configured to create invoices after payment is completed.'),
                 required=False,
             )),
            ('public_name', I18nFormField(
                label=_('Payment method name'),
                widget=I18nTextInput,
                required=False
            )),
            # Payment-reference formatting options (see _code()).
            ('omit_hyphen', forms.BooleanField(
                label=_('Do not include hyphens in the payment reference.'),
                help_text=_('This is required in some countries.'),
                required=False
            )),
            ('include_invoice_number', forms.BooleanField(
                label=_('Include invoice number in the payment reference.'),
                required=False
            )),
            ('prefix', forms.CharField(
                label=_('Prefix for the payment reference'),
                required=False,
            )),
            ('pending_description', I18nFormField(
                label=_('Additional text to show on pending orders'),
                help_text=_('This text will be shown on the order confirmation page for pending orders in addition to '
                            'the standard text.'),
                widget=I18nTextarea,
                required=False,
            )),
            # Consumed by payment_refund_supported(): IBAN prefixes we must
            # never send refunds to.
            ('refund_iban_blocklist', forms.CharField(
                label=_('IBAN blocklist for refunds'),
                required=False,
                widget=forms.Textarea,
                help_text=_('Put one IBAN or IBAN prefix per line. The system will not attempt to send refunds to any '
                            'of these IBANs. Useful e.g. if you receive a lot of "forwarded payments" by a third-party payment '
                            'provider. You can also list country codes such as "GB" if you never want to send refunds to '
                            'IBANs from a specific country.')
            )),
        ])
@property
def public_name(self):
return str(self.settings.get('public_name', as_type=LazyI18nString) or self.verbose_name)
    @property
    def test_mode_message(self):
        # Shown during checkout in test mode: bank transfers cannot be
        # simulated, so the order is simply marked as paid in the backend.
        return _('In test mode, you can just manually mark this order as paid in the backend after it has been '
                 'created.')
    @property
    def requires_invoice_immediately(self):
        # Organizer opt-in: create the invoice at order time rather than
        # waiting for the (possibly much later) payment to arrive.
        return self.settings.get('invoice_immediately', False, as_type=bool)
@property
def settings_form_fields(self):
d = OrderedDict(list(super().settings_form_fields.items()) + list(BankTransfer.form_fields().items()))
d.move_to_end('bank_details', last=False)
d.move_to_end('bank_details_sepa_bank', last=False)
d.move_to_end('bank_details_sepa_bic', last=False)
d.move_to_end('bank_details_sepa_iban', last=False)
d.move_to_end('bank_details_sepa_name', last=False)
d.move_to_end('bank_details_type', last=False)
d.move_to_end('ack', last=False)
d.move_to_end('_enabled', last=False)
return d
def settings_form_clean(self, cleaned_data):
if cleaned_data.get('payment_banktransfer_bank_details_type') == 'sepa':
for f in ('bank_details_sepa_name', 'bank_details_sepa_bank', 'bank_details_sepa_bic', 'bank_details_sepa_iban'):
if not cleaned_data.get('payment_banktransfer_%s' % f):
raise ValidationError(
{'payment_banktransfer_%s' % f: _('Please fill out your bank account details.')})
else:
if not cleaned_data.get('payment_banktransfer_bank_details'):
raise ValidationError(
{'payment_banktransfer_bank_details': _('Please enter your bank account details.')})
return cleaned_data
def payment_form_render(self, request, total=None, order=None) -> str:
template = get_template('pretixplugins/banktransfer/checkout_payment_form.html')
ctx = {
'request': request,
'event': self.event,
'settings': self.settings,
'code': self._code(order) if order else None,
'details': self.settings.get('bank_details', as_type=LazyI18nString),
}
return template.render(ctx)
    def checkout_prepare(self, request, total):
        # No user input is required for a bank transfer, so checkout can
        # always proceed.
        return True
    def payment_prepare(self, request: HttpRequest, payment: OrderPayment):
        # Nothing to prepare: the customer just transfers the money later.
        return True
    def payment_is_valid_session(self, request):
        # No session state is kept for this provider, so it is always valid.
        return True
    def checkout_confirm_render(self, request, order=None):
        # The confirmation step shows the same content as the payment form.
        return self.payment_form_render(request, order=order)
    def order_pending_mail_render(self, order, payment) -> str:
        """Build the bank-details section of the 'payment pending' e-mail.

        For SEPA accounts, a structured "Account holder / IBAN / BIC / Bank"
        block is emitted first; the free-form 'bank_details' text is then
        appended, separated by a newline if both are present.
        """
        template = get_template('pretixplugins/banktransfer/email/order_pending.txt')
        bankdetails = []
        if self.settings.get('bank_details_type') == 'sepa':
            bankdetails += [
                _("Account holder"), ": ", self.settings.get('bank_details_sepa_name'), "\n",
                _("IBAN"), ": ", ibanformat(self.settings.get('bank_details_sepa_iban')), "\n",
                _("BIC"), ": ", self.settings.get('bank_details_sepa_bic'), "\n",
                _("Bank"), ": ", self.settings.get('bank_details_sepa_bank'),
            ]
        # Separator only needed when both the SEPA block and free-form text exist.
        if bankdetails and self.settings.get('bank_details', as_type=LazyI18nString):
            bankdetails.append("\n")
        # NOTE(review): appended unconditionally so the free-form details also
        # reach the template for non-SEPA accounts — confirm against upstream.
        bankdetails.append(self.settings.get('bank_details', as_type=LazyI18nString))
        ctx = {
            'event': self.event,
            'order': order,
            'code': self._code(order),
            'amount': payment.amount,
            # Lazy i18n pieces are stringified here; indent for the mail body.
            'details': textwrap.indent(''.join(str(i) for i in bankdetails), '    '),
        }
        return template.render(ctx)
def payment_pending_render(self, request: HttpRequest, payment: OrderPayment):
template = get_template('pretixplugins/banktransfer/pending.html')
ctx = {
'event': self.event,
'code': self._code(payment.order),
'order': payment.order,
'amount': payment.amount,
'settings': self.settings,
'pending_description': self.settings.get('pending_description', as_type=LazyI18nString),
'details': self.settings.get('bank_details', as_type=LazyI18nString),
}
return template.render(ctx)
def payment_control_render(self, request: HttpRequest, payment: OrderPayment) -> str:
warning = None
if not self.payment_refund_supported(payment):
warning = _("Invalid IBAN/BIC")
return self._render_control_info(request, payment.order, payment.info_data, warning=warning)
def _render_control_info(self, request, order, info_data, **extra_context):
template = get_template('pretixplugins/banktransfer/control.html')
ctx = {'request': request, 'event': self.event,
'code': self._code(order),
'payment_info': info_data, 'order': order,
**extra_context}
return template.render(ctx)
def _code(self, order):
prefix = self.settings.get('prefix', default='')
li = order.invoices.last()
invoice_number = li.number if self.settings.get('include_invoice_number', as_type=bool) and li else ''
code = " ".join((prefix, order.full_code, invoice_number)).strip(" ")
if self.settings.get('omit_hyphen', as_type=bool):
code = code.replace('-', '')
return code
def shred_payment_info(self, obj):
if not obj.info_data:
return
d = obj.info_data
d['reference'] = '█'
d['payer'] = '█'
d['_shredded'] = True
obj.info = json.dumps(d)
obj.save(update_fields=['info'])
@staticmethod
def norm(s):
return s.strip().upper().replace(" ", "")
def payment_refund_supported(self, payment: OrderPayment) -> bool:
if not all(payment.info_data.get(key) for key in ("payer", "iban")):
return False
try:
iban = self.norm(payment.info_data['iban'])
IBANValidator()(iban)
except ValidationError:
return False
else:
return not any(iban.startswith(b) for b in (self.settings.refund_iban_blocklist or '').splitlines() if b)
    def payment_partial_refund_supported(self, payment: OrderPayment) -> bool:
        # Partial refunds go through the same manual bank-transfer flow, so
        # the same conditions apply as for full refunds.
        return self.payment_refund_supported(payment)
def payment_control_render_short(self, payment: OrderPayment) -> str:
pi = payment.info_data or {}
r = pi.get('payer', '')
if pi.get('iban'):
if r:
r += ' / '
r += pi.get('iban')
if pi.get('bic'):
if r:
r += ' / '
r += pi.get('bic')
return r
def payment_presale_render(self, payment: OrderPayment) -> str:
pi = payment.info_data or {}
if self.payment_refund_supported(payment):
try:
iban = self.norm(pi['iban'])
return gettext('Bank account {iban}').format(
iban=iban[0:2] + '****' + iban[-4:]
)
except:
pass
return super().payment_presale_render(payment)
def execute_refund(self, refund: OrderRefund):
"""
We just keep a created refund object. It will be marked as done using the control view
for bank transfer refunds.
"""
if refund.info_data.get('iban'):
return # | |
<gh_stars>0
# Author: <NAME>, <NAME>
# Last Updated: 5/2/2005
#
# This tutorial shows how to detect and respond to collisions. It uses solids
# create in code and the egg files, how to set up collision masks, a traverser,
# and a handler, how to detect collisions, and how to dispatch function based
# on the collisions. All of this is put together to simulate a labyrinth-style
# game
import direct.directbase.DirectStart
from panda3d.core import CollisionTraverser,CollisionNode
from panda3d.core import CollisionHandlerQueue,CollisionRay
from panda3d.core import Material,LRotationf,NodePath
from panda3d.core import AmbientLight,DirectionalLight
from panda3d.core import TextNode
from panda3d.core import Vec3,Vec4,BitMask32
from direct.gui.OnscreenText import OnscreenText
from direct.showbase.DirectObject import DirectObject
from direct.interval.MetaInterval import Sequence,Parallel
from direct.interval.LerpInterval import LerpFunc
from direct.interval.FunctionInterval import Func,Wait
from direct.task.Task import Task
import sys
# Some constants for the program
ACCEL = 70          # Acceleration in ft/sec/sec applied by tilting the board
MAX_SPEED = 5       # Max ball speed in ft/sec
MAX_SPEED_SQ = MAX_SPEED ** 2  # Squared so speed checks can use lengthSquared()
                               # instead of the more expensive length()
UP = Vec3(0,0,1)    # We need this vector a lot, so it's better to have one
                    # instead of creating a new one every time we need it
class World(DirectObject):
    def __init__(self):
        """Build the labyrinth scene: on-screen text, maze and ball models,
        the collision system (traverser + queue handler), lighting, and
        finally call self.start() to place the ball and begin the game.

        NOTE(review): base/camera/render/loader are globals injected by
        importing direct.directbase.DirectStart at module load time.
        """
        # This code puts the standard title and instruction text on screen
        self.title = OnscreenText(text="Panda3D: Tutorial - Collision Detection",
                                  style=1, fg=(1,1,1,1),
                                  pos=(0.7,-0.95), scale = .07)
        self.instructions = OnscreenText(text="Mouse pointer tilts the board",
                                         pos = (-1.3, .95), fg=(1,1,1,1),
                                         align = TextNode.ALeft, scale = .05)
        self.accept("escape", sys.exit)  # Escape quits
        base.disableMouse()              # Disable mouse-based camera control
        camera.setPosHpr(0, 0, 25, 0, -90, 0)  # Place the camera looking down
        # Load the maze and place it in the scene
        self.maze = loader.loadModel("models/maze")
        self.maze.reparentTo(render)
        # Most times, you want collisions to be tested against invisible geometry
        # rather than every polygon. This is because testing against every polygon
        # in the scene is usually too slow. You can have simplified or approximate
        # geometry for the solids and still get good results.
        #
        # Sometimes you'll want to create and position your own collision solids in
        # code, but it's often easier to have them built automatically. This can be
        # done by adding special tags into an egg file. Check maze.egg and ball.egg
        # and look for lines starting with <Collide>. The part in brackets tells
        # Panda exactly what to do. Polyset means to use the polygons in that group
        # as solids, while Sphere tells panda to make a collision sphere around them
        # Keep means to keep the polygons in the group as visible geometry (good
        # for the ball, not for the triggers), and descend means to make sure that
        # the settings are applied to any subgroups.
        #
        # Once we have the collision tags in the models, we can get to them using
        # NodePath's find command
        # Find the collision node named wall_collide
        self.walls = self.maze.find("**/wall_collide")
        # Collision objects are sorted using BitMasks. BitMasks are ordinary numbers
        # with extra methods for working with them as binary bits. Every collision
        # solid has both a from mask and an into mask. Before Panda tests two
        # objects, it checks to make sure that the from and into collision masks
        # have at least one bit in common. That way things that shouldn't interact
        # won't. Normal model nodes have collision masks as well. By default they
        # are set to bit 20. If you want to collide against actual visible polygons,
        # set a from collide mask to include bit 20
        #
        # For this example, we will make everything we want the ball to collide with
        # include bit 0
        self.walls.node().setIntoCollideMask(BitMask32.bit(0))
        # CollisionNodes are usually invisible but can be shown. Uncomment the next
        # line to see the collision walls
        # self.walls.show()
        # We will now find the triggers for the holes and set their masks to 0 as
        # well. We also set their names to make them easier to identify during
        # collisions
        self.loseTriggers = []
        for i in range(6):
            trigger = self.maze.find("**/hole_collide" + str(i))
            trigger.node().setIntoCollideMask(BitMask32.bit(0))
            trigger.node().setName("loseTrigger")
            self.loseTriggers.append(trigger)
            # Uncomment this line to see the triggers
            # trigger.show()
        # Ground_collide is a single polygon on the same plane as the ground in the
        # maze. We will use a ray to collide with it so that we will know exactly
        # what height to put the ball at every frame. Since this is not something
        # that we want the ball itself to collide with, it has a different
        # bitmask (bit 1 instead of bit 0).
        self.mazeGround = self.maze.find("**/ground_collide")
        self.mazeGround.node().setIntoCollideMask(BitMask32.bit(1))
        # Load the ball and attach it to the scene
        # It is on a root dummy node so that we can rotate the ball itself without
        # rotating the ray that will be attached to it
        self.ballRoot = render.attachNewNode("ballRoot")
        self.ball = loader.loadModel("models/ball")
        self.ball.reparentTo(self.ballRoot)
        # Find the collision sphere for the ball which was created in the egg file
        # Notice that it has a from collision mask of bit 0, and an into collision
        # mask of no bits. This means that the ball can only cause collisions, not
        # be collided into
        self.ballSphere = self.ball.find("**/ball")
        self.ballSphere.node().setFromCollideMask(BitMask32.bit(0))
        self.ballSphere.node().setIntoCollideMask(BitMask32.allOff())
        # Now we create a ray to start above the ball and cast down. This is to
        # determine the height the ball should be at and the angle the floor is
        # tilting. We could have used the sphere around the ball itself, but it
        # would not be as reliable
        self.ballGroundRay = CollisionRay()      # Create the ray
        self.ballGroundRay.setOrigin(0,0,10)     # Set its origin (10 ft above)
        self.ballGroundRay.setDirection(0,0,-1)  # And its direction (straight down)
        # Collision solids go in CollisionNode
        self.ballGroundCol = CollisionNode('groundRay')  # Create and name the node
        self.ballGroundCol.addSolid(self.ballGroundRay)  # Add the ray
        self.ballGroundCol.setFromCollideMask(BitMask32.bit(1))  # Set its bitmasks
        self.ballGroundCol.setIntoCollideMask(BitMask32.allOff())
        # Attach the node to the ballRoot so that the ray is relative to the ball
        # (it will always be 10 feet over the ball and point down)
        self.ballGroundColNp = self.ballRoot.attachNewNode(self.ballGroundCol)
        # Uncomment this line to see the ray
        # self.ballGroundColNp.show()
        # Finally, we create a CollisionTraverser. CollisionTraversers are what
        # do the job of calculating collisions
        self.cTrav = CollisionTraverser()
        # Collision traversers tell collision handlers about collisions, and then
        # the handler decides what to do with the information. We are using a
        # CollisionHandlerQueue, which simply creates a list of all of the
        # collisions in a given pass. There are more sophisticated handlers like
        # one that sends events and another that tries to keep collided objects
        # apart, but the results are often better with a simple queue
        self.cHandler = CollisionHandlerQueue()
        # Now we add the collision nodes that can create a collision to the
        # traverser. The traverser will compare these to all others nodes in the
        # scene. There is a limit of 32 CollisionNodes per traverser
        # We add the collider, and the handler to use as a pair
        self.cTrav.addCollider(self.ballSphere, self.cHandler)
        self.cTrav.addCollider(self.ballGroundColNp, self.cHandler)
        # Collision traversers have a built in tool to help visualize collisions.
        # Uncomment the next line to see it.
        # self.cTrav.showCollisions(render)
        # This section deals with lighting for the ball. Only the ball was lit
        # because the maze has static lighting pregenerated by the modeler
        ambientLight = AmbientLight("ambientLight")
        ambientLight.setColor(Vec4(.55, .55, .55, 1))
        directionalLight = DirectionalLight("directionalLight")
        directionalLight.setDirection(Vec3(0, 0, -1))
        directionalLight.setColor(Vec4(0.375, 0.375, 0.375, 1))
        directionalLight.setSpecularColor(Vec4(1, 1, 1, 1))
        self.ballRoot.setLight(render.attachNewNode(ambientLight))
        self.ballRoot.setLight(render.attachNewNode(directionalLight))
        # This section deals with adding a specular highlight to the ball to make
        # it look shiny
        m = Material()
        m.setSpecular(Vec4(1,1,1,1))
        m.setShininess(96)
        self.ball.setMaterial(m, 1)
        # Finally, we call start for more initialization
        self.start()
def start(self):
# The maze model also has a locator in it for where to start the ball
# To access it we use the find command
startPos = self.maze.find("**/start").getPos()
self.ballRoot.setPos(startPos) # Set the ball in the starting position
self.ballV = Vec3(0,0,0) # Initial velocity is 0
self.accelV = Vec3(0,0,0) # Initial acceleration is 0
# For a traverser to actually do collisions, you need to | |
number *)
# locate begin
# scan begin
# function
# | NUM (m, n) -> Some (Num (m, n))
# | _ -> None
# end
# end ;
#
# locate (kwd "TRUE" <!> Internal B.TRUE) ;
# locate (kwd "FALSE" <!> Internal B.FALSE) ;
# locate (kwd "BOOLEAN" <!> Internal B.BOOLEAN) ;
# locate (kwd "STRING" <!> Internal B.STRING) ;
#
# (* locate (punct "@" <!> At) ; *)
# ]
# end
def reduced_expr(b):
    """Generator yielding a parser for the atomic 'reduced' expression forms:
    a parenthesised expression, string/number literals, and the TRUE / FALSE /
    BOOLEAN / STRING keywords.  Port of the OCaml `reduced_expr` combinator
    quoted in the comment block above.
    """
    def choices():
        # '(' expr ')'
        def f():
            return (
                intf.punct("(")
                << second
                >> use(expr(b))
                << first
                >> intf.punct(")")
                << apply
                >> (lambda e: tla_ast.Parens(e, tla_ast.Syntax()))
            )
        # The one-token lookahead tuple makes the dispatcher try this branch
        # only when the next token is '('.
        yield ((tokens.PUNCT("("),), f)
        # STR literal
        yield intf.locate(intf.scan(string_scan))
        # NUM literal
        yield intf.locate(intf.scan(number_scan))
        # 'TRUE'
        yield intf.locate(
            intf.kwd("TRUE") << bang >> tla_ast.TRUE()  # tla_ast.Internal(
        )
        # 'FALSE'
        yield intf.locate(
            intf.kwd("FALSE") << bang >> tla_ast.FALSE()  # tla_ast.Internal(
        )
        # 'BOOLEAN'
        yield intf.locate(
            intf.kwd("BOOLEAN") << bang >> tla_ast.BOOLEAN()  # tla_ast.Internal(
        )
        # 'STRING'
        yield intf.locate(
            intf.kwd("STRING") << bang >> tla_ast.STRING()  # tla_ast.Internal(
        )
    # Re-yield a fresh choice on every pull, as the combinators are one-shot.
    while True:
        yield choice_iter(choices)
def apply_sub_expr(prs):
    """Build the AST node for a parsed identifier plus optional subreference.

    `prs` is a (identifier, subref-or-None) pair; the identifier becomes an
    Opaque node, wrapped in a Bang node when a subreference is present.
    """
    name, subreference = prs
    opaque = tla_ast.Opaque(name)
    if subreference is None:
        return opaque
    return tla_ast.Bang(opaque, subreference)
# and sub_expr b = lazy begin
# choice [
# locate begin
# hint <*> optional (use (subref b))
# end <$> begin
# fun prs ->
# let (id, sr) = prs.core in
# let e = Opaque id.core @@ id in
# match sr with
# | None -> e
# | Some sr -> Bang (e, sr) @@ prs
# end ;
#
# use (atomic_expr b) ;
# ]
# end
def sub_expr(b):
    """Generator yielding a parser for an identifier with an optional
    subreference (built by apply_sub_expr), falling back to an atomic
    expression.  Port of the OCaml `sub_expr` combinator quoted above.
    """
    while True:
        yield choice(
            [
                # identifier [subref] -> Opaque / Bang node
                intf.locate(hint() << times >> optional(use(subref(b))))
                << apply
                >> apply_sub_expr,
                # causes infinite recursion
                # (i.e. using expr(b) here would; hence atomic_expr)
                use(atomic_expr(b))
                # use(expr(b))
            ]
        )
# and bull_at bull where =
# P.scan begin
# fun t ->
# let module Loc = Loc in
# if t.form = OP bull && Loc.column t.loc.start = where
# then Some ()
# else None
# end
def bull_at(bull, where):
    """Scanner matching the bullet operator `bull` ('/\\' or '\\/') only when
    it starts exactly at source column `where`.

    Returns a pco.scan parser; the token predicate yields () on a match and
    None otherwise (None tells the scanner to reject the token).
    """
    def match_token(t):
        form = t.form
        is_bullet = (
            isinstance(form, tokens.OP)
            and form.string == bull
            and t.loc.start.column == where
        )
        return tuple() if is_bullet else None
    return pco.scan(match_token)
# and bulleted_list b = lazy begin
# lookahead (scan begin
# function
# | OP "/\\" -> Some "/\\"
# | OP "\\/" -> Some "\\/"
# | _ -> None
# end)
# >>+ fun bt loc ->
# get >>= fun px ->
# let module Loc = Loc in
# let bwhere = Loc.column loc.start in
# let newledge = { px with ledge = Loc.column loc.start + 1 } in
# star1 (bull_at bt bwhere >>> using newledge (use (expr b)))
# <$> (fun es -> match bt with
# | "\\/" -> List (Or, es)
# | _ -> List (And, es))
# end
def bulleted_list(b):
    """Iterator of parsers for a bulleted (/\\ or \\/) list of expressions.

    Port of the OCaml `bulleted_list` above: a lookahead learns which
    bullet starts the list; one expression is then parsed per bullet at
    that same column, with the left edge moved one column right.  The
    items are folded into `List(And(), es)` for /\\ and
    `List(Or(), es)` for \\/.
    """
    def f(op):
        # Lookahead classifier: map a bullet operator token to its
        # string, anything else to None (no match).
        if isinstance(op, tokens.OP) and op.string == "/\\":
            return "/\\"
        elif isinstance(op, tokens.OP) and op.string == "\\/":
            return "\\/"
        else:
            return None
    def g(bt, loc, px):
        # `bt` is the bullet string, `loc` the location of the first
        # bullet, `px` the current parser context.
        bwhere = loc.start.column
        # new left edge: one column to the right of the bullet
        newledge = intf.Pcx(bwhere + 1, px.clean)
        return (
            star1(bull_at(bt, bwhere) << second >> using(newledge, use(expr(b))))
            << apply
            >> (
                lambda es: tla_ast.List(tla_ast.Or(), es)
                if bt == "\\/"
                else tla_ast.List(tla_ast.And(), es)
            )
        )
    while True:
        yield lookahead(intf.scan(f)) << shift_plus >> (
            lambda bt, loc: get() << shift_eq >> functools.partial(g, bt, loc)
        )
# and operator b = lazy begin
# choice [
# locate begin
# kwd "LAMBDA" >*> sep1 (punct ",") hint
# <**> (punct ":" >>> use (expr b))
# <$> (fun (vs, e) -> Lambda (
# List.map (fun v -> (v, Shape_expr)) vs,
# e))
# end ;
#
# locate begin
# choice [
# anyident ;
# scan begin
# function
# | ST (`Num n, l, 0) -> Some (Printf.sprintf "<%d>%s" n l)
# | _ -> None
# end ;
# ] <$> (fun v -> Opaque v)
# end ;
#
# punct "(" >>> use (operator b) <<< punct ")" ;
# ]
# end
def operator(b):
    """Iterator of parsers for an operator reference.

    Port of the OCaml `operator` above.  `choices` yields
    (lookahead-token-tuple, parser-factory) pairs dispatched by
    `choice_iter`: a LAMBDA abstraction, an identifier or step-number
    token wrapped as `Opaque`, or a parenthesized operator.
    """
    # <$> (fun (vs, e) -> Lambda (
    #         List.map (fun v -> (v, Shape_expr)) vs,
    #         e))
    def apply_lambda(vs_e):
        # Pair each LAMBDA parameter with the default `ShapeExpr` shape.
        vs, e = vs_e
        hint_shapes = [(v, tla_ast.ShapeExpr()) for v in vs]
        return tla_ast.Lambda(hint_shapes, e)
    def scan_step():
        # Accept a step-number token and render it as the string
        # "<n>label" (mirrors Printf.sprintf "<%d>%s" in the OCaml).
        def f(form):
            if isinstance(form, tokens.ST) and isinstance(form.kind, tokens.StepNum):
                n = form.kind.value
                m = form.string
                return "<" + n + ">" + m
            else:
                return None
        return intf.scan(f)
    def choices():
        # locate begin
        #     kwd "LAMBDA" >*> sep1 (punct ",") hint
        #     <**> (punct ":" >>> use (expr b))
        #     <$> (fun (vs, e) -> Lambda (
        #             List.map (fun v -> (v, Shape_expr)) vs,
        #             e))
        # end ;
        # RULE: 'LAMBDA' hint (, hint)* ':' expr
        def f():
            return intf.locate(
                intf.kwd("LAMBDA")
                << second_commit
                >> sep1(intf.punct(","), hint())
                << times2
                >> (intf.punct(":") << second >> use(expr(b)))
                << apply
                >> apply_lambda
            )
        yield ((tokens.KWD("LAMBDA"),), f)
        # locate begin
        #     choice [
        #         anyident ;
        #         scan begin
        #             function
        #             | ST (`Num n, l, 0) ->
        #                 Some (Printf.sprintf "<%d>%s" n l)
        #             | _ -> None
        #         end ;
        #     ] <$> (fun v -> Opaque v)
        # end ;
        # RULE: [anyident | ST ]
        def f():
            return intf.locate(
                choice([intf.anyident(), scan_step()])
                << apply
                >> (lambda v: tla_ast.Opaque(v))
            )
        yield (
            (
                tokens.ID,
                tokens.ST,
            ),
            f,
        )
        # punct "(" >>> use (operator b) <<< punct ")" ;
        # RULE: '(' operator ')'
        def f():
            return (
                intf.punct("(")
                << second
                >> use(operator(b))
                << first
                >> intf.punct(")")
            )
        yield ((tokens.PUNCT("("),), f)
    while True:
        yield choice_iter(choices)
# fun bss ->
# let vss = List.map begin
# fun (vs, dom) -> match dom with
# | None ->
# List.map (fun v -> (v, Constant, No_domain)) vs
# | Some dom ->
# (List.hd vs, Constant, Domain dom)
# :: List.map (fun v -> (v, Constant, Ditto)) (List.tl vs)
# end bss in
# List.concat vss
def apply_bounds(bss):
    """Flatten parsed bound groups into (hint, Constant, domain) triples.

    Each element of `bss` is `(vs, dom)`.  Without a domain every hint
    gets `NoDomain`; with one, the first hint carries `Domain(dom)` and
    the remaining hints carry `Ditto` (same OCaml fold as above).
    """
    triples = []
    for vs, dom in bss:
        if dom is None:
            triples.extend(
                (v, tla_ast.Constant(), tla_ast.NoDomain()) for v in vs
            )
        else:
            head, tail = vs[0], vs[1:]
            triples.append((head, tla_ast.Constant(), tla_ast.Domain(dom)))
            triples.extend(
                (v, tla_ast.Constant(), tla_ast.Ditto()) for v in tail
            )
    return triples
# and bounds b = lazy begin
# sep1 (punct ",") (
# sep1 (punct ",") hint
# <*> optional (infix "\\in" >*> use (expr b))
# )
# <$> begin
# fun bss ->
# let vss = List.map begin
# fun (vs, dom) -> match dom with
# | None ->
# List.map (fun v -> (v, Constant, No_domain)) vs
# | Some dom ->
# (List.hd vs, Constant, Domain dom)
# :: List.map (fun v -> (v, Constant, Ditto)) (List.tl vs)
# end bss in
# List.concat vss
# end
# end
def bounds(b):
    """Iterator of parsers for constant declarations with optional domains.

    Port of the OCaml `bounds` above: comma-separated groups of hints,
    each group optionally followed by `\\in <expr>`; the parse result is
    flattened into (hint, Constant, domain) triples by `apply_bounds`.
    """
    # RULE: hint (',' hint)* '\\in' expr
    #       (hint (',' hint)* '\\in' expr)*
    while True:
        yield sep1(
            intf.punct(","),
            sep1(intf.punct(","), hint())
            << times2
            >> optional(intf.infix("\\in") << second_commit >> use(expr(b))),
        ) << apply >> apply_bounds
def apply_boundeds(bss):
    """Flatten parsed bounded groups into (hint, Constant, domain) triples.

    Like `apply_bounds`, but every group carries a domain: the first
    hint of a group gets `Domain(dom)`, the rest get `Ditto`.
    """
    triples = []
    for vs, dom in bss:
        triples.append((vs[0], tla_ast.Constant(), tla_ast.Domain(dom)))
        for v in vs[1:]:
            triples.append((v, tla_ast.Constant(), tla_ast.Ditto()))
    return triples
# and boundeds b = lazy begin
# sep1 (punct ",") (
# sep1 (punct ",") hint <*> (infix "\\in" >*> use (expr b))
# )
# <$> begin
# fun bss ->
# let vss = List.map begin
# fun (vs, dom) ->
# (List.hd vs, Constant, Domain dom)
# :: List.map (fun v -> (v, Constant, Ditto)) (List.tl vs)
# end bss in
# List.concat vss
# end
# end
def boundeds(b):
    """Iterator of parsers for constant declarations with mandatory domains.

    Port of the OCaml `boundeds` above: like `bounds`, but each group of
    hints must be followed by `\\in <expr>`; flattening is done by
    `apply_boundeds`.
    """
    while True:
        yield sep1(
            # hint [, hint]* \in expr [, hint [, hint]*]*
            intf.punct(","),
            sep1(intf.punct(","), hint())
            << times
            >> (intf.infix("\\in") << second_commit >> use(expr(b))),
        ) << apply >> apply_boundeds
# (* pragmas *)
#
# and float =
# number <$> (fun (m, n) ->
# float_of_string (Printf.sprintf "%s.%s0" m n))
def float_():
    """Iterator of parsers for a decimal number token.

    NOTE(review): the OCaml original always builds "<int>.<frac>0" and
    converts it with `float_of_string`; here the token pair is rendered
    as a *string*, and the fractional part is compared against the
    literal string "None" (not the `None` object) — confirm this matches
    how `intf.number` encodes a missing fractional part.
    """
    while True:
        yield (
            intf.number()
            << apply
            >> (lambda m_n: f"{m_n[0]}.{m_n[1]}" if m_n[1] == "None" else f"{m_n[0]}")
        )
# and read_method_by = lazy begin
# ident "by" >>> use read_method_args <$> (fun l -> l)
# end
def read_method_by():
    """Iterator of parsers for the proof-method pragma form `by <args>`."""
    while True:
        yield (intf.ident("by") << second >> use(read_method_args()))
# (* The "set" syntax has been deprecated. *)
# and read_method_set = lazy begin
# ident "set" >>> use read_method_args <$> (fun l -> l)
# end
#
# and read_new_method = lazy begin
# pragma (star (choice [use read_method_by; use read_method_set]))
# end
def read_new_method():
    """Iterator of parsers for a proof-method pragma.

    Only the `by` form is supported; the deprecated `set` form present
    in the OCaml source (see comment above) is intentionally omitted.
    """
    while True:
        yield intf.pragma(star(use(read_method_by())))
# and read_method_args = lazy begin
# | |
import argparse
import pandas
def mean_std_table(
        datasets,
        dataset_labels,
        metrics,
        metric_labels,
        model_order,
        model_labels,
        all_data,
        output_file):
    """Write a LaTeX table of per-model mean/std performance to a file.

    Args:
        datasets: dataset identifiers as they appear in ``all_data["dataset"]``.
        dataset_labels: printable dataset names, parallel to ``datasets``.
        metrics: the two metric column names in ``all_data`` to report.
        metric_labels: printable metric names, parallel to ``metrics``.
        model_order: model identifiers in the desired column order.
        model_labels: mapping from model identifier to printable name.
        all_data: pandas DataFrame with columns ``model``, ``dataset``
            and the two ``metrics``.
        output_file: path of the ``.tex`` file to (over)write.

    Fixes vs. the previous revision: the output file is now closed via a
    context manager (it was left open), invalid string escapes such as
    ``\\h`` are properly doubled (same bytes written), and the unused
    local ``stats`` was removed.
    """
    # keep relevant columns only, so the groupby aggregations are numeric
    all_data = all_data[["model", "dataset", metrics[0], metrics[1]]]
    # per (model, dataset) statistics
    mean = all_data.groupby(["model", "dataset"], as_index=False).mean()
    std = all_data.groupby(["model", "dataset"]).std().reset_index()
    with open(output_file, "w") as out:
        # write table headers
        out.write("\\begin{table}[h!]\n")
        out.write("  \\begin{center}\n")
        out.write("  \\begin{tabular}{\n")
        out.write("    l\n")
        out.write("    p{1cm}\n")
        # four spaced centered columns, then a final plain one
        for _ in range(4):
            out.write("    c@{\\hspace{0.5cm}}\n")
        out.write("    c\n")
        out.write("  }\n")
        out.write("  \\toprule\n")
        out.write("  & & {")
        for model in model_order:
            if model == model_order[-1]:
                out.write(model_labels[model] + "} \\\\ \\midrule\n")
            else:
                out.write(model_labels[model] + "} & {")
        # populate table: one block of rows per dataset
        for dataset, dataset_label in zip(datasets, dataset_labels):
            out.write("    \\multirow{5}*{\\begin{turn}{90}\\emph{"
                      + dataset_label + "}\\end{turn}}\n")
            out.write("    \\\\[0.2cm]\n")
            for metric, metric_label in zip(metrics, metric_labels):
                out.write("    & " + metric_label + " & ")
                for model in model_order:
                    entry_mean = mean.loc[(mean["model"] == model)
                                          & (mean["dataset"] == dataset)]
                    entry_std = std.loc[(std["model"] == model)
                                        & (std["dataset"] == dataset)]
                    # report percentages with one decimal place
                    out.write("{:.1f}$\\pm${:.1f}".format(
                        round(entry_mean[metric].item() * 100, 1),
                        round(entry_std[metric].item() * 100, 1))
                    )
                    if model == model_order[-1]:
                        out.write(" \\\\")
                        if metric == metrics[-1]:
                            if dataset == datasets[-1]:
                                out.write("\n    \\\\[0.2cm] \n    \\bottomrule\n")
                            else:
                                out.write("\n    \\\\[0.2cm] \n    \\midrule\n")
                        else:
                            out.write("\n")
                    else:
                        out.write(" & ")
        # write end of table
        out.write("  \\end{tabular}\n")
        out.write("  \\end{center}\n")
        out.write("  \\caption{Mean and standard deviation of the performance "
                  "on validation data of the best performing models.}\n")
        out.write("\\end{table}\n\n")
def hyperparameters_table(
        datasets,
        dataset_labels,
        model_order,
        model_labels,
        hyperparameters,
        hyperparameter_labels,
        metric,
        metric_label,
        all_trials,
        best_trials,
        output_file,
        appendix,
        table_label,
        bayes):
    """Write a LaTeX table of best-trial hyperparameter values to a file.

    For each dataset and hyperparameter, prints the value used by each
    model's best trial (by ``metric``); unless ``bayes`` is set, it also
    prints the delta in ``metric`` of the best trial with a *different*
    value of that hyperparameter.  ``appendix`` selects a wider layout
    (sidewaystable, far-left rows, multicolumn section headers);
    ``bayes`` restricts trials to Bayesian-optimization folders
    (``"-bo"`` in the folder name) and drops the delta columns.

    Fixes vs. the previous revision: the output file is closed at the
    end (it was left open), the best-trials mean uses
    ``numeric_only=True`` (string columns such as ``folder`` break
    ``GroupBy.mean`` on pandas >= 2.0), invalid string escapes such as
    ``\\m``/``\\e``/``\\h`` are doubled (same bytes written), and the
    unused ``count`` aggregation was removed.
    """
    # take either all Bayes trials, or none
    if bayes:
        all_trials = all_trials.loc[all_trials['folder'].str.contains("-bo")]
    else:
        all_trials = all_trials.loc[~all_trials['folder'].str.contains("-bo")]
    # set printable values of some hyperparameters
    # floats with printable will not be treated as float, meaning it will show a delta
    print_values = {}
    print_values["train_type"] = {
        "negative_sampling": "NegSamp",
        "1vsAll": "1vsAll",
        "KvsAll": "KvsAll",
    }
    print_values["reciprocal"] = {
        0: "No",
        1: "Yes"
    }
    print_values["emb_regularize_p"] = {
        0.0: "None",
        1.0: "L1",
        2.0: "L2",
        3.0: "L3"
    }
    print_values["emb_regularize_weighted"] = {
        0: "No",
        1: "Yes",
    }
    print_values["transe_l_norm"] = {
        1: "L1",
        2: "L2",
    }
    print_values["transe_normalize_e"] = {
        -1.0: "No",
        1.0: "L1",
        2.0: "L2",
    }
    print_values["transe_normalize_r"] = {
        -1.0: "No",
        1.0: "L1",
        2.0: "L2",
    }
    print_values["emb_initialize"] = {
        "normal_": "Normal",
        "uniform_": "Unif.",
        "xavier_normal_": "XvNorm",
        "xavier_uniform_": "XvUnif"
    }
    print_values["train_loss"] = {
        "kl": "CE",
        "bce": "BCE",
        "margin_ranking": "MR"
    }
    # set rounding for floats (defaults to 2 if not determined here)
    # Not a pretty solution, couldn't quickly parameterize {:.2f}
    round_5 = ["train_lr", "emb_initialize_normal_std"]
    round_0 = ["num_negs_s", "num_negs_o"]
    scientific = ["emb_e_regularize_weight", "emb_r_regularize_weight"]
    # set compabitility between hyperparameters (determines when hyperparameter should be printed)
    # {attribute_1: (attribute_2, [list of values])}
    # Show attribute_1 iff value of attribute_2 is in list of values
    compatibility = {
        "num_negs_s":("train_type", ["negative_sampling"]),
        "num_negs_o":("train_type", ["negative_sampling"]),
        "label_smoothing":("train_type", ["KvsAll"]),
        "margin":("train_loss", ["margin_ranking"]),
        "transe_l_norm":("model", ["transe"]),
        "transe_normalize_e":("model", ["transe"]),
        "transe_normalize_r":("model", ["transe"]),
        "conve_projection_dropout":("model", ["conve"]),
        "conve_feature_map_dropout":("model", ["conve"]),
        "emb_initialize_normal_std":("emb_initialize", ["normal_"]),
        "emb_initialize_uniform_interval":("emb_initialize", ["uniform_"]),
        "emb_e_regularize_weight":("emb_regularize_p", [1, 2, 3]),
        "emb_r_regularize_weight":("emb_regularize_p", [1, 2, 3])
    }
    # set hyperparameters on the far left if table from appendix
    far_left_params = [
        "emb_e_dim",
        "train_type",
        "train_loss",
        "train_optimizer",
        "emb_regularize_p",
        "emb_initialize",
    ]
    # set hyperparameters that trigger a multicolumn row before them
    multicol_params = {
        "emb_e_dropout":"Dropout",
        "transe_normalize_e":"Embedding normalization (TransE)"
    }
    # open output_file (closed explicitly at the end of this function)
    output_file = open(output_file, "w")
    # write table headers
    if appendix and not bayes:
        output_file.write("\\begin{sidewaystable}[h!]\n")
    else:
        output_file.write("\\begin{table}[t]\n")
    output_file.write("  \\begin{center}\n")
    output_file.write("  \\begin{tabular}{\n")
    if appendix:
        if not bayes:
            output_file.write("    l@{\\hspace{0.2cm}}\n")
            output_file.write("    l@{\\hspace{-0.2cm}}\n")
            output_file.write("    r@{\\hspace{0.2cm}}\n")
            output_file.write("    c@{\\hspace{0.2cm}}\n")
            output_file.write("    r@{\\hspace{0.2cm}}\n")
            output_file.write("    c@{\\hspace{0.2cm}}\n")
            output_file.write("    r@{\\hspace{0.2cm}}\n")
            output_file.write("    c@{\\hspace{0.2cm}}\n")
            output_file.write("    r@{\\hspace{0.2cm}}\n")
            output_file.write("    c@{\\hspace{0.2cm}}\n")
            output_file.write("    r@{\\hspace{0.2cm}}\n")
            output_file.write("    c")
        else:
            output_file.write("    l@{\\hspace{0.2cm}}\n")
            output_file.write("    l@{\\hspace{-0.2cm}}\n")
            output_file.write("    r@{\\hspace{0.1cm}}\n")
            output_file.write("    c@{\\hspace{0.1cm}}\n")
            output_file.write("    r@{\\hspace{0.1cm}}\n")
            output_file.write("    c@{\\hspace{0.1cm}}\n")
            output_file.write("    r@{\\hspace{0.1cm}}\n")
            output_file.write("    c@{\\hspace{0.1cm}}\n")
            output_file.write("    r@{\\hspace{0.1cm}}\n")
            output_file.write("    c@{\\hspace{0.1cm}}\n")
            output_file.write("    r@{\\hspace{0.1cm}}\n")
            output_file.write("    c")
    else:
        output_file.write("    l@{\\hspace{0.2cm}}\n")
        output_file.write("    l@{\\hspace{-0.05cm}}\n")
        output_file.write("    r@{\\hspace{0.1cm}}\n")
        output_file.write("    c@{\\hspace{0.2cm}}\n")
        output_file.write("    r@{\\hspace{0.1cm}}\n")
        output_file.write("    c@{\\hspace{0.2cm}}\n")
        output_file.write("    r@{\\hspace{0.1cm}}\n")
        output_file.write("    c@{\\hspace{0.2cm}}\n")
        output_file.write("    r@{\\hspace{0.1cm}}\n")
        output_file.write("    c@{\\hspace{0.2cm}}\n")
        output_file.write("    r@{\\hspace{0.1cm}}\n")
        output_file.write("    c\n")
    output_file.write("}\n")
    output_file.write("  \\toprule\n")
    # two columns (value, delta) per model unless in bayes mode
    if not bayes:
        output_file.write("& & \\multicolumn{2}{c}{")
    else:
        output_file.write("& & \\multicolumn{1}{c}{")
    for model in model_order:
        if model == model_order[-1]:
            output_file.write(model_labels[model] + "} \\\\ \\midrule\n")
        else:
            if not bayes:
                output_file.write(model_labels[model] + "} & \\multicolumn{2}{c}{")
            else:
                output_file.write(model_labels[model] + "} & & \\multicolumn{1}{c}{")
    # populate table
    model_means = {}
    for dataset in range(len(dataset_labels)):
        if not bayes:
            output_file.write("    \\multirow{" + str((len(hyperparameters) + len(multicol_params) + 1)) + "}*{\\begin{turn}{90}\\emph{" + dataset_labels[dataset] + "}\\end{turn}}\n")
        else:
            output_file.write("    \\multirow{" + str((len(hyperparameters))) + "}*{\\begin{turn}{90}\\emph{" + dataset_labels[dataset] + "}\\end{turn}}\n")
        # write mean MRR of each model
        if not bayes:
            mean = best_trials.groupby(["model", "dataset"], as_index=False).mean(numeric_only=True)
            output_file.write("& " + metric_label + " & & \\emph{")
            for model in model_order:
                entry_mean = mean.loc[(mean["model"] == model) & (mean["dataset"] == datasets[dataset])]
                model_means[model] = round(entry_mean[metric].item()*100, 1)
                output_file.write("{:.1f}".format(model_means[model]))
                if model == model_order[-1]:
                    output_file.write("} \\\\\n")
                else:
                    output_file.write("} & & \\emph{")
        # hyperparameter rows
        for i in range(len(hyperparameters)):
            hp = hyperparameters[i]
            if bayes:
                show_delta = False
            else:
                show_delta = True
            # insert multicolum row if necessary
            if appendix and hp in multicol_params and not bayes:
                output_file.write("&\\multicolumn{6}{l}{" + multicol_params[hp] + "} \\\\\n")
            # hyperparameter name
            if appendix and hp in far_left_params and not bayes:
                output_file.write("& " + hyperparameter_labels[i] + " & ")
            else:
                output_file.write("& $\\quad$" + hyperparameter_labels[i] + " & ")
            for model in model_order:
                # get model trials
                model_trials = all_trials.loc[
                    (all_trials['model'] == model) &
                    (all_trials['dataset'] == datasets[dataset])
                ]
                # value of this hyperparameter in the best trial
                max_entry = model_trials.loc[model_trials[metric] == model_trials[metric].max()]
                value = max_entry[hp].item()
                # check compatibility (whether it should be printed or not)
                compatible = True
                if hp in compatibility and max_entry[compatibility[hp][0]].item() not in compatibility[hp][1]:
                    compatible = False
                    show_delta = False
                if compatible:
                    if isinstance(value, float) and hp not in print_values:
                        show_delta = False
                        if hp == "emb_initialize_uniform_interval":
                            output_file.write("[{:.2f}, {:.2f}] & ".format(round(value, 2), round(value, 2) * -1))
                        else:
                            if hp in round_5:
                                output_file.write("{:.5f} & ".format(round(value, 5)))
                            elif hp in round_0:
                                output_file.write("{:.0f} & ".format(round(value, 0)))
                            elif hp in scientific:
                                output_file.write(scientific_notation(value) + " & ")
                            else:
                                output_file.write("{:.2f} & ".format(round(value, 2)))
                    else:
                        if hp in print_values:
                            printable_value = print_values[hp][max_entry[hp].item()]
                        else:
                            printable_value = value
                        output_file.write("{} & ".format(printable_value))
                        if show_delta:
                            output_file.write("\\emph{")
                # delta: best metric among trials with a *different* value
                if show_delta:
                    delta_trials = model_trials.loc[model_trials[hp] != value]
                    if len(delta_trials.index):
                        max_entry = delta_trials.loc[delta_trials[metric] == delta_trials[metric].max()]
                        delta = round((max_entry[metric].item() * 100) - model_means[model], 1)
                        output_file.write("({:.1f})".format(delta))
                    else:
                        output_file.write("--")
                    output_file.write("}")
                else:
                    output_file.write("-- & ")
                # close line
                if model == model_order[-1]:
                    output_file.write(" \\\\")
                    if hp == hyperparameters[-1]:
                        if datasets[dataset] == datasets[-1]:
                            output_file.write("\n    \\bottomrule\n")
                        else:
                            output_file.write("\n    \\midrule\n")
                    else:
                        output_file.write("\n")
                else:
                    output_file.write(" & ")
    # write end of table
    output_file.write("  \\end{tabular}\n")
    output_file.write("  \\end{center}\n")
    output_file.write("  \\caption{Insert caption here}\n")
    output_file.write("  \\label{" + table_label + "}\n")
    if appendix and not bayes:
        output_file.write("\\end{sidewaystable}\n")
    else:
        output_file.write("\\end{table}\n")
    output_file.close()
def scientific_notation(number):
    """Format `number` for LaTeX as mantissa with the exponent superscripted.

    E.g. ``0.00012`` -> ``$1.20^{-04}$`` (the two-digit exponent from
    ``{:.2E}`` formatting becomes the superscript; the base-10 factor is
    left implicit, matching the existing tables).
    """
    mantissa, _, exponent = "{:.2E}".format(number).partition("E")
    return "$" + mantissa + "^{" + exponent + "}$"
if __name__ == '__main__':
# parse args
parser = argparse.ArgumentParser()
parser.add_argument(
'--all_trials',
type=str,
required=True,
help="csvs of all trials created with dump command, comma separated")
parser.add_argument(
'--best_trials',
type=str,
required=True,
help="csvs of best trials created with dump command, comma separated")
args, _ = parser.parse_known_args()
# load input CSVs
csvs = []
for input_file in args.all_trials.split(","):
csvs.append(pandas.read_csv(input_file))
all_trials = pandas.concat(csvs)
csvs = []
for input_file in args.best_trials.split(","):
csvs.append(pandas.read_csv(input_file))
best_trials = pandas.concat(csvs)
# deal with empty string in emb_regularize_p
all_trials['emb_regularize_p'] = all_trials['emb_regularize_p'].fillna(0)
best_trials['emb_regularize_p'] = best_trials['emb_regularize_p'].fillna(0)
# change negative dropout to zero
all_trials['emb_e_dropout'] = all_trials['emb_e_dropout'].clip(lower=0)
best_trials['emb_e_dropout'] = best_trials['emb_e_dropout'].clip(lower=0)
all_trials['emb_r_dropout'] = all_trials['emb_r_dropout'].clip(lower=0)
best_trials['emb_r_dropout'] = best_trials['emb_r_dropout'].clip(lower=0)
# make sure it's only validation data
all_trials = all_trials.loc[all_trials["split"] == "valid"]
best_trials = best_trials.loc[best_trials["split"] == "valid"]
# order and label for models
model_order = ['rescal', 'transe', 'distmult', 'complex', 'conve']
model_labels = {'rescal': 'RESCAL', 'transe': 'TransE', 'distmult': 'DistMult', 'complex': 'ComplEx', 'conve': 'ConvE'}
# set datasets
datasets = ["fb15k-237", "wnrr"]
dataset_labels = ["FB15K-237", "WNRR"]
# set 2 metrics and corresponding attributes in CSVs
metrics = ["fwt_mrr", "fwt_hits@10"]
metric_labels = ["MRR", "Hits@10"]
# MEAN+STD TABLE IN APPENDIX
mean_std_table(
datasets,
dataset_labels,
metrics,
metric_labels,
model_order,
model_labels,
best_trials,
"table_mean_std.tex")
# HYPERPARAMETER TABLE IN MAIN PAPER
# set datasets
datasets = ["fb15k-237", "wnrr"]
dataset_labels = ["FB15K-237", "WNRR"]
# set hyperparameters and corresponding labels
hyperparameters = [
"emb_e_dim",
"train_batch_size",
"train_type",
"train_loss",
"train_optimizer",
"emb_initialize", | |
u0 {1,S}
7 C u0 {3,D}
""",
thermo = u'Cs-(Cds-Cds)CbCtCt',
shortDesc = u"""""",
longDesc =
u"""
""",
)
# Group-additivity database entries (RMG-style thermo groups file).
# Each `entry(...)` describes a saturated carbon center (atom 1, `Cs`,
# marked `*`) bonded to combinations of Cb, Ct, Cd/Cdd and CO/CS atoms.
# A `group` adjacency-list line reads:
#     <index> [*] <atomtype> u<n unpaired electrons> {<neighbor>,<bond>}...
# `thermo` names another entry whose thermochemistry is reused for this
# group; entries with `index = -1` and `thermo = None` are placeholders
# (sulfur-containing groups with no data yet).
entry(
    index = 821,
    label = "Cs-CbCbCtCds",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Cb u0 {1,S}
4 Ct u0 {1,S}
5 [Cd,CO] u0 {1,S}
""",
    thermo = u'Cs-(Cds-Cds)CbCbCt',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 822,
    label = "Cs-(Cds-O2d)CbCbCt",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 Ct u0 {1,S}
6 O2d u0 {2,D}
""",
    thermo = u'Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)Ct',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 823,
    label = "Cs-(Cds-Cd)CbCbCt",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 Ct u0 {1,S}
6 C u0 {2,D}
""",
    thermo = u'Cs-(Cds-Cds)CbCbCt',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 824,
    label = "Cs-(Cds-Cds)CbCbCt",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 Ct u0 {1,S}
6 Cd u0 {2,D}
""",
    thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Ct',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 825,
    label = "Cs-(Cds-Cdd)CbCbCt",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 Ct u0 {1,S}
6 Cdd u0 {2,D}
""",
    thermo = u'Cs-(Cds-Cdd-Cd)CbCbCt',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 826,
    label = "Cs-(Cds-Cdd-O2d)CbCbCt",
    group = 
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 Ct u0 {1,S}
7 O2d u0 {3,D}
""",
    thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Ct',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cdd-S2d)CbCbCt",
    group = 
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 Ct u0 {1,S}
7 S2d u0 {3,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 827,
    label = "Cs-(Cds-Cdd-Cd)CbCbCt",
    group = 
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 Ct u0 {1,S}
7 C u0 {3,D}
""",
    thermo = u'Cs-(Cds-Cds)CbCbCt',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 828,
    label = "Cs-CbCbCbCds",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 [Cd,CO] u0 {1,S}
""",
    thermo = u'Cs-(Cds-Cds)CbCbCb',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 829,
    label = "Cs-(Cds-O2d)CbCbCb",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 O2d u0 {2,D}
""",
    thermo = u'Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)(Cds-Cds)',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 830,
    label = "Cs-(Cds-Cd)CbCbCb",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 C u0 {2,D}
""",
    thermo = u'Cs-(Cds-Cds)CbCbCb',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 831,
    label = "Cs-(Cds-Cds)CbCbCb",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 Cd u0 {2,D}
""",
    thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)(Cds-Cds)',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 832,
    label = "Cs-(Cds-Cdd)CbCbCb",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 Cdd u0 {2,D}
""",
    thermo = u'Cs-(Cds-Cdd-Cd)CbCbCb',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 833,
    label = "Cs-(Cds-Cdd-O2d)CbCbCb",
    group = 
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 Cb u0 {1,S}
7 O2d u0 {3,D}
""",
    thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cdd-S2d)CbCbCb",
    group = 
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 Cb u0 {1,S}
7 S2d u0 {3,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 834,
    label = "Cs-(Cds-Cdd-Cd)CbCbCb",
    group = 
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
6 Cb u0 {1,S}
7 C u0 {3,D}
""",
    thermo = u'Cs-(Cds-Cds)CbCbCb',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 835,
    label = "Cs-CtCtCtCt",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Ct u0 {1,S}
3 Ct u0 {1,S}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
""",
    thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)(Cds-Cds)',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 836,
    label = "Cs-CbCtCtCt",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Ct u0 {1,S}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
""",
    thermo = u'Cs-(Cds-Cds)CtCtCt',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 837,
    label = "Cs-CbCbCtCt",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Cb u0 {1,S}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
""",
    thermo = u'Cs-(Cds-Cds)(Cds-Cds)CtCt',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 838,
    label = "Cs-CbCbCbCt",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 Ct u0 {1,S}
""",
    thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Ct',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = 839,
    label = "Cs-CbCbCbCb",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 Cb u0 {1,S}
""",
    thermo = u'Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)(Cds-Cds)',
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = -1,
    label = "Cs-C=SCbCtCt",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Ct u0 {1,S}
5 Ct u0 {1,S}
6 S2d u0 {2,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = -1,
    label = "Cs-C=S(Cds-Cd)(Cds-Cd)(Cds-Cd)",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {9,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cd u0 {1,S} {8,D}
6 C u0 {3,D}
7 C u0 {4,D}
8 C u0 {5,D}
9 S2d u0 {2,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = -1,
    label = "Cs-C=S(Cds-Cds)(Cds-Cds)(Cds-Cdd)",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {9,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cd u0 {1,S} {8,D}
6 Cd u0 {3,D}
7 Cd u0 {4,D}
8 Cdd u0 {5,D}
9 S2d u0 {2,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = -1,
    label = "Cs-C=S(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd)",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 CS u0 {1,S} {9,D}
4 Cd u0 {1,S} {7,D}
5 Cd u0 {1,S} {8,D}
6 Cdd u0 {2,D} {10,D}
7 Cd u0 {4,D}
8 Cd u0 {5,D}
9 S2d u0 {3,D}
10 C u0 {6,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = -1,
    label = "Cs-C=S(Cds-Cds)(Cds-Cds)(Cds-Cdd-S2d)",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 CS u0 {1,S} {9,D}
4 Cd u0 {1,S} {7,D}
5 Cd u0 {1,S} {8,D}
6 Cdd u0 {2,D} {10,D}
7 Cd u0 {4,D}
8 Cd u0 {5,D}
9 S2d u0 {3,D}
10 S2d u0 {6,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = -1,
    label = "Cs-C=S(Cds-Cdd)(Cds-Cdd)(Cds-Cdd)",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {9,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cd u0 {1,S} {8,D}
6 Cdd u0 {3,D}
7 Cdd u0 {4,D}
8 Cdd u0 {5,D}
9 S2d u0 {2,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
    index = -1,
    label = "Cs-C=S(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd)",
    group = 
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 CS u0 {1,S} {9,D}
6 Cdd u0 {2,D} {10,D}
7 Cdd u0 {3,D} {11,D}
8 Cdd u0 {4,D} {12,D}
9 S2d u0 {5,D}
10 C u0 {6,D}
11 C u0 {7,D}
12 C u0 {8,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc = 
u"""
""",
)
entry(
index = -1,
label = "Cs-C=S(Cds-Cdd-S2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 CS u0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.