input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<gh_stars>10-100
"""
NLTE module of SME
reads and interpolates departure coefficients from library files
"""
import itertools
import logging
import warnings
import numpy as np
from scipy import interpolate
from tqdm import tqdm
from .abund import Abund
from .abund import elements as abund_elem
from .data_structure import Collection, CollectionFactory, array, astype, oneof, this
logger = logging.getLogger(__name__)
class DirectAccessFile:
    """
    Reader/writer for IDL-style "direct access" binary files.

    The file has the following structure:
    Version string - 64 byte long
    # of directory blocks - short int
    directory block length - short int
    # of used directory blocks - short int
    1st directory block
    key - string of up to 256 characters padded with ' '
    datatype - 32-bit int 23-element array returned by SIZE
    pointer - 64-bit int pointer to the beginning of the record
    2nd directory block
    ...
    last directory block
    1st data record
    ...
    last data record
    """

    def __init__(self, filename):
        """Read the file header and keep the directory information.

        Parameters
        ----------
        filename : str
            path to the direct access file
        """
        key, pointer, dtype, shape, version = DirectAccessFile.read_header(filename)
        # Only the header is parsed here; record data is memory mapped on demand
        self.file = filename
        self.version = version
        self.key = key
        self.shape = shape
        self.pointer = pointer
        self.dtype = dtype

    def __getitem__(self, key):
        # Access data via brackets; raise instead of returning None
        value = self.get(key)
        if value is None:
            raise KeyError(f"Key {key} not found")
        return value

    def get(self, key: str, alt=None) -> np.memmap:
        """Get a field from the file, returning ``alt`` if the key is absent.

        The record is memory mapped, so only the requested data is read.
        """
        idx = np.where(self.key == key)[0]
        if idx.size == 0:
            return alt
        else:
            idx = idx[0]
        return np.memmap(
            self.file,
            mode="r",
            offset=self.pointer[idx],
            dtype=self.dtype[idx],
            # IDL stores arrays column-major; reverse the shape so the
            # memmap matches numpy's row-major convention
            shape=self.shape[idx][::-1],
        )

    @staticmethod
    def idl_typecode(i):
        """
        Map an IDL typecode to the corresponding Numpy dtype string.
        Most specify a byte size, but not all.
        """
        typecode = {
            0: "V",
            1: "B",
            2: "i2",
            3: "i4",
            4: "f4",
            5: "f8",
            6: "c4",
            7: "S",
            8: "O",
            9: "c8",
            10: "i8",
            11: "O",
            12: "u2",
            13: "u4",
            14: "i8",
            15: "u8",
        }
        return typecode[i]

    @staticmethod
    def get_typecode(dt):
        """
        Map a numpy dtype (or dtype string) to the corresponding IDL typecode.

        Note: this cannot be a perfect inverse of :meth:`idl_typecode`, since
        several IDL codes share a numpy representation. "O" resolves to 11
        (object) and "i8" to 14 (long64), matching the effective values of the
        original duplicate-keyed mapping (the last duplicate always won).
        """
        typecode = {
            "V": 0,
            "B": 1,
            "U": 1,
            "i2": 2,
            "i4": 3,
            "f4": 4,
            "f8": 5,
            "c4": 6,
            "S": 7,
            "c8": 9,
            "O": 11,  # BUGFIX: duplicate keys "O": 8 and "O": 11 collapsed
            "u2": 12,
            "u4": 13,
            "i8": 14,  # BUGFIX: duplicate keys "i8": 10 and "i8": 14 collapsed
            "u8": 15,
        }
        if isinstance(dt, np.dtype):
            dt = dt.str
        if len(dt) > 2:
            # strip the byte-order character, e.g. "<f8" -> "f8"
            dt = dt[1:]
        return typecode[dt]

    @staticmethod
    def get_dtypes(version):
        """Return the (header, directory) numpy dtypes for a file version."""
        major, minor = version
        if major == 1 and minor == 0:
            header_dtype = np.dtype(
                [("nblocks", "<u2"), ("dir_length", "<u2"), ("ndir", "<u2")]
            )
            dir_dtype = np.dtype(
                [("key", "S256"), ("size", "<i4", 23), ("pointer", "<i8")]
            )
        elif major == 1 and minor >= 10:
            header_dtype = np.dtype(
                [("nblocks", "<u8"), ("dir_length", "<i2"), ("ndir", "<u8")]
            )
            dir_dtype = np.dtype(
                [("key", "S256"), ("size", "<i4", 23), ("pointer", "<i8")]
            )
        else:
            # BUGFIX: the message was missing the f-string prefix, so the
            # version was never interpolated into the error text
            raise ValueError(f"DirectAccess File Version '{version}' not understood.")
        return header_dtype, dir_dtype

    @staticmethod
    def read_version(version_string: str):
        """Parse a version string such as
        'DirectAccess file Version 1.10 ...' into a (major, minor) tuple."""
        if isinstance(version_string, bytes):
            version_string = version_string.decode()
        # The version digits sit at fixed character positions in the string
        major, minor = int(version_string[26]), int(version_string[28:30])
        version = (major, minor)
        return version

    @classmethod
    def read_header(cls, fname: str):
        """Parse the header data.

        Returns
        -------
        key, pointer, dtype, shape, version
            record names, byte offsets, numpy dtype strings, record shapes
            (as tuples, in IDL order), and the file version
        """
        with open(fname, "rb") as file:
            version_dtype = "S64"
            version = np.fromfile(file, version_dtype, count=1)
            version = cls.read_version(version[0])
            header_dtype, dir_dtype = cls.get_dtypes(version)
            header = np.fromfile(file, header_dtype, count=1)
            ndir = int(header["ndir"][0])
            directory = np.fromfile(file, dir_dtype, count=ndir)
        # Decode bytes to strings
        key = directory["key"]
        key = np.char.strip(np.char.decode(key))
        # Get relevant info from the IDL SIZE parameter:
        # ndim, n1, n2, ..., typecode, size
        dtype = np.array(
            [cls.idl_typecode(d[1 + d[0]]) for d in directory["size"]],
            # BUGFIX: was "U16" -> "U5", which silently truncated
            # "S<itemsize>" codes for string records longer than 9999 bytes
            dtype="U16",
        )
        shape = np.empty(ndir, dtype=object)
        shape[:] = [tuple(d[1 : d[0] + 1]) for d in directory["size"]]
        # Pointer to data arrays
        pointer = directory["pointer"]
        # Bytes (which represent strings) get special treatment to get the
        # dimensions right and to properly convert them to strings.
        # The first axis of their stored shape is the string length.
        idx = dtype == "B"
        dtype[idx] = [f"S{s[0]}" for s in shape[idx]]
        shape2 = np.empty(np.count_nonzero(idx), dtype=object)
        shape2[:] = [tuple(s[1:]) for s in shape[idx]]
        shape[idx] = shape2
        return key, pointer, dtype, shape, version

    @classmethod
    def write(cls, fname, **kwargs):
        """Write fields to a new direct access file (format version 1.10).

        Parameters
        ----------
        fname : str
            output filename
        **kwargs
            field name / array value pairs to store
        """
        major, minor = 1, 10
        ndir = len(kwargs)
        version = f"DirectAccess file Version {major}.{minor} 2011-03-24"
        version = np.asarray([version], dtype="S64")
        version_length = version.itemsize
        header_dtype, dir_dtype = cls.get_dtypes((major, minor))
        header_length = header_dtype.itemsize
        dir_length = dir_dtype.itemsize
        header = np.zeros(1, header_dtype)
        header[0]["nblocks"] = len(kwargs)
        header[0]["dir_length"] = dir_length
        header[0]["ndir"] = ndir
        directory = np.zeros(ndir, dir_dtype)
        # The first data record starts right after the directory blocks
        pointer = version_length + header_length + ndir * dir_length
        for i, (key, value) in enumerate(kwargs.items()):
            value = np.asarray(value)
            directory[i]["key"] = key
            shape = directory[i]["size"]
            if np.issubdtype(value.dtype, np.dtype("U")) or np.issubdtype(
                value.dtype, np.dtype("S")
            ):
                # Strings are stored as raw bytes with the string length
                # prepended as an extra (first) dimension
                value = value.astype("S")
                shape[0] = value.ndim + 1
                shape[1 : 2 + value.ndim] = value.itemsize, *value.shape
                shape[2 + value.ndim] = 1  # IDL typecode for bytes
                shape[3 + value.ndim] = value.size * value.itemsize
            else:
                shape[0] = value.ndim
                shape[1 : 1 + value.ndim] = value.shape
                shape[1 + value.ndim] = cls.get_typecode(value.dtype)
                shape[2 + value.ndim] = value.size
            directory[i]["pointer"] = pointer
            pointer += value.nbytes
            # remember the converted value for the write loop below
            kwargs[key] = value
        with open(fname, "wb") as file:
            version.tofile(file)
            header.tofile(file)
            directory.tofile(file)
            for value in kwargs.values():
                value.tofile(file)
class Grid:
"""NLTE Grid class that handles all NLTE data reading and interpolation"""
def __init__(
    self,
    sme,
    elem,
    lfs_nlte,
    selection="energy",
    solar=None,
    abund_format="Fe=12",
    min_energy_diff=None,
):
    """Load the NLTE grid data file for one element and match its levels
    against the linelist.

    Parameters
    ----------
    sme : SME_Structure
        SME input structure; provides the linelist, NLTE settings and
        atmosphere information
    elem : str
        element of the NLTE grid
    lfs_nlte :
        large-file-storage client used to fetch the grid data file
    selection : {"energy", "levels"}
        algorithm used to match lines in the grid with the linelist
    solar : Abund or array-like, optional
        solar abundance to use; defaults to ``Abund.solar()``
    abund_format : str
        abundance format of the grid, e.g. "Fe=12"
    min_energy_diff : float, optional
        minimum difference between energy levels to match; only relevant
        for selection == "energy"
    """
    #:str: Element of the NLTE grid
    self.elem = elem
    #:LineList: Whole LineList that was passed to the C library
    self.linelist = sme.linelist
    #:array(str): Elemental Species Names for the linelist
    self.species = sme.linelist.species
    #:str: Name of the grid
    self.grid_name = sme.nlte.grids[elem]
    #:str: complete filename of the NLTE grid data file
    self.fname = lfs_nlte.get(self.grid_name)
    #:{"levels", "energy"}: Selection algorithm to match lines in grid with linelist
    self.selection = selection
    #:float: Minimum difference between energy levels to match, only relevant for selection=='energy'
    self.min_energy_diff = min_energy_diff
    #:DirectAccessFile: The NLTE data file
    self.directory = DirectAccessFile(self.fname)
    self.version = self.directory.version
    # Check atmosphere compatibility
    if "atmosphere_grid" in self.directory.key:
        self.atmosphere_grid = self.directory["atmosphere_grid"][0]
        if self.atmosphere_grid != sme.atmo.source:
            # BUGFIX: this message used to be a plain string containing
            # literal "{elem}" placeholders that were never interpolated;
            # use lazy %-style logging arguments instead
            logger.warning(
                "The %s NLTE grid was created with %s, but we are using %s for the synthesis",
                elem,
                self.atmosphere_grid,
                sme.atmo.source,
            )
    # The possible labels
    self._teff = self.directory["teff"]
    self._grav = self.directory["grav"]
    self._feh = self.directory["feh"]
    self._xfe = self.directory["abund"]
    # The position of the models in the datafile
    self._keys = self.directory["models"].astype("U")
    depth_name = str.lower(sme.atmo.interp)
    try:
        depth = self.directory[depth_name]
    except KeyError:
        # Fall back to whichever depth scale the grid does provide
        other_depth_name = "tau" if depth_name == "rhox" else "rhox"
        depth = self.directory[other_depth_name]
        logger.warning(
            f"No data for {depth_name} in NLTE grid for {self.elem} found, using {other_depth_name} instead."
        )
        depth_name = other_depth_name
    #:str: Which parameter is used as the depth axis
    self.depth_name = depth_name
    #:array(float): depth points of the atmosphere that is passed to the C library (in log10 scale)
    self._depth = depth
    self._grid = None
    self._points = None
    #:list(int): number of points in the grid to cache for each parameter, order; abund, teff, logg, monh
    self.subgrid_size = sme.nlte.subgrid_size
    #:float: Solar Abundance of the element
    self.abund_format = abund_format
    if solar is None:
        solar = Abund.solar()
    elif isinstance(solar, Abund):
        pass
    else:
        solar = Abund(0, solar)
    self.solar = solar
    #:dict: upper and lower parameters covered by the grid
    self.limits = {}
    #:array: NLTE data array
    self.bgrid = None
    #:array: Depth points of the NLTE data
    self.depth = None
    #:array: Indices of the lines in the NLTE data
    self.linerefs = None
    #:array: Indices of the lines in the LineList
    self.lineindices = None
    #:array: Indices of the lines in the bgrid
    self.iused = None
    #:str: citations in bibtex format, if known
    self.citation_info = ""
    conf = self.directory["conf"].astype("U")
    term = self.directory["term"].astype("U")
    try:
        species = self.directory["spec"].astype("U")
    except KeyError:
        # Older grids lack the species field; assume neutral atoms of elem
        logger.warning(
            "Could not find 'species' field in NLTE file %s. Assuming they are all '%s 1'.",
            self.grid_name,
            self.elem,
        )
        species = np.full(conf.shape, "%s 1" % self.elem)
    rotnum = self.directory["J"]  # rotational number of the atomic state
    if self.version[0] == 1 and self.version[1] >= 10:
        energies = self.directory["energy"]  # energy in eV
        self.citation_info = self.directory["citation"][()].decode()
    else:
        # Pre-1.10 grids carry no energies, so only level-name matching works
        self.citation_info = None
        if self.selection != "levels":
            logger.warning(
                "NLTE grid file version %s only supports level selection, not %s",
                self.version,
                self.selection,
            )
            self.selection = "levels"
    if self.selection == "levels":
        self.lineindices, self.linerefs, self.iused = self.select_levels(
            conf, term, species, rotnum
        )
    elif self.selection == "energy":
        # 'energies' is guaranteed to exist here: selection == "energy"
        # is only possible for grid versions >= 1.10 (see above)
        self.lineindices, self.linerefs, self.iused = self.select_energies(
            conf, term, species, rotnum, energies
        )
def solar_rel_abund(self, abund, elem):
"""Get the abundance of elem relative to H, i.e. [X/H]"""
if self.abund_format == "H=12":
| |
and end positions.
for match in re.finditer(
r'^CIDR:[^\S\n]+(.+?,[^\S\n].+|.+)$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
if len(nets) > 0:
temp = pattern.search(response, match.start())
net_range = None
net_range_start = None
if temp is not None:
net_range = temp.group(1).strip()
net_range_start = temp.start()
if net_range is not None:
if net_range_start < match.start() or len(nets) > 0:
net['range'] = net_range
net['cidr'] = ', '.join(
[ip_network(c.strip()).__str__()
for c in match.group(1).split(', ')]
)
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except ValueError:
pass
elif results['asn_registry'] == 'lacnic':
#Iterate through all of the networks found, storing the CIDR value
#and the start and end positions.
for match in re.finditer(
r'^(inetnum|inet6num):[^\S\n]+(.+?,[^\S\n].+|.+)$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
net['range'] = match.group(2).strip()
temp = []
for addr in match.group(2).strip().split(', '):
count = addr.count('.')
if count is not 0 and count < 4:
addr_split = addr.strip().split('/')
for i in range(count + 1, 4):
addr_split[0] += '.0'
addr = '/'.join(addr_split)
temp.append(ip_network(addr.strip()).__str__())
net['cidr'] = ', '.join(temp)
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except ValueError:
pass
else:
#Iterate through all of the networks found, storing the CIDR value
#and the start and end positions.
for match in re.finditer(
r'^(inetnum|inet6num):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|.+)$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
net['range'] = match.group(2)
if match.group(3) and match.group(4):
addrs = []
addrs.extend(summarize_address_range(
ip_address(match.group(3).strip()),
ip_address(match.group(4).strip())))
cidr = ', '.join(
[i.__str__() for i in collapse_addresses(addrs)]
)
else:
cidr = ip_network(match.group(2).strip()).__str__()
net['cidr'] = cidr
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except (ValueError, TypeError):
pass
#Iterate through all of the network sections and parse out the
#appropriate fields for each.
for index, net in enumerate(nets):
section_end = None
if index + 1 < len(nets):
section_end = nets[index + 1]['start']
try:
dt_format = NIC_WHOIS[results['asn_registry']]['dt_format']
except KeyError:
dt_format = None
temp_net = self._parse_fields(
response,
NIC_WHOIS[results['asn_registry']]['fields'],
section_end,
net['end'],
dt_format
)
#Merge the net dictionaries.
net.update(temp_net)
#The start and end values are no longer needed.
del net['start'], net['end']
#Add the networks to the return dictionary.
results['nets'] = nets
return results
def _lookup_rws_arin(self, response=None, retry_count=3):
    """
    The function for retrieving and parsing whois information for an ARIN
    IP address via HTTP (Whois-RWS).

    Args:
        response: The dictionary containing whois information to parse.
            NOTE(review): presumably the already-parsed JSON payload from
            the ARIN Whois-RWS endpoint -- confirm against the caller.
        retry_count: The number of times to retry in case socket errors,
            timeouts, connection resets, etc. are encountered.

    Returns:
        List: Dictionaries containing network information which consists
            of the fields listed in the NIC_WHOIS dictionary. Certain IPs
            have more granular network listings, hence the need for a list
            object.
    """
    nets = []
    # A response may contain a single net dict or a list of them;
    # normalize to a list. Missing keys mean no nets at all.
    try:
        net_list = response['nets']['net']
        if not isinstance(net_list, list):
            net_list = [net_list]
    except KeyError:
        net_list = []
    for n in net_list:
        # Skip ARIN's own placeholder/administrative networks.
        if 'orgRef' in n and n['orgRef']['@handle'] in ('ARIN', 'VR-ARIN'):
            continue
        addrs = []
        net = copy.deepcopy(BASE_NET)
        # Derive collapsed CIDR notation and the textual address range.
        # Any malformed/missing address data leaves both fields at their
        # BASE_NET defaults.
        try:
            addrs.extend(summarize_address_range(
                ip_address(n['startAddress']['$'].strip()),
                ip_address(n['endAddress']['$'].strip())))
            net['cidr'] = ', '.join(
                [i.__str__() for i in collapse_addresses(addrs)]
            )
            net['range'] = (n['startAddress']['$'].strip() + ' - ' +
                            n['endAddress']['$'].strip())
        except (KeyError, ValueError, TypeError):
            pass
        # Copy simple top-level fields, ignoring whichever are absent.
        for k, v in {
            'created': 'registrationDate',
            'updated': 'updateDate',
            'name': 'name'
        }.items():
            try:
                net[k] = str(n[v]['$']).strip()
            except KeyError:
                pass
        if 'handle' in n:
            net['handle'] = n['handle']['$'].strip()
        # Prefer the customer reference over the organisation reference
        # when resolving contact details.
        ref = None
        if 'customerRef' in n:
            ref = ['customerRef', 'customer']
        elif 'orgRef' in n:
            ref = ['orgRef', 'org']
        if ref is not None:
            try:
                net['description'] = str(n[ref[0]]['@name']).strip()
            except KeyError:
                pass
            # Fetch the referenced customer/org record (with POC links);
            # on failure keep what we have and move on to the next net.
            try:
                ref_url = n[ref[0]]['$'].strip() + '?showPocs=true'
                ref_response = self.get_rws(ref_url, retry_count)
            except (KeyError, WhoisLookupError):
                nets.append(net)
                continue
            # Street address lines are joined into a single newline-separated
            # string; a lone line arrives as a dict rather than a list.
            try:
                addr_list = (
                    ref_response[ref[1]]['streetAddress']['line']
                )
                if not isinstance(addr_list, list):
                    addr_list = [addr_list]
                net['address'] = '\n'.join(
                    [str(line['$']).strip() for line in addr_list]
                )
            except KeyError:
                pass
            for k, v in {
                'postal_code': 'postalCode',
                'city': 'city',
                'state': 'iso3166-2'
            }.items():
                try:
                    net[k] = str(ref_response[ref[1]][v]['$'])
                except KeyError:
                    pass
            try:
                net['country'] = (
                    str(ref_response[ref[1]]['iso3166-1']['code2']['$'])
                ).upper()
            except KeyError:
                pass
            # Follow Abuse/Tech point-of-contact links and collect their
            # (de-duplicated) e-mail addresses into '<role>_emails' fields.
            # NOTE(review): a failure on one POC aborts the whole POC loop
            # for this net -- presumably acceptable best-effort behavior.
            try:
                for poc in (
                    ref_response[ref[1]]['pocs']['pocLinkRef']
                ):
                    if poc['@description'] in ('Abuse', 'Tech'):
                        poc_url = poc['$']
                        poc_response = self.get_rws(
                            poc_url,
                            retry_count
                        )
                        emails = poc_response['poc']['emails']['email']
                        if not isinstance(emails, list):
                            emails = [emails]
                        temp = []
                        for e in emails:
                            temp.append(str(e['$']).strip())
                        key = '%s_emails' % poc['@description'].lower()
                        net[key] = (
                            '\n'.join(unique_everseen(temp))
                            if len(temp) > 0 else None
                        )
            except (KeyError, WhoisLookupError):
                pass
        # One dictionary per parsed net record.
        nets.append(net)
    return nets
def _lookup_rws_ripe(self, response=None):
    """
    The function for retrieving and parsing whois information for a RIPE
    IP address via HTTP (Whois-RWS).

    ***
    THIS FUNCTION IS TEMPORARILY BROKEN UNTIL RIPE FIXES THEIR API:
    https://github.com/RIPE-NCC/whois/issues/114
    ***

    Args:
        response: The dictionary containing whois information to parse.
            NOTE(review): presumably the parsed JSON payload from the RIPE
            Whois-RWS endpoint -- confirm against the caller.

    Returns:
        List: Dictionaries containing network information which consists
            of the fields listed in the NIC_WHOIS dictionary. Certain IPs
            have more granular network listings, hence the need for a list
            object.
    """
    nets = []
    try:
        object_list = response['objects']['object']
    except KeyError:
        object_list = []
    ripe_abuse_emails = []
    ripe_misc_emails = []
    # A single net dict accumulates data from every whois object in the
    # response ('role' objects contribute contact info, 'inetnum' objects
    # the network fields); it is appended to nets once, after the loop.
    net = copy.deepcopy(BASE_NET)
    for n in object_list:
        try:
            if n['type'] == 'role':
                for attr in n['attributes']['attribute']:
                    if attr['name'] == 'abuse-mailbox':
                        ripe_abuse_emails.append(str(
                            attr['value']
                        ).strip())
                    elif attr['name'] == 'e-mail':
                        ripe_misc_emails.append(str(attr['value']).strip())
                    elif attr['name'] == 'address':
                        # Multiple address attributes are concatenated with
                        # newlines in the order they appear.
                        if net['address'] is not None:
                            net['address'] += '\n%s' % (
                                str(attr['value']).strip()
                            )
                        else:
                            net['address'] = str(attr['value']).strip()
            elif n['type'] in ('inetnum', 'inet6num'):
                for attr in n['attributes']['attribute']:
                    if attr['name'] in ('inetnum', 'inet6num'):
                        net['range'] = str(attr['value']).strip()
                        ipr = str(attr['value']).strip()
                        # Ranges come either as 'start - end' or as a
                        # single CIDR block.
                        ip_range = ipr.split(' - ')
                        try:
                            if len(ip_range) > 1:
                                addrs = []
                                addrs.extend(
                                    summarize_address_range(
                                        ip_address(ip_range[0]),
                                        ip_address(ip_range[1])
                                    )
                                )
                                cidr = ', '.join(
                                    [i.__str__()
                                     for i in collapse_addresses(addrs)]
                                )
                            else:
                                cidr = ip_network(ip_range[0]).__str__()
                            net['cidr'] = cidr
                        except (ValueError, TypeError):
                            pass
                    elif attr['name'] == 'netname':
                        net['name'] = str(attr['value']).strip()
                    elif attr['name'] == 'nic-hdl':
                        net['handle'] = str(attr['value']).strip()
                    elif attr['name'] == 'descr':
                        # Multiple descr attributes are concatenated with
                        # newlines in the order they appear.
                        if net['description'] is not None:
                            net['description'] += '\n%s' % (
                                str(attr['value']).strip()
                            )
                        else:
                            net['description'] = str(attr['value']).strip()
                    elif attr['name'] == 'country':
                        net['country'] = str(attr['value']).strip().upper()
        except KeyError:
            # A malformed object aborts only that object's processing.
            pass
    nets.append(net)
    # This is nasty. Since RIPE RWS doesn't provide a granular
    # contact to network relationship, we apply to all networks.
    if len(ripe_abuse_emails) > 0 or len(ripe_misc_emails) > 0:
        abuse = (
            '\n'.join(unique_everseen(ripe_abuse_emails))
            if len(ripe_abuse_emails) > 0 else None
        )
        misc = (
            '\n'.join(unique_everseen(ripe_misc_emails))
            if len(ripe_misc_emails) > 0 else None
        )
        for net in nets:
            net['abuse_emails'] = abuse
            net['misc_emails'] = misc
    return nets
def _lookup_rws_apnic(self, response=None):
    """
    The function for retrieving and parsing whois information for a APNIC
    IP address via HTTP (Whois-RWS).

    Args:
        response: The dictionary containing whois information to parse.

    Returns:
        List: Dictionaries containing network information which consists
            of the fields listed in the NIC_WHOIS dictionary. Certain IPs
            have more granular network listings, hence the need for a list
            object.
    """
    net = copy.deepcopy(BASE_NET)
    # Derive the collapsed CIDR block(s) and the textual address range;
    # malformed or missing address data leaves the BASE_NET defaults.
    try:
        first = response['startAddress'].strip()
        last = response['endAddress'].strip()
        blocks = list(summarize_address_range(
            ip_address(first), ip_address(last)))
        net['cidr'] = ', '.join(
            str(block) for block in collapse_addresses(blocks)
        )
        net['range'] = first + ' - ' + last
    except (KeyError, ValueError, TypeError):
        pass
    try:
        net['country'] = str(response['country']).strip().upper()
    except KeyError:
        pass
    # Registration / last-changed timestamps; a lone event arrives as a
    # dict rather than a list.
    events = response['events'] if 'events' in response else []
    if not isinstance(events, list):
        events = [events]
    for event in events:
        try:
            action = event['eventAction']
            if action == 'registration':
                net['created'] = str(event['eventDate']).strip()
            elif action == 'last changed':
                net['updated'] = str(event['eventDate']).strip()
        except (KeyError, ValueError):
            pass
    # Contact entities carry name/address/e-mail details in vCard form.
    entities = response['entities'] if 'entities' in response else []
    if not isinstance(entities, list):
        entities = [entities]
    for entity in entities:
        try:
            if 'handle' in entity:
                net['handle'] = entity['handle']
            roles = entity['roles']
            for card in entity['vcardArray'][1]:
                field = card[0]
                if field == 'fn' and 'administrative' in roles:
                    net['name'] = str(card[3]).strip()
                elif field == 'adr' and 'administrative' in roles:
                    try:
                        net['address'] = str(card[1]['label']).strip()
                    except KeyError:
                        pass
                elif field == 'email':
                    # Pick the e-mail bucket from the entity's role(s).
                    if len(roles) > 1 or roles[0] == 'administrative':
                        key = 'misc_emails'
                    elif roles[0] == 'abuse':
                        key = 'abuse_emails'
                    elif roles[0] == 'technical':
                        key = 'tech_emails'
                    else:
                        key = None
                    if key is not None:
                        email = str(card[3]).strip()
                        if net[key] is None:
                            net[key] = email
                        else:
                            net[key] += '\n%s' % email
        except (KeyError, IndexError):
            pass
    # Free-form description remarks.
    remarks = response['remarks'] if 'remarks' in response else []
    if not isinstance(remarks, list):
        remarks = [remarks]
    for remark in remarks:
        try:
            if remark['title'] == 'description':
                net['description'] = str('\n'.join(remark['description']))
        except (KeyError, IndexError):
            pass
    return [net]
def _lookup_rws_lacnic(self, response=None):
"""
The function for retrieving and parsing whois information for a LACNIC
IP address via HTTP (Whois-RWS).
Args:
response: The dictionary containing whois information to parse.
Returns:
List: Dictionaries containing network information which | |
141. 0.]
[ 0. 194. 1.]
[ 2. 122. 0.]
[ 1. 97. 0.]
[ 4. 108. 0.]]
>>> # Print names (or absolute indices, if no names) of columns in data.
... # Note: Column 0 was isolated as target values.
... print(main.columns_)
Int64Index([1, 2, 3], dtype='int64')
>>> # Print the names (or absolute indices, if no names) of nominal columns in data.
... print(main._nominal_columns)
[1, 3]
>>> # Dummy code nominal columns inferred from data, i.e., nominal_columns='infer' (default).
... main.dummy_coding()
info: columns [1, 3] was/were infered as nominal column(s) for dummy coding
>>> # Print the data samples post dummy-coding
... print(main.data)
[[ 130. 1. 0. 0. 0. 0. 1. 0.]
[ 152. 0. 0. 1. 0. 0. 1. 0.]
[ 109. 0. 0. 0. 1. 0. 1. 0.]
[ 194. 0. 1. 0. 0. 0. 0. 1.]
[ 132. 0. 0. 0. 0. 1. 1. 0.]
[ 141. 0. 0. 0. 1. 0. 1. 0.]
[ 194. 1. 0. 0. 0. 0. 0. 1.]
[ 122. 0. 0. 1. 0. 0. 1. 0.]
[ 97. 0. 1. 0. 0. 0. 1. 0.]
[ 108. 0. 0. 0. 0. 1. 1. 0.]]
>>> # Print names of columns in data post dummy-coding.
... # Note: Dummy/indicator columns assume names of the form **'<original column name>_<nominal category binarized>'**
... print(main.columns_)
Index([2, '1_0.0', '1_1.0', '1_2.0', '1_3.0', '1_4.0', '3_0.0', '3_1.0'], dtype='object')
"""
try:
dataframe = pd.DataFrame(self.data, columns=self.columns_, dtype=np.number)
except ValueError:
# print("warning: Data contains non-numeric features")
logger.warning("Data contains non-numeric features")
dataframe = pd.DataFrame(self.data, columns=self.columns_)
#if not (nominal_columns==[] or nominal_columns is None): # Both [] (empty list) and ``None`` are False Expressions
if nominal_columns: # Evaluates to True if (nominal_columns!=[] and nominal_columns is not None)
if isinstance(nominal_columns, str) and nominal_columns.casefold()=='infer':
if hasattr(self, '_nominal_columns'):
nominal_columns = self._nominal_columns if self._nominal_columns is not None else []
# print("info: columns {0} was/were infered as nominal column(s) for dummy coding".format(nominal_columns))
logger.info("Columns %s was/were infered as nominal column(s) for dummy coding", nominal_columns)
else:
# print("error: could not infer nominal type columns from data")
logger.error("Could not infer nominal type columns from data")
raise Exception("could not infer nominal type columns from data")
elif isinstance(nominal_columns, str) and nominal_columns.casefold()=='all':
nominal_columns = self.columns_.copy()
elif isinstance(nominal_columns, list) or isinstance(nominal_columns, str) or isinstance(nominal_columns, int):
if isinstance(nominal_columns, str) or isinstance(nominal_columns, int):
nominal_columns = [nominal_columns]
if not set(nominal_columns).issubset(self.columns_):
# print("warning: Unknown columns names: {0} in argument to parameter 'nominal_columns' have been ignored".format( set(nominal_columns).difference(self.columns_) ))
logger.warning("Unknown columns names: %s in argument to parameter 'nominal_columns' have been ignored", set(nominal_columns).difference(self.columns_) )
nominal_columns = list( set(nominal_columns).intersection(self.columns_) )
else:
# print("error: Invalid arguments to parameter 'nominal_columns'. Accepted Arguments: {list of names of nominal columns, 'infer', 'all', None}")
logger.error("Invalid arguments to parameter 'nominal_columns'. Accepted Arguments: {list of names of nominal columns, 'infer', 'all', None}")
raise TypeError("invalid arguments to parameter 'nominal_columns'")
dataframe_dummy_coded = pd.get_dummies(dataframe, columns=nominal_columns, drop_first=drop_first)
del dataframe
self.data = dataframe_dummy_coded.values
self.columns_ = dataframe_dummy_coded.columns
del dataframe_dummy_coded
del self._nominal_columns
self.n_samples, self.n_features = self.data.shape
else:
# print("info: No columns to dummy code (nominal_columns = {0})".format(nominal_columns.__repr__()))
logger.info("No columns to dummy code (nominal_columns = %s)", nominal_columns.__repr__())
def standardize_data(self):
    """Feature Scaling through Standardisation (or Z-score normalisation).

    Fits a ``StandardScaler`` on ``self.data`` (in place) and remembers it
    on ``self.standard_scaler`` so :meth:`destandardize_data` can invert
    the transform. Calling this twice in a row is a no-op.

    Raises:
        ValueError: If the data contains non-numeric features.

    See also:
        `Importance of Feature Scaling <http://scikit-learn.org/stable/auto_examples/preprocessing/plot_scaling_importance.html>`_
    """
    if not hasattr(self, 'standard_scaler'):
        try:
            # BUGFIX: ``np.float`` was removed in NumPy 1.24; the builtin
            # ``float`` is the exact equivalent (np.float was its alias).
            self.data = self.data.astype(float, copy=False)
        except ValueError as err:
            logger.error("Standardization of data failed due to presence of non-numeric features")
            raise ValueError("standardization of data failed due to presence of non-numeric features") from err
        self.standard_scaler = StandardScaler(copy=False)
        self.data = self.standard_scaler.fit_transform(self.data)
    else:
        logger.info("Data already in Standard Normal Form")
def destandardize_data(self):
    """Scale back and shift features to their original representation
    (i.e., as prior to Standardization).

    Note:
        Data should not have been modified post standardization for
        de-standardisation to return an accurate original representation.
    """
    # Guard clause: nothing to undo unless standardize_data() ran before.
    if not hasattr(self, 'standard_scaler'):
        return
    self.data = self.standard_scaler.inverse_transform(self.data)
    # Drop the scaler so a later standardize_data() fits a fresh one.
    del self.standard_scaler
def random_stratified_sampling(self, location, bag_name, sample_size, n_iterations=10, file_prefix=None):
    """Performs repeated Stratified Random Sampling of data with 'replacement across samples drawn' and dumps the sampled data into files

    Parameters:
        location (str): Location to dump the sampled data bags.
        bag_name (str): Name of (to be created) folder that acts as a container for the sampled data bags.
        sample_size (int, float): Number of data samples in every bag. { ``int`` (range: 1 to n_samples):Absolute number of samples per bag, ``float`` (range: (0, 1] ):Number of samples per bag represented as a fraction of the total number of samples}
        n_iterations (int, default=10): Number of bags to be formed.
        file_prefix (str, default=None): Prefix for bag filenames. Bag filenames are of the form '[<file_prefix>_]bag<bag number>.p'.

    Returns:
        dict: with keys 'bags_filenames' (list of absolute paths of the
        written bag files) and 'metadata_filename' (absolute path of the
        pickled metadata file).

    Raises:
        ValueError: If the dataset is not a classification dataset, or if
            ``sample_size`` is out of range.
        FileNotFoundError: If ``location`` cannot be resolved.
        OSError: If the bag folder cannot be created / written.

    Note:
        * Each sampled data bag file is an pickled dictionary of 'data' and 'target' attributes.
        * Each bag folder contains a file 'metadata.p' which is a pickled dictionary of metadata information about the original dataset (bagging timestamp, class distribution, n_samples, n_features, columns (features) information).
        * The metadata 'timestamp' attribute (time of bagging in seconds since the Epoch as a float) can uniquely identify bags (in most cases).
        * Side effect: the process working directory is temporarily changed
          while the bags are written, then restored.
    """
    # Ensure that the dataset is a classification dataset
    if not ( hasattr(self, 'classes_') and self.classes_ is not None ):
        logger.error("Cannot perform random stratified sampling on the non-classification dataset. If the dataset is indeed a classification dataset, ensure that you encode target column when reading.")
        raise ValueError("cannot perform random stratified sampling on the non-classification dataset")
    # Remember the current working directory so it can be restored at the end.
    cwd = os.getcwd()
    location = os.path.abspath(os.path.expanduser(location))
    try:
        os.chdir(location)
    except FileNotFoundError:
        logger.error("Failed to resolve location for dumping sampled data files: '%s'", location)
        raise FileNotFoundError("failed to resolve location for dumping sampled data files")
    # Create the container folder for the bags and move into it.
    try:
        os.mkdir(bag_name)
        os.chdir(bag_name)
    except OSError as err:
        logger.error("Unable to write sampled data bags to disk : %s", err)
        raise OSError("unable to write sampled data bags to disk")
    # Resolving SIZE of bagged samples as a fraction of n_samples
    if isinstance(sample_size, int) and (sample_size>0 and sample_size<=self.n_samples):
        sample_size = sample_size/self.n_samples
    elif isinstance(sample_size, float) and (sample_size>0.0 and sample_size<=1.0):
        pass
    else:
        logger.error("Invalid sampling size encountered")
        raise ValueError("invalid sampling size encountered")
    # Resolving FILE PREFIX for bagged samples
    if file_prefix is None:
        file_prefix = ''
    else:
        file_prefix = file_prefix + '_'
    # Compute the indices of samples for each class, and how many samples
    # to draw per class so each bag preserves the class distribution.
    classes_samples_indices = list(map(lambda class_: np.where(self.target == class_)[0], range(len(self.classes_))))
    classes_sampled_data_cnts = list(map(lambda class_samples_indices: round(sample_size*len(class_samples_indices)), classes_samples_indices))
    def generate_sampled_data_indices(classes_samples_indices, classes_sampled_data_cnts):
        # Choose sample indices for each class (without replacement within
        # a bag; replacement happens only across bags)
        classes_choosen_indices = list(map(lambda x: list(np.random.choice(x[0], size=x[1], replace=False)), zip(classes_samples_indices, classes_sampled_data_cnts)))
        # combine indices of samples choosen for each class to generate indices for sampled data
        sampled_data_choosen_indices = reduce(lambda a,b : a+b, classes_choosen_indices)
        # shuffle the choosen indices
        shuffle(sampled_data_choosen_indices)
        return sampled_data_choosen_indices
    bags_filenames = []
    # Repeated Sampling of data: one pickled bag file per iteration.
    for iteration in range(n_iterations):
        sampled_data = dict.fromkeys(['data', 'target'])
        choosen_indices = generate_sampled_data_indices(classes_samples_indices, classes_sampled_data_cnts)
        sampled_data['data'], sampled_data['target'] = self.data[choosen_indices], self.target[choosen_indices] if self.target is not None else None
        bag_filename = os.path.abspath(file_prefix + "bag"+str(iteration+1)+".p")
        # mode "xb": fail rather than overwrite an existing bag file
        pickle.dump(sampled_data, open(bag_filename, "xb"))
        bags_filenames.append(bag_filename)
        del sampled_data
    # Metadata of data
    metadata = {
        'timestamp':time(), # Uniquely identifies baggings (with probability ~= 1)
        'classes':label_cnt_dict(self.target) if self.target is not None else None,
        'n_samples':self.n_samples, # Not inferrable from classes, if target=None
        'n_features':self.n_features,
        'column_names':self.columns_,
        'column_categories':self.columns_categories_ if hasattr(self, 'columns_categories_') else None,
        'stratified_sampling': True
    }
    metadata_filename = os.path.abspath("metadata.p")
    pickle.dump(metadata, open(metadata_filename, "xb"))
    # Change the directory back to the original working directory
    os.chdir(cwd)
    return {
        'bags_filenames': bags_filenames,
        'metadata_filename': metadata_filename
    }
def perform_kmeans_clustering(self, n_clusters='n_classes', **kargs):
"""Perform K-Means Clustering on the data
n_clusters ({int, 'n_classes'}, default='n_classes'): number (``int``) of clusters in the data. ``n_classes`` implies uses number of classes in data as number of clusters.
**kargs: Other Keyword | |
empty mask for balancing training set
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans1 = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans1 = zero_center(scans1)
#masks = np.copy(masks1)
## if needed do the resize here .... (img_rows and img_cols are global values defined externally)
#img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
#img_cols = scans.shape[2]
# scans already are in the tensor mode with 3 rgb elements ....
#scans1 = scans ## no change
#scans1=np.zeros((scans.shape[0],3,img_rows,img_cols))
#for i in range(scans.shape[0]):
# img=scans[i,:,:]
# ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
# scans1[i,0,:,:]=img
if use_3d_mask:
done = 1 # nothing to do
else:
masks = np.copy(masks1)
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
#scans = [scans[j]]
#masks = [masks[j]]
def convert_scans_and_masks_xd3(scans, masks, only_with_nudels, dim=3, crop=16, blanks_per_axis = 4, add_blank_spacing_size=0, add_blank_layers = 0):
# reuse scan to reduce memory footprint
dim_orig = dim
#add_blank_spacing_size = 0 # dim *4 # dim # was dim ### set to 0 for version_16 #### initial trial (should perhaps be just dim ....), if 0 - do not add ...
#add_blank_layers = 0 # was 4
#skip = dim // 2 # old
skip_low = dim // 2 # dim shoudl be uneven -- it is recalculated anyway to this end
skip_high = dim -skip_low - 1
do_not_allow_even_dim = False ## now we allow odd numbers ...
if do_not_allow_even_dim:
dim = 2 * skip_low + 1
skip_low = dim // 2
skip_high = dim -skip_low - 1
if dim != dim_orig:
print ("convert_scans_and_masks_x: Dim must be uneven, corrected from .. to:", dim_orig, dim)
work = [] # 3 layers
#scan = scans[0]
for scan in scans: ##TEMP
tmp = []
#i = 1
#for i in range(1, scan.shape[0]-1, 3): # SKIP EVERY 3
for i in range(skip_low, scan.shape[0]-skip_high):
#img1 = scan[i-1]
#img2 = scan[i]
#img3 = scan[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(scan[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
#flattened1 = [val for sublist in work for val in sublist ] # NO skipping as we have already cut the first and the last layer
#scans1 = np.stack(flattened1)
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
##blanks_per_axis = 6 # cover all slice
##crop = 44
dxrange = scans[0].shape[-1] - 2 * crop
dyrange = scans[0].shape[-2] - 2 * crop
#dx = (img_cols - 2 * crop) // (blanks_per_axis)
#dy = (img_rows - 2 * crop) // (blanks_per_axis)
#dx = dxrange // (blanks_per_axis+1)
#dy = dyrange // (blanks_per_axis+1)
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
if add_blank_spacing_size > 0:
for mask in masks:
if (np.min(mask) < 0):
## we have a blank
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for i in range(skip_low+(add_blank_spacing_size//2), mask.shape[0]-skip_high, add_blank_spacing_size):
mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
if add_blank_layers > 0:
for mask in masks:
if (np.min(mask) < 0):
dzrange = mask.shape[0]-dim
## we have a blank
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for k in range(add_blank_layers):
i = np.random.randint(0, dzrange) + skip_low
#print ("dz position, random, mask.shape ", i, mask.shape)
mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
#mask = masks[0]
add_random_blanks_in_blanks = False ## NO need for the extra random blank pixels now, 20170327
if add_random_blanks_in_blanks:
for mask in masks:
if (np.min(mask) < 0):
## we have a blank
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
#zlow = skip_low
#zhigh = mask.shape[0]-skip_high
pix_sum = np.sum(mask, axis=(1,2))
idx_blanks = np.min(mask, axis=(1,2)) < 0 ## don't use it - let's vary the position across the space
for iz in range(mask.shape[0]):
if (np.min(mask[iz])) < 0:
for ix in range(blanks_per_axis):
#xpos = crop + (ix)*dx + dx //2
for iy in range(blanks_per_axis):
#ypos = crop + (iy)*dy + dy //2
xpos = crop + np.random.randint(0,dxrange)
ypos = crop + np.random.randint(0,dyrange)
#print (iz, xpos, ypos)
#mask[idx_blanks, ypos, xpos] = -1 # negative pixel to be picked up below and corrected back to none
mask[iz, ypos, xpos] = -1
use_3d_mask = True ##
if use_3d_mask:
work = [] # 3 layers
#mask = masks[0]
for mask in masks:
tmp = []
#i = 0
for i in range(skip_low, mask.shape[0]-skip_high):
#img1 = mask[i-1]
#img2 = mask[i]
#img3 = mask[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(mask[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]] ) # skip one element at the beginning and at the end
#masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
if use_3d_mask:
#nudels_pix_count = np.sum(np.abs(masks1[:,skip_low]), axis = (1,2)) ## CHANGE IT WED - use ANY i.e. remove skip_low abd added for the potential blanks; modified that the centre mask be mask!
nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2,3)) ## USE ANY March 1; CHANGE IT WED - use ANY i.e. remove skip_low abd added for the potential blanks; modified that the centre mask be mask!
else:
nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2))
scans1 = scans1[nudels_pix_count != 0]
masks1 = masks1[nudels_pix_count != 0]
#blank_mask_factor = np.sign(nudels_pix_count)[nudels_pix_count != 0]
#sum(blank_mask_factor)
#blank_mask_factor[blank_mask_factor <0] = 0
#mask1_orig = masks1
#np.sum(mask1_orig)
#np.min(masks1)
#masks1 = masks1[nudels_pix_count != 0] * blank_mask_factor # 493 -- circa 5 % with nudeles oters without; 232 if we skip over every 3 layers and use a 3d mask
#masks1[masks1 < 0] = 0 # !!!!!!!!!!!!!! in GRID version do NOT do that - do it in the key version 493 -- circa 5 % with nudeles oters without; 232 if we skip over every 3 layers and use a 3d mask
#masks1[nudels_pix_count < 0] = 0 # making empty mask for balancing training set
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans1 = normalize(scans1)
### after this scans1 becomes float64 ....
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans1 = zero_center(scans1)
#masks = np.copy(masks1)
scans1 = scans1.astype(np.float32) # | |
import requests
from heltour import settings
from collections import namedtuple
from . import slackapi
from django.core.cache import cache
from django.core.urlresolvers import reverse
from heltour.tournament.models import *
from django.db.models.signals import post_save
from django.dispatch.dispatcher import receiver
import logging
from heltour.tournament import lichessapi
import time
import sys
logger = logging.getLogger(__name__)
def _send_notification(notification_type, league, text):
    """Broadcast `text` to every Slack channel the league has configured for
    `notification_type`.

    Does nothing when the league has notifications disabled. A failure on one
    channel is logged and does not prevent delivery to the remaining channels.
    """
    if not league.enable_notifications:
        return
    for ln in league.leaguechannel_set.filter(type=notification_type, send_messages=True):
        try:
            slackapi.send_message(ln.slack_channel, text)
        except Exception:
            # logger.exception records the full traceback; the previous
            # logger.error(sys.exc_info()) only logged the bare exc_info tuple.
            logger.exception('Error sending notification to %s', ln.slack_channel)
def _message_user(league, username, text):
    """Send a direct Slack message to a single user, if league notifications are on."""
    if not league.enable_notifications:
        return
    slackapi.send_message('@%s' % username, text)
def _message_multiple_users(league, usernames, text):
    """Send one Slack message to several users via a '+'-joined recipient list."""
    if not league.enable_notifications:
        return
    recipients = '+'.join('@%s' % name for name in usernames)
    slackapi.send_message(recipients, text)
def _lichess_message(league, username, subject, text):
    """Send a lichess mail to `username`, if league notifications are enabled."""
    if not league.enable_notifications:
        return
    lichessapi.send_mail(username, subject, text)
@receiver(signals.league_comment, dispatch_uid='heltour.tournament.notify')
def league_comment(league, comment, **kwargs):
    """Forward a user's admin comment to the mod channel, with an admin link."""
    # System-generated comments have no user; don't forward those.
    if comment.user is None:
        return
    # Some models are excluded because their comments are noise.
    excluded_models = ('gamenomination',)
    if comment.content_type.model in excluded_models:
        return
    target = comment.content_object
    change_view = 'admin:%s_%s_change' % (target._meta.app_label, target._meta.model_name)
    admin_url = abs_url(reverse(change_view, args=[target.pk]))
    message = '%s commented on %s <%s|%s>:\n>>> %s' % (
        comment.user_name, comment.content_type.name, admin_url, target, comment.comment)
    if league.get_leaguesetting().notify_for_comments:
        _send_notification('mod', league, message)
@receiver(post_save, sender=Registration, dispatch_uid='heltour.tournament.notify')
def registration_saved(instance, created, **kwargs):
    """Notify mods of a newly created registration, with review/list links."""
    if not created:
        return
    league = instance.season.league
    reg_url = abs_url(
        reverse('admin:review_registration', args=[instance.pk])
        + '?_changelist_filters=status__exact%3Dpending%26season__id__exact%3D'
        + str(instance.season.pk))
    list_url = abs_url(
        reverse('admin:tournament_registration_changelist')
        + '?status__exact=pending&season__id__exact=' + str(instance.season.pk))
    pending_count = instance.season.registration_set.filter(
        status='pending', season=instance.season).count()
    message = '@%s (%s) has <%s|registered> for %s. <%s|%d pending>' % (
        instance.lichess_username, instance.classical_rating, reg_url,
        league.name, list_url, pending_count)
    pre_season = instance.season.start_date and timezone.now() < instance.season.start_date
    setting = league.get_leaguesetting()
    # Pre-season and in-season registrations are gated by separate settings.
    should_notify = (setting.notify_for_pre_season_registrations if pre_season
                     else setting.notify_for_registrations)
    if should_notify:
        _send_notification('mod', league, message)
@receiver(post_save, sender=PlayerLateRegistration, dispatch_uid='heltour.tournament.notify')
def latereg_saved(instance, created, **kwargs):
    """Notify mods when a player is late-registered for a round."""
    if not created:
        return
    league = instance.round.season.league
    manage_url = abs_url(reverse('admin:manage_players', args=[instance.round.season.pk]))
    text = '@%s <%s|added> for round %d' % (
        instance.player, manage_url, instance.round.number)
    if league.get_leaguesetting().notify_for_latereg_and_withdraw:
        _send_notification('mod', league, text)
@receiver(post_save, sender=PlayerWithdrawal, dispatch_uid='heltour.tournament.notify')
def withdrawal_saved(instance, created, **kwargs):
    """Notify mods when a player withdraws from a round."""
    if not created:
        return
    league = instance.round.season.league
    manage_url = abs_url(reverse('admin:manage_players', args=[instance.round.season.pk]))
    text = '@%s <%s|withdrawn> for round %d' % (
        instance.player, manage_url, instance.round.number)
    if league.get_leaguesetting().notify_for_latereg_and_withdraw:
        _send_notification('mod', league, text)
@receiver(signals.pairing_forfeit_changed, dispatch_uid='heltour.tournament.notify')
def pairing_forfeit_changed(instance, **kwargs):
    """Notify mods when a pairing's (forfeit) result changes."""
    round_ = instance.get_round()
    if round_ is None:
        return
    league = round_.season.league

    # Missing players are displayed as '?'.
    def display(player):
        return player.lichess_username.lower() if player is not None else '?'

    message = '@%s vs @%s %s' % (
        display(instance.white), display(instance.black), instance.result or '*')
    if league.get_leaguesetting().notify_for_forfeits:
        _send_notification('mod', league, message)
@receiver(signals.player_account_status_changed, dispatch_uid='heltour.tournament.notify')
def player_account_status_changed(instance, old_value, new_value, **kwargs):
    """Notify the mod channel of every affected league when a player's lichess
    account status changes (e.g. marked engine/booster, or closed)."""
    if old_value != 'normal' and new_value == 'closed':
        # Don't notify if engine/booster accounts are closed
        return
    # Leagues to notify: any league where the player has a season entry or a
    # pending registration.
    season_players = instance.seasonplayer_set.select_related('season__league').nocache()
    pending_regs = Registration.objects.filter(lichess_username__iexact=instance.lichess_username, status='pending') \
        .select_related('season__league').nocache()
    league_set = {sp.season.league for sp in season_players} | {reg.season.league for reg in pending_regs}
    for league in league_set:
        # Prefer a season-scoped profile link when an active season exists.
        latest_season = league.season_set.filter(is_active=True).order_by('-start_date', '-id').first()
        lichess_profile_url = 'https://lichess.org/@/%s' % instance.lichess_username
        if latest_season is not None:
            player_profile_url = abs_url(reverse('by_league:by_season:player_profile', args=[league.tag, latest_season.tag, instance.lichess_username]))
        else:
            player_profile_url = abs_url(reverse('by_league:player_profile', args=[league.tag, instance.lichess_username]))
        # Wording differs for the first transition away from 'normal' versus
        # any later status change.
        if old_value == 'normal':
            message = '@%s marked as %s on <%s|lichess>. <%s|Player profile>' % (_slack_user(instance), new_value, lichess_profile_url, player_profile_url)
        else:
            message = '@%s <%s|lichess> account status changed from %s to %s. <%s|Player profile>' % (_slack_user(instance), lichess_profile_url, old_value, new_value, player_profile_url)
        _send_notification('mod', league, message)
@receiver(signals.notify_mods_unscheduled, dispatch_uid='heltour.tournament.notify')
def notify_mods_unscheduled(round_, **kwargs):
    """Report to mods which playable pairings still lack a scheduled time."""
    unscheduled = round_.pairings.filter(result='', scheduled_time=None) \
        .exclude(white=None).exclude(black=None).nocache()
    if len(unscheduled) == 0:
        message = '%s - All games are scheduled.' % round_
    else:
        games = ', '.join('@%s vs @%s' % (p.white.lichess_username.lower(),
                                          p.black.lichess_username.lower())
                          for p in unscheduled)
        message = '%s - The following games are unscheduled: %s' % (round_, games)
    _send_notification('mod', round_.season.league, message)
@receiver(signals.notify_mods_no_result, dispatch_uid='heltour.tournament.notify')
def notify_mods_no_result(round_, **kwargs):
    """Report to mods which playable pairings are still missing a result."""
    missing = round_.pairings.filter(result='') \
        .exclude(white=None).exclude(black=None).nocache()
    if len(missing) == 0:
        message = '%s - All games have results.' % round_
    else:
        games = ', '.join('@%s vs @%s' % (p.white.lichess_username.lower(),
                                          p.black.lichess_username.lower())
                          for p in missing)
        message = '%s - The following games are missing results: %s' % (round_, games)
    _send_notification('mod', round_.season.league, message)
@receiver(signals.notify_mods_pending_regs, dispatch_uid='heltour.tournament.notify')
def notify_mods_pending_regs(round_, **kwargs):
    """Remind mods of any pending registrations for this round's season."""
    season = round_.season
    pending_count = season.registration_set.filter(status='pending', season=season).count()
    if pending_count == 0:
        return
    list_url = abs_url(reverse('admin:tournament_registration_changelist')
                       + '?status__exact=pending&season__id__exact=' + str(season.pk))
    _send_notification('mod', season.league,
                       '<%s|%d pending registrations>' % (list_url, pending_count))
@receiver(signals.notify_mods_pairings_published, dispatch_uid='heltour.tournament.notify')
def notify_mods_pairings_published(round_, **kwargs):
    """Tell mods that this round's pairings are now public."""
    _send_notification('mod', round_.season.league, '%s pairings published.' % round_)
@receiver(signals.notify_mods_round_start_done, dispatch_uid='heltour.tournament.notify')
def notify_mods_round_start_done(round_, **kwargs):
    """Tell mods that the round-start notifications have all been sent."""
    _send_notification('mod', round_.season.league, '%s notifications sent.' % round_)
@receiver(signals.pairings_generated, dispatch_uid='heltour.tournament.notify')
def pairings_generated(round_, **kwargs):
    """Point mods at the review page for a freshly generated set of pairings."""
    review_url = abs_url(reverse('admin:review_pairings', args=[round_.pk]))
    _send_notification('mod', round_.season.league,
                       'Pairings generated for round %d. <%s|Review>' % (round_.number, review_url))
@receiver(signals.no_round_transition, dispatch_uid='heltour.tournament.notify')
def no_round_transition(season, warnings, **kwargs):
    """Report the reasons a round transition could not start, one per line."""
    details = ''.join('\n' + text for text, _ in warnings)
    _send_notification('no_transition', season.league,
                       'Can\'t start the round transition.' + details)
@receiver(signals.starting_round_transition, dispatch_uid='heltour.tournament.notify')
def starting_round_transition(season, msg_list, **kwargs):
    """Announce the automatic round transition, including its log lines."""
    details = ''.join('\n' + text for text, _ in msg_list)
    _send_notification('mod', season.league,
                       'Starting automatic round transition...' + details)
@receiver(signals.publish_scheduled, dispatch_uid='heltour.tournament.notify')
def publish_scheduled(round_id, eta, **kwargs):
    """Announce how many minutes remain until a round's pairings are published."""
    round_ = Round.objects.get(id=round_id)
    minutes_left = (eta - timezone.now()).total_seconds() / 60
    _send_notification('mod', round_.season.league,
                       '%s pairings will be published in %d minutes.' % (round_, minutes_left))
@receiver(signals.alternate_search_started, dispatch_uid='heltour.tournament.notify')
def alternate_search_started(season, team, board_number, round_, **kwargs):
    """Announce the start of an alternate search: DM the replaced player, DM
    their opponent (if still available), and post to the captains channel."""
    league = season.league
    team_pairing = team.get_teampairing(round_)
    if team_pairing is not None:
        pairing = team_pairing.teamplayerpairing_set.filter(board_number=board_number).exclude(white=None).exclude(black=None).nocache().first()
    else:
        pairing = None
    team_member = team.teammember_set.filter(board_number=board_number).first()
    # Resolve the player being replaced: prefer the actual pairing, then the
    # roster entry for the board; may be None if neither exists.
    if pairing is not None:
        player = pairing.white if pairing.white_team() == team else pairing.black
    elif team_member is not None:
        player = team_member.player
    else:
        player = None
    # Send a DM to the player being replaced
    if player is not None:
        message_to_replaced_player = '@%s: I am searching for an alternate to replace you for round %d, since you have been marked as unavailable. If this is a mistake, please contact a mod as soon as possible.' \
                                     % (_slack_user(player), round_.number)
        _message_user(league, _slack_user(player), message_to_replaced_player)
    # Send a DM to the opponent
    if pairing is not None:
        # The opponent is whichever side of the pairing is NOT this team's.
        opponent = pairing.black if pairing.white_team() == team else pairing.white
        if opponent.is_available_for(round_):
            message_to_opponent = '@%s: Your opponent, @%s, has been marked as unavailable. I am searching for an alternate for you to play, please be patient.' \
                                  % (_slack_user(opponent), _slack_user(player))
            _message_user(league, _slack_user(opponent), message_to_opponent)
    # Broadcast a message to both team captains
    message = '%sI have started searching for an alternate for <@%s> on board %d of "%s" in round %d.' \
              % (_captains_ping(team, round_), _slack_user(player), board_number, team.name, round_.number)
    _send_notification('captains', league, message)
@receiver(signals.alternate_search_reminder, dispatch_uid='heltour.tournament.notify')
def alternate_search_reminder(season, team, board_number, round_, **kwargs):
    """Remind the captains channel that an alternate search is still running."""
    league = season.league
    team_pairing = team.get_teampairing(round_)
    if team_pairing is None:
        return
    pairing = team_pairing.teamplayerpairing_set.filter(board_number=board_number) \
        .exclude(white=None).exclude(black=None).nocache().first()
    if pairing is None:
        return
    # Identify which side of the pairing belongs to this team.
    replaced = pairing.white if pairing.white_team() == team else pairing.black
    message = '%sI am still searching for an alternate for <@%s> on board %d of "%s" in round %d.' \
              % (_captains_ping(team, round_), _slack_user(replaced), board_number, team.name, round_.number)
    _send_notification('captains', league, message)
@receiver(signals.alternate_search_all_contacted, dispatch_uid='heltour.tournament.notify')
def alternate_search_all_contacted(season, team, board_number, round_, number_contacted, **kwargs):
    """Tell the captains every eligible alternate has already been messaged."""
    text = '%sI have messaged every eligible alternate for board %d of "%s". Still waiting for responses from %d.' \
           % (_captains_ping(team, round_), board_number, team.name, number_contacted)
    _send_notification('captains', season.league, text)
@receiver(signals.alternate_search_failed, dispatch_uid='heltour.tournament.notify')
def alternate_search_failed(season, team, board_number, round_, **kwargs):
    """Tell the captains the alternate search ended without finding anyone."""
    text = '%sSorry, I could not find an alternate for board %d of "%s" in round %d.' \
           % (_captains_ping(team, round_), board_number, team.name, round_.number)
    _send_notification('captains', season.league, text)
@receiver(signals.alternate_assigned, dispatch_uid='heltour.tournament.notify')
def alternate_assigned(season, alt_assignment, **kwargs):
    """Notify the assigned alternate and their opponent (via
    `_notify_alternate_and_opponent`), then broadcast the assignment to the
    team captains channel.
    """
    league = season.league
    aa = alt_assignment
    opponent = _notify_alternate_and_opponent(league, aa)
    if opponent is not None:
        opponent_notified = ' Their opponent, @%s, has been notified.' % _slack_user(opponent)
    else:
        opponent_notified = ''
    # Send a message to the captains
    if aa.player == aa.replaced_player:
        # BUG FIX: this format string has 6 placeholders but the original code
        # supplied only 5 arguments (aa.round.number was missing), which raised
        # "TypeError: not enough arguments for format string" on this branch.
        message = '%sI have reassigned <@%s> to play on board %d of "%s" for round %d.%s' \
                  % (_captains_ping(aa.team, aa.round), _slack_user(aa.player), aa.board_number,
                     aa.team.name, aa.round.number, opponent_notified)
    else:
        message = '%sI have assigned <@%s> to play on board %d of "%s" in place of <@%s> for round %d.%s' \
                  % (_captains_ping(aa.team, aa.round), _slack_user(aa.player), aa.board_number,
                     aa.team.name, _slack_user(aa.replaced_player), aa.round.number, opponent_notified)
    _send_notification('captains', league, message)
def _notify_alternate_and_opponent(league, aa):
captain = aa.team.captain()
if captain is not None:
captain_text = ' The team captain is <@%s>.' % _slack_user(captain)
else:
captain_text = ''
team_pairing = aa.team.get_teampairing(aa.round)
if team_pairing is None:
# Round hasn't started yet
message_to_alternate = '@%s: You will be playing on board %d of "%s" for round %d.%s' \
% (_slack_user(aa.player), aa.board_number, aa.team.name, aa.round.number, captain_text)
_message_user(league, _slack_user(aa.player), message_to_alternate)
return None
pairing = team_pairing.teamplayerpairing_set.filter(board_number=aa.board_number).exclude(white=None).exclude(black=None).nocache().first()
if pairing is None:
# No pairing yet for some reason
message_to_alternate = '@%s: You will be playing on board %d of "%s" for round %d.%s' \
% (_slack_user(aa.player), aa.board_number, aa.team.name, aa.round.number, captain_text)
_message_user(league, _slack_user(aa.player), message_to_alternate)
return None
opponent = pairing.black if pairing.white_team() == aa.team else pairing.white
if not opponent.is_available_for(aa.round):
# Still looking for | |
0.87, 0.92))
fd(1)
color((0.18, 0.27, 0.45))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.3, 0.35, 0.38))
fd(1)
color((0.29, 0.2, 0.43))
fd(1)
color((0.88, 0.14, 0.23))
fd(16)
color((0.77, 0.15, 0.27))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(8)
color((0.94, 0.71, 0.02))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(46)
gt(-128.0,112.5)
fd(44)
color((0.08, 0.22, 0.51))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.75, 0.6, 0.13))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(17)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.15, 0.26, 0.46))
fd(1)
color((0.94, 0.95, 0.97))
fd(1)
color((1.0, 1.0, 1.0))
fd(9)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.7, 0.58, 0.16))
fd(1)
color((0.24, 0.2, 0.45))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(9)
color((0.34, 0.36, 0.36))
fd(1)
color((0.51, 0.18, 0.36))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.84, 0.14, 0.24))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.81, 0.64, 0.09))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.1, 0.23, 0.49))
fd(1)
color((0.82, 0.65, 0.09))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.98, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.85, 0.14, 0.24))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.49, 0.18, 0.36))
fd(1)
color((0.37, 0.39, 0.34))
fd(1)
color((0.98, 0.74, 0.0))
fd(9)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.24, 0.2, 0.45))
fd(1)
color((0.73, 0.59, 0.14))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(9)
color((0.87, 0.89, 0.93))
fd(1)
color((0.17, 0.27, 0.45))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(17)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.75, 0.6, 0.13))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.09, 0.21, 0.51))
fd(1)
color((0.0, 0.0, 0.0))
fd(44)
gt(-128.0,111.5)
fd(43)
color((0.08, 0.21, 0.5))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.6, 0.51, 0.22))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.96, 0.73, 0.01))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(17)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.39, 0.4, 0.33))
fd(1)
color((0.53, 0.6, 0.74))
fd(1)
color((1.0, 1.0, 1.0))
fd(9)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.7, 0.58, 0.16))
fd(1)
color((0.24, 0.2, 0.45))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.11, 0.22, 0.49))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.73, 0.15, 0.28))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.51, 0.46, 0.27))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.49, 0.45, 0.27))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.74, 0.15, 0.27))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.24, 0.2, 0.45))
fd(1)
color((0.73, 0.59, 0.14))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(9)
color((0.47, 0.55, 0.71))
fd(1)
color((0.51, 0.47, 0.26))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.11, 0.23, 0.49))
fd(1)
color((0.88, 0.14, 0.23))
fd(17)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.96, 0.73, 0.01))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.65, 0.55, 0.18))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.98, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.08, 0.21, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(43)
gt(-128.0,110.5)
fd(43)
color((0.09, 0.22, 0.5))
fd(1)
color((0.96, 0.73, 0.01))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.26, 0.32, 0.41))
fd(1)
color((0.12, 0.25, 0.52))
fd(1)
color((1.0, 1.0, 1.0))
fd(2)
color((0.92, 0.93, 0.95))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.87, 0.67, 0.06))
fd(1)
color((0.1, 0.22, 0.49))
fd(1)
color((0.88, 0.14, 0.23))
fd(16)
color((0.85, 0.14, 0.24))
fd(1)
color((0.23, 0.3, 0.42))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(8)
color((0.16, 0.28, 0.54))
fd(1)
color((0.7, 0.58, 0.16))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.29, 0.34, 0.38))
fd(1)
color((0.71, 0.16, 0.29))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.35, 0.19, 0.41))
fd(1)
color((0.09, 0.22, 0.5))
fd(4)
color((0.98, 0.74, 0.0))
fd(3)
color((0.93, 0.71, 0.03))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(4)
color((0.36, 0.19, 0.4))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.67, 0.16, 0.31))
fd(1)
color((0.32, 0.35, 0.37))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.67, 0.56, 0.17))
fd(1)
color((0.17, 0.29, 0.55))
fd(1)
color((1.0, 1.0, 1.0))
fd(8)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.27, 0.33, 0.4))
fd(1)
color((0.78, 0.15, 0.27))
fd(1)
color((0.88, 0.14, 0.23))
fd(16)
color((0.11, 0.22, 0.49))
fd(1)
color((0.86, 0.67, 0.07))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.97, 0.73, 0.01))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.93, 0.94, 0.96))
fd(1)
color((1.0, 1.0, 1.0))
fd(2)
color((0.11, 0.24, 0.51))
fd(1)
color((0.34, 0.36, 0.36))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.96, 0.73, 0.01))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(43)
gt(-128.0,109.5)
fd(43)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(4)
color((0.88, 0.9, 0.93))
fd(1)
color((0.14, 0.25, 0.47))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.62, 0.53, 0.2))
fd(1)
color((0.4, 0.18, 0.39))
fd(1)
color((0.88, 0.14, 0.23))
fd(17)
color((0.14, 0.25, 0.47))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.35, 0.37, 0.35))
fd(1)
color((0.18, 0.29, 0.55))
fd(1)
color((1.0, 1.0, 1.0))
fd(6)
color((0.58, 0.64, 0.77))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(9)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.85, 0.66, 0.07))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(9)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.09, 0.22, 0.5))
fd(1)
color((0.62, 0.67, 0.79))
fd(1)
color((1.0, 1.0, 1.0))
fd(6)
color((0.15, 0.27, 0.53))
fd(1)
color((0.41, 0.41, 0.32))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.19, 0.28, 0.45))
fd(1)
color((0.88, 0.14, 0.23))
fd(17)
color((0.42, 0.18, 0.39))
fd(1)
color((0.6, 0.52, 0.21))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.1, 0.23, 0.49))
fd(1)
color((0.95, 0.95, 0.97))
fd(1)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(43)
gt(-128.0,108.5)
fd(43)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.98, 0.73, 0.0))
fd(1)
color((0.23, 0.34, 0.58))
fd(1)
color((1.0, 1.0, 1.0))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.25, 0.32, 0.41))
fd(1)
color((0.87, 0.14, 0.23))
fd(1)
color((0.88, 0.14, 0.23))
fd(17)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.23, 0.3, 0.42))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.85, 0.88, 0.92))
fd(1)
color((1.0, 1.0, 1.0))
fd(2)
color((0.93, 0.94, 0.96))
fd(1)
color((0.1, 0.23, 0.51))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.1, 0.23, 0.49))
fd(1)
color((0.57, 0.17, 0.33))
fd(1)
color((0.88, 0.14, 0.23))
fd(4)
color((0.11, 0.22, 0.49))
fd(1)
color((0.78, 0.62, 0.11))
fd(1)
color((0.98, 0.74, 0.0))
fd(9)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(8)
color((0.24, 0.2, 0.45))
fd(1)
color((0.57, 0.5, 0.23))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.97, 0.73, 0.01))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(3)
color((0.54, 0.48, 0.25))
fd(1)
color((0.25, 0.2, 0.44))
fd(1)
color((0.88, 0.14, 0.23))
fd(8)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(9)
color((0.76, 0.61, 0.12))
fd(1)
color((0.12, 0.22, 0.49))
fd(1)
color((0.88, 0.14, 0.23))
fd(4)
color((0.55, 0.17, 0.35))
fd(1)
color((0.1, 0.23, 0.49))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.09, 0.22, 0.5))
fd(1)
color((0.1, 0.24, 0.51))
fd(1)
color((0.94, 0.95, 0.96))
fd(1)
color((1.0, 1.0, 1.0))
fd(2)
color((0.84, 0.86, 0.91))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.27, 0.33, 0.4))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(18)
color((0.24, 0.31, 0.42))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(5)
color((0.16, 0.28, 0.54))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(43)
gt(-128.0,107.5)
fd(43)
color((0.09, 0.22, 0.5))
fd(1)
color((0.65, 0.55, 0.19))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(18)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(7)
color((0.94, 0.71, 0.02))
fd(1)
color((0.09, 0.23, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.86, 0.67, 0.07))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.09, 0.22, 0.5))
fd(1)
color((0.33, 0.2, 0.42))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.45, 0.18, 0.38))
fd(1)
color((0.19, 0.28, 0.44))
fd(1)
color((0.98, 0.74, 0.0))
fd(9)
color((0.09, 0.22, 0.5))
fd(1)
color((0.31, 0.2, 0.43))
fd(1)
color((0.88, 0.14, 0.23))
fd(6)
color((0.87, 0.14, 0.23))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.1, 0.23, 0.49))
fd(1)
color((0.78, 0.15, 0.27))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.87, 0.14, 0.23))
fd(1)
color((0.88, 0.14, 0.23))
fd(6)
color((0.28, 0.2, 0.43))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(9)
color((0.17, 0.27, 0.45))
fd(1)
color((0.49, 0.18, 0.36))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.3, 0.2, 0.43))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.84, 0.65, 0.08))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.11, 0.23, 0.49))
fd(1)
color((0.95, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(7)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(18)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.68, 0.56, 0.17))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(43)
gt(-128.0,106.5)
fd(43)
color((0.0, 0.25, 0.5))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.83, 0.86, 0.91))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.11, 0.24, 0.51))
fd(1)
color((0.7, 0.57, 0.16))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(18)
color((0.6, 0.16, 0.33))
fd(1)
color((0.16, 0.26, 0.46))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.3, 0.35, 0.38))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.69, 0.16, 0.29))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.87, 0.14, 0.23))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.16, 0.27, 0.46))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.87, 0.14, 0.23))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.83, 0.14, 0.25))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.91, 0.69, 0.04))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.87, 0.67, 0.07))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(1)
color((0.65, 0.16, 0.31))
fd(1)
color((0.15, 0.25, 0.47))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.9, 0.69, 0.05))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.84, 0.14, 0.25))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.87, 0.14, 0.24))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.18, 0.27, 0.45))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.09, 0.22, 0.5))
fd(1)
color((0.87, 0.14, 0.23))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.68, 0.16, 0.3))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.31, 0.35, 0.37))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.19, 0.28, 0.44))
fd(1)
color((0.55, 0.17, 0.35))
fd(1)
color((0.88, 0.14, 0.23))
fd(18)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.65, 0.55, 0.18))
fd(1)
color((0.11, 0.24, 0.51))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.75, 0.79, 0.87))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.17, 0.17, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(43)
gt(-128.0,105.5)
fd(44)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.09, 0.22, 0.5))
fd(4)
color((0.32, 0.36, 0.37))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(9)
color((0.27, 0.2, 0.44))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.75, 0.15, 0.27))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((0.85, 0.66, 0.07))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.97, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.97, 0.73, 0.01))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.44, 0.18, 0.38))
fd(1)
color((0.88, 0.14, 0.23))
fd(7)
color((0.09, 0.22, 0.5))
fd(1)
color((0.93, 0.71, 0.03))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.94, 0.71, 0.03))
fd(1)
color((0.87, 0.67, 0.07))
fd(1)
color((0.77, 0.62, 0.12))
fd(1)
color((0.98, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.87, 0.67, 0.06))
fd(1)
color((0.09, 0.22, 0.5))
fd(5)
color((0.98, 0.74, 0.0))
fd(3)
color((0.98, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.81, 0.64, 0.1))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(5)
color((0.89, 0.69, 0.05))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.72, 0.58, 0.15))
fd(1)
color((0.82, 0.65, 0.09))
fd(1)
color((0.89, 0.69, 0.05))
fd(1)
color((0.96, 0.73, 0.01))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.91, 0.7, 0.04))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(7)
color((0.42, 0.19, 0.39))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.97, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.48, 0.44, 0.28))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.88, 0.68, 0.06))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.76, 0.15, 0.27))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.25, 0.2, 0.45))
fd(1)
color((0.88, 0.14, 0.23))
fd(9)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.26, 0.32, 0.4))
fd(1)
color((0.09, 0.22, 0.5))
fd(4)
color((0.98, 0.74, 0.0))
fd(2)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(44)
gt(-128.0,104.5)
fd(44)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.96, 0.72, 0.02))
fd(1)
color((0.91, 0.69, 0.04))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(7)
color((0.62, 0.16, 0.32))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.6, 0.52, 0.22))
fd(1)
color((0.98, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.93, 0.71, 0.03))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.1, 0.22, 0.49))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((0.46, 0.44, 0.29))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.96, 0.73, 0.01))
fd(1)
color((0.64, 0.54, 0.19))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.1, 0.23, 0.49))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.94, 0.71, 0.02))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.78, 0.15, 0.27))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.11, 0.22, 0.49))
fd(1)
color((0.42, 0.41, 0.32))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(3)
color((0.34, 0.19, 0.42))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(7)
color((0.95, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((0.63, 0.16, 0.32))
fd(1)
color((0.88, 0.14, 0.23))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.96, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.95, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.69, 0.57, 0.16))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.22, 0.21, 0.45))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.18, 0.28, 0.45))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.38, 0.39, 0.33))
fd(1)
color((0.11, 0.22, 0.49))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.76, 0.15, 0.27))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.95, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.69, 0.56, 0.16))
fd(1)
color((0.95, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.5, 0.46, 0.27))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.87, 0.14, 0.23))
fd(1)
color((0.88, 0.14, 0.23))
fd(4)
color((0.11, 0.22, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.93, 0.71, 0.03))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.98, 0.73, 0.0))
fd(1)
color((0.64, 0.54, 0.19))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.56, 0.17, 0.34))
fd(1)
color((0.88, 0.14, 0.23))
fd(7)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.91, 0.7, 0.04))
fd(1)
color((0.96, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(44)
gt(-128.0,103.5)
fd(43)
color((0.11, 0.21, 0.5))
fd(1)
color((0.11, 0.24, 0.49))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(6)
color((0.53, 0.17, 0.35))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.62, 0.53, 0.2))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.29, 0.2, 0.43))
fd(1)
color((0.09, 0.22, 0.5))
fd(5)
color((0.78, 0.62, 0.11))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.7, 0.58, 0.16))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.56, 0.49, 0.24))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.15, 0.22, 0.48))
fd(1)
color((0.87, 0.14, 0.23))
fd(1)
color((0.88, 0.14, 0.23))
fd(1)
color((0.84, 0.14, 0.24))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.43, 0.42, 0.31))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.33, 0.36, 0.37))
fd(1)
color((0.31, 0.2, 0.42))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.69, 0.16, 0.29))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.85, 0.66, 0.07))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.15, 0.25, 0.47))
fd(1)
color((0.28, 0.2, 0.44))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.85, 0.14, 0.24))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.9, 0.69, 0.05))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.34, 0.36, 0.36))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.97, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.4, 0.4, 0.33))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.85, 0.14, 0.24))
fd(1)
color((0.88, 0.14, 0.23))
fd(1)
color((0.87, 0.14, 0.23))
fd(1)
color((0.14, 0.22, 0.48))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.09, 0.23, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.83, 0.65, 0.08))
fd(1)
color((0.09, 0.22, 0.5))
fd(5)
color((0.26, 0.2, 0.44))
fd(1)
color((0.88, | |
<gh_stars>0
import random
import math
version = "5.0"
# major - backwards incompatible
# minor - backwards compatible
# TODO:
# nothing?
'''
(dd.m.yyyy)
1.0 (31.5.2021?) - the
2.0 (01.6.2021?) - stuff like A. and A, / changed <'' and >'' into <, and >, / added the t command / added copying and pasting / added the [] thing
2.1 (01.6.2021?) - replaced <, and >, with <. and >. / added the {} and {}' (this version is actually backwards incompatible but nobody cares about this anyways)
2.2 (02.6.2021?) - z12 and Z12
3.0 (02.6.2021?/05.6.2021?) - added &' / added ['] / replaced {}' with {'}
4.0 (05.6.2021~06.6.2021) - changed z12 and Z12 to 1z2 and 1Z2 / you can now put {} and 12 inside other ones (it doesnt always work properly because i suck at coding) / removed @ / added f, F, x, +., -. / changed intro message / added text generator
5.0 (10.6.2021) - changed {} and {' } to {. } and {, } / added [. ]; [, ]; {. }; {, }; m; M; m'/M'
'''
# this is an esolang
# The Greatest And Most Awesomest Programming Language To Ever Exist - GAMAPLEE
'''
Data is stored in an infinite square grid. Each cell holds a number (0 by default)
-- COMMANDS --
# - text until the end of the line is ignored
<>^v - move the pointer in a specific direction
()AV - move the value of the current cell in the shown direction, while current cell's value becomes 0
('/)'/A'/V' - copies the current value in the shown direction
(./)./A./V. - ()AV but moves the value 2 tiles
(,/),/A,/V, - ('/)'/A'/V' but copies the value 2 tiles
+ - adds one to the cell
- - subtracts one from the cell
f - if current value is 0 it gets turned into 1 and gets saved below; if current value isnt 0 its becomes 0 and gets saved below
~ - ends the program
m - moves in a random direction horizontally/vertically
M - moves in a random direction diagonally
m'/M' - moves in any direction horizontally/vertically/diagonally
: - copies current cell's value
; - pastes copied value while keeping it "copied" (if you didnt copy anything, the value is 0)
+. - adds 1 to the copied value
-. - subtracts 1 from the copied value
x - set copied value to 0
F - f but for the copied value
_ - sets current cells value to 0
? - sets current cells value to a random ascii character value (between 33 and 126 inclusive)
[ ] - if current cell's value isnt 0, does whats inside the brackets, otherwise skips them
[' ] - if current cell's value IS 0, does whats inside the brackets, otherwise skips them
[. ] - if copied value isnt 0, does whats inside the brackets, otherwise skips them
[, ] - if copied value IS 0, does whats inside the brackets, otherwise skips them
{ } - repeats whats inside the brackets n times where n is the current cell's value
{' } - same as above but sets current value to 0 before repeating
{. } - repeats whats inside the brackets n times where n is the currently copied value and doesnt delete it (see ":" and ";")
{, } - same as above but sets copied value to 0
(1 IS LIKE LEFT BRACKET 2 IS RIGHT BRACKET) im sorry
1z 2 - loops stuff inside the brackets until "finds" a cell whose value is zero
1Z 2 - loops stuff inside the brackets until CURRENT cell's value is 0
n - gets users input which is a number and stores it in the current cell
N - same as above but stores the ascii value of the number
i - same as n but can also get strings as input - (numbers saved as themselves, symbols as ascii)
I - stores all characters as ascii
o - outputs the current cells value with a new line [opposite of n]
O - o without newline
o'/O' - o without newline but with space
q - outputs the ascii character with the cells value with a new line [opposite of I]
Q - q without newline
q'/Q' - q without newline but with space
c - outputs numbers 0-9 as themselves, everything else as ascii with a new line [opposite of i]
C - c without newline
c'/C' - c without newline but with space
= - [eq] if current value is equal to value above then value below is set to 1, otherwise its 0
=' - [not eq] if current value is NOT equal to value above then value below is set to 1, otherwise its 0
>' - [greater] if current value is greater than value above then value below is 1, otherwise is 0
>. - [greater or eq] if current value is greater or equals the value above then value below is 1, otherwise 0
<' - [less] if current value is less than value above then value below is 1 otherwise 0
<. - [less or eq] if current value is less or equals value above then value below is 1 otherwise 0
& - [and] if current value and value above arent 0, value below becomes 1, otherwise 0
&' - [nor] if current value and value above ARE both 0, value below gets set to 1, otherwise it gets set to 0
| - [or] if current value OR value above arent zero, value below is 1, otherwise 0
|' - [xor] same as above, but exactly one of the two values must be nonzero (1-0 or 0-1; it cant be 1-1)
* - multiplies current cell's value by the value of the cell above and saves it into cell below
/ - divides current value by value of cell above and saves into cell below
-' - subtracts cell above from current cell and saves below
+' - adds current cell to cell above and saves below
^' - raises current cell to the power of cell above and saves below
s - squares current cell and save below
t - raises current cell to the power of itself and saves below
r - finds the nth root of the current cell, where n is the cell above and saves below
R - finds square root of current cell and saves below
% - finds modulo between current cell and cell above and saves below
! - calculates factorial of current cell and saves below
'''
''' cancelled and not coded (probably)
[CANCELLED] r - flips the current nonzero row (09504 -> 04059)
[CANCELLED] R - flips current column ^^^^ ^^^^
-- SPECIAL COMMANDS --
[ALL CANCELLED LOL!!!]
[] - moves the pointer, {} - doesnt
[zDIRECTIONS] - repeats the DIRECTIONS until the current cell value is 0 (EXAMPLE: [z>], [z)], [mz>?A])
[n , N , i , I , o , ODIRECTIONS] - same as n, N, i, I, o, O but inputs/outputs multiple symbols following DIRECTIONS (but what about 3505 - itll output 35)(EXAMPLE:
000
120
if pointer is at 1: [o>] outputs 12, pointer is at 2; [i>] replaces 1,2,0 etc with inputted stuff)
{same as above} - same as above but pointer stays in place
[dDIRECTIONS] - duplicates current cell's value into the cell in DIRECTIONS
{above}
[DDIRECTIONS] - duplicates in DIRECTIONS until value of cell is 0
{above}
-- MAYBE WILL ADD --
[*DIRECTIONS] - multiplies current cell by the cell that you get to if you do DIRECTIONS
[+ , - , / , %DIRECTIONS] - same as above but addition, subtraction, division and modulo
| |
models.Action.action.in_(consts.CLUSTER_SCALE_ACTIONS))
scaling_actions = query.all()
return scaling_actions
def action_get_by_name(context, name, project_safe=True):
    """Retrieve an action by name via the generic name-lookup helper."""
    return query_by_name(context, action_model_query, name,
                         project_safe=project_safe)
def action_get_by_short_id(context, short_id, project_safe=True):
    """Retrieve an action by abbreviated (short) ID."""
    return query_by_short_id(context, action_model_query, models.Action,
                             short_id, project_safe=project_safe)
def action_get_all_by_owner(context, owner_id):
    """Return all actions whose owner field equals *owner_id*."""
    query = action_model_query().filter_by(owner=owner_id)
    return query.all()
def action_get_all_active_by_target(context, target_id, project_safe=True):
    """Return in-flight actions targeting *target_id*.

    "In-flight" here means status READY, WAITING, RUNNING or
    WAITING_LIFECYCLE_COMPLETION.  With *project_safe* the result is
    restricted to the caller's project.
    """
    query = action_model_query()
    if project_safe:
        query = query.filter_by(project=context.project_id)
    query = query.filter_by(target=target_id)
    query = query.filter(
        models.Action.status.in_(
            [consts.ACTION_READY,
             consts.ACTION_WAITING,
             consts.ACTION_RUNNING,
             consts.ACTION_WAITING_LIFECYCLE_COMPLETION]))
    actions = query.all()
    return actions
def action_get_all(context, filters=None, limit=None, marker=None, sort=None,
                   project_safe=True):
    """List actions with optional exact-match filters, sorting and paging."""
    query = action_model_query()
    if project_safe:
        query = query.filter_by(project=context.project_id)
    if filters:
        query = utils.exact_filter(query, models.Action, filters)

    keys, dirs = utils.get_sort_params(sort, consts.ACTION_CREATED_AT)
    if marker:
        # paginate_query expects the marker as a model instance, not an ID
        marker = action_model_query().get(marker)
    return sa_utils.paginate_query(query, models.Action, limit, keys,
                                   marker=marker, sort_dirs=dirs).all()
@retry_on_deadlock
def action_check_status(context, action_id, timestamp):
    """Resolve an action's status from its outstanding dependencies.

    While dependency rows still point at this action it stays WAITING.
    Once the last dependency is gone, a WAITING action is promoted to
    READY with *timestamp* recorded as its end time.  Returns the
    resulting status.
    """
    with session_for_write() as session:
        q = session.query(models.ActionDependency)
        count = q.filter_by(dependent=action_id).count()
        if count > 0:
            return consts.ACTION_WAITING

        action = session.query(models.Action).get(action_id)
        if action.status == consts.ACTION_WAITING:
            action.status = consts.ACTION_READY
            action.status_reason = 'All depended actions completed.'
            action.end_time = timestamp
            action.save(session)

        return action.status
def action_dependency_model_query():
    """Return a query over the ActionDependency table.

    NOTE(review): the query is returned after the read-session context
    has exited — this relies on the session remaining usable afterwards,
    matching the other *_model_query helpers in this module; confirm.
    """
    with session_for_read() as session:
        query = session.query(models.ActionDependency)
    return query
@retry_on_deadlock
def dependency_get_depended(context, action_id):
    """Return IDs of the actions that *action_id* depends on."""
    q = action_dependency_model_query().filter_by(
        dependent=action_id)
    return [d.depended for d in q.all()]
@retry_on_deadlock
def dependency_get_dependents(context, action_id):
    """Return IDs of the actions that depend on *action_id*."""
    q = action_dependency_model_query().filter_by(
        depended=action_id)
    return [d.dependent for d in q.all()]
@retry_on_deadlock
def dependency_add(context, depended, dependent):
    """Record dependency links and put the dependent action(s) on hold.

    Exactly one side may be a list:

    * *depended* is a list: one dependent waits on many actions, and
      that single dependent is marked WAITING;
    * *dependent* is a list (or scalar): one or more actions wait on a
      single depended action, and each dependent is marked WAITING.

    :raises exception.Error: if both arguments are lists.
    """
    if isinstance(depended, list) and isinstance(dependent, list):
        # Fixed grammar of the error message ("not support").
        raise exception.Error(
            'Multiple dependencies between lists not supported')

    with session_for_write() as session:
        if isinstance(depended, list):  # e.g. D depends on A,B,C
            for d in depended:
                r = models.ActionDependency(depended=d, dependent=dependent)
                session.add(r)

            query = session.query(models.Action).with_for_update()
            query = query.filter_by(id=dependent)
            query.update({'status': consts.ACTION_WAITING,
                          'status_reason': 'Waiting for depended actions.'},
                         synchronize_session='fetch')
            return

        # Only dependent can be a list now; normalize it to a list.
        if not isinstance(dependent, list):  # e.g. B,C,D depend on A
            dependents = [dependent]
        else:
            dependents = dependent

        for d in dependents:
            r = models.ActionDependency(depended=depended, dependent=d)
            session.add(r)

        q = session.query(models.Action).with_for_update()
        q = q.filter(models.Action.id.in_(dependents))
        q.update({'status': consts.ACTION_WAITING,
                  'status_reason': 'Waiting for depended actions.'},
                 synchronize_session='fetch')
@retry_on_deadlock
def action_mark_succeeded(context, action_id, timestamp):
    """Mark an action SUCCEEDED and release its dependents.

    Clears the owner, records *timestamp* as the end time, and deletes
    all dependency rows pointing at this action so dependents can be
    promoted to READY.
    """
    with session_for_write() as session:
        query = session.query(models.Action).filter_by(id=action_id)
        values = {
            'owner': None,
            'status': consts.ACTION_SUCCEEDED,
            'status_reason': 'Action completed successfully.',
            'end_time': timestamp,
        }
        query.update(values, synchronize_session=False)

        subquery = session.query(models.ActionDependency).filter_by(
            depended=action_id)
        subquery.delete(synchronize_session='fetch')
@retry_on_deadlock
def action_mark_ready(context, action_id, timestamp):
    """Force an action back to READY after a lifecycle timeout."""
    with session_for_write() as session:
        query = session.query(models.Action).filter_by(id=action_id)
        values = {
            'owner': None,
            'status': consts.ACTION_READY,
            'status_reason': 'Lifecycle timeout.',
            'end_time': timestamp,
        }
        query.update(values, synchronize_session=False)
@retry_on_deadlock
def _mark_failed(action_id, timestamp, reason=None):
    """Mark an action FAILED and cascade the failure to its dependents.

    Deletes the dependency rows referencing this action, then — when
    parent_status_update_needed() says so — recursively fails each
    dependent as well.
    """
    # mark myself as failed
    with session_for_write() as session:
        query = session.query(models.Action).filter_by(id=action_id)
        values = {
            'owner': None,
            'status': consts.ACTION_FAILED,
            'status_reason': (six.text_type(reason) if reason else
                              'Action execution failed'),
            'end_time': timestamp,
        }
        query.update(values, synchronize_session=False)
        action = query.all()

        query = session.query(models.ActionDependency)
        query = query.filter_by(depended=action_id)
        dependents = [d.dependent for d in query.all()]
        query.delete(synchronize_session=False)

    # recurse outside the write session so each level gets its own session
    if parent_status_update_needed(action):
        for d in dependents:
            _mark_failed(d, timestamp)
@retry_on_deadlock
def action_mark_failed(context, action_id, timestamp, reason=None):
    """Public wrapper around _mark_failed (context is unused)."""
    _mark_failed(action_id, timestamp, reason)
@retry_on_deadlock
def _mark_cancelled(session, action_id, timestamp, reason=None):
    """Mark an action CANCELLED and cascade to its dependents.

    Unlike _mark_failed, the caller supplies the write *session*, and
    the recursion reuses it.
    """
    query = session.query(models.Action).filter_by(id=action_id)
    values = {
        'owner': None,
        'status': consts.ACTION_CANCELLED,
        'status_reason': (six.text_type(reason) if reason else
                          'Action execution cancelled'),
        'end_time': timestamp,
    }
    query.update(values, synchronize_session=False)
    action = query.all()

    query = session.query(models.ActionDependency)
    query = query.filter_by(depended=action_id)
    dependents = [d.dependent for d in query.all()]
    query.delete(synchronize_session=False)

    if parent_status_update_needed(action):
        for d in dependents:
            _mark_cancelled(session, d, timestamp)
@retry_on_deadlock
def action_mark_cancelled(context, action_id, timestamp, reason=None):
    """Open a write session and cancel the action tree rooted at *action_id*."""
    with session_for_write() as session:
        _mark_cancelled(session, action_id, timestamp, reason)
@retry_on_deadlock
def action_acquire(context, action_id, owner, timestamp):
    """Try to lock a READY action for *owner*.

    Returns the action on success, or None when it does not exist, is
    already owned by a different worker, or is not in READY status.
    """
    with session_for_write() as session:
        # row lock prevents two workers from acquiring the same action
        action = session.query(models.Action).with_for_update().get(action_id)
        if not action:
            return None
        if action.owner and action.owner != owner:
            return None
        if action.status != consts.ACTION_READY:
            return None

        action.owner = owner
        action.start_time = timestamp
        action.status = consts.ACTION_RUNNING
        action.status_reason = 'The action is being processed.'
        action.save(session)
        return action
@retry_on_deadlock
def action_acquire_random_ready(context, owner, timestamp):
    """Lock one READY, unowned action chosen by SQL random() ordering.

    Returns the acquired action, or None when nothing is READY.
    """
    with session_for_write() as session:
        action = (session.query(models.Action).
                  filter_by(status=consts.ACTION_READY).
                  filter_by(owner=None).
                  order_by(func.random()).
                  with_for_update().first())

        if action:
            action.owner = owner
            action.start_time = timestamp
            action.status = consts.ACTION_RUNNING
            action.status_reason = 'The action is being processed.'
            action.save(session)

        return action
@retry_on_deadlock
def action_acquire_first_ready(context, owner, timestamp):
    """Acquire the first READY, unowned action ordered by creation time.

    Returns the acquired action via action_acquire(), or None when no
    READY action exists.
    """
    with session_for_write() as session:
        # NOTE: the original ``order_by(consts.ACTION_CREATED_AT or
        # func.random())`` always short-circuited on the truthy sort-key
        # constant, so the random fallback was dead code; it is removed.
        action = session.query(models.Action).filter_by(
            status=consts.ACTION_READY).filter_by(
            owner=None).order_by(consts.ACTION_CREATED_AT).first()

        if action:
            return action_acquire(context, action.id, owner, timestamp)
@retry_on_deadlock
def action_abandon(context, action_id, values=None):
    """Abandon an action for other workers to execute again.

    This API is always called with the action locked by the current
    worker. There is no chance the action is gone or stolen by others.

    :param values: optional extra field overrides applied before saving.
    :returns: the updated (now READY, unowned) action.
    """
    with session_for_write() as session:
        action = session.query(models.Action).get(action_id)
        action.owner = None
        action.start_time = None
        action.status = consts.ACTION_READY
        action.status_reason = 'The action was abandoned.'
        if values:
            action.update(values)
        action.save(session)
        return action
@retry_on_deadlock
def action_lock_check(context, action_id, owner=None):
    """Report who holds the lock on an action.

    With *owner* given: returns *owner* when it matches the current
    holder, otherwise the actual holder.  Without *owner*: returns the
    current holder, or None when the action is unowned.

    :raises exception.ResourceNotFound: when the action does not exist.
    """
    action = action_model_query().get(action_id)
    if not action:
        raise exception.ResourceNotFound(type='action', id=action_id)

    holder = action.owner
    if owner:
        return owner if owner == holder else holder
    return holder or None
@retry_on_deadlock
def action_signal(context, action_id, value):
    """Store *value* in the action's control field; no-op if the action is gone."""
    with session_for_write() as session:
        action = session.query(models.Action).get(action_id)
        if not action:
            return

        action.control = value
        action.save(session)
def action_signal_query(context, action_id):
    """Return the control (signal) value of an action, or None if absent."""
    action = action_model_query().get(action_id)
    return action.control if action else None
@retry_on_deadlock
def action_delete(context, action_id):
    """Delete an action; no-op when absent.

    :raises exception.EResourceBusy: when the action is still WAITING,
        RUNNING or SUSPENDED.
    """
    with session_for_write() as session:
        action = session.query(models.Action).get(action_id)
        if not action:
            return

        busy_statuses = (consts.ACTION_WAITING,
                         consts.ACTION_RUNNING,
                         consts.ACTION_SUSPENDED)
        if action.status in busy_statuses:
            raise exception.EResourceBusy(type='action', id=action_id)
        session.delete(action)
@retry_on_deadlock
def action_delete_by_target(context, target, action=None,
                            action_excluded=None, status=None,
                            project_safe=True):
    """Bulk-delete actions on *target*.

    *action* (include list) and *action_excluded* (exclude list) are
    mutually exclusive; *status* narrows by status values.  Returns the
    number of rows deleted, or None when both lists were given.
    """
    if action and action_excluded:
        LOG.warning("action and action_excluded cannot be both specified.")
        return None

    with session_for_write() as session:
        q = session.query(models.Action).filter_by(target=target)
        if project_safe:
            q = q.filter_by(project=context.project_id)

        if action:
            q = q.filter(models.Action.action.in_(action))
        if action_excluded:
            q = q.filter(~models.Action.action.in_(action_excluded))
        if status:
            q = q.filter(models.Action.status.in_(status))
        return q.delete(synchronize_session='fetch')
@retry_on_deadlock
def action_purge(project, granularity='days', age=30):
    """Purge actions older than *age* *granularity* units.

    Dependency rows referencing the purged actions are removed first.
    Returns the number of actions deleted.
    """
    with session_for_write() as session:
        query = session.query(models.Action).with_for_update()
        if project is not None:
            query = query.filter(models.Action.project.in_(project))
        if granularity is not None and age is not None:
            # normalize age to seconds
            if granularity == 'days':
                age = age * 86400
            elif granularity == 'hours':
                age = age * 3600
            elif granularity == 'minutes':
                age = age * 60
            time_line = timeutils.utcnow() - datetime.timedelta(seconds=age)
            query = query.filter(models.Action.created_at < time_line)
        # Get dependants to delete
        for d in query.all():
            q = session.query(models.ActionDependency).filter_by(depended=d.id)
            q.delete(synchronize_session='fetch')
        return query.delete(synchronize_session='fetch')
# Receivers
def receiver_model_query():
    """Return a query over the Receiver table (same pattern as the other
    *_model_query helpers)."""
    with session_for_read() as session:
        query = session.query(models.Receiver)
    return query
@retry_on_deadlock
def receiver_create(context, values):
    """Create and persist a new receiver populated from *values*."""
    with session_for_write() as session:
        receiver = models.Receiver()
        receiver.update(values)
        session.add(receiver)
        return receiver
def receiver_get(context, receiver_id, project_safe=True):
    """Fetch a receiver by ID.

    Returns None when the receiver is missing or, when *project_safe*
    is set, when it belongs to a different project than the caller.
    """
    receiver = receiver_model_query().get(receiver_id)
    if receiver is None:
        return None
    if project_safe and context.project_id != receiver.project:
        return None
    return receiver
def receiver_get_all(context, limit=None, marker=None, filters=None, sort=None,
                     project_safe=True):
    """List receivers with optional exact-match filters, sorting and paging."""
    query = receiver_model_query()
    if project_safe:
        query = query.filter_by(project=context.project_id)

    if filters:
        query = utils.exact_filter(query, models.Receiver, filters)

    keys, dirs = utils.get_sort_params(sort, consts.RECEIVER_NAME)
    if marker:
        # paginate_query expects the marker as a model instance, not an ID
        marker = receiver_model_query().get(marker)
    return sa_utils.paginate_query(query, models.Receiver, limit, keys,
                                   marker=marker, sort_dirs=dirs).all()
def receiver_get_by_name(context, name, project_safe=True):
    """Retrieve a receiver by name via the generic name-lookup helper."""
    return query_by_name(context, receiver_model_query, name,
                         project_safe=project_safe)
def receiver_get_by_short_id(context, short_id, project_safe=True):
    """Retrieve a receiver by abbreviated (short) ID."""
    return query_by_short_id(context, receiver_model_query, models.Receiver,
                             short_id, project_safe=project_safe)
@retry_on_deadlock
def receiver_delete(context, receiver_id):
    """Delete a receiver; silently no-op when it does not exist."""
    with session_for_write() as session:
        receiver = session.query(models.Receiver).get(receiver_id)
        if not receiver:
            return
        session.delete(receiver)
@retry_on_deadlock
def receiver_update(context, receiver_id, values):
    """Update a receiver with *values* and return it.

    :raises exception.ResourceNotFound: when the receiver does not exist.
    """
    with session_for_write() as session:
        receiver = session.query(models.Receiver).get(receiver_id)
        if not receiver:
            raise exception.ResourceNotFound(type='receiver', id=receiver_id)
        receiver.update(values)
        receiver.save(session)
        return receiver
@retry_on_deadlock
def service_create(service_id, host=None, binary=None, topic=None):
    """Register a service record with created/updated timestamps set to now."""
    with session_for_write() as session:
        time_now = timeutils.utcnow(True)
        svc = models.Service(id=service_id, host=host, binary=binary,
                             topic=topic, created_at=time_now,
                             updated_at=time_now)
        session.add(svc)
        return svc
@retry_on_deadlock
def service_update(service_id, values=None):
    """Update a service record, always refreshing its updated_at timestamp.

    Returns the updated record, or None when it does not exist.
    """
    with session_for_write() as session:
        service = session.query(models.Service).get(service_id)
        if not service:
            return

        if values is None:
            values = {}

        values.update({'updated_at': timeutils.utcnow(True)})
        service.update(values)
        service.save(session)
        return service
@retry_on_deadlock
def service_delete(service_id):
    """Remove a service record (no error when it does not exist)."""
    with session_for_write() as session:
        session.query(models.Service).filter_by(
            id=service_id).delete(synchronize_session='fetch')
def service_get(service_id):
    """Return the service record with the given ID, or None."""
    with session_for_read() as session:
        return session.query(models.Service).get(service_id)
def service_get_all():
    """Return all registered service records."""
    with session_for_read() as session:
        return session.query(models.Service).all()
@retry_on_deadlock
def _mark_engine_failed(session, action_id, timestamp, reason=None):
    """Mark an action FAILED after its owning engine died.

    Recurses into the actions this one still waits on first; leaf
    actions have their dependency rows deleted before being failed.
    The caller supplies the write *session*.
    """
    query = session.query(models.ActionDependency)
    # process cluster actions
    d_query = query.filter_by(dependent=action_id)
    dependents = [d.depended for d in d_query.all()]
    if dependents:
        for d in dependents:
            _mark_engine_failed(session, d, timestamp, reason)
    else:
        depended = query.filter_by(depended=action_id)
        depended.delete(synchronize_session=False)

    # TODO(anyone): this will mark all depended actions' status to 'FAILED'
    # even the action belong to other engines and the action is running

    # mark myself as failed
    action = session.query(models.Action).filter_by(id=action_id).first()
    values = {
        'owner': None,
        'status': consts.ACTION_FAILED,
        'status_reason': (six.text_type(reason) if reason else
                          'Action execution failed'),
        'end_time': timestamp,
    }
    action.update(values)
    action.save(session)
@retry_on_deadlock
def dummy_gc(engine_id):
    """Best-effort cleanup of all actions and locks owned by an engine."""
    with session_for_write() as session:
        timestamp = time.time()
        owned_actions = session.query(models.Action).filter_by(owner=engine_id)
        for action in owned_actions.all():
            _mark_engine_failed(session, action.id, timestamp,
                                reason='Engine failure')
            # Release all node locks held by this action.
            (session.query(models.NodeLock).
             filter_by(action_id=action.id).
             delete(synchronize_session=False))
            # Release all cluster locks: try exclusive (-1) first,
            # then shared (1).
            for clock in session.query(models.ClusterLock).all():
                if not _release_cluster_lock(session, clock, action.id, -1):
                    _release_cluster_lock(session, clock, action.id, 1)
@retry_on_deadlock
def gc_by_engine(engine_id):
    """Release locks held by, and fail, every action owned by an engine."""
    with session_for_write() as session:
        timestamp = time.time()
        # Get all actions locked by the engine.
        locked_actions = session.query(models.Action).filter_by(owner=engine_id)
        for a in locked_actions.all():
            # Release all node locks held by this action.
            node_locks = session.query(models.NodeLock).filter_by(action_id=a.id)
            node_locks.delete(synchronize_session=False)
            # Release all cluster locks (exclusive first, then shared).
            for cl in session.query(models.ClusterLock).all():
                if not _release_cluster_lock(session, cl, a.id, -1):
                    _release_cluster_lock(session, cl, a.id, 1)
            # Mark the action failed and release its own lock.
            _mark_failed(a.id, timestamp, reason="Engine failure")
# HealthRegistry
def health_registry_model_query():
    """Build and return a query over all HealthRegistry records.

    NOTE(review): the returned query is bound to a session whose context
    manager has already exited by the time the caller receives it; confirm
    that session_for_read keeps the session usable after the block ends.
    """
    with session_for_read() as session:
        return session.query(models.HealthRegistry)
@retry_on_deadlock
def registry_create(context, | |
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from git_machete import __version__
import datetime
import getopt
import io
import itertools
import os
import re
import shutil
import subprocess
import sys
import textwrap
# Core utils
class MacheteException(Exception):
    """Error raised for user-facing git-machete failures.

    The message is passed through fmt() unless apply_fmt is False.
    """
    def __init__(self, msg, apply_fmt=True):
        formatted = fmt(msg) if apply_fmt else msg
        self.parameter = formatted
    def __str__(self):
        return str(self.parameter)
def excluding(iterable, s):
    """Return a list of the elements of iterable that are not in s."""
    return [elem for elem in iterable if elem not in s]
def flat_map(func, iterable):
    """Apply func (which returns a list) to each element and concatenate the results."""
    result = []
    for elem in iterable:
        result = result + func(elem)
    return result
def map_truthy_only(func, iterable):
    """Map func over iterable, dropping falsy results."""
    return [mapped for mapped in map(func, iterable) if mapped]
def non_empty_lines(s):
    """Split s on newlines and drop the empty lines."""
    return [line for line in s.split("\n") if line]
# Converts a lambda accepting N arguments to a lambda accepting one argument, an N-element tuple.
# Name matching Scala's `tupled` on `FunctionX`.
def tupled(f):
    def apply_to_tuple(tple):
        return f(*tple)
    return apply_to_tuple
# ANSI SGR escape sequences used for terminal styling throughout the script.
ENDC = '\033[0m'
BOLD = '\033[1m'
# `GIT_MACHETE_DIM_AS_GRAY` remains undocumented as for now,
# was just needed for animated gifs to render correctly (`[2m`-style dimmed text was invisible)
DIM = '\033[38;2;128;128;128m' if os.environ.get('GIT_MACHETE_DIM_AS_GRAY') == 'true' else '\033[2m'
UNDERLINE = '\033[4m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
ORANGE = '\033[00;38;5;208m'
RED = '\033[91m'
# When True, all styling helpers below become pass-throughs (plain ASCII output).
ascii_only = False
def bold(s):
    """Wrap s in ANSI bold codes, unless ascii-only mode is on or s is empty."""
    if ascii_only or not s:
        return s
    return BOLD + s + ENDC
def dim(s):
    """Wrap s in ANSI dim codes, unless ascii-only mode is on or s is empty."""
    if ascii_only or not s:
        return s
    return DIM + s + ENDC
def underline(s, star_if_ascii_only=False):
    """Underline s; in ascii-only mode optionally append ' *' as a marker instead."""
    if s and not ascii_only:
        return UNDERLINE + s + ENDC
    if s and star_if_ascii_only:
        return s + " *"
    return s
def colored(s, color):
    """Wrap s in the given ANSI color code, unless ascii-only mode or s is empty."""
    if ascii_only or not s:
        return s
    return color + s + ENDC
# Tag-to-ANSI substitutions applied in order by fmt().
# NOTE(review): the replacement strings (bold/underline/dim/colored) are
# computed once, at module import time, with the then-current ascii_only
# value - confirm ascii_only is final before this list is built.
fmt_transformations = [
    lambda x: re.sub('<b>(.*?)</b>', bold(r"\1"), x, flags=re.DOTALL),
    lambda x: re.sub('<u>(.*?)</u>', underline(r"\1"), x, flags=re.DOTALL),
    lambda x: re.sub('<dim>(.*?)</dim>', dim(r"\1"), x, flags=re.DOTALL),
    lambda x: re.sub('<red>(.*?)</red>', colored(r"\1", RED), x, flags=re.DOTALL),
    lambda x: re.sub('<yellow>(.*?)</yellow>', colored(r"\1", YELLOW), x, flags=re.DOTALL),
    lambda x: re.sub('<green>(.*?)</green>', colored(r"\1", GREEN), x, flags=re.DOTALL),
    lambda x: re.sub('`(.*?)`', r"`\1`" if ascii_only else UNDERLINE + r"\1" + ENDC, x),
]
def fmt(*parts):
    """Concatenate parts and apply every tag-to-ANSI transformation in order."""
    text = ''.join(parts)
    for transform in fmt_transformations:
        text = transform(text)
    return text
def vertical_bar():
    """Vertical bar glyph, with a plain '|' fallback in ascii-only mode."""
    if ascii_only:
        return "|"
    return u"│"
def right_arrow():
    """Right arrow glyph, with a plain '->' fallback in ascii-only mode."""
    if ascii_only:
        return "->"
    return u"➔"
def safe_input(msg):
    """Prompt for a line of input, portable across Python 2 and 3."""
    if sys.version_info[0] == 2:  # Python 2
        return raw_input(msg)  # noqa: F821
    return input(msg)
def ask_if(msg, opt_yes_msg, apply_fmt=True):
    """Prompt the user with msg, or auto-answer 'y' when --yes was given."""
    if opt_yes and opt_yes_msg:
        print(fmt(opt_yes_msg) if apply_fmt else opt_yes_msg)
        return 'y'
    answer = safe_input(fmt(msg) if apply_fmt else msg)
    return answer.lower()
def pretty_choices(*choices):
    """Render the interactive choice list, color-coding y/yq/n/q variants."""
    def render(c):
        if not c:
            return ''
        lowered = c.lower()
        if lowered == 'y':
            return colored(c, GREEN)
        if lowered == 'yq':
            return colored(c[0], GREEN) + colored(c[1], RED)
        if lowered in ('n', 'q'):
            return colored(c, RED)
        return colored(c, ORANGE)
    return " (" + ", ".join(map_truthy_only(render, choices)) + ") "
def pick(choices, name, apply_fmt=True):
    """Interactively pick one of choices by its 1-based index.

    Exits with status 0 on empty input, 1 on non-numeric input.
    """
    menu = "".join("[%i] %s\n" % (i + 1, choice)
                   for i, choice in enumerate(choices))
    prompt = menu + "Specify " + name + " or hit <return> to skip: "
    try:
        answer = safe_input(fmt(prompt) if apply_fmt else prompt)
        if not answer:
            sys.exit(0)
        index = int(answer) - 1
    except ValueError:
        sys.exit(1)
    if index not in range(len(choices)):
        raise MacheteException("Invalid index: %i" % (index + 1))
    return choices[index]
def debug(hdr, msg):
    """Write a 'header: message' debug line to stderr when --debug is on."""
    if not opt_debug:
        return
    sys.stderr.write("%s: %s\n" % (bold(hdr), dim(msg)))
# To avoid displaying the same warning multiple times during a single run.
displayed_warnings = set()
def warn(msg, apply_fmt=True):
    """Write a 'Warn:' line to stderr, suppressing duplicate messages."""
    global displayed_warnings
    if msg in displayed_warnings:
        return
    text = fmt(msg) if apply_fmt else msg
    sys.stderr.write(colored("Warn: ", RED) + text + "\n")
    displayed_warnings.add(msg)
def directory_exists(path):
    """Return True if path points at an existing directory."""
    # os.path.isdir alone is unreliable for the current directory when it
    # has been deleted, hence the os.path.abspath round-trip.
    try:
        return os.path.isdir(os.path.abspath(path))
    except OSError:
        return False
def current_directory_or_none():
    """Return os.getcwd(), or None when the current directory no longer exists."""
    try:
        return os.getcwd()
    except OSError:
        # Happens when the current directory has been deleted.
        return None
# Let's keep the flag to avoid checking for current directory's existence
# every time any command is being popened or run.
current_directory_confirmed_to_exist = False
# Remember where the process started; fall back to $PWD if getcwd failed.
initial_current_directory = current_directory_or_none() or os.getenv('PWD')
def mark_current_directory_as_possibly_non_existent():
    """Force the next run_cmd/popen_cmd to re-verify that the cwd still exists."""
    global current_directory_confirmed_to_exist
    current_directory_confirmed_to_exist = False
def chdir_upwards_until_current_directory_exists():
    """If the cwd has been deleted, chdir upwards until an existing directory is found."""
    global current_directory_confirmed_to_exist
    if current_directory_confirmed_to_exist:
        return
    current_directory = current_directory_or_none()
    if not current_directory:
        while not current_directory:
            # Note: 'os.chdir' only affects the current process and its subprocesses;
            # it doesn't propagate to the parent process (which is typically a shell).
            os.chdir(os.path.pardir)
            current_directory = current_directory_or_none()
        debug("chdir_upwards_until_current_directory_exists()",
              "current directory did not exist, chdired up into %s" % current_directory)
    current_directory_confirmed_to_exist = True
def run_cmd(cmd, *args, **kwargs):
    """Run cmd with args via subprocess.call and return its exit code."""
    chdir_upwards_until_current_directory_exists()
    flat_cmd = cmd_shell_repr(cmd, *args)
    if opt_debug:
        sys.stderr.write(bold(">>> " + flat_cmd) + "\n")
    elif opt_verbose:
        sys.stderr.write(flat_cmd + "\n")
    exit_code = subprocess.call([cmd] + list(args), **kwargs)
    # Defensively assume every command executed via run_cmd (but not via
    # popen_cmd) can make the current directory disappear; in practice it's
    # mostly 'git checkout' that carries such risk.
    mark_current_directory_as_possibly_non_existent()
    if exit_code != 0 and opt_debug:
        sys.stderr.write(dim("<exit code: %i>\n\n" % exit_code))
    return exit_code
def popen_cmd(cmd, *args, **kwargs):
    """Run cmd capturing its output; return (exit_code, stdout, stderr)."""
    chdir_upwards_until_current_directory_exists()
    flat_cmd = cmd_shell_repr(cmd, *args)
    if opt_debug:
        sys.stderr.write(bold(">>> " + flat_cmd) + "\n")
    elif opt_verbose:
        sys.stderr.write(flat_cmd + "\n")
    process = subprocess.Popen([cmd] + list(args), stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, **kwargs)
    stdout_bytes, stderr_bytes = process.communicate()
    stdout = stdout_bytes.decode('utf-8')
    stderr = stderr_bytes.decode('utf-8')
    exit_code = process.returncode
    if opt_debug:
        if exit_code != 0:
            sys.stderr.write(colored("<exit code: %i>\n\n" % exit_code, RED))
        if stdout:
            sys.stderr.write("%s\n%s\n" % (dim("<stdout>:"), dim(stdout)))
        if stderr:
            sys.stderr.write("%s\n%s\n" % (dim("<stderr>:"), colored(stderr, RED)))
    return exit_code, stdout, stderr
# Git core
def cmd_shell_repr(cmd, *args):
    """Return a copy-pasteable shell representation of cmd and its args."""
    def shell_escape(arg):
        # Escape the characters that would otherwise be interpreted by a shell.
        for original, escaped in (("(", "\\("), (")", "\\)"),
                                  (" ", "\\ "), ("\t", "$'\\t'")):
            arg = arg.replace(original, escaped)
        return arg
    return " ".join([cmd] + [shell_escape(a) for a in args])
def run_git(git_cmd, *args, **kwargs):
    """Run a git subcommand; raise on non-zero exit unless allow_non_zero."""
    exit_code = run_cmd("git", git_cmd, *args)
    if exit_code != 0 and not kwargs.get("allow_non_zero"):
        raise MacheteException("`%s` returned %i" % (cmd_shell_repr("git", git_cmd, *args), exit_code))
    return exit_code
def popen_git(git_cmd, *args, **kwargs):
    """Run a git subcommand and return its stdout; raise on failure unless allow_non_zero."""
    exit_code, stdout, stderr = popen_cmd("git", git_cmd, *args)
    if exit_code != 0 and not kwargs.get("allow_non_zero"):
        exit_code_msg = fmt("`%s` returned %i\n" % (cmd_shell_repr("git", git_cmd, *args), exit_code))
        stdout_msg = "\n%s:\n%s" % (bold("stdout"), dim(stdout)) if stdout else ""
        stderr_msg = "\n%s:\n%s" % (bold("stderr"), dim(stderr)) if stderr else ""
        # Not applying the formatter to avoid transforming whatever characters
        # might be in the output of the command.
        raise MacheteException(exit_code_msg + stdout_msg + stderr_msg, apply_fmt=False)
    return stdout
# Manipulation on definition file/tree of branches
def expect_in_managed_branches(b):
    """Raise if branch b is not present in the definition tree."""
    if b in managed_branches:
        return
    raise MacheteException("Branch `%s` not found in the tree of branch dependencies. "
                           "Use `git machete add %s` or `git machete edit`" % (b, b))
def expect_at_least_one_managed_branch():
    """Raise the standard 'no branches' error when the definition tree has no roots."""
    if not roots:
        raise_no_branches_error()
def raise_no_branches_error():
    """Raise an error explaining that the definition file lists no branches."""
    message = ("No branches listed in %s; use `git machete discover` "
               "or `git machete edit`, or edit %s manually." % (
                   definition_file_path, definition_file_path))
    raise MacheteException(message)
def read_definition_file(verify_branches=True):
global indent, managed_branches, down_branches, up_branch, roots, annotations
with open(definition_file_path) as f:
lines = [line.rstrip() for line in f.readlines() if not line.isspace()]
managed_branches = []
down_branches = {}
up_branch = {}
indent = None
roots = []
annotations = {}
at_depth = {}
last_depth = -1
hint = "Edit the definition file manually with `git machete edit`"
invalid_branches = []
for idx, l in enumerate(lines):
pfx = "".join(itertools.takewhile(str.isspace, l))
if pfx and not indent:
indent = pfx
b_a = l.strip().split(" ", 1)
b = b_a[0]
if len(b_a) > 1:
annotations[b] = b_a[1]
if b in managed_branches:
raise MacheteException("%s, line %i: branch `%s` re-appears in the tree definition. %s" %
(definition_file_path, idx + 1, b, hint))
if verify_branches and b not in local_branches():
invalid_branches += [b]
managed_branches += [b]
if pfx:
depth = len(pfx) // len(indent)
if pfx != indent * depth:
mapping = {" ": "<SPACE>", "\t": "<TAB>"}
pfx_expanded = "".join(mapping[c] for c in pfx)
indent_expanded = "".join(mapping[c] for c in indent)
raise MacheteException("%s, line %i: invalid indent `%s`, expected a multiply of `%s`. %s" %
(definition_file_path, idx + 1, pfx_expanded, indent_expanded, hint))
else:
depth = 0
if depth > last_depth + 1:
raise MacheteException("%s, line %i: too much indent (level %s, expected at most %s) for the branch `%s`. %s" %
(definition_file_path, idx + 1, depth, last_depth + 1, b, hint))
last_depth = depth
at_depth[depth] = b
if depth:
p = at_depth[depth - 1]
up_branch[b] = p
if p in down_branches:
down_branches[p] += [b]
else:
down_branches[p] = [b]
else:
roots += [b]
if not invalid_branches:
return
if len(invalid_branches) == 1:
ans = ask_if("Skipping `" + invalid_branches[0] +
"` which is not a local branch (perhaps it has been deleted?).\n" +
"Slide it out from the definition file?" +
pretty_choices("y", "e[dit]", "N"), opt_yes_msg=None)
else:
ans = ask_if("Skipping " + ", ".join("`" + b + "`" for b in invalid_branches) +
" which are not local branches (perhaps they have been deleted?).\n" | |
= {} # type: Dict[int, Any]
self.MATS3 = {} # type: Dict[int, Any]
self.MATS8 = {} # type: Dict[int, Any]
#: stores MATTx
self.MATT1 = {} # type: Dict[int, Any]
self.MATT2 = {} # type: Dict[int, Any]
self.MATT3 = {} # type: Dict[int, Any]
self.MATT4 = {} # type: Dict[int, Any]
self.MATT5 = {} # type: Dict[int, Any]
self.MATT8 = {} # type: Dict[int, Any]
self.MATT9 = {} # type: Dict[int, Any]
self.nxstrats = {} # type: Dict[int, Any]
#: stores the CREEP card
self.creep_materials = {} # type: Dict[int, Any]
self.tics = {} # type: Optional[Any]
# stores DLOAD entries.
self.dloads = {} # type: Dict[int, Any]
# stores ACSRCE, RLOAD1, RLOAD2, TLOAD1, TLOAD2, and ACSRCE,
# and QVECT entries.
self.dload_entries = {} # type: Dict[int, Any]
#self.gusts = {} # Case Control GUST = 100
#self.random = {} # Case Control RANDOM = 100
#: stores coordinate systems
origin = array([0., 0., 0.])
zaxis = array([0., 0., 1.])
xzplane = array([1., 0., 0.])
coord = CORD2R(cid=0, rid=0, origin=origin, zaxis=zaxis, xzplane=xzplane)
self.coords = {0 : coord} # type: Dict[int, Any]
# --------------------------- constraints ----------------------------
#: stores SUPORT1s
#self.constraints = {} # suport1, anything else???
self.suport = [] # type: List[Any]
self.suport1 = {} # type: Dict[int, Any]
self.se_suport = [] # type: List[Any]
#: stores SPC, SPC1, SPCAX, GMSPC
self.spcs = {} # type: Dict[int, List[Any]]
#: stores SPCADD
self.spcadds = {} # type: Dict[int, List[Any]]
self.spcoffs = {} # type: Dict[int, List[Any]]
self.mpcs = {} # type: Dict[int, List[Any]]
self.mpcadds = {} # type: Dict[int, List[Any]]
# --------------------------- dynamic ----------------------------
#: stores DAREA
self.dareas = {} # type: Dict[int, Any]
self.dphases = {} # type: Dict[int, Any]
self.pbusht = {} # type: Dict[int, Any]
self.pdampt = {} # type: Dict[int, Any]
self.pelast = {} # type: Dict[int, Any]
#: frequencies
self.frequencies = {} # type: Dict[int, List[Any]]
# ----------------------------------------------------------------
#: direct matrix input - DMIG
self.dmi = {} # type: Dict[str, Any]
self.dmig = {} # type: Dict[str, Any]
self.dmij = {} # type: Dict[str, Any]
self.dmiji = {} # type: Dict[str, Any]
self.dmik = {} # type: Dict[str, Any]
self.dmiax = {} # type: Dict[str, Any]
self.dti = {} # type: Dict[str, Any]
self._dmig_temp = defaultdict(list) # type: Dict[str, List[str]]
# ----------------------------------------------------------------
#: SETy
self.sets = {} # type: Dict[int, Any]
self.asets = [] # type: List[Any]
self.omits = [] # type: List[Any]
self.bsets = [] # type: List[Any]
self.csets = [] # type: List[Any]
self.qsets = [] # type: List[Any]
self.usets = {} # type: Dict[str, Any]
#: SExSETy
self.se_bsets = [] # type: List[Any]
self.se_csets = [] # type: List[Any]
self.se_qsets = [] # type: List[Any]
self.se_usets = {} # type: Dict[str, Any]
self.se_sets = {} # type: Dict[str, Any]
# ----------------------------------------------------------------
#: parametric
self.pset = {}
self.pval = {}
self.gmcurv = {}
self.gmsurf = {}
self.feedge = {}
self.feface = {}
# ----------------------------------------------------------------
#: tables
# TABLES1, ...
self.tables = {} # type: Dict[int, TABLES1]
# TABLEDx
self.tables_d = {} # type: Dict[int, Union[TABLED1, TABLED2, TABLED3, TABLED4]]
# TABLEMx
self.tables_m = {} # type: Dict[int, Union[TABLEM1, TABLEM2, TABLEM3, TABLEM4]]
#: random_tables
self.random_tables = {} # type: Dict[int, Any]
#: TABDMP1
self.tables_sdamping = {} # type: Dict[int, TABDMP1]
# ----------------------------------------------------------------
#: EIGB, EIGR, EIGRL methods
self.methods = {} # type: Dict[int, Union[EIGR, EIGRL, EIGB]]
# EIGC, EIGP methods
self.cMethods = {} # type: Dict[int, Union[EIGC, EIGP]]
# ---------------------------- optimization --------------------------
# optimization
self.dconadds = {} # type: Dict[int, DCONADD]
self.dconstrs = {} # type: Dict[int, DCONSTR]
self.desvars = {} # type: Dict[int, DESVAR]
self.topvar = {} # type: Dict[int, TOPVAR]
self.ddvals = {} # type: Dict[int, DDVAL]
self.dlinks = {} # type: Dict[int, DLINK]
self.dresps = {} # type: Dict[int, Union[DRESP1, DRESP2, DRESP3]]
self.dtable = None # type: Optional[DTABLE]
self.dequations = {} # type: Dict[int, DEQATN]
#: stores DVPREL1, DVPREL2...might change to DVxRel
self.dvprels = {} # type: Dict[int, Union[DVPREL1, DVPREL2]]
self.dvmrels = {} # type: Dict[int, Union[DVMREL1, DVMREL2]]
self.dvcrels = {} # type: Dict[int, Union[DVCREL1, DVCREL2]]
self.dvgrids = {} # type: Dict[int, DVGRID]
self.doptprm = None # type: Optional[DOPTPRM]
self.dscreen = {} # type: Dict[int, DSCREEN]
# ------------------------- nonlinear defaults -----------------------
#: stores NLPCI
self.nlpcis = {} # type: Dict[int, NLPCI]
#: stores NLPARM
self.nlparms = {} # type: Dict[int, NLPARM]
#: stores TSTEPs, TSTEP1s
self.tsteps = {} # type: Dict[int, Union[TSTEP, TSTEP1]]
#: stores TSTEPNL
self.tstepnls = {} # type: Dict[int, TSTEPNL]
#: stores TF
self.transfer_functions = {} # type: Dict[int, TF]
#: stores DELAY
self.delays = {} # type: Dict[int, DELAY]
#: stores ROTORD, ROTORG
self.rotors = {} # type: Dict[int, Union[ROTORD, ROTORG]]
# --------------------------- aero defaults --------------------------
# aero cards
#: stores CAEROx
self.caeros = {} # type: Dict[int, Union[CAERO1, CAERO2, CAERO3, CAERO4, CAERO5]]
#: stores PAEROx
self.paeros = {} # type: Dict[int, Union[PAERO1, PAERO2, PAERO3, PAERO4, PAERO5]]
# stores MONPNT1
self.monitor_points = [] # type: List[Union[MONPNT1, MONPNT2, MONPNT3]]
#: stores AECOMP
self.aecomps = {} # type: Dict[int, AECOMP]
#: stores AEFACT
self.aefacts = {} # type: Dict[int, AEFACT]
#: stores AELINK
self.aelinks = {} # type: Dict[int, List[AELINK]]
#: stores AELIST
self.aelists = {} # type: Dict[int, AELIST]
#: stores AEPARAM
self.aeparams = {} # type: Dict[int, AEPARAM]
#: stores AESURF
self.aesurf = {} # type: Dict[int, AESURF]
#: stores AESURFS
self.aesurfs = {} # type: Dict[int, AESURFS]
#: stores AESTAT
self.aestats = {} # type: Dict[int, AESTAT]
#: stores CSSCHD
self.csschds = {} # type: Dict[int, CSSCHD]
#: store SPLINE1,SPLINE2,SPLINE4,SPLINE5
self.splines = {} # type: Dict[int, Union[SPLINE1, SPLINE2, SPLINE3, SPLINE4, SPLINE5]]
self.zona = ZONA(self)
# axisymmetric
self.axic = None # type: Optional[AXIC]
self.axif = None # type: Optional[AXIF]
self.ringfl = {} # type: Dict[int, RINGFL]
self._is_axis_symmetric = False
# cyclic
self.cyax = None # type: Optional[CYAX]
self.cyjoin = {} # type: Dict[int, CYJOIN]
self.modtrak = None # type: Optional[MODTRAK]
# acoustic
self.acmodl = None
# ------ SOL 144 ------
#: stores AEROS
self.aeros = None # type: Optional[AEROS]
#: stores TRIM, TRIM2
self.trims = {} # type: Dict[int, Union[TRIM, TRIM2]]
#: stores DIVERG
self.divergs = {} # type: Dict[int, DIVERG]
# ------ SOL 145 ------
#: stores AERO
self.aero = None # type: Optional[AERO]
#: stores FLFACT
self.flfacts = {} # type: Dict[int, FLFACT]
#: stores FLUTTER
self.flutters = {} # type: Dict[int, FLUTTER]
#: mkaeros
self.mkaeros = [] # type: List[Union[MKAERO1,MKAERO2]]
# ------ SOL 146 ------
#: stores GUST cards
self.gusts = {} # type: Dict[int, GUST]
# ------------------------- thermal defaults -------------------------
# BCs
#: stores thermal boundary conditions - CONV,RADBC
self.bcs = {} # type: Dict[int, Union[CONV, RADBC]]
#: stores PHBDY
self.phbdys = {} # type: Dict[int, PHBDY]
#: stores convection properties - PCONV, PCONVM ???
self.convection_properties = {} # type: Dict[int, Union[PCONV, PCONVM]]
#: stores TEMPD
self.tempds = {} # type: Dict[int, TEMPD]
#: stores VIEW
self.views = {} # type: Dict[int, VIEW]
#: stores VIEW3D
self.view3ds = {} # type: Dict[int, VIEW3D]
self.radset = None
self.radcavs = {} # type: Dict[int, RADCAV]
self.radmtx = {} # type: Dict[int, RADMTX]
# -------------------------contact cards-------------------------------
self.bcrparas = {} # type: Dict[int, BCRPARA]
self.bctadds = {} # type: Dict[int, BCTADD]
self.bctparas = {} # type: Dict[int, BCTPARA]
self.bctsets = {} # type: Dict[int, BCTSET]
self.bsurf = {} # type: Dict[int, BSURF]
self.bsurfs = {} # type: Dict[int, BSURFS]
self.bconp = {} # type: Dict[int, BCONP]
self.blseg = {} # type: Dict[int, BLSEG]
self.bfric = {} # type: Dict[int, BFRIC]
self.bgadds = {} # type: Dict[int, BGADD]
self.bgsets = {} # type: Dict[int, BGSET]
self.bctparms = {} # type: Dict[int, BCTPARAM]
#--------------------------superelements------------------------------
self.setree = {} # type: Dict[int, SETREE]
self.senqset = {} # type: Dict[int, Union[SENQSET, SENQSET1]]
self.sebulk = {} # type: Dict[int, SEBULK]
self.sebndry = {} # type: Dict[int, SEBNDRY]
self.release = {} # type: Dict[int, RELEASE]
self.seloc = | |
grid3 nodes because of
heavy melting of surface nodes (problematic in ablation area)
-> merge k batches of n1 grid3 nodes into k grid23 nodes (k is maximum nb of batches of n1 grid3 nodes available)
if nb of layers in grid2 is <k:
-> calculate the nb of supplementary grid2 layers required
-> calculate xx: the number of grid22 layers that must be split to provide the supplementary grid2 layers
-> merge xx batches of n2 grid23 layers into xx grid22 layer
-> divide xx grid22 layers into xx*n2 grid2 layers
-> divide k grid2 layer into n1 grid1 layer
5 grids:
grid1 -> high resolution determined by accumulation events
grid2 -> low resolution
grid22 -> very low resolution
grid23 -> low resolution
grid3 -> high resolution
gridtrack keeps track of which grid each layer is in
'''
n1 = self.c['nodestocombine'] # nodes to combine from grid3 to grid23 and to split from grid2 to grid1
n2 = self.c['multnodestocombine'] # nodes to combine from grid23 to grid22 and to split from grid22 to grid2
inds1 = np.where(self.gridtrack==1)[0] # all nodes in grid1
inds2 = np.where(self.gridtrack==2)[0] # all nodes in grid2
inds22 = np.where(self.gridtrack==22)[0] # all nodes in grid22
inds23 = np.where(self.gridtrack==23)[0] # layers in grid23
inds3 = np.where(self.gridtrack==3)[0] # layers in grid3
# Create list of batches of grid3 nodes that will be merged in grid23 nodes #
i3_23 = [np.arange(i3,i3+n1) for i3 in range(inds3[0],inds3[-n1],n1)] # layers to transition from grid3 to grid23
# Initialize the 10 lists of the new grid23 nodes #
g23dz,g23mass,g23rho,g23Tz,g23gt,g23age,g23bdm,g23lwc,g23r2,g23inds = ([] for ii in range(10))
# Fill in the lists with the batch properties #
for bb,batch in enumerate(i3_23):
# Properties of the new grid23 nodes #
g23dz.append(np.sum(self.dz[batch])) # sum thickness
g23mass.append(np.sum(self.mass[batch])) # sum mass
g23rho.append(g23mass[bb]/g23dz[bb])
bb_Tz0 = np.sum(self.Tz[batch]*self.mass[batch])
g23Tz.append(bb_Tz0/g23mass[bb]) # Use a weighted average for temperature (effectively the enthalpy)
g23gt.append(23) #gridtrack
g23age.append(np.mean(self.age[batch])) # mean age
g23bdm.append(np.mean(self.bdot_mean[batch])) #mean bdotmean
g23lwc.append(np.sum(self.LWC[batch])) # sum for lwc
if self.r2 is not None:
g23r2.append(np.mean(self.r2[batch])) # mean for r2
g23inds.append(batch) #old indices of the nodes merged into grid23
nfl = np.size(i3_23) #nb of fine nodes lost
nmg = int(np.size(i3_23)/n1) #nb of medium nodes gained
##### Not enough nodes in grid2 -> we have to split nodes from grid22 (avoid emptying grid2) #####
if len(inds2)-nmg<=0:
ncl = np.ceil((nmg-len(inds2)+1)/n2) #nb of nodes to split from grid22 to grid2 (nb of coarse layers lost)
ncl = ncl.astype(int) #convert to int for indexing
### First: merge ncl times batches of n2 layers from grid23 to grid22 ###
## Enough nodes in initial grid23 to form the ncl coarse nodes ##
if ncl*n2<=len(inds23):
# Create list of batches of grid23 nodes that will be merged in grid22 nodes #
i23_22 = [np.arange(i23,i23+n2) for i23 in range(inds23[0],inds23[int(ncl*n2-1)],n2)] # layers to transition from grid23 to grid22
hi23 = np.size(i23_22) #highest node in the self.dz[inds23] array that will still be part of grid23
# Initialize the 9 lists of the new grid22 nodes #
g22dz,g22mass,g22rho,g22Tz,g22gt,g22age,g22bdm,g22lwc,g22r2 = ([] for ii in range(9))
# Fill in the lists with the batch properties #
for bb,batch in enumerate(i23_22):
# Properties of the new grid22 nodes #
g22dz.append(np.sum(self.dz[batch])) # sum thickness
g22mass.append(np.sum(self.mass[batch])) # sum mass
g22rho.append(g22mass[bb]/g22dz[bb])
bb_Tz0 = np.sum(self.Tz[batch]*self.mass[batch])
g22Tz.append(bb_Tz0/g22mass[bb]) # Use a weighted average for temperature (effectively the enthalpy)
g22gt.append(22) #gridtrack
g22age.append(np.mean(self.age[batch])) # mean age
g22bdm.append(np.mean(self.bdot_mean[batch])) #mean bdotmean
g22lwc.append(np.sum(self.LWC[batch])) # sum for lwc
if self.r2 is not None:
g22r2.append(np.mean(self.r2[batch])) # mean for r2
## Not enough nodes in initial grid23 to form the ncl coarse nodes -> also merge g23 nodes ##
elif ncl*n2>len(inds23):
hi23 = len(inds23) #no nodes of the self.dz[inds23] will still be part of grid23
ng23l = ncl*n2-len(inds23) #nb of nodes of g23 that will contribute to the merging
# Create list of batches of the grid23 nodes to be merged in grid22 nodes #
i23_22 = [np.arange(i23,i23+n2) for i23 in range(inds23[0],inds23[-n2],n2)] # layers to transition from grid23 to grid22
rem23 = np.arange(i23_22[-1][-1]+1,inds23[-1]+1) #remaining nodes that did not create an entire grid22 node
i0g23 = n2-len(rem23) #index until which we have to take g23 nodes to compensate for rem23 not having enough nodes
for sublist in g23inds[0:i0g23]:
rem23 = np.append(rem23,sublist) #progressively append the g23inds sublists to fill in rem23
i23_22.append(rem23) #append the batch overlapping grid23 and g23
for lsi in range(i0g23,ng23l,n2):
g23i_toap = [ii for sublist in g23inds[lsi:lsi+n2] for ii in sublist] #the g23 indices that form a single batch for the new grid22 layers
i23_22.append(np.array(g23i_toap)) #append to the list of all indices contributing to the grid22 new nodes' formation
# Remove the g23 nodes that are going to grid22 from the g23 lists #
g23dz = g23dz[ng23l:]
g23mass = g23mass[ng23l:]
g23rho = g23rho[ng23l:]
g23Tz = g23Tz[ng23l:]
g23gt = g23gt[ng23l:]
g23age = g23age[ng23l:]
g23bdm = g23bdm[ng23l:]
g23lwc = g23lwc[ng23l:]
if self.r2 is not None:
g23r2 = g23r2[ng23l:]
g23inds = g23inds[ng23l:]
# Initialize the 9 lists of the new grid22 nodes #
g22dz,g22mass,g22rho,g22Tz,g22gt,g22age,g22bdm,g22lwc,g22r2 = ([] for ii in range(9))
# Fill in the lists with the batch properties #
for bb,batch in enumerate(i23_22):
# Properties of the new grid22 nodes #
g22dz.append(np.sum(self.dz[batch])) # sum thickness
g22mass.append(np.sum(self.mass[batch])) # sum mass
g22rho.append(g22mass[bb]/g22dz[bb])
bb_Tz0 = np.sum(self.Tz[batch]*self.mass[batch])
g22Tz.append(bb_Tz0/g22mass[bb]) # Use a weighted average for temperature (effectively the enthalpy)
g22gt.append(22) #gridtrack
g22age.append(np.mean(self.age[batch])) # mean age
g22bdm.append(np.mean(self.bdot_mean[batch])) #mean bdotmean
g22lwc.append(np.sum(self.LWC[batch])) # sum for lwc
if self.r2 is not None:
g22r2.append(np.mean(self.r2[batch])) # mean for r2
### Second: split the ncl highest grid22 nodes in ncl*n2 grid2 nodes ###
i22_2 = inds22[0:ncl] #nodes to transition from grid22 to grid 2
# Initialize the 9 lists of the new grid2 nodes #
g2dz,g2mass,g2rho,g2Tz,g2gt,g2age,g2bdm,g2lwc,g2r2 = ([] for ii in range(9))
# Fill in the lists with the nodes' properties #
for i22 in i22_2:
# Properties of the new grid2 nodes #
g2dz = np.append(g2dz,self.dz[i22]/n2*np.ones(n2))
g2rho = np.append(g2rho,self.rho[i22]*np.ones(n2))
g2Tz = np.append(g2Tz,self.Tz[i22]*np.ones(n2))
g2gt = np.append(g2gt,2*np.ones(n2))
g2age = np.append(g2age,np.linspace(self.age[i22],self.age[i22+1],n2)) #assume linearly increasing age until layer below
g2bdm = np.append(g2age,self.bdot_mean[i22]*np.ones(n2))
g2lwc = np.append(g2lwc,self.LWC[i22]/n2*np.ones(n2))
if self.r2 is not None:
g2r2 = np.append(g2r2,self.r2[i22]*np.ones(n2))
g2mass = g2dz*g2rho
### Now there are enough layers in grid2 combined with g2 to form nfl new nodes in grid1 ###
i2_1 = inds2[0:] #all nodes of grid2 will be split into grid1 nodes
#ng2l = len(inds2)-nmg #nb of nodes from g2 that will also be split in grid1 nodes
ng2l = nmg-len(inds2) #nb of nodes from g2 that will also be split in grid1 nodes
if ng2l==0: #Case where there are just enough nodes from grid2 for the splitting (g2 created for non-empty grid2)
ig2_1 = np.array([]) #no nodes of g2 will be split
else:
ig2_1 = np.arange(0,ng2l) #nodes of g2 that will also be split in grid1 nodes (indices are on the g2 grid!)
# Initialize the 9 lists of the new grid1 nodes #
g1dz,g1mass,g1rho,g1Tz,g1gt,g1age,g1bdm,g1lwc,g1r2 = ([] for ii in range(9))
# Fill in the lists with the nodes' properties #
for i2 in i2_1: #Proceed first to the splitting of the grid2 nodes
# Properties of the new grid1 nodes #
g1dz = np.append(g1dz,self.dz[i2]/n1*np.ones(n1))
g1rho = np.append(g1rho,self.rho[i2]*np.ones(n1))
g1Tz = np.append(g1Tz,self.Tz[i2]*np.ones(n1))
g1gt = np.append(g1gt,1*np.ones(n1))
g1age = np.append(g1age,np.linspace(self.age[i2],self.age[i2+1],n1)) #assume linearly increasing age until layer below
g1bdm = np.append(g1bdm,self.bdot_mean[i2]*np.ones(n1))
g1lwc = np.append(g1lwc,self.LWC[i2]/n1*np.ones(n1))
if self.r2 is not None:
g1r2 = np.append(g1r2,self.r2[i2]*np.ones(n1))
for ig2 in ig2_1: #Then proceed to the splitting of g2 nodes (if necessary, otherwise ig2_1 is empty)
# Properties of the new grid1 nodes #
g1dz = np.append(g1dz,g2dz[ig2]/n1*np.ones(n1))
g1rho = np.append(g1rho,g2rho[ig2]*np.ones(n1))
g1Tz = np.append(g1Tz,g2Tz[ig2]*np.ones(n1))
g1gt = np.append(g1gt,1*np.ones(n1))
g1age = np.append(g1age,np.linspace(g2age[ig2],g2age[ig2+1],n1)) #assume linearly increasing age until layer below
g1bdm = np.append(g1bdm,g2bdm[ig2]*np.ones(n1))
g1lwc = np.append(g1lwc,g2lwc[ig2]/n1*np.ones(n1))
if self.r2 is not None:
g1r2 = np.append(g1r2,g2r2[ig2]*np.ones(n1))
g1mass = g1dz*g1rho
# Remove the g2 nodes that are going to grid1 from the g1 lists #
g2dz = g2dz[ng2l:]
g2mass = g2mass[ng2l:]
g2rho | |
MCacheFormatDescription_getNumChannels(*args, **kwargs):
pass
# Auto-generated stub signatures (SWIG-style) for Maya OpenMaya API
# functions; bodies are intentionally empty placeholders used only for
# introspection/completion, not for execution.
def MArrayDataBuilder_removeElement(*args, **kwargs):
    pass
def MItSurfaceCV_getIndex(*args, **kwargs):
    pass
def MFnSubd_polygonGetCenterUV(*args, **kwargs):
    pass
def MFnLatticeData_className(*args, **kwargs):
    pass
def MFnDagNode_inModel(*args, **kwargs):
    pass
def MVector_z_get(*args, **kwargs):
    pass
def MCurveAttribute_setValueAtIndex(*args, **kwargs):
    pass
def MProfiler_getEventTime(*args, **kwargs):
    pass
def MItCurveCV_updateCurve(*args, **kwargs):
    pass
def MVector___eq__(*args, **kwargs):
    pass
def MFnNurbsCurveData_className(*args, **kwargs):
    pass
def MFnLayeredShader_compositingFlag(*args, **kwargs):
    pass
def MFnLambertShader_swigregister(*args, **kwargs):
    pass
def MEvaluationManager_evaluationManagerActive(*args, **kwargs):
    pass
def MStreamUtils_swigregister(*args, **kwargs):
    pass
def MArgList_asAngle(*args, **kwargs):
    pass
def MItSubdVertex_level(*args, **kwargs):
    pass
def MFnSubd_edgeCreaseRelevant(*args, **kwargs):
    pass
def MFnGeometryData_addObjectGroupComponent(*args, **kwargs):
    pass
def MFnNurbsSurface_removeOneKnotInV(*args, **kwargs):
    pass
def delete_MFnDagNode(*args, **kwargs):
    pass
def MVector___ne__(*args, **kwargs):
    pass
def MConditionMessage_className(*args, **kwargs):
    pass
def MPoint_z_set(*args, **kwargs):
    pass
def MInt64Array___ne__(*args, **kwargs):
    pass
def MFnLambertShader_diffuseCoeff(*args, **kwargs):
    pass
def MEulerRotation_x_set(*args, **kwargs):
    pass
def MSetAttrEdit_className(*args, **kwargs):
    pass
def MFnExpression_className(*args, **kwargs):
    pass
def MArgParser_swigregister(*args, **kwargs):
    pass
def MFnTransform_setRotation(*args, **kwargs):
    pass
def MFnSubd_vertexIsBoundary(*args, **kwargs):
    pass
def MFnGeometryData_type(*args, **kwargs):
    pass
def MFnDependencyNode_icon(*args, **kwargs):
    pass
def MCommandResult_swigregister(*args, **kwargs):
    pass
def MPoint___sub__(*args, **kwargs):
    pass
def MInt64Array_setLength(*args, **kwargs):
    pass
def MCacheFormatDescription_getChannelSamplingType(*args, **kwargs):
    pass
def MFnNumericAttribute_setMin(*args, **kwargs):
    pass
def MFnAttribute_getAddAttrCmd(*args, **kwargs):
    pass
def MEulerRotation___ne__(*args, **kwargs):
    pass
def MSelectionMask_assign(*args, **kwargs):
    pass
def MArgParser_flagArgumentInt(*args, **kwargs):
    pass
def delete_MItSelectionList(*args, **kwargs):
    pass
def MFnSubd_vertexEditsSetAllNonBase(*args, **kwargs):
    pass
def MFnFloatArrayData_set(*args, **kwargs):
    pass
def MNamespace_validateName(*args, **kwargs):
    pass
def MFnDependencyNode_isNewAttribute(*args, **kwargs):
    pass
def MUuid_swigregister(*args, **kwargs):
    pass
def new_MMessage(*args, **kwargs):
    pass
def MFnComponentListData___getitem__(*args, **kwargs):
    pass
def MPointArray_remove(*args, **kwargs):
    pass
def MIntArray_sizeIncrement(*args, **kwargs):
    pass
def delete_MFnNumericAttribute(*args, **kwargs):
    pass
def MFnAttribute_setUsedAsColor(*args, **kwargs):
    pass
def MDoubleArray_swigregister(*args, **kwargs):
    pass
def MSelectionList_remove(*args, **kwargs):
    pass
def MAngle_asAngSeconds(*args, **kwargs):
    pass
def MImage_depth(*args, **kwargs):
    pass
def MItMeshVertex_hasColor(*args, **kwargs):
    pass
def MFnSubd_className(*args, **kwargs):
    pass
def MFnExpression_expression(*args, **kwargs):
    pass
def MFnDependencyNode_getConnections(*args, **kwargs):
    pass
def MUserEventMessage_addUserEventCallback(*args, **kwargs):
    pass
def MMessageNode_fHeadNode_set(*args, **kwargs):
    pass
def MPlug_swigregister(*args, **kwargs):
    pass
def MImage_convertPixelFormat(*args, **kwargs):
    pass
def MFnMesh_polyTriangulate(*args, **kwargs):
    pass
def MFnExpression_setExpression(*args, **kwargs):
    pass
def MFnDependencyNode_isShared(*args, **kwargs):
    pass
def MDoubleArray_append(*args, **kwargs):
    pass
def MSceneMessage_addReferenceCallback(*args, **kwargs):
pass
def MObject___eq__(*args, **kwargs):
pass
def MItMeshVertex_setUV(*args, **kwargs):
pass
def MFnStringArrayData_swigregister(*args, **kwargs):
pass
def MFnEnumAttribute_className(*args, **kwargs):
pass
def MFnBase_object(*args, **kwargs):
pass
def MEulerRotation_isEquivalent(*args, **kwargs):
pass
def MURI_removeQueryItem(*args, **kwargs):
pass
def MColor_g_set(*args, **kwargs):
pass
def MPlug_setNumElements(*args, **kwargs):
pass
def MImage_setFloatPixels(*args, **kwargs):
pass
def MFnMesh_stringBlindDataComponentId(*args, **kwargs):
pass
def MFnAttribute_isReadable(*args, **kwargs):
pass
def MDistance_uiUnit(*args, **kwargs):
pass
def delete_MFcurveEdit(*args, **kwargs):
pass
def array4dDouble_getptr(*args, **kwargs):
pass
def new_MItMeshVertex(*args, **kwargs):
pass
def MConnectDisconnectAttrEdit_srcPlug(*args, **kwargs):
pass
def MFnSpotLight_setUseDecayRegions(*args, **kwargs):
pass
def MEulerRotation___iadd__(*args, **kwargs):
pass
def MFnDoubleArrayData_array(*args, **kwargs):
pass
def MFloatVector_get(*args, **kwargs):
pass
def MURI_asString(*args, **kwargs):
pass
def MColor_assign(*args, **kwargs):
pass
def MPlug_setMPxData(*args, **kwargs):
pass
def MImageFileInfo_pixelType(*args, **kwargs):
pass
def MFnMesh_clearColors(*args, **kwargs):
pass
def MImage_depthMap(*args, **kwargs):
pass
def MFnArrayAttrsData_count(*args, **kwargs):
pass
def MCameraSetMessage_swigregister(*args, **kwargs):
pass
def MAddRemoveAttrEdit_isAttributeAdded(*args, **kwargs):
pass
def array4dFloat_swigregister(*args, **kwargs):
pass
def MItMeshPolygon_isConnectedToEdge(*args, **kwargs):
pass
def MFnSphereData_swigregister(*args, **kwargs):
pass
def MFnCameraSet_setLayerActive(*args, **kwargs):
pass
def delete_MFloatVector(*args, **kwargs):
pass
def MURI___ne__(*args, **kwargs):
pass
def MFnExpression_setAnimated(*args, **kwargs):
pass
def MPlug_isLocked(*args, **kwargs):
pass
def MAttributeIndex___eq__(*args, **kwargs):
pass
def MIffFile_beginGet(*args, **kwargs):
pass
def MFnMesh_setFaceColors(*args, **kwargs):
pass
def MFnNonExtendedLight_swigregister(*args, **kwargs):
pass
def MDGMessage_addTimeChangeCallback(*args, **kwargs):
pass
def MRenderPassRegistry_swigregister(*args, **kwargs):
pass
def new_array2dFloat(*args, **kwargs):
pass
def MItMeshPolygon_hasColor(*args, **kwargs):
pass
def MFnContainerNode_getPublishedNodes(*args, **kwargs):
pass
def new_MFnSingleIndexedComponent(*args, **kwargs):
pass
def MFnDirectionalLight_useLightPosition(*args, **kwargs):
pass
def delete_MFloatVectorArray(*args, **kwargs):
pass
def MUintArray_sizeIncrement(*args, **kwargs):
pass
def MPlug_numChildren(*args, **kwargs):
pass
def MGlobal_stopErrorLogging(*args, **kwargs):
pass
def MFnMesh_getUvShellsIds(*args, **kwargs):
pass
def MCallbackIdArray_assign(*args, **kwargs):
pass
def MArgParser_commandArgumentDouble(*args, **kwargs):
pass
def MRampAttribute_sampleValueRamp(*args, **kwargs):
pass
def uCharPtr_assign(*args, **kwargs):
pass
def MItMeshPolygon_getPoints(*args, **kwargs):
pass
def MFnSet_getIntersection(*args, **kwargs):
pass
def MFnAmbientLight_setCastSoftShadows(*args, **kwargs):
pass
def delete_MFnCompoundAttribute(*args, **kwargs):
pass
def MFloatPoint___ne__(*args, **kwargs):
pass
def MUint64Array___add__(*args, **kwargs):
pass
def MNamespace_moveNamespace(*args, **kwargs):
pass
def MPlug_setAttribute(*args, **kwargs):
pass
def MGlobal_displayInfo(*args, **kwargs):
pass
def MFnMesh_getAssociatedUVSetTextures(*args, **kwargs):
pass
def MFnAnisotropyShader_setRefractiveIndex(*args, **kwargs):
pass
def MDagPath_isVisible(*args, **kwargs):
pass
def MRampAttribute_setValueAtIndex(*args, **kwargs):
pass
def MItMeshFaceVertex_swigregister(*args, **kwargs):
pass
def MFnPointArrayData_set(*args, **kwargs):
pass
def MVector_y_get(*args, **kwargs):
pass
def MPoint_x_get(*args, **kwargs):
pass
def MFloatPointArray_className(*args, **kwargs):
pass
def MUint64Array_length(*args, **kwargs):
pass
def delete_MPlugArray(*args, **kwargs):
pass
def MGlobal_getPreselectionHiliteList(*args, **kwargs):
pass
def MFnMesh_getInvisibleFaces(*args, **kwargs):
pass
def delete_MFnAnisotropyShader(*args, **kwargs):
pass
def MFnContainerNode_getCurrentAsMObject(*args, **kwargs):
pass
def MDagPath_push(*args, **kwargs):
pass
def MQuaternion_x_set(*args, **kwargs):
pass
def MItMeshFaceVertex_currentItem(*args, **kwargs):
pass
def MFnPhongShader_setCosPower(*args, **kwargs):
pass
def MFnCamera_setUsePivotAsLocalSpace(*args, **kwargs):
pass
def MFloatMatrix_swigregister(*args, **kwargs):
pass
def MTrimBoundaryArray_getMergedBoundary(*args, **kwargs):
pass
def MCallbackIdArray_swigregister(*args, **kwargs):
pass
def MObjectHandle_object(*args, **kwargs):
pass
def MCallbackIdArray_remove(*args, **kwargs):
pass
def MGlobal_selectFromScreen(*args, **kwargs):
pass
def MItEdits_addRemoveAttrEdit(*args, **kwargs):
pass
def MFnMesh_unlockFaceVertexNormals(*args, **kwargs):
pass
def MDAGDrawOverrideInfo_fPlaybackVisible_set(*args, **kwargs):
pass
def MQuaternion___mul__(*args, **kwargs):
pass
def MItMeshEdge_numConnectedFaces(*args, **kwargs):
pass
def delete_MFnPhongEShader(*args, **kwargs):
pass
def MNamespace_getNamespaceFromName(*args, **kwargs):
pass
def MFnCamera_computeDepthOfField(*args, **kwargs):
pass
def MFloatMatrix___iadd__(*args, **kwargs):
pass
def MTransformationMatrix_getRotationQuaternion(*args, **kwargs):
pass
def MCacheFormatDescription_className(*args, **kwargs):
pass
def delete_MObjectArray(*args, **kwargs):
pass
def MGlobal_mayaState(*args, **kwargs):
pass
def MFnMesh_setNormals(*args, **kwargs):
pass
def MFnLight_intensity(*args, **kwargs):
pass
def MFnNurbsSurface_boundaryType(*args, **kwargs):
pass
def MDagPathArray_sizeIncrement(*args, **kwargs):
pass
def MScriptUtil_getUint4ArrayItem(*args, **kwargs):
pass
def MItCurveCV_cv(*args, **kwargs):
pass
def MItMeshEdge_next(*args, **kwargs):
pass
def MFnNurbsSurface_assignUVs(*args, **kwargs):
pass
def MFloatArray___delitem__(*args, **kwargs):
pass
def MTransformationMatrix_addTranslation(*args, **kwargs):
pass
def MBoundingBox_swigregister(*args, **kwargs):
pass
def MNodeMessage_swigregister(*args, **kwargs):
pass
def MFnLight_className(*args, **kwargs):
pass
def MFnVolumeLight_setLightShape(*args, **kwargs):
pass
def MFnMesh_getClosestUVs(*args, **kwargs):
pass
def MFnContainerNode_getPublishedNames(*args, **kwargs):
pass
def MDagModifier_createNode(*args, **kwargs):
pass
def MScriptUtil_setShort4ArrayItem(*args, **kwargs):
pass
def doublePtr_value(*args, **kwargs):
pass
def MItInstancer_next(*args, **kwargs):
pass
def MFnNurbsSurface_getTrimBoundaries(*args, **kwargs):
pass
def MCallbackIdArray_clear(*args, **kwargs):
pass
def MFnCamera_filmTranslateH(*args, **kwargs):
pass
def delete_MFloatArray(*args, **kwargs):
pass
def MImage_pixels(*args, **kwargs):
pass
def MTransformationMatrix_asRotateMatrix(*args, **kwargs):
pass
def MAttributePatternArray_sizeIncrement(*args, **kwargs):
pass
def MNodeClass_hasAttribute(*args, **kwargs):
pass
def MFnNurbsCurve_setCV(*args, **kwargs):
pass
def MFnMesh_getTriangleOffsets(*args, **kwargs):
pass
def MDGModifier_newPlugValueMAngle(*args, **kwargs):
pass
def MFcurveEdit_swigregister(*args, **kwargs):
pass
def MScriptUtil_getCharArrayItem(*args, **kwargs):
pass
def new_shortPtr(*args, **kwargs):
pass
def MItGeometry_position(*args, **kwargs):
pass
def MFnNurbsSurface_isFoldedOnBispan(*args, **kwargs):
pass
def MFnCamera_setFilmFitOffset(*args, **kwargs):
pass
def MFileObject_pathCount(*args, **kwargs):
pass
def MTimer___ne__(*args, **kwargs):
pass
def MAttributePattern_addRootAttr(*args, **kwargs):
pass
def new_MNamespace(*args, **kwargs):
pass
def MFnUnitAttribute_create(*args, **kwargs):
pass
def MFnTransform_scalePivotTranslation(*args, **kwargs):
pass
def MDataHandle_asGenericBool(*args, **kwargs):
pass
def MDGModifier_setNodeLockState(*args, **kwargs):
pass
def MScriptUtil_getUchar(*args, **kwargs):
pass
def MItEdits_connectDisconnectEdit(*args, **kwargs):
pass
def MFnFloatArrayData_array(*args, **kwargs):
pass
def MTime___iadd__(*args, **kwargs):
pass
def MDataHandle_setMVector(*args, **kwargs):
pass
def MEulerRotation_x_get(*args, **kwargs):
pass
def MFnCamera_preScale(*args, **kwargs):
pass
def MFileIO_getErrorStatus(*args, **kwargs):
pass
def new_MTime(*args, **kwargs):
pass
def MAttributeSpecArray_set(*args, **kwargs):
pass
def MRichSelection_getSymmetry(*args, **kwargs):
pass
def new_MMeshSmoothOptions(*args, **kwargs):
pass
def new_MFnNurbsSurfaceData(*args, **kwargs):
pass
def MIntArray_assign(*args, **kwargs):
pass
def MFileIO_mustRenameToSave(*args, **kwargs):
pass
def MFnMesh_getUVSetsInFamily(*args, **kwargs):
pass
def MDataHandle_asShort2(*args, **kwargs):
pass
def delete_MDagMessage(*args, **kwargs):
pass
def delete_MFnGenericAttribute(*args, **kwargs):
pass
def MEulerRotation_z_get(*args, **kwargs):
pass
def MURI_getFragment(*args, **kwargs):
pass
def MDagMessage_addChildAddedCallback(*args, **kwargs):
pass
def MColorArray_set(*args, **kwargs):
pass
def MVector_swigregister(*args, **kwargs):
pass
def MCurveAttribute_setPositionAtIndex(*args, **kwargs):
pass
def MNodeClass_pluginName(*args, **kwargs):
pass
def new_MSyntax(*args, **kwargs):
pass
def new_MConditionMessage(*args, **kwargs):
pass
def MFnLight_numShadowSamples(*args, **kwargs):
pass
def delete_MItSubdFace(*args, **kwargs):
pass
def new_MFnExpression(*args, **kwargs):
pass
def MVectorArray_swigregister(*args, **kwargs):
pass
def MNodeClass_removeExtensionAttribute(*args, **kwargs):
pass
def MFnAttribute_addToCategory(*args, **kwargs):
pass
def MDataHandle_data(*args, **kwargs):
pass
def new_MVectorArray(*args, **kwargs):
pass
def MCommandResult_stringResult(*args, **kwargs):
pass
def MFnContainerNode_type(*args, **kwargs):
pass
def MFnNurbsSurface_getPatchUV(*args, **kwargs):
pass
def new_MPlane(*args, **kwargs):
pass
def MFnLight_opticalFXvisibility(*args, **kwargs):
pass
def MFnCamera_postScale(*args, **kwargs):
pass
def new_MFnEnumAttribute(*args, **kwargs):
pass
def delete_MEventMessage(*args, **kwargs):
pass
def MURI_removeAllQueryItems(*args, **kwargs):
pass
def delete_MItMeshVertex(*args, **kwargs):
pass
def MSceneMessage_addCheckReferenceCallback(*args, **kwargs):
pass
def MAddRemoveAttrEdit_editType(*args, **kwargs):
pass
def MURI_getScheme(*args, **kwargs):
pass
def MImageFileInfo_imageType(*args, **kwargs):
pass
def MNodeClass_className(*args, **kwargs):
pass
def MFnArrayAttrsData_list(*args, **kwargs):
pass
def new_array2dDouble(*args, **kwargs):
pass
def MFloatVector_assign(*args, **kwargs):
pass
def MURI_copy(*args, **kwargs):
pass
def MSceneMessage_addNamespaceRenamedCallback(*args, **kwargs):
pass
def MQuaternion___imul__(*args, **kwargs):
pass
def MIffFile_endGet(*args, **kwargs):
pass
def MFnMesh_setVertexColors(*args, **kwargs):
pass
def delete_array2dFloat(*args, **kwargs):
pass
def MEvaluationManager_evaluationInExecution(*args, **kwargs):
pass
def MFnSingleIndexedComponent_create(*args, **kwargs):
pass
def MFnDirectionalLight_setUseLightPosition(*args, **kwargs):
pass
def MUintArray___len__(*args, **kwargs):
pass
def MFnMesh_getPinUVs(*args, **kwargs):
pass
def MEvaluationManager_className(*args, **kwargs):
pass
def MItCurveCV_className(*args, **kwargs):
pass
def MDataBlock_swigregister(*args, **kwargs):
pass
def MRampAttribute_swigregister(*args, **kwargs):
pass
def uCharPtr_value(*args, **kwargs):
pass
def MItMeshPolygon_setPoint(*args, **kwargs):
pass
def MFnSet_clear(*args, **kwargs):
pass
def MUint64Array___radd__(*args, **kwargs):
pass
def MIntArray___delitem__(*args, **kwargs):
pass
def new_MItMeshPolygon(*args, **kwargs):
pass
def MFnGeometryData_getMatrix(*args, **kwargs):
pass
def MItMeshFaceVertex_position(*args, **kwargs):
pass
def MUint64Array_remove(*args, **kwargs):
pass
def MPlugArray___getitem__(*args, **kwargs):
pass
def MFnAnisotropyShader_className(*args, **kwargs):
pass
def MFnGeometryData_matrixIsIdentity(*args, **kwargs):
pass
def MQuaternion_x_get(*args, **kwargs):
pass
def MFileObject_expandedPath(*args, **kwargs):
pass
def MFnCamera_usePivotAsLocalSpace(*args, **kwargs):
pass
def MTrimBoundaryArray_className(*args, **kwargs):
pass
def MGlobal_setPreselectionHiliteList(*args, **kwargs):
pass
def MItMeshEdge_numConnectedEdges(*args, **kwargs):
pass
def MFnPhongEShader_className(*args, **kwargs):
pass
def MDataHandle_asDouble(*args, **kwargs):
pass
def MFnCamera_setMotionBlur(*args, **kwargs):
pass
def MTransformationMatrix_setRotationQuaternion(*args, **kwargs):
pass
def MObjectArray_assign(*args, **kwargs):
pass
def MGlobal_getFunctionSetList(*args, **kwargs):
pass
def MFnMesh_getFaceVertexNormal(*args, **kwargs):
pass
def MFnLight_setIntensity(*args, **kwargs):
pass
def MFnPartition_className(*args, **kwargs):
pass
def MScriptUtil_setUint4ArrayItem(*args, **kwargs):
pass
def MFnPointArrayData_array(*args, **kwargs):
pass
def MItMeshEdge_reset(*args, **kwargs):
pass
def MFnNurbsSurface_clearUVs(*args, **kwargs):
pass
def MFnCamera_isClippingPlanes(*args, **kwargs):
pass
def MFloatArray___repr__(*args, **kwargs):
pass
def MTransformationMatrix_setShear(*args, **kwargs):
pass
def MCacheFormatDescription_setDistribution(*args, **kwargs):
pass
def new_MNurbsIntersector(*args, **kwargs):
pass
def MNodeMessage_addNodePreRemovalCallback(*args, **kwargs):
pass
def MFnVolumeLight_volumeLightDirection(*args, **kwargs):
pass
def MDagModifier_reparentNode(*args, **kwargs):
pass
def MScriptUtil_getFloat2ArrayItem(*args, **kwargs):
pass
def doublePtr_cast(*args, **kwargs):
pass
def MItInstancer_nextParticle(*args, **kwargs):
pass
def MFnNurbsSurface_trimWithBoundaries(*args, **kwargs):
pass
def MEvaluationNode_datablock(*args, **kwargs):
pass
def MFnMesh_getAssociatedUVSetInstances(*args, **kwargs):
pass
def MFloatArray_assign(*args, **kwargs):
pass
def MTransformationMatrix_getScale(*args, **kwargs):
pass
def MFnVectorArrayData_type(*args, **kwargs):
pass
def MFnMesh_booleanOp(*args, **kwargs):
pass
def MDataHandle_className(*args, **kwargs):
pass
def MNodeMessage_addKeyableChangeOverride(*args, **kwargs):
pass
def MScriptUtil_getUcharArrayItem(*args, **kwargs):
pass
def MItGeometry_normal(*args, **kwargs):
pass
def MFnNurbsSurface_area(*args, **kwargs):
pass
def MFnCamera_filmFitOffset(*args, **kwargs):
pass
def MFileObject_ithPath(*args, **kwargs):
pass
def MTimer_clear(*args, **kwargs):
pass
def MAttributePattern_className(*args, **kwargs):
pass
def delete_MNamespace(*args, **kwargs):
pass
def MFnPartition_addMember(*args, **kwargs):
pass
def MFnUnitAttribute_unitType(*args, **kwargs):
pass
def MFnMesh_collapseFaces(*args, **kwargs):
pass
def MFnCompoundAttribute_className(*args, **kwargs):
pass
def MDataHandle_asGenericChar(*args, **kwargs):
pass
def MDGModifier_connect(*args, **kwargs):
pass
def MScriptUtil_setIntArray(*args, **kwargs):
pass
def MItEdits_className(*args, **kwargs):
pass
def MFnNurbsSurface_numKnotsInV(*args, **kwargs):
pass
def MFnCamera_panZoomEnabled(*args, **kwargs):
pass
def MFnAmbientLight_shadowRadius(*args, **kwargs):
pass
def delete_MFileObject(*args, **kwargs):
pass
def MTime___sub__(*args, **kwargs):
pass
def MAttributeSpec_assign(*args, **kwargs):
pass
def MNamespace_currentNamespace(*args, **kwargs):
pass
def MFnUint64SingleIndexedComponent_type(*args, **kwargs):
pass
def new_MFnMesh(*args, **kwargs):
pass
def MFnTransform_type(*args, **kwargs):
pass
def MDataHandle_setMFloatVector(*args, **kwargs):
pass
def MMeshSmoothOptions_setOpenSubdivFaceVaryingBoundary(*args, **kwargs):
pass
def MScriptUtil_setInt(*args, **kwargs):
pass
def new_MEvaluationNodeIterator(*args, **kwargs):
pass
def MFnCamera_setFilmTranslateV(*args, **kwargs):
pass
def MFnNurbsSurface_formInU(*args, **kwargs):
pass
def MFnLight_useRayTraceShadows(*args, **kwargs):
pass
def MFnCamera_getAspectRatioLimits(*args, **kwargs):
pass
def MFileIO_resetError(*args, **kwargs):
pass
def delete_MTime(*args, **kwargs):
pass
def MMatrixArray_copy(*args, **kwargs):
pass
def MFnTypedAttribute_className(*args, **kwargs):
pass
def MSelectionList_replace(*args, **kwargs):
pass
def MFnMatrixData_className(*args, **kwargs):
pass
def MFnAssembly_getRepLabel(*args, **kwargs):
pass
def MDataHandle_asSubdSurface(*args, **kwargs):
pass
def delete_MMeshSmoothOptions(*args, **kwargs):
pass
def MScriptUtil_asCharPtr(*args, **kwargs):
pass
def MItDependencyGraph_thisNodeHasUnknownType(*args, **kwargs):
pass
def MFnNurbsSurfaceData_create(*args, **kwargs):
pass
def MInt64Array_length(*args, **kwargs):
pass
def MFileIO_setMustRenameToSave(*args, **kwargs):
pass
def new_MTimeArray(*args, **kwargs):
pass
def MFnContainerNode_getMembers(*args, **kwargs):
pass
def MMatrix_det3x3(*args, **kwargs):
pass
def MFnSubdNames_toSelectionIndices(*args, **kwargs):
pass
def new_MFnMatrixArrayData(*args, **kwargs):
pass
def MFnDagNode_objectGroupComponent(*args, **kwargs):
pass
def MDataHandle_asLong2(*args, **kwargs):
pass
def MDagMessage_swigregister(*args, **kwargs):
pass
def new_MScriptUtil(*args, **kwargs):
pass
def MFnGeometryData_removeObjectGroupComponent(*args, **kwargs):
pass
def MItDependencyGraph_disablePruningOnFilter(*args, **kwargs):
pass
def MFnNurbsCurve_tangent(*args, **kwargs):
pass
def MFnReference_isLoaded(*args, **kwargs):
pass
def MFileIO_exportSelectedAnimFromReference(*args, **kwargs):
pass
def MTesselationParams_setWorldspaceToScreenTransform(*args, **kwargs):
pass
def MArrayDataHandle_setAllClean(*args, **kwargs):
pass
def MMatrix_get(*args, **kwargs):
pass
def MFnSubdNames_fromMUint64(*args, **kwargs):
pass
def MEvaluationNodeIterator_next(*args, **kwargs):
pass
def MFnLayeredShader_setHardwareColor(*args, **kwargs):
pass
def new_MIteratorType(*args, **kwargs):
pass
def MFnDagNode_setObjectColorType(*args, **kwargs):
pass
def MDagMessage_addChildAddedDagPathCallback(*args, **kwargs):
pass
def MProfiler_isDataFromFile(*args, **kwargs):
pass
def MItDag_instanceCount(*args, **kwargs):
pass
def MFnBlinnShader_eccentricity(*args, **kwargs):
pass
def MSelectionList_assign(*args, **kwargs):
pass
def MEvaluationNodeIterator_reset(*args, **kwargs):
pass
def MSyntax_swigregister(*args, **kwargs):
pass
def MArrayDataBuilder_growArray(*args, **kwargs):
pass
def MItSurfaceCV_currentItem(*args, **kwargs):
pass
def MFnSubd_evaluateNormal(*args, **kwargs):
pass
def MFnLatticeData_create(*args, **kwargs):
pass
def MFnDagNode_setInstanceable(*args, **kwargs):
pass
def new_MWeight(*args, **kwargs):
| |
<filename>SequenceWrangler.py
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn import preprocessing
import time
import sys
import os
import dill as pickle
import utils
import pathos.multiprocessing as mp
# Class to take a list of continuous, contiguous data logs that need to be collated and split for the batch handler
# This generates sequences of proper lengths (history track and ground truth prediction track) in the parameters file
# WARNING - ANY change to this file (including comments) invalidates the hash and the Wrangler runs again.
class SequenceWrangler:
def __init__(self, parameters, sourcename, n_folds=5, training=0.55, val=0.2, test=0.25):
self.n_folds = n_folds
self.parameters = parameters.parameters
#TODO Normalize the below splits
self.training_split = training
self.val_split = val
self.test_split = test
self.pool_dir = 'data_pool'
self.sourcename = sourcename
self.trainval_idxs = None
self.test_idxs = None
self.encoder_means = None
self.encoder_vars = None
self.encoder_stddev = None
return
def get_pool_filename(self):
ibeo = True
if ibeo:
filename = "pool_ckpt_ibeo_" + \
''.join([x[0] + x[-1] + '-' for x in self.parameters['ibeo_data_columns']]) + \
"obs-" + str(self.parameters["observation_steps"]) + \
"_pred-" + str(self.parameters["prediction_steps"]) + \
str(hash(tuple(self.sourcename))
+ hash(self.parameters['reject_stopped_vehicles_before_intersection_enable'])
+ hash(self.parameters['reject_stopped_vehicles_before_intersection_speed'])
+ hash(self.parameters['reject_stopped_vehicles_before_intersection_duration'])
+ utils.get_library_hash()) + \
".pkl"
else:
filename = "pool_ckpt_" +\
"obs-" + str(self.parameters["observation_steps"]) + \
"_pred-" + str(self.parameters["prediction_steps"]) + \
".pkl"
return filename
def load_from_checkpoint(self,):
#Function that returns True if data can be loaded, else false.
if not os.path.exists(self.pool_dir):
return False
file_path = os.path.join(self.pool_dir, self.get_pool_filename())
file_exists = os.path.isfile(file_path)
if not file_exists:
return False
print "Reading pool cache from disk..."
self.master_pool = pd.read_pickle(file_path)
return True
def load_splits_from_checkpoint(self, filename):
if not os.path.exists(self.pool_dir):
return False
file_path = os.path.join(self.pool_dir, filename)
file_exists = os.path.isfile(file_path)
if not file_exists:
return False
print "Reading cached crossfold pool data..."
with open(file_path, 'rb') as pkl_file:
try:
from_pickle = pickle.load(pkl_file)
except ImportError:
print "Cache miss due to incompatible pandas version between saving and loading"
return False
self.crossfold_pool = from_pickle['crossfold_pool']
self.test_pool = from_pickle['test_pool']
return True
    def split_into_evaluation_pools(self, trainval_idxs=None, test_idxs=None, test_csv=None):
        """Split the master pool into train/val crossfold pools and a test pool.

        Also computes per-feature mean/var/stddev over every encoder sample
        (stored on ``self.encoder_means`` / ``encoder_vars`` / ``encoder_stddev``).

        :param trainval_idxs: pre-computed train+val track indices (used when
            resuming from a model checkpoint); None to split freshly.
        :param test_idxs: pre-computed test track indices, paired with
            ``trainval_idxs``.
        :param test_csv: when given, hold out every track whose csv_name
            contains this string as the test set (full-intersection holdout).
        """
        # I want to cache the pool concat
        # So I need a unique id from master pool --> self.get_pool_filename
        # I also want a unique id for the crossfold_indicies. Only if that is deterministic. I'll have to check
        #
        # Fixed seed so the split (and hence the cache key) is deterministic.
        seed = 42296 #np.random.randint(4294967296)
        print "Using seed: " + str(seed) + " for test/train split"
        encoder_pool = []
        # Flatten every encoder sample into one array to compute statistics.
        for encoder_data in self.master_pool.encoder_sample.iteritems():
            encoder_values = encoder_data[1]
            encoder_pool.append(encoder_values[0])
            last_encoder = encoder_values
            encoder_pool.extend(encoder_values[1:])
        encoder_pool = np.array(encoder_pool)
        #Compute averages here
        self.encoder_means = np.mean(encoder_pool, axis=0)
        self.encoder_vars = np.var(encoder_pool, axis=0)
        self.encoder_stddev = np.std(encoder_pool, axis=0)
        print "Encoder means: " + str(self.encoder_means)
        print "Encoder vars: " + str(self.encoder_vars)
        print "Encoder standard deviations: " + str(self.encoder_stddev)
        raw_indices = self.master_pool.track_idx.unique()
        # origin_destination_class_list = self.master_pool.track_class.unique()
        # Stratify on the relative destination when relative columns are used,
        # otherwise on the absolute track class.
        if 'relative' in self.parameters['ibeo_data_columns'][0]:
            class_to_fit = 'relative_destination'
        else:
            class_to_fit = 'track_class'
        # rebuild track_class vector
        raw_classes = []
        for raw_idx in raw_indices:
            #Get the first results that matches the track_idx and return its destination class
            #by construction, this data is consistent across all sample values for that track
            track_class = self.master_pool[self.master_pool.track_idx==raw_idx][class_to_fit].unique()
            raw_classes.append(track_class[0])
        st_encoder = preprocessing.LabelEncoder()
        st_encoder.fit(raw_classes)
        origin_destination_enc_classes = st_encoder.transform(raw_classes)
        #########
        if (trainval_idxs is None) and (test_idxs is None):
            # if we are not loading a model from a checkpoint
            if test_csv is not None:
                # if we are doing a full intersection holdout.
                self.trainval_idxs = []
                self.test_idxs = []
                for track_idx in raw_indices:
                    if test_csv in self.master_pool[self.master_pool.track_idx == track_idx]['csv_name'].unique()[0]:
                        self.test_idxs.append(track_idx)
                    else:
                        self.trainval_idxs.append(track_idx)
                self.test_idxs = np.array(self.test_idxs)
                self.trainval_idxs = np.array(self.trainval_idxs)
            else:
                # Stratified split so class proportions match across pools.
                self.trainval_idxs, self.test_idxs = train_test_split(raw_indices, # BREAK HERE
                                                                      test_size=self.test_split,
                                                                      stratify=origin_destination_enc_classes,
                                                                      random_state=seed)
        else:
            self.trainval_idxs = trainval_idxs
            self.test_idxs = test_idxs
        # Cache the test/train/val splits. The concats take forever.
        cache_name = self.get_pool_filename() + '-' + str(abs(hash(tuple(self.trainval_idxs))
                                                              + utils.get_library_hash())) + '.pkl'
        if not self.load_splits_from_checkpoint(cache_name):
            print "Crossfold cache miss, calculating splits and making sub-pools"
            #cache miss
            crossfold_idx_lookup = np.array(self.trainval_idxs)
            #Now I need the class of each track in trainval_idx
            trainval_class = []
            for trainval_idx in self.trainval_idxs:
                track_class = self.master_pool[self.master_pool.track_idx==trainval_idx]['track_class'].unique()
                trainval_class.append(track_class[0])
            skf = StratifiedKFold(n_splits=self.n_folds,random_state=seed)
            crossfold_indicies = [list(skf.split(self.trainval_idxs, trainval_class))[0]] # I only use one fold anyway
            # crossfold_pool[fold] is a [train_list, val_list] pair.
            crossfold_pool = [[[], []] for x in xrange(self.n_folds)]
            test_pool = []
            #Now iterate over each track, and dump it into the appropriate crossfold sub-pool or test pool
            for track_raw_idx in raw_indices:
                # For each pool
                for fold_idx in range(len(crossfold_indicies)):
                    # For train or validate in the pool
                    for trainorval_pool_idx in range(len(crossfold_indicies[fold_idx])):
                        # If the crossfold_list index of the track matches
                        if track_raw_idx in crossfold_idx_lookup[crossfold_indicies[fold_idx][trainorval_pool_idx]]:
                            #Here, I want to append all data in the master pool that is from the track
                            crossfold_pool[fold_idx][trainorval_pool_idx].append(
                                self.master_pool[self.master_pool['track_idx']==track_raw_idx]
                            )
                            #print "Added track " + str(track_raw_idx) + " to cf pool " + str(fold_idx) + \
                            #    (" train" if trainorval_pool_idx is 0 else " test")
                # else it must exist in the test_pool
                if track_raw_idx in self.test_idxs:
                    test_pool.append(
                        self.master_pool[self.master_pool['track_idx'] == track_raw_idx]
                    )
                    #print "Added track " + str(track_raw_idx) + " to test pool"
            print "concatenating pools"
            for fold_idx in range(len(crossfold_indicies)):
                for trainorval_pool_idx in range(len(crossfold_indicies[fold_idx])):
                    crossfold_pool[fold_idx][trainorval_pool_idx] = pd.concat(crossfold_pool[fold_idx][trainorval_pool_idx])
            self.crossfold_pool = crossfold_pool
            self.test_pool = pd.concat(test_pool)
            to_pickle = {}
            to_pickle['crossfold_pool'] = crossfold_pool
            to_pickle['test_pool'] = pd.concat(test_pool)
            file_path = os.path.join(self.pool_dir, cache_name)
            with open(file_path, 'wb') as pkl_file:
                pickle.dump(to_pickle, pkl_file)
            print "wrote crossfolding cache"
        return
# This function will generate the data pool for the dataset from the natualistic driving data set.
# Its input is a list of tracks, and a list of labels in the format "origin-destination"
# The tracks are in [Data T], where Data is a list of floats of len 4, x,y, heading speed.
    def generate_master_pool_naturalistic_2015(self, raw_sequences=None, raw_classes=None):
        """Build and cache the master pool from the 2015 naturalistic dataset.

        :param raw_sequences: list of tracks; each track is [Data T] where
            Data is a list of four floats: x, y, heading, speed.
        :param raw_classes: list of labels in the format "origin-destination",
            one per track.
        """
        # Convert raw_classes into a list of indicies
        st_encoder = preprocessing.LabelEncoder()
        st_encoder.fit(raw_classes)
        origin_destintation_classes = st_encoder.transform(raw_classes)
        # Split each "origin-destination" label at the first hyphen.
        dest_raw_classes = [label[label.find('-') + 1:] for label in raw_classes]
        origin = [label[:label.find('-')] for label in raw_classes]
        des_encoder = preprocessing.LabelEncoder()
        des_encoder.fit(dest_raw_classes)
        self.des_classes = des_encoder.transform(dest_raw_classes)
        dest_1hot_enc = preprocessing.OneHotEncoder()
        dest_1hot_enc.fit(np.array(self.des_classes).reshape(-1,1))
        # Forces continuity b/w crossfold template and test template
        def _generate_template(track_idx, track_class,origin, destination, destination_vec):
            # One-row DataFrame carrying the per-track metadata that gets
            # replicated onto every slice of the track.
            return pd.DataFrame({"track_idx": track_idx,
                                 "track_class": track_class,
                                 "origin":origin,
                                 "destination": destination,
                                 "destination_vec": destination_vec,
                                 "dest_1_hot":
                                     pd.Series([dest_1hot_enc.transform(destination_vec).toarray().astype(np.float32)[0]],
                                               dtype=object)
                                 }, index=[0])
        """
        The notionally correct way to validate the algorithm is as follows:
        --90/10 split for (train/val) and test
        --Within train/val, do a crossfold search
        So I'm going to wrap the crossvalidator in another test/train picker, so
        that both are picked with an even dataset.
        """
        master_pool = []
        # For all tracks
        for track_raw_idx in range(len(raw_sequences)):
            try:
                # if track_raw_idx > 10:
                #     break
                # Lookup the index in the original collection
                # Get data
                # print "Wrangling track: " + str(track_raw_idx)
                wrangle_time = time.time()
                single_track = raw_sequences[track_raw_idx]
                df_template = _generate_template(track_raw_idx, raw_classes[track_raw_idx],
                                                 origin[track_raw_idx],
                                                 dest_raw_classes[track_raw_idx],
                                                 self.des_classes[track_raw_idx])
                track_pool = self._track_slicer(single_track,
                                                self.parameters['observation_steps'],
                                                self.parameters['prediction_steps'],
                                                df_template,
                                                bbox=20) # FIXME parameters.bbox)
                master_pool.append(track_pool)
            except ValueError:
                # _track_slicer raises ValueError for tracks shorter than
                # observation_steps + prediction_steps; skip those tracks.
                print "Warning, track discarded as it did not meet minimum length requirements"
                continue
        self.master_pool = pd.concat(master_pool)
        if not os.path.exists(self.pool_dir):
            os.makedirs(self.pool_dir)
        # Cache the result so load_from_checkpoint() can skip this next run.
        file_path = os.path.join(self.pool_dir, self.get_pool_filename())
        self.master_pool.to_pickle(file_path)
        return
def _extract_ibeo_data_for_encoders(self,single_track):
# Code that transforms the big dataframe into the input data list style for encoder/decoder
# DOES NOT DO TRACK SPLITTING. Len output shoulbe be equal to len input
'''
'level_0', u'index', u'ObjectId', u'Flags',
u'trackedByStationaryModel', u'mobile', u'motionModelValidated',
u'ObjectAge', u'Timestamp', u'ObjectPredAge', u'Classification',
u'ClassCertainty', u'ClassAge', u'ObjBoxCenter_X', u'ObjBoxCenter_Y',
u'ObjBoxCenterSigma_X', u'ObjBoxCenterSigma_Y', u'ObjBoxSize_X',
u'ObjBoxSize_Y', u'ObjCourseAngle', u'ObjCourseAngleSigma',
u'ObjBoxOrientation', u'ObjBoxOrientationSigma', u'RelVelocity_X',
u'RelVelocity_Y', u'RelVelocitySigma_X', u'RelVelocitySigma_Y',
u'AbsVelocity_X', u'AbsVelocity_Y', u'AbsVelocitySigma_X',
u'AbsVelocitySigma_Y', u'RefPointLocation', u'RefPointCoords_X',
u'RefPointCoords_Y', u'RefPointCoordsSigma_X', u'RefPointCoordsSigma_Y',
u'RefPointPosCorrCoeffs', u'ObjPriority', u'ObjExtMeasurement',
u'EgoLatitude', u'EgoLongitude', u'EgoAltitude', u'EgoHeadingRad',
u'EgoPosTimestamp', u'GPSFixStatus', u'ObjPrediction', u'Object_X',
u'Object_Y', u'uniqueId', u'origin', u'destination', u'distance'],
dtype = 'object')
'''
# ibeo_data_columns = ["Object_X","Object_Y","ObjBoxOrientation","AbsVelocity_X","AbsVelocity_Y","ObjectPredAge"]
output_df = single_track.loc[:,self.parameters["ibeo_data_columns"]].values.astype(np.float32)
return output_df
def _pad_single_track(self, single_track, padding_length):
# Track is padded unconditionally by the length of the future_track
track_padding_bool = [False] * len(single_track) + [True] * padding_length
single_track = single_track.append(single_track.iloc[[-1]*padding_length], ignore_index=True)
single_track['trackwise_padding'] = track_padding_bool
return single_track
    def _split_and_slice_tracks_multiprocess_helper(self, args):
        """Unpack *args* and forward to :meth:`_split_and_slice_tracks`.

        Accepts the arguments bundled as a single tuple — presumably so a
        multiprocessing pool ``map`` can pass one picklable object per task;
        confirm against the caller.
        """
        return self._split_and_slice_tracks(*args)
def _split_and_slice_tracks(self, single_track, track_raw_idx, des_encoder, dest_1hot_enc, data_columns):
# Forces continuity b/w crossfold template and test template
def _generate_ibeo_template(track_idx, track_class, origin, destination, destination_vec):
return pd.DataFrame({"track_idx": track_idx,
"track_class": track_class,
"origin": origin,
"destination": destination,
"destination_vec": destination_vec,
"dest_1_hot":
pd.Series([dest_1hot_enc.transform(destination_vec.reshape(-1, 1)
).astype(np.float32).toarray()[0]],
dtype=object)
}, index=[0])
# Lookup the index in the original collection
# Get data
# rint "Wrangling track: " + str(track_raw_idx) + " of: " + str(len(ibeo_track_list))
sys.stdout.write("\rWrangling track: %04d" % (track_raw_idx))
sys.stdout.flush()
wrangle_time = time.time()
#single_track = ibeo_track_list[track_raw_idx]
# If we want | |
__str__(self):
self.templateMap = {
'message': self.getMessage()
,'identifier': self.identifier
# ,'props': stringify([ conf.INDENT + str(p) for p in self.getProperties() if len(self) == 2])
,'props': stringify([ conf.INDENT + str(p) for p in self.getProperties()], prefix=(len(self) > 2))
,'token': self.token
}
return tidy(Template(getattr(conf.TEMPLATES,self.token)).substitute(**self.templateMap))
class View(base):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
def __init__(self, input):
self._fields = {}
self.primaryKey = ''
self.message = ''
self.children = {}
self.parent = None
super(View, self).__init__(input)
self.token = 'view'
def __str__(self):
self.templateMap = {
'message':self.getMessage()
,'token':self.token
,'identifier':self.identifier
,'props': stringify([str(p) for p in self.getProperties() if p.name != "sets"])
,'parameters':stringify(sortMe(self.parameters()))
,'filters': stringify(sortMe(self.filters()))
,'dimensions': stringify(sortMe(self.dims()))
,'dimensionGroups': stringify(sortMe(self.dimensionGroups()))
,'measures': stringify(sortMe(self.measures()))
,'sets': stringify([str(p) for p in self.getProperties() if p.name == "sets"])
,'children': stringify(self.children.values()) if self.children else ''
}
return tidy(Template(getattr(conf.TEMPLATES,self.token)).substitute(**self.templateMap))
def _bind_lkml(self,jsonDict):
t = 'measures'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + Measure(field)
jsonDict.pop(t)
else:
pass
t = 'dimensions'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + Dimension(field)
jsonDict.pop(t)
else:
pass
t = 'filters'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + Filter(field)
jsonDict.pop(t)
else:
pass
t = 'dimension_groups'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + DimensionGroup(field)
jsonDict.pop(t)
else:
pass
t = 'parameters'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + Parameter(field)
jsonDict.pop(t)
else:
pass
super()._bind_lkml(jsonDict)
def getFieldsSorted(self):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
# ''' returns all the fields sorted first by alpabetical dimensions/filters, then alphabetical measures '''
return sorted(self._fields.values(), key=lambda field: ''.join([str(isinstance(field, Measure)), field.identifier]))
def __repr__(self):
return "%s (%r) fields: %s id: %s" % (self.__class__, self.identifier, len(self), hex(id(self)))
def __len__(self):
return len([f for f in self.fields()])
def __add__(self,other):
if isinstance(other, Field):
return self.addField(other)
elif isinstance(other, str):
#TODO: decide if still want to support view + 'id' behavior, and if so check regex first. Maybe a regex string to just ask: is snake str -> dim
if len(other) < 10:
return self.addDimension(dbColumn=other)
else:
self._bind_lkml(lkml.load(other))
else:
raise Exception(str(type(other)) + ' cannot be added to View')
def __radd__(self,other):
return self.__add__(other)
def __sub__(self,other):
if isinstance(other, Field):
return self.removeField(other)
elif isinstance(other, str):
return self.removeField(other)
elif isinstance(other,View):
return self.children.pop(other.identifier,None)
else:
raise Exception(str(type(other)) + ' cannot be subtracted from View')
def __rsub__(self,other):
return self.__sub__(other)
def __invert__(self):
''' hides all dimensions (not measures) '''
for dim in self.dims():
dim.hide()
for dim in self.dimensionGroups():
dim.hide()
for dim in self.parameters():
dim.hide()
for dim in self.filters():
dim.hide()
return self
def __contains__(self,item):
return item in self._fields.keys()
    def __getitem__(self, identifier):
        """Bracket access: ``view['field_name']`` delegates to :meth:`field`."""
        return self.field(identifier)
    def __getattr__(self, key):
        """Fallback attribute lookup: properties, aliases, then fields.

        Resolution order: instance dict, declared LookML properties,
        ``name``/``pk``/``__ref__`` aliases, and finally field lookup by name
        (which raises KeyError for unknown names).
        """
        # NOTE(review): __getattr__ only fires after normal attribute lookup
        # has already failed, so this __dict__ branch should be unreachable
        # via ordinary attribute access — confirm before relying on it.
        if key in self.__dict__.keys():
            return self.__dict__[key]
        elif key in self.properties.props():
            return self.getProperty(key)
        elif key == 'name':
            # Alias: view.name mirrors the identifier.
            return self.identifier
        elif key == 'pk':
            return self.getPrimaryKey()
        elif key == '__ref__':
            # LookML substitution reference, e.g. "${view_name}".
            return splice('${', self.identifier, '}')
        else:
            # Anything else is treated as a field name.
            return self.field(key)
    def __setattr__(self, name, value):
        """Intercept attribute writes for label/name/pk aliases and LookML props.

        ``label``/``name``/``pk`` route to their setter methods, names listed
        in ``conf.language_rules.view_props`` become LookML properties, and
        everything else is a plain instance attribute.
        """
        if name == 'label':
            self.setLabel(value)
            return self
        elif name == 'name':
            self.setName(value)
            return self
        elif name == 'pk':
            self.setPrimaryKey(value)
            return self
        elif name in conf.language_rules.view_props:
            # Recognized LookML view property -> stored via setProperty.
            self.setProperty(name, value)
        else:
            # Ordinary attribute; bypass this override to avoid recursion.
            object.__setattr__(self, name, value)
def setExtensionRequired(self):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
# ''' Sets the view to be "extension: required" '''
self.properties.addProperty('extension','required')
return self
def getFieldsByTag(self,tag):
'''
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
for field in self.fields():
if tag in field.tags:
yield field
def fields(self):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
# '''Returns all the fields as a generator'''
for field, literal in self._fields.items():
## Does this yeild only return the first instance it is looped?
yield literal
def fieldNames(self):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
return list(self._fields.keys())
def getFieldsByType(self, t):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
return filter(lambda field: str(field.type) == 'type: '+ t, list(self._fields.values()))
    def sumAllNumDimensions(self):
        """Add a ``total_<dim>`` sum measure for every number-typed dimension.

        For each dimension whose type renders as ``type: number`` and for
        which no ``total_<name>`` field already exists, a sum Measure over
        that dimension's reference is added to the view.
        """
        for field in self.getFieldsByType('number'):
            tmpFieldName = 'total_' + field.name
            # Skip non-dimensions (e.g. number measures) and avoid clobbering
            # an existing field of the same name.
            if tmpFieldName not in self.fieldNames() and isinstance(field,Dimension):
                self + Measure({
                    'name': tmpFieldName
                    ,'type':'sum'
                    # NOTE(review): __refs__ (plural) appears to be the field's
                    # substitution reference — confirm against Field's API.
                    ,'sql':field.__refs__
                })
def field(self, f):
'''
get a field (most commonly, will pass in a field name)
:param field: Field to return
:type field: str or Field (or Dimension, Measure...) object
:return: Returns a subtype of Field
:rtype: Dimension, Measure, Filter or Parameter
'''
# ''' retrieve a field, argument can be the name or a field'''
if isinstance(f,str):
try:
return self._fields[f]
except KeyError:
raise KeyError
elif isinstance(f,Field):
return self._fields[f.identifier]
def search(self, prop, pattern):
'''
pass a regex expression and will return the fields whose sql match
:param prop: name of proprty you'd like to search
:param pattern: the regex pattern
:type prop: str
:type patter: a regex search string
:return: a generator / iteratble set of fields who have a member property matching the pattern
:rtype: Field
'''
if isinstance(pattern,list):
pattern = '('+'|'.join(pattern)+')'
searchString = r''.join([r'.*',pattern,r'.*'])
for field in self.fields():
if re.match(searchString,str(field.getProperty(prop))):
yield field
    def addField(self, field):
        '''
        add a field to the view
        * if the field is a dimension and primary key it will be set as the view primary key
        * the field will have its view set so that the view may be referenced from the field object
        :param field: field to add
        :type field: Field (or subtype)
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
        # setView returns the field itself, so the field is registered here
        # while also learning which view owns it.
        self._fields.update({field.identifier: field.setView(self)})
        # If a primary key is added it will overwrite the existing primary key....
        if isinstance(field, Dimension):
            if field.isPrimaryKey():
                # field.setPrimaryKey()
                self.setPrimaryKey(field.identifier)
        return self
    def removeField(self,field):
        '''
        Removes a field from the View
        * also unsets the view primary key if the removed field was it
        :param field: field to remove
        :type field: Field object or str name of field
        :return: returns the removed field (or None if absent)
        :rtype: Field or None
        '''
        # Helper: de-reference the view's primary key if this field is it.
        def pk(k):
            if k.isPrimaryKey():
                self.unSetPrimaryKey()
        if isinstance(field,Field):
            if isinstance(field,Dimension):
                pk(field)
            # NOTE(review): pk() is then also called on the stored field of
            # the same identifier — redundant when the same object was passed,
            # and self.field() raises KeyError if the field is not attached.
            # It also runs for non-Dimension fields; confirm isPrimaryKey
            # exists on all Field subtypes.
            pk(self.field(field.identifier))
            return self._fields.pop(field.identifier, None)
        elif isinstance(field,str):
            dimToDel = self.field(field)
            if isinstance(dimToDel,Dimension):
                pk(dimToDel)
            return self._fields.pop(field, None)
        else:
            raise Exception('Not a string or Field instance provided')
def addFields(self, fields):
'''
Add multiple fields to a view. An iterable collection of field objects will be passed to the add field function. Helpful for adding many fields at once
:param fields: set or list of fields [field1, field2 ...]
:type fields: type description
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
for field in fields:
self.addField(field)
return self
def setPrimaryKey(self, f, callFromChild=False):
'''
TODO: Complete Desctiption
represents a view onject in LookML
:param | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from typing import Any, Dict, List, Union
import numpy as np
import pandas as pd
from ... import opcodes as OperandDef
from ...core import ENTITY_TYPE, OutputType, recursive_tile
from ...core.context import Context
from ...serialization.serializables import KeyField, AnyField
from ...tensor.core import Tensor
from ...tensor.datasource import tensor as astensor
from ...tensor.utils import unify_chunks
from ...typing import EntityType, TileableType
from ...utils import has_unknown_shape
from ..core import INDEX_TYPE, SERIES_TYPE
from ..operands import DataFrameOperand, DataFrameOperandMixin
from ..utils import parse_index
class DataFrameFromTensor(DataFrameOperand, DataFrameOperandMixin):
"""
Represents data from mars tensor
"""
_op_type_ = OperandDef.DATAFRAME_FROM_TENSOR
input = AnyField("input")
index = AnyField("index")
columns = AnyField("columns")
    def __init__(self, *args, **kwargs):
        # Force the operand's output type to DataFrame regardless of what the
        # caller passed; all other arguments go to the operand base class.
        kwargs["_output_types"] = [OutputType.dataframe]
        super().__init__(*args, **kwargs)
def _set_inputs(self, inputs: List[EntityType]):
super()._set_inputs(inputs)
inputs_iter = iter(self._inputs)
if self.input is not None:
if not isinstance(self.input, dict):
self.input = next(inputs_iter)
else:
# check each value for input
new_input = OrderedDict()
for k, v in self.input.items():
if isinstance(v, ENTITY_TYPE):
new_input[k] = next(inputs_iter)
else:
new_input[k] = v
self.input = new_input
if isinstance(self.index, ENTITY_TYPE):
self.index = next(inputs_iter)
def __call__(
self,
input_tensor: Tensor,
index: Union[TileableType, pd.Index],
columns: pd.Index,
dtypes: pd.Series,
):
if isinstance(input_tensor, dict):
return self._call_input_1d_tileables(input_tensor, index, columns, dtypes)
elif input_tensor is not None:
return self._call_input_tensor(input_tensor, index, columns, dtypes)
else:
return self._call_tensor_none(index, columns, dtypes)
def _process_index(
self, index: Union[TileableType, pd.Index], inputs: List[EntityType]
):
if not isinstance(index, pd.Index):
if isinstance(index, INDEX_TYPE):
index_value = index.index_value
inputs.append(index)
elif isinstance(index, ENTITY_TYPE):
index = astensor(index)
if index.ndim != 1:
raise ValueError(f"index should be 1-d, got {index.ndim}-d")
index_value = parse_index(
pd.Index([], dtype=index.dtype), index, type(self).__name__
)
inputs.append(index)
else:
index = pd.Index(index)
index_value = parse_index(index)
else:
index_value = parse_index(index)
return index_value
    def _call_input_1d_tileables(
        self,
        input_1d_tileables: Dict[Any, TileableType],
        index: Union[TileableType, pd.Index],
        columns: pd.Index,
        dtypes: pd.Series,
    ):
        """Build the output DataFrame from a dict of 1-d values (dict-of-columns).

        Validates that all non-scalar values share one shape and that the
        index/columns sizes are consistent, then creates the DataFrame
        tileable with the collected entity inputs.
        """
        tileables = []
        shape = None
        for tileable in input_1d_tileables.values():
            tileable_shape = astensor(tileable).shape
            # Scalars have empty shape and do not participate in the check.
            if len(tileable_shape) > 0:
                if shape is None:
                    shape = tileable_shape
                elif shape != tileable_shape:
                    raise ValueError("input 1-d tensors should have same shape")
            # Only entities become operand inputs; plain values stay embedded.
            if isinstance(tileable, ENTITY_TYPE):
                tileables.append(tileable)
        if index is not None:
            # NOTE(review): assumes at least one entity value was collected.
            tileable_size = tileables[0].shape[0]
            if hasattr(index, "shape"):
                index_size = index.shape[0]
            else:
                index_size = len(index)
            # Sizes may legitimately be NaN (unknown) — only compare knowns.
            if (
                not pd.isna(tileable_size)
                and not pd.isna(index_size)
                and tileable_size != index_size
            ):
                raise ValueError(
                    f"index {index} should have the same shape "
                    f"with tensor: {tileable_size}"
                )
            index_value = self._process_index(index, tileables)
        else:
            # Default RangeIndex matching the first column's length.
            self.index = index = pd.RangeIndex(0, tileables[0].shape[0])
            index_value = parse_index(index)
        if columns is not None:
            if len(input_1d_tileables) != len(columns):
                raise ValueError(
                    f"columns {columns} should have size {len(input_1d_tileables)}"
                )
            if not isinstance(columns, pd.Index):
                if isinstance(columns, ENTITY_TYPE):
                    raise NotImplementedError("The columns value cannot be a tileable")
                columns = pd.Index(columns)
            columns_value = parse_index(columns, store_data=True)
        else:
            # Default positional column labels.
            columns_value = parse_index(
                pd.RangeIndex(0, len(input_1d_tileables)), store_data=True
            )
        shape = (shape[0], len(input_1d_tileables))
        return self.new_dataframe(
            tileables,
            shape,
            dtypes=dtypes,
            index_value=index_value,
            columns_value=columns_value,
        )
    def _call_input_tensor(
        self,
        input_tensor: Tensor,
        index: Union[TileableType, pd.Index],
        columns: pd.Index,
        dtypes: pd.Series,
    ):
        """Build the output DataFrame from a single 1-d or 2-d tensor.

        A 1-d tensor becomes a single-column frame; a 2-d tensor maps
        directly to (rows, columns). Index and columns are validated against
        the tensor's shape and defaulted to RangeIndex when absent.
        """
        if input_tensor.ndim not in {1, 2}:
            raise ValueError("Must pass 1-d or 2-d input")
        inputs = [input_tensor]
        if index is not None:
            if input_tensor.shape[0] != len(index):
                raise ValueError(
                    f"index {index} should have the same shape with tensor: {input_tensor.shape[0]}"
                )
            index_value = self._process_index(index, inputs)
        elif isinstance(input_tensor, SERIES_TYPE):
            # A Series already carries its own index metadata.
            index_value = input_tensor.index_value
        else:
            # Unknown length (NaN) is encoded as stop=-1 in the RangeIndex.
            stop = input_tensor.shape[0]
            stop = -1 if np.isnan(stop) else stop
            index = self.index = pd.RangeIndex(start=0, stop=stop)
            index_value = parse_index(index)
        if columns is not None:
            # Accept a single column for 1-d input, or a matching count for 2-d.
            if not (
                input_tensor.ndim == 1
                and len(columns) == 1
                or input_tensor.shape[1] == len(columns)
            ):
                raise ValueError(
                    f"columns {columns} should have the same shape with tensor: {input_tensor.shape[1]}"
                )
            if not isinstance(columns, pd.Index):
                if isinstance(columns, ENTITY_TYPE):
                    raise NotImplementedError("The columns value cannot be a tileable")
                columns = pd.Index(columns)
            columns_value = parse_index(columns, store_data=True)
        else:
            if input_tensor.ndim == 1:
                # convert to 1-d DataFrame
                columns_value = parse_index(
                    pd.RangeIndex(start=0, stop=1), store_data=True
                )
            else:
                columns_value = parse_index(
                    pd.RangeIndex(start=0, stop=input_tensor.shape[1]), store_data=True
                )
        if input_tensor.ndim == 1:
            shape = (input_tensor.shape[0], 1)
        else:
            shape = input_tensor.shape
        return self.new_dataframe(
            inputs,
            shape,
            dtypes=dtypes,
            index_value=index_value,
            columns_value=columns_value,
        )
def _call_tensor_none(
self, index: Union[TileableType, pd.Index], columns: pd.Index, dtypes: pd.Series
):
inputs = []
shape = []
if index is not None:
index_value = self._process_index(index, inputs)
shape.append(index.shape[0])
else:
index = self.index = pd.Index([], dtype=object)
index_value = parse_index(index)
shape.append(0)
if columns is not None:
if not isinstance(columns, pd.Index):
if isinstance(columns, ENTITY_TYPE):
raise NotImplementedError("The columns value cannot be a tileable")
columns = pd.Index(columns)
columns_value = parse_index(columns, store_data=True)
shape.append(columns.shape[0])
else:
columns_value = parse_index(pd.Index([], dtype=object), store_data=True)
shape.append(0)
return self.new_dataframe(
inputs,
shape=tuple(shape),
dtypes=dtypes,
index_value=index_value,
columns_value=columns_value,
)
@classmethod
def tile(cls, op: "DataFrameFromTensor"):
if isinstance(op.input, dict):
return (yield from cls._tile_input_1d_tileables(op))
elif op.input is not None:
return (yield from cls._tile_input_tensor(op))
else:
return cls._tile_tensor_none(op)
    @classmethod
    def _tile_input_1d_tileables(cls, op: "DataFrameFromTensor"):
        """Tile the dict-of-1-d-values case: one row-block of chunks per split.

        All entity inputs are rechunked to a common split, then each output
        chunk receives the matching slice of every dict value.
        """
        # make sure all tensors have known chunk shapes
        if has_unknown_shape(*op.inputs):
            yield
        out_df = op.outputs[0]
        in_tensors = op.inputs
        in_tensors = yield from unify_chunks(*in_tensors)
        # Row split sizes and their cumulative offsets for slicing raw values.
        nsplit = in_tensors[0].nsplits[0]
        cum_sizes = [0] + np.cumsum(nsplit).tolist()
        out_chunks = []
        for i in range(in_tensors[0].chunk_shape[0]):
            chunk_op = op.copy().reset_key()
            new_input = OrderedDict()
            for k, v in op.input.items():
                if not isinstance(v, ENTITY_TYPE):
                    try:
                        # Non-entity sequence: take this chunk's slice directly.
                        new_input[k] = v[cum_sizes[i] : cum_sizes[i + 1]]
                    except TypeError:
                        # scalar
                        new_input[k] = v
                else:
                    # do not need to do slice,
                    # will be done in set_inputs
                    new_input[k] = v
            chunk_op.input = new_input
            columns_value = out_df.columns_value
            dtypes = out_df.dtypes
            chunk_index = (i, 0)
            # Derive the chunk's index_value from whichever index form was used.
            if isinstance(op.index, INDEX_TYPE):
                index_value = in_tensors[-1].chunks[i].index_value
            elif isinstance(op.index, pd.Index):
                chunk_op.index = pd_index = op.index[cum_sizes[i] : cum_sizes[i + 1]]
                index_value = parse_index(pd_index, store_data=True)
            else:
                assert op.index is not None
                index_chunk = in_tensors[-1].cix[
                    i,
                ]
                index_value = parse_index(
                    pd.Index([], dtype=index_chunk.dtype),
                    index_chunk,
                    type(chunk_op).__name__,
                )
            shape = (nsplit[i], len(out_df.dtypes))
            out_chunk = chunk_op.new_chunk(
                [t.cix[(i,)] for t in in_tensors],
                shape=shape,
                index=chunk_index,
                dtypes=dtypes,
                index_value=index_value,
                columns_value=columns_value,
            )
            out_chunks.append(out_chunk)
        # One column block: all dict values end up in a single chunk column.
        nsplits = (nsplit, (len(out_df.dtypes),))
        new_op = op.copy()
        return new_op.new_dataframes(
            out_df.inputs,
            out_df.shape,
            dtypes=out_df.dtypes,
            index_value=out_df.index_value,
            columns_value=out_df.columns_value,
            chunks=out_chunks,
            nsplits=nsplits,
        )
    @classmethod
    def _tile_input_tensor(cls, op: "DataFrameFromTensor"):
        """Tile the single-tensor case: mirror the tensor's chunk grid.

        Each tensor chunk becomes one DataFrame chunk; a tileable index is
        rechunked to match the tensor's row splits, and pandas index/columns
        are sliced per chunk.
        """
        out_df = op.outputs[0]
        in_tensor = op.input
        out_chunks = []
        if out_df.index_value.has_value() and has_unknown_shape(in_tensor):
            yield
        nsplits = in_tensor.nsplits
        if op.index is not None and hasattr(op.index, "key"):
            # rechunk index if it's a tensor
            if has_unknown_shape(*op.inputs):
                yield
            index_tensor = yield from recursive_tile(op.index.rechunk([nsplits[0]]))
        else:
            index_tensor = None
        # nsplits: a 1-d tensor yields a single column block of width 1.
        if in_tensor.ndim == 1:
            out_nsplits = in_tensor.nsplits + ((1,),)
        else:
            out_nsplits = in_tensor.nsplits
        cum_nsplits = [[0] + np.cumsum(ns).tolist() for ns in out_nsplits]
        for in_chunk in in_tensor.chunks:
            out_op = op.copy().reset_key()
            chunk_inputs = [in_chunk]
            if in_chunk.ndim == 1:
                i = in_chunk.index[0]
                chunk_index = (i, 0)
                chunk_shape = (in_chunk.shape[0], 1)
            else:
                i, j = in_chunk.index
                chunk_index = in_chunk.index
                chunk_shape = in_chunk.shape
            if op.columns is not None:
                # Slice the raw columns to this chunk's column range.
                column_nsplit = cum_nsplits[1]
                j = chunk_index[1]
                out_op.columns = op.columns[column_nsplit[j] : column_nsplit[j + 1]]
            if isinstance(op.index, INDEX_TYPE):
                index_chunk = index_tensor.chunks[i]
                chunk_inputs.append(index_chunk)
            elif isinstance(op.index, pd.Index):
                # Slice the raw pandas index to this chunk's row range.
                index_nsplit = cum_nsplits[0]
                if op.index.size > 0:
                    out_op.index = op.index[index_nsplit[i] : index_nsplit[i + 1]]
            elif index_tensor is not None:
                index_chunk = index_tensor.cix[i]
                chunk_inputs.append(index_chunk)
            out_chunk = out_op.new_chunk(
                chunk_inputs, shape=chunk_shape, index=chunk_index
            )
            # Attach tileable-level metadata so downstream consumers can read
            # frame-wide dtypes/index/columns from each chunk.
            out_chunk._set_tileable_meta(
                tileable_key=out_df.key,
                nsplits=out_nsplits,
                index_value=out_df.index_value,
                columns_value=out_df.columns_value,
                dtypes=out_df.dtypes,
            )
            out_chunks.append(out_chunk)
        new_op = op.copy()
        params = out_df.params.copy()
        params["chunks"] = out_chunks
        params["nsplits"] = out_nsplits
        return new_op.new_dataframes(out_df.inputs, kws=[params])
@classmethod
def _tile_tensor_none(cls, op: "DataFrameFromTensor"):
out_df = op.outputs[0]
out_chunks = []
assert isinstance(op.index, INDEX_TYPE)
# tile as index
for index_chunk in op.index.chunks:
index_value = index_chunk.index_value
chunk_shape = (index_chunk.shape[0], out_df.shape[1])
chunk_index = (index_chunk.index[0], 0)
chunk_op = op.copy().reset_key()
out_chunk = chunk_op.new_chunk(
[index_chunk],
shape=chunk_shape,
index=chunk_index,
index_value=index_value,
columns_value=out_df.columns_value,
dtypes=out_df.dtypes,
)
out_chunks.append(out_chunk)
new_op = op.copy()
params = out_df.params.copy()
params["nsplits"] = (op.index.nsplits[0], (out_df.shape[1],))
params["chunks"] = out_chunks
return new_op.new_dataframes(out_df.inputs, kws=[params])
@classmethod
def execute(cls, ctx: Union[dict, Context], op: "DataFrameFromTensor"):
chunk = op.outputs[0]
if isinstance(op.input, dict):
d = OrderedDict()
for k, v in op.input.items():
if hasattr(v, "key"):
d[k] = ctx[v.key]
else:
d[k] = v
if op.index is not None and hasattr(op.index, "key"):
index_data = ctx[op.index.key]
else:
index_data = op.index
ctx[chunk.key] = pd.DataFrame(d, index=index_data, columns=op.columns)
elif op.input is not None:
tensor_data = | |
self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author2.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
# test admin
self.client.logout()
self.auth_helper.authorize_client(self.client)
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
dict_resp_data = json.loads(response.content)["data"]
postId = dict_resp_data["id"]
self.client.logout()
self.client.login(username=user2.username, password=password)
data = {
"object": f"{postId}",
"author":{
"type":"author",
"id":f"{author2.id}"
},
}
response = self.client.post(reverse('likes_api:inbox_like', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
like_data = json.loads(response.content)["data"]
# test anonymous user
self.client.logout()
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), like_data, format="json")
self.assertEqual(response.status_code, 401, f"expected 401. got: {response.status_code}")
# test non participant
self.client.logout()
nonParticipant = User.objects.create_user("nonParticipant", password=password)
self.assertTrue(self.client.login(username=nonParticipant.username, password=password))
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), like_data, format="json")
self.assertEqual(response.status_code, 403, f"expected 403. got: {response.status_code}")
# test likee
self.client.logout()
self.assertTrue(self.client.login(username=user.username, password=password))
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), like_data, format="json")
self.assertEqual(response.status_code, 403, f"expected 403. got: {response.status_code}")
# test liker
self.client.logout()
self.assertTrue(self.client.login(username=user2.username, password=password))
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), like_data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
# test admin
self.client.logout()
self.auth_helper.authorize_client(self.client)
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), like_data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
self.client.logout()
self.auth_helper.authorize_client(self.client)
response = self.client.put(reverse('author:follower-info', kwargs={'author_id':author2.id, 'foreign_author_id':author.id}), format="json")
self.assertEqual(response.status_code, 200, f"expected 200. got: {response.status_code}")
follow_data = {
"type": f"{InboxItem.ItemTypeEnum.FOLLOW}",
"actor": {
"type": "author",
"id": f"{author.id}",
},
"object": {
"type": "author",
"id": f"{author2.id}"
},
}
# test anonymous user
self.client.logout()
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), follow_data, format="json")
self.assertEqual(response.status_code, 401, f"expected 401. got: {response.status_code}")
# test non participant
self.client.logout()
self.assertTrue(self.client.login(username=nonParticipant.username, password=password))
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), follow_data, format="json")
self.assertEqual(response.status_code, 403, f"expected 403. got: {response.status_code}")
# test followee
self.client.logout()
self.assertTrue(self.client.login(username=user2.username, password=password))
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), follow_data, format="json")
self.assertEqual(response.status_code, 403, f"expected 403. got: {response.status_code}")
# test follower
self.client.logout()
self.assertTrue(self.client.login(username=user.username, password=password))
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), follow_data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
# test admin
self.client.logout()
self.auth_helper.authorize_client(self.client)
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), follow_data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    def test_post_inbox_overwrite(self):
        """
        Posting an item whose id already exists in the inbox should modify
        the existing entry rather than create a duplicate.
        """
        author = self.auth_helper.get_author()
        data = {
            "type":"post",
            "title":"A post title about a post about web dev",
            "description":"This post discusses stuff -- brief",
            "contentType":f"{Post.ContentTypeEnum.MARKDOWN}",
            "author":{
                "type":"author",
                "id":f"{author.id}"
            },
            "visibility":f"{Post.VisibilityEnum.PUBLIC}",
            "unlisted":"false"
        }
        # need to do this because inbox expects an id
        response = self.client.post(reverse('post_api:posts', kwargs={'author_id':author.id}), data, format="json")
        self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
        post_data = json.loads(response.content)["data"]
        data["id"] = post_data["id"]
        # First inbox POST: creates the inbox entry for the post.
        response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), data, format="json")
        self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
        dict_resp_data = json.loads(response.content)["data"]
        self.assertEqual(dict_resp_data["type"], data["type"], "returned item had wrong type!")
        self.assertEqual(dict_resp_data["title"], data["title"], "returned item had wrong title!")
        self.assertEqual(dict_resp_data["description"], data["description"], "returned item had wrong description!")
        self.assertEqual(dict_resp_data["contentType"], data["contentType"], "returned item had wrong contentType!")
        self.assertEqual(dict_resp_data["visibility"], data["visibility"], "returned item had wrong visibility!")
        # Public post uri-id contains its authors id in it
        self.assertIn(str(author.id), dict_resp_data["id"], "returned item referenced wrong author!")
        # NOTE(review): postId is computed but never used below.
        postId = dict_resp_data["id"].split("posts/")[1].rstrip("/")
        # Second inbox POST with the same id but changed fields: should
        # overwrite the existing entry in place.
        data["title"] = "A different title"
        data["description"] = "A different description"
        data["contentType"] = f"{Post.ContentTypeEnum.PLAIN}"
        data["visibility"] = f"{Post.VisibilityEnum.FRIENDS}"
        response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), data, format="json")
        self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
        dict_resp_data = json.loads(response.content)["data"]
        self.assertEqual(dict_resp_data["type"], data["type"], "returned item had wrong type!")
        self.assertEqual(dict_resp_data["title"], data["title"], "returned item had wrong title!")
        self.assertEqual(dict_resp_data["description"], data["description"], "returned item had wrong description!")
        self.assertEqual(dict_resp_data["contentType"], data["contentType"], "returned item had wrong contentType!")
        self.assertEqual(dict_resp_data["visibility"], data["visibility"], "returned item had wrong visibility!")
        # Public post uri-id contains its authors id in it
        self.assertIn(str(author.id), dict_resp_data["id"], "returned item referenced wrong author!")
        # The inbox must still contain exactly one entry, with the new values.
        response = self.client.get(reverse('inbox_api:inbox', kwargs={'author_id':author.id}))
        self.assertEqual(response.status_code, 200, f"expected 200. got: {response.status_code}")
        returned_list = json.loads(response.content)["data"]
        self.assertEqual(len(returned_list), 1)
        data1 = returned_list[0]
        self.assertEqual(data1["type"], data["type"], "returned item had wrong type!")
        self.assertEqual(data1["title"], data["title"], "returned item had wrong title!")
        self.assertEqual(data1["description"], data["description"], "returned item had wrong description!")
        self.assertEqual(data1["contentType"], data["contentType"], "returned item had wrong contentType!")
        self.assertEqual(data1["visibility"], data["visibility"], "returned item had wrong visibility!")
        # Public post uri-id contains its authors id in it
        self.assertIn(str(author.id), data1["id"], "returned item referenced wrong author!")
def test_post_inbox_no_data(self):
"""
should return 400
"""
author = self.auth_helper.get_author()
data = {}
# need to do this because inbox expects an id
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 400, f"expected 400. got: {response.status_code}")
def test_post_inbox_invalid_type(self):
"""
should return 400
"""
author = self.auth_helper.get_author()
data = {
"type":"post",
"title":"A post title about a post about web dev",
"description":"This post discusses stuff -- brief",
"contentType":f"{Post.ContentTypeEnum.MARKDOWN}",
"author":{
"type":"author",
"id":f"{author.id}"
},
"visibility":f"{Post.VisibilityEnum.PUBLIC}",
"unlisted":"false"
}
# need to do this because inbox expects an id
response = self.client.post(reverse('post_api:posts', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
post_data = json.loads(response.content)["data"]
post_data["type"] = "someOtherType"
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), post_data, format="json")
self.assertEqual(response.status_code, 400, f"expected 400. got: {response.status_code}")
def test_post_inbox_author_nonexist(self):
"""
should return 404
"""
author = self.auth_helper.get_author()
data = {
"type":"post",
"title":"A post title about a post about web dev",
"description":"This post discusses stuff -- brief",
"contentType":f"{Post.ContentTypeEnum.MARKDOWN}",
"author":{
"type":"author",
"id":f"{author.id}"
},
"visibility":f"{Post.VisibilityEnum.PUBLIC}",
"unlisted":"false"
}
# need to do this because inbox expects an id
response = self.client.post(reverse('post_api:posts', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
post_data = json.loads(response.content)["data"]
authorId = uuid4()
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':authorId}), post_data, format="json")
self.assertEqual(response.status_code, 404, f"expected 404. got: {response.status_code}")
def test_post_inbox_bad_uuid(self):
"""
should return 404
"""
author = self.auth_helper.get_author()
data = {
"type":"post",
"title":"A post title about a post about web dev",
"description":"This post discusses stuff -- brief",
"contentType":f"{Post.ContentTypeEnum.MARKDOWN}",
"author":{
"type":"author",
"id":f"{author.id}"
},
"visibility":f"{Post.VisibilityEnum.PUBLIC}",
"unlisted":"false"
}
# need to do this because inbox expects an id
response = self.client.post(reverse('post_api:posts', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
post_data = json.loads(response.content)["data"]
authorId = "<PASSWORD>"
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':authorId}), post_data, format="json")
self.assertEqual(response.status_code, 404, f"expected 404. got: {response.status_code}")
def test_post_inbox_swapped_type(self):
"""
should return 400 both times
"""
author = self.auth_helper.get_author()
user = User(username="username1")
user.save()
author2: Author = Author.objects.get(userId=user)
data = {
"type":"post",
"title":"A post title about a post about web dev",
"description":"This post discusses stuff -- brief",
"contentType":f"{Post.ContentTypeEnum.MARKDOWN}",
"author":{
"type":"author",
"id":f"{author.id}"
},
"visibility":f"{Post.VisibilityEnum.PUBLIC}",
"unlisted":"false"
}
# need to do this because inbox expects an id
response = self.client.post(reverse('post_api:posts', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
post_data = json.loads(response.content)["data"]
postId = post_data["id"]
data["id"] = post_data["id"]
data["type"] = f"{InboxItem.ItemTypeEnum.LIKE}"
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 400, f"expected 400. got: {response.status_code}")
data = {
"object": f"{postId}",
"author":{
"type":"author",
"id":f"{author2.id}"
},
}
response = self.client.post(reverse('likes_api:inbox_like', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
like_data = json.loads(response.content)["data"]
like_data["type"] = "post"
self.assertEqual(postId, like_data["object"], "returned item referenced wrong object!")
self.assertEqual(like_data["author"]["id"], str(author2.id), "returned item referenced wrong author!")
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), like_data, format="json")
self.assertEqual(response.status_code, 400, f"expected 400. got: {response.status_code}")
# DELETEs ##################
def test_delete_inbox(self):
"""
should delete the items in the inbox
"""
author = self.auth_helper.get_author()
data = {
"type":"post",
"title":"A post title about a post about web dev",
"description":"This post discusses stuff -- brief",
"contentType":f"{Post.ContentTypeEnum.MARKDOWN}",
"author":{
"type":"author",
"id":f"{author.id}"
},
"visibility":f"{Post.VisibilityEnum.PUBLIC}",
"unlisted":"false"
}
# need to do this because inbox expects an id
response = self.client.post(reverse('post_api:posts', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
post_data = json.loads(response.content)["data"]
data["id"] = post_data["id"]
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
response = self.client.delete(reverse('inbox_api:inbox', kwargs={'author_id':author.id}))
self.assertEqual(response.status_code, 204, f"expected 200. got: {response.status_code}")
response = self.client.get(reverse('inbox_api:inbox', kwargs={'author_id':author.id}))
self.assertEqual(response.status_code, 200, f"expected 200. got: {response.status_code}")
dict_resp = json.loads(response.content)
# checking default pagination
self.assertEqual(dict_resp["page"], DEFAULT_PAGE, f"expected page {DEFAULT_PAGE}. got: {dict_resp['page']}")
self.assertEqual(dict_resp["size"], DEFAULT_PAGE_SIZE, f"expected page size {DEFAULT_PAGE_SIZE}. got: {dict_resp['size']}")
self.assertEqual(len(dict_resp["data"]), 0, "inbox should have been empty but wasn't!")
def test_delete_inbox_access_levels(self):
"""
should return 401 for anonymous users, 403 for non owners, 204 for owners and admins
"""
password = "password"
user = User.objects.create_user("username1", password=password)
author: Author = Author.objects.get(userId=user)
self.client.logout()
self.client.login(username=user.username, password=password)
data = {
"type":"post",
"title":"A post title about a post about web dev",
"description":"This post discusses stuff -- brief",
"contentType":f"{Post.ContentTypeEnum.MARKDOWN}",
"author":{
"type":"author",
"id":f"{author.id}"
},
"visibility":f"{Post.VisibilityEnum.PUBLIC}",
"unlisted":"false"
}
# need to do this because inbox expects an id
response = self.client.post(reverse('post_api:posts', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
post_data = json.loads(response.content)["data"]
data["id"] = post_data["id"]
response = self.client.post(reverse('inbox_api:inbox', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
# test anonymous user
self.client.logout()
response = self.client.delete(reverse('inbox_api:inbox', kwargs={'author_id':author.id}))
self.assertEqual(response.status_code, 401, f"expected 401. got: {response.status_code}")
# test non participant
self.client.logout()
nonParticipant = User.objects.create_user("nonParticipant", password=password)
self.assertTrue(self.client.login(username=nonParticipant.username, password=password))
response = self.client.delete(reverse('inbox_api:inbox', kwargs={'author_id':author.id}))
self.assertEqual(response.status_code, 403, f"expected 403. got: {response.status_code}")
# test owner
self.client.logout()
self.assertTrue(self.client.login(username=user.username, password=password))
response = | |
if full port details might be provided to this call,
# they are not used since there is no guarantee the notifications
# are processed in the same order as the relevant API requests
self.updated_ports.add(port['id'])
def port_delete(self, context, **kwargs):
port_id = kwargs.get('port_id')
self.deleted_ports.add(port_id)
self.updated_ports.discard(port_id)
def network_update(self, context, **kwargs):
network_id = kwargs['network']['id']
for port_id in self.network_ports[network_id]:
# notifications could arrive out of order, if the port is deleted
# we don't want to update it anymore
if port_id not in self.deleted_ports:
self.updated_ports.add(port_id)
LOG.debug("network_update message processed for network "
"%(network_id)s, with ports: %(ports)s",
{'network_id': network_id,
'ports': self.network_ports[network_id]})
def _clean_network_ports(self, port_id):
for port_set in self.network_ports.values():
if port_id in port_set:
port_set.remove(port_id)
break
    def process_deleted_ports(self, port_info):
        """Tear down local state for every port queued in self.deleted_ports.

        :param port_info: current scan result; its optional 'removed' set
            holds ports that already disappeared from the bridge and
            therefore need no teardown here.
        """
        # don't try to process removed ports as deleted ports since
        # they are already gone
        if 'removed' in port_info:
            self.deleted_ports -= port_info['removed']
        # Snapshot the set for the firewall flush at the end; the while-loop
        # below drains self.deleted_ports destructively via pop().
        deleted_ports = list(self.deleted_ports)
        while self.deleted_ports:
            port_id = self.deleted_ports.pop()
            # may be None if the vif port is already gone from the bridge
            port = self.int_br.get_vif_port_by_id(port_id)
            self._clean_network_ports(port_id)
            # let extension drivers release any per-port state they hold
            self.ext_manager.delete_port(self.context,
                                         {"vif_port": port,
                                          "port_id": port_id})
            # move to dead VLAN so deleted ports no
            # longer have access to the network
            if port:
                # don't log errors since there is a chance someone will be
                # removing the port from the bridge at the same time
                self.port_dead(port, log_errors=False)
            self.port_unbound(port_id)
        # Flush firewall rules after ports are put on dead VLAN to be
        # more secure
        self.sg_agent.remove_devices_filter(deleted_ports)
def tunnel_update(self, context, **kwargs):
LOG.debug("tunnel_update received")
if not self.enable_tunneling:
return
tunnel_ip = kwargs.get('tunnel_ip')
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
LOG.error("No tunnel_type specified, cannot create tunnels")
return
if tunnel_type not in self.tunnel_types:
LOG.error("tunnel_type %s not supported by agent",
tunnel_type)
return
if tunnel_ip == self.local_ip:
return
tun_name = self.get_tunnel_name(tunnel_type, self.local_ip, tunnel_ip)
if tun_name is None:
return
if not self.l2_pop:
self._setup_tunnel_port(self.tun_br, tun_name, tunnel_ip,
tunnel_type)
self._setup_tunnel_flood_flow(self.tun_br, tunnel_type)
def tunnel_delete(self, context, **kwargs):
LOG.debug("tunnel_delete received")
if not self.enable_tunneling:
return
tunnel_ip = kwargs.get('tunnel_ip')
if not tunnel_ip:
LOG.error("No tunnel_ip specified, cannot delete tunnels")
return
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
LOG.error("No tunnel_type specified, cannot delete tunnels")
return
if tunnel_type not in self.tunnel_types:
LOG.error("tunnel_type %s not supported by agent",
tunnel_type)
return
ofport = self.tun_br_ofports[tunnel_type].get(tunnel_ip)
self.cleanup_tunnel_port(self.tun_br, ofport, tunnel_type)
def _tunnel_port_lookup(self, network_type, remote_ip):
return self.tun_br_ofports[network_type].get(remote_ip)
def fdb_add(self, context, fdb_entries):
LOG.debug("fdb_add received")
for lvm, agent_ports in self.get_agent_ports(fdb_entries):
agent_ports.pop(self.local_ip, None)
if len(agent_ports):
if not self.enable_distributed_routing:
with self.tun_br.deferred() as deferred_br:
self.fdb_add_tun(context, deferred_br, lvm,
agent_ports, self._tunnel_port_lookup)
else:
self.fdb_add_tun(context, self.tun_br, lvm,
agent_ports, self._tunnel_port_lookup)
def fdb_remove(self, context, fdb_entries):
LOG.debug("fdb_remove received")
for lvm, agent_ports in self.get_agent_ports(fdb_entries):
agent_ports.pop(self.local_ip, None)
if len(agent_ports):
if not self.enable_distributed_routing:
with self.tun_br.deferred() as deferred_br:
self.fdb_remove_tun(context, deferred_br, lvm,
agent_ports,
self._tunnel_port_lookup)
else:
self.fdb_remove_tun(context, self.tun_br, lvm,
agent_ports, self._tunnel_port_lookup)
def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
if port_info == n_const.FLOODING_ENTRY:
lvm.tun_ofports.add(ofport)
br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
lvm.tun_ofports)
else:
self.setup_entry_for_arp_reply(br, 'add', lvm.vlan,
port_info.mac_address,
port_info.ip_address)
br.install_unicast_to_tun(lvm.vlan,
lvm.segmentation_id,
ofport,
port_info.mac_address)
def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
if port_info == n_const.FLOODING_ENTRY:
if ofport not in lvm.tun_ofports:
LOG.debug("attempt to remove a non-existent port %s", ofport)
return
lvm.tun_ofports.remove(ofport)
if len(lvm.tun_ofports) > 0:
br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
lvm.tun_ofports)
else:
# This local vlan doesn't require any more tunneling
br.delete_flood_to_tun(lvm.vlan)
else:
self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan,
port_info.mac_address,
port_info.ip_address)
br.delete_unicast_to_tun(lvm.vlan, port_info.mac_address)
def _fdb_chg_ip(self, context, fdb_entries):
LOG.debug("update chg_ip received")
with self.tun_br.deferred() as deferred_br:
self.fdb_chg_ip_tun(context, deferred_br, fdb_entries,
self.local_ip)
def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
ip_address):
'''Set the ARP respond entry.
When the l2 population mechanism driver and OVS supports to edit ARP
fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the
tunnel bridge.
'''
if not self.arp_responder_enabled:
return
ip = netaddr.IPAddress(ip_address)
if ip.version == 6:
return
ip = str(ip)
mac = str(netaddr.EUI(mac_address, dialect=_mac_mydialect))
if action == 'add':
br.install_arp_responder(local_vid, ip, mac)
elif action == 'remove':
br.delete_arp_responder(local_vid, ip)
else:
LOG.warning('Action %s not supported', action)
def _local_vlan_for_flat(self, lvid, physical_network):
phys_br = self.phys_brs[physical_network]
phys_port = self.phys_ofports[physical_network]
int_br = self.int_br
int_port = self.int_ofports[physical_network]
phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
segmentation_id=None,
distributed=False)
int_br.provision_local_vlan(port=int_port, lvid=lvid,
segmentation_id=None)
def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id):
distributed = self.enable_distributed_routing
phys_br = self.phys_brs[physical_network]
phys_port = self.phys_ofports[physical_network]
int_br = self.int_br
int_port = self.int_ofports[physical_network]
phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
segmentation_id=segmentation_id,
distributed=distributed)
int_br.provision_local_vlan(port=int_port, lvid=lvid,
segmentation_id=segmentation_id)
    def provision_local_vlan(self, net_uuid, network_type, physical_network,
                             segmentation_id):
        '''Provisions a local VLAN.
        :param net_uuid: the uuid of the network associated with this vlan.
        :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
                             'local', 'geneve')
        :param physical_network: the physical network for 'vlan' or 'flat'
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        '''
        # On a restart or crash of OVS, the network associated with this VLAN
        # will already be assigned, so check for that here before assigning a
        # new one.
        try:
            lvm = self.vlan_manager.get(net_uuid)
            lvid = lvm.vlan
        except vlanmanager.MappingNotFound:
            # prefer a previously-used vlan id (hint) to keep flows stable
            lvid = self._local_vlan_hints.pop(net_uuid, None)
            if lvid is None:
                if not self.available_local_vlans:
                    LOG.error("No local VLAN available for net-id=%s",
                              net_uuid)
                    return
                lvid = self.available_local_vlans.pop()
            self.vlan_manager.add(
                net_uuid, lvid, network_type, physical_network,
                segmentation_id)
            LOG.info("Assigning %(vlan_id)s as local vlan for "
                     "net-id=%(net_uuid)s",
                     {'vlan_id': lvid, 'net_uuid': net_uuid})
        if network_type in constants.TUNNEL_NETWORK_TYPES:
            if self.enable_tunneling:
                # outbound broadcast/multicast
                ofports = list(self.tun_br_ofports[network_type].values())
                if ofports:
                    self.tun_br.install_flood_to_tun(lvid,
                                                     segmentation_id,
                                                     ofports)
                # inbound from tunnels: set lvid in the right table
                # and resubmit to Table LEARN_FROM_TUN for mac learning
                if self.enable_distributed_routing:
                    self.dvr_agent.process_tunneled_network(
                        network_type, lvid, segmentation_id)
                else:
                    self.tun_br.provision_local_vlan(
                        network_type=network_type, lvid=lvid,
                        segmentation_id=segmentation_id)
            else:
                LOG.error("Cannot provision %(network_type)s network for "
                          "net-id=%(net_uuid)s - tunneling disabled",
                          {'network_type': network_type,
                           'net_uuid': net_uuid})
        elif network_type == n_const.TYPE_FLAT:
            if physical_network in self.phys_brs:
                self._local_vlan_for_flat(lvid, physical_network)
            else:
                LOG.error("Cannot provision flat network for "
                          "net-id=%(net_uuid)s - no bridge for "
                          "physical_network %(physical_network)s",
                          {'net_uuid': net_uuid,
                           'physical_network': physical_network})
        elif network_type == n_const.TYPE_VLAN:
            if physical_network in self.phys_brs:
                self._local_vlan_for_vlan(lvid, physical_network,
                                          segmentation_id)
            else:
                LOG.error("Cannot provision VLAN network for "
                          "net-id=%(net_uuid)s - no bridge for "
                          "physical_network %(physical_network)s",
                          {'net_uuid': net_uuid,
                           'physical_network': physical_network})
        elif network_type == n_const.TYPE_LOCAL:
            # no flows needed for local networks
            pass
        else:
            LOG.error("Cannot provision unknown network type "
                      "%(network_type)s for net-id=%(net_uuid)s",
                      {'network_type': network_type,
                       'net_uuid': net_uuid})
def reclaim_local_vlan(self, net_uuid):
'''Reclaim a local VLAN.
:param net_uuid: the network uuid associated with this vlan.
'''
try:
lvm = vlanmanager.LocalVlanManager().pop(net_uuid)
except KeyError:
LOG.debug("Network %s not used on agent.", net_uuid)
return
LOG.info("Reclaiming vlan = %(vlan_id)s from "
"net-id = %(net_uuid)s",
{'vlan_id': lvm.vlan, 'net_uuid': net_uuid})
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
if self.enable_tunneling:
self.tun_br.reclaim_local_vlan(
network_type=lvm.network_type,
segmentation_id=lvm.segmentation_id)
self.tun_br.delete_flood_to_tun(lvm.vlan)
self.tun_br.delete_unicast_to_tun(lvm.vlan, None)
self.tun_br.delete_arp_responder(lvm.vlan, None)
if self.l2_pop:
# Try to remove tunnel ports if not used by other networks
for ofport in lvm.tun_ofports:
self.cleanup_tunnel_port(self.tun_br, ofport,
lvm.network_type)
elif lvm.network_type == n_const.TYPE_FLAT:
if lvm.physical_network in self.phys_brs:
# outbound
br = self.phys_brs[lvm.physical_network]
br.reclaim_local_vlan(
port=self.phys_ofports[lvm.physical_network],
lvid=lvm.vlan)
# inbound
br = self.int_br
br.reclaim_local_vlan(
port=self.int_ofports[lvm.physical_network],
segmentation_id=None)
elif lvm.network_type == n_const.TYPE_VLAN:
if lvm.physical_network in self.phys_brs:
# outbound
br = self.phys_brs[lvm.physical_network]
br.reclaim_local_vlan(
port=self.phys_ofports[lvm.physical_network],
lvid=lvm.vlan)
# inbound
br = self.int_br
br.reclaim_local_vlan(
port=self.int_ofports[lvm.physical_network],
segmentation_id=lvm.segmentation_id)
elif lvm.network_type == n_const.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error("Cannot reclaim unknown network type "
"%(network_type)s for net-id=%(net_uuid)s",
{'network_type': lvm.network_type,
'net_uuid': net_uuid})
self.available_local_vlans.add(lvm.vlan)
    def port_bound(self, port, net_uuid,
                   network_type, physical_network,
                   segmentation_id, fixed_ips, device_owner,
                   provisioning_needed):
        '''Bind port to net_uuid/lsw_id and install flow for inbound traffic
        to vm.
        :param port: an ovs_lib.VifPort object.
        :param net_uuid: the net_uuid this port is to be associated with.
        :param network_type: the network type ('gre', 'vlan', 'flat', 'local')
        :param physical_network: the physical network for 'vlan' or 'flat'
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        :param fixed_ips: the ip addresses assigned to this port
        :param device_owner: the string indicative of owner of this port
        :param provisioning_needed: indicates if this is called for an OVS
                                    restart or recreated physical bridges
                                    and requires to do local vlan provisioning
        :returns: True when the vlan mapping was written to the port's
                  other_config, False otherwise.
        '''
        if net_uuid not in self.vlan_manager or provisioning_needed:
            self.provision_local_vlan(net_uuid, network_type,
                                      physical_network, segmentation_id)
        lvm = self.vlan_manager.get(net_uuid)
        lvm.vif_ports[port.vif_id] = port
        self.dvr_agent.bind_port_to_dvr(port, lvm,
                                        fixed_ips,
                                        device_owner)
        port_other_config = self.int_br.db_get_val("Port", port.port_name,
                                                   "other_config")
        if port_other_config is None:
            # None presumably means the Port row is gone from OVSDB --
            # TODO(review): confirm db_get_val's contract for missing rows.
            if port.vif_id in self.deleted_ports:
                LOG.debug("Port %s deleted concurrently", port.vif_id)
            elif port.vif_id in self.updated_ports:
                LOG.error("Expected port %s not found", port.vif_id)
            else:
                LOG.debug("Unable to get config for port %s", port.vif_id)
            return False
        # record the binding so it survives agent restarts
        vlan_mapping = {'net_uuid': net_uuid,
                        'network_type': network_type,
                        'physical_network': str(physical_network)}
        if segmentation_id is not None:
            vlan_mapping['segmentation_id'] = str(segmentation_id)
        port_other_config.update(vlan_mapping)
        self.int_br.set_db_attribute("Port", port.port_name, "other_config",
                                     port_other_config)
        return True
def _add_port_tag_info(self, need_binding_ports):
port_names = [p['vif_port'].port_name for p in need_binding_ports]
port_info = self.int_br.get_ports_attributes(
"Port", columns=["name", "tag", "other_config"],
ports=port_names, if_exists=True)
info_by_port = {
x['name']: {
'tag': x['tag'],
'other_config': x['other_config'] or {}
}
for x in | |
-1.0: 140,
0.0: 2.0,
90.0: 14.0,
180.0: 10.0,
270.0: 14.0}
def test_fill_window_area_dict(self):
"""test of fill_window_area_dict"""
prj.buildings[-1].fill_window_area_dict()
assert prj.buildings[-1].window_area == {90.0: 1.0,
180.0: 8.0,
270.0: 5.0}
def test_calc_building_parameter(self):
"""test of calc_building_parameter"""
prj.set_default()
helptest.building_test2(prj)
prj.buildings[-1].calc_building_parameter(number_of_elements=2,
merge_windows=True,
used_library='AixLib')
assert round(prj.buildings[-1].volume, 1) == 490.0
assert round(
prj.buildings[-1].sum_heat_load, 4) == 5023.0256
# methods in therm_zone
def test_calc_zone_parameters(self):
"""test of calc zone parameter, no calculation verification"""
prj.buildings[-1].thermal_zones[-1].calc_zone_parameters(
number_of_elements=2, merge_windows=False)
prj.buildings[-1].thermal_zones[-1].calc_zone_parameters(
number_of_elements=2, merge_windows=True)
def test_heat_load(self):
"""test of heating_load"""
prj.set_default()
helptest.building_test2(prj)
prj.buildings[-1].thermal_zones[-1].infiltration_rate = 0.5
prj.buildings[-1].thermal_zones[-1].calc_zone_parameters(
number_of_elements=2,
merge_windows=True)
prj.buildings[-1].thermal_zones[-1].model_attr.calc_attributes()
assert round(
prj.buildings[-1].thermal_zones[-1].model_attr.heat_load,
4) == 6659.6256
    def test_sum_building_elements_one(self):
        """test of combine_building_elements

        Regression test: rebuilds the helptest archetype and pins the
        aggregated OneElement outer-wall and window quantities to
        previously computed reference values (rounded to the stated digits).
        """
        prj.set_default()
        helptest.building_test2(prj)
        from teaser.logic.buildingobjects.calculation.one_element import\
            OneElement
        therm_zone = prj.buildings[-1].thermal_zones[-1]
        calc_attr = OneElement(therm_zone, merge_windows=False, t_bt=5)
        # every envelope component needs its resistances/UA computed first
        helplist = therm_zone.outer_walls + therm_zone.rooftops +\
            therm_zone.ground_floors + therm_zone.inner_walls +\
            therm_zone.ceilings + therm_zone.floors + therm_zone.windows
        for element in helplist:
            element.calc_equivalent_res()
            element.calc_ua_value()
        calc_attr._sum_outer_wall_elements()
        calc_attr._sum_window_elements()
        # outerwall
        assert round(calc_attr.ua_value_ow, 16) == 135.5818558809656
        assert round(calc_attr.area_ow, 1) == 328.0
        assert round(calc_attr.r_conv_inner_ow, 19) == 0.0016512549537648611
        assert round(calc_attr.r_rad_inner_ow, 18) == 0.000609756097560976
        assert round(calc_attr.r_comb_inner_ow, 20) == 0.00044531528322052017
        assert round(calc_attr.r_conv_outer_ow, 20) == 0.00026595744680851064
        assert round(calc_attr.r_rad_outer_ow, 18) == 0.001063829787234043
        assert round(calc_attr.r_comb_outer_ow, 20) == 0.0002127659574468085
        assert round(calc_attr.alpha_conv_inner_ow, 5) == 1.84634
        assert round(calc_attr.alpha_rad_inner_ow, 5) == 5.0
        assert round(calc_attr.alpha_comb_inner_ow, 5) == 6.84634
        assert round(calc_attr.alpha_conv_outer_ow, 1) == 20.0
        assert round(calc_attr.alpha_rad_outer_ow, 5) == 5.0
        assert round(calc_attr.alpha_comb_outer_ow, 1) == 25.0
        # window
        assert round(calc_attr.ua_value_win, 16) == 32.87895310796074
        assert round(calc_attr.area_win, 1) == 18.0
        assert round(calc_attr.r_conv_inner_win, 19) == 0.032679738562091505
        assert round(calc_attr.r_rad_inner_win, 4) == 0.0111
        assert round(calc_attr.r_comb_inner_win, 19) == 0.008291873963515755
        assert round(calc_attr.r_conv_outer_win, 5) == 0.00278
        assert round(calc_attr.r_rad_outer_win, 4) == 0.0111
        assert round(calc_attr.r_comb_outer_win, 4) == 0.0022
        assert round(calc_attr.alpha_conv_inner_win, 1) == 1.7
        assert round(calc_attr.alpha_comb_outer_win, 1) == 25.0
        assert round(calc_attr.alpha_conv_outer_win, 1) == 20.0
        assert round(calc_attr.weighted_g_value, 3) == 0.789
def test_calc_chain_matrix_one(self):
"""test of calc_chain_matrix"""
from teaser.logic.buildingobjects.calculation.one_element import \
OneElement
therm_zone = prj.buildings[-1].thermal_zones[-1]
calc_attr = OneElement(therm_zone, merge_windows=False, t_bt=5)
helplist = therm_zone.outer_walls + therm_zone.rooftops + \
therm_zone.ground_floors + therm_zone.inner_walls + \
therm_zone.ceilings + therm_zone.floors + therm_zone.windows
for element in helplist:
element.calc_equivalent_res()
element.calc_ua_value()
omega = (2 * math.pi / 86400 / 5)
helplist_outer_walls = therm_zone.outer_walls + therm_zone.rooftops +\
therm_zone.ground_floors + therm_zone.windows
r1_ow, c1_ow = calc_attr._calc_parallel_connection(
element_list=helplist_outer_walls,
omega=omega)
assert round(r1_ow, 14) == 0.00100751548411
assert round(c1_ow, 5) == 3648580.59312
    def test_sum_building_elements_two(self):
        """test of combine_building_elements

        Regression test: rebuilds the helptest archetype and pins the
        aggregated TwoElement inner-wall, outer-wall and window quantities
        to previously computed reference values.
        """
        prj.set_default()
        helptest.building_test2(prj)
        from teaser.logic.buildingobjects.calculation.two_element import\
            TwoElement
        therm_zone = prj.buildings[-1].thermal_zones[-1]
        calc_attr = TwoElement(therm_zone, merge_windows=False, t_bt=5)
        # every envelope component needs its resistances/UA computed first
        helplist = therm_zone.outer_walls + therm_zone.rooftops +\
            therm_zone.ground_floors + therm_zone.inner_walls +\
            therm_zone.ceilings + therm_zone.floors + therm_zone.windows
        for element in helplist:
            element.calc_equivalent_res()
            element.calc_ua_value()
        calc_attr._sum_outer_wall_elements()
        calc_attr._sum_inner_wall_elements()
        calc_attr._sum_window_elements()
        # innerwall
        assert round(calc_attr.ua_value_iw, 16) == 14.286493860845841
        assert round(calc_attr.area_iw, 1) == 34.0
        assert round(calc_attr.r_conv_inner_iw, 18) == 0.010893246187363833
        assert round(calc_attr.r_rad_inner_iw, 19) == 0.0058823529411764705
        assert round(calc_attr.r_comb_inner_iw, 19) == 0.003819709702062643
        assert round(calc_attr.alpha_conv_inner_iw, 1) == 2.7
        assert round(calc_attr.alpha_rad_inner_iw, 1) == 5.0
        assert round(calc_attr.alpha_comb_inner_iw, 1) == 7.7
        # outerwall
        assert round(calc_attr.ua_value_ow, 16) == 135.5818558809656
        assert round(calc_attr.area_ow, 1) == 328.0
        assert round(calc_attr.r_conv_inner_ow, 19) == 0.0016512549537648611
        assert round(calc_attr.r_rad_inner_ow, 18) == 0.000609756097560976
        assert round(calc_attr.r_comb_inner_ow, 20) == 0.00044531528322052017
        assert round(calc_attr.r_conv_outer_ow, 20) == 0.00026595744680851064
        assert round(calc_attr.r_rad_outer_ow, 18) == 0.001063829787234043
        assert round(calc_attr.r_comb_outer_ow, 20) == 0.0002127659574468085
        assert round(calc_attr.alpha_conv_inner_ow, 5) == 1.84634
        assert round(calc_attr.alpha_rad_inner_ow, 5) == 5.0
        assert round(calc_attr.alpha_comb_inner_ow, 5) == 6.84634
        assert round(calc_attr.alpha_conv_outer_ow, 1) == 20.0
        assert round(calc_attr.alpha_rad_outer_ow, 5) == 5.0
        assert round(calc_attr.alpha_comb_outer_ow, 1) == 25.0
        # window
        assert round(calc_attr.ua_value_win, 16) == 32.87895310796074
        assert round(calc_attr.area_win, 1) == 18.0
        assert round(calc_attr.r_conv_inner_win, 19) == 0.032679738562091505
        assert round(calc_attr.r_rad_inner_win, 4) == 0.0111
        assert round(calc_attr.r_comb_inner_win, 19) == 0.008291873963515755
        assert round(calc_attr.r_conv_outer_win, 5) == 0.00278
        assert round(calc_attr.r_rad_outer_win, 4) == 0.0111
        assert round(calc_attr.r_comb_outer_win, 4) == 0.0022
        assert round(calc_attr.alpha_conv_inner_win, 1) == 1.7
        assert round(calc_attr.alpha_comb_outer_win, 1) == 25.0
        assert round(calc_attr.alpha_conv_outer_win, 1) == 20.0
        assert round(calc_attr.weighted_g_value, 3) == 0.789
    def test_calc_chain_matrix_two(self):
        """test of calc_chain_matrix"""
        from teaser.logic.buildingobjects.calculation.two_element import \
            TwoElement
        therm_zone = prj.buildings[-1].thermal_zones[-1]
        calc_attr = TwoElement(therm_zone, merge_windows=False, t_bt=5)
        helplist = therm_zone.outer_walls + therm_zone.rooftops + \
            therm_zone.ground_floors + therm_zone.inner_walls + \
            therm_zone.ceilings + therm_zone.floors + therm_zone.windows
        for element in helplist:
            element.calc_equivalent_res()
            element.calc_ua_value()
        # five-day period expressed as an angular frequency
        omega = (2 * math.pi / 86400 / 5)
        # NOTE(review): this second instantiation (merge_windows=True)
        # silently replaces the merge_windows=False object created above, so
        # only the merged-window variant is exercised below -- confirm
        # whether both variants were meant to be tested.
        calc_attr = TwoElement(therm_zone, merge_windows=True, t_bt=5)
        helplist_outer_walls = therm_zone.outer_walls + therm_zone.rooftops +\
            therm_zone.ground_floors + therm_zone.windows
        r1_ow, c1_ow = calc_attr._calc_parallel_connection(
            element_list=helplist_outer_walls,
            omega=omega)
        assert round(r1_ow, 14) == 0.00100751548411
        assert round(c1_ow, 5) == 3648580.59312
        helplist_inner_walls = therm_zone.inner_walls +\
            therm_zone.ceilings + therm_zone.floors
        r1_iw, c1_iw = calc_attr._calc_parallel_connection(
            element_list=helplist_inner_walls,
            omega=omega)
        assert round(r1_iw, 13) == 0.0097195611408
        assert round(c1_iw, 6) == 319983.518743
    def test_sum_building_elements_three(self):
        """test of combine_building_elements

        Regression test: rebuilds the helptest archetype and pins the
        aggregated ThreeElement inner-wall, outer-wall, ground-floor and
        window quantities to previously computed reference values.
        """
        prj.set_default()
        helptest.building_test2(prj)
        from teaser.logic.buildingobjects.calculation.three_element import\
            ThreeElement
        therm_zone = prj.buildings[-1].thermal_zones[-1]
        calc_attr = ThreeElement(therm_zone, merge_windows=False, t_bt=5)
        # every envelope component needs its resistances/UA computed first
        helplist = therm_zone.outer_walls + therm_zone.rooftops +\
            therm_zone.ground_floors + therm_zone.inner_walls +\
            therm_zone.ceilings + therm_zone.floors + therm_zone.windows
        for element in helplist:
            element.calc_equivalent_res()
            element.calc_ua_value()
        calc_attr._sum_outer_wall_elements()
        calc_attr._sum_ground_floor_elements()
        calc_attr._sum_inner_wall_elements()
        calc_attr._sum_window_elements()
        # innerwall
        assert round(calc_attr.ua_value_iw, 16) == 14.286493860845841
        assert round(calc_attr.area_iw, 1) == 34.0
        assert round(calc_attr.r_conv_inner_iw, 18) == 0.010893246187363833
        assert round(calc_attr.r_rad_inner_iw, 19) == 0.0058823529411764705
        assert round(calc_attr.r_comb_inner_iw, 19) == 0.003819709702062643
        assert round(calc_attr.alpha_conv_inner_iw, 1) == 2.7
        assert round(calc_attr.alpha_rad_inner_iw, 1) == 5.0
        assert round(calc_attr.alpha_comb_inner_iw, 1) == 7.7
        # outerwall (ground floors are aggregated separately here)
        assert round(calc_attr.ua_value_ow, 16) == 77.23037843150993
        assert round(calc_attr.area_ow, 1) == 188.0
        assert round(calc_attr.r_conv_inner_ow, 19) == 0.0027203482045701846
        assert round(calc_attr.r_rad_inner_ow, 18) == 0.001063829787234043
        assert round(calc_attr.r_comb_inner_ow, 20) == 0.0007647598654022638
        assert round(calc_attr.r_conv_outer_ow, 20) == 0.00026595744680851064
        assert round(calc_attr.r_rad_outer_ow, 18) == 0.001063829787234043
        assert round(calc_attr.r_comb_outer_ow, 20) == 0.0002127659574468085
        assert round(calc_attr.alpha_conv_inner_ow, 5) == 1.95532
        assert round(calc_attr.alpha_rad_inner_ow, 5) == 5.0
        assert round(calc_attr.alpha_comb_inner_ow, 5) == 6.95532
        assert round(calc_attr.alpha_conv_outer_ow, 1) == 20.0
        assert round(calc_attr.alpha_rad_outer_ow, 5) == 5.0
        assert round(calc_attr.alpha_comb_outer_ow, 1) == 25.0
        # groundfloor
        assert round(calc_attr.ua_value_gf, 16) == 58.351477449455686
        assert round(calc_attr.area_gf, 1) == 140.0
        assert round(calc_attr.r_conv_inner_gf, 19) == 0.004201680672268907
        assert round(calc_attr.r_rad_inner_gf, 18) == 0.001428571428571429
        assert round(calc_attr.r_comb_inner_gf, 20) == 0.0010660980810234541
        assert round(calc_attr.alpha_conv_inner_gf, 5) == 1.7
        assert round(calc_attr.alpha_rad_inner_gf, 5) == 5.0
        assert round(calc_attr.alpha_comb_inner_gf, 5) == 6.7
        # window
        assert round(calc_attr.ua_value_win, 16) == 32.87895310796074
        assert round(calc_attr.area_win, 1) == 18.0
        assert round(calc_attr.r_conv_inner_win, 19) == 0.032679738562091505
        assert round(calc_attr.r_rad_inner_win, 4) == 0.0111
        assert round(calc_attr.r_comb_inner_win, 19) == 0.008291873963515755
        assert round(calc_attr.r_conv_outer_win, 5) == 0.00278
        assert round(calc_attr.r_rad_outer_win, 4) == 0.0111
        assert round(calc_attr.r_comb_outer_win, 4) == 0.0022
        assert round(calc_attr.alpha_conv_inner_win, 1) == 1.7
        assert round(calc_attr.alpha_comb_outer_win, 1) == 25.0
        assert round(calc_attr.alpha_conv_outer_win, 1) == 20.0
        assert round(calc_attr.weighted_g_value, 3) == 0.789
def test_calc_chain_matrix_three(self):
    """Check R1/C1 of the ThreeElement parallel connections."""
    from teaser.logic.buildingobjects.calculation.three_element import (
        ThreeElement,
    )

    zone = prj.buildings[-1].thermal_zones[-1]
    three_elem = ThreeElement(zone, merge_windows=False, t_bt=5)

    # Prepare equivalent resistances and UA values for every envelope part.
    envelope = (
        zone.outer_walls
        + zone.rooftops
        + zone.ground_floors
        + zone.inner_walls
        + zone.ceilings
        + zone.floors
        + zone.windows
    )
    for part in envelope:
        part.calc_equivalent_res()
        part.calc_ua_value()

    # Angular frequency of a 5-day period in 1/s.
    omega = 2 * math.pi / 86400 / 5

    # Outer-wall branch: outer walls, rooftops and windows in parallel.
    outer_parts = zone.outer_walls + zone.rooftops + zone.windows
    r1_ow, c1_ow = three_elem._calc_parallel_connection(
        element_list=outer_parts,
        omega=omega)
    assert round(r1_ow, 14) == 0.00175779297228
    assert round(c1_ow, 5) == 2091259.60825

    # Inner-wall branch: inner walls, ceilings and floors in parallel.
    inner_parts = zone.inner_walls + zone.ceilings + zone.floors
    r1_iw, c1_iw = three_elem._calc_parallel_connection(
        element_list=inner_parts,
        omega=omega)
    assert round(r1_iw, 13) == 0.0097195611408
    assert round(c1_iw, 6) == 319983.518743
def test_sum_building_elements_four(self):
    """test of combine_building_elements for FourElement (merge_windows=True)."""
prj.set_default()
helptest.building_test2(prj)
from teaser.logic.buildingobjects.calculation.four_element import\
FourElement
therm_zone = prj.buildings[-1].thermal_zones[-1]
calc_attr = FourElement(therm_zone, merge_windows=True, t_bt=5)
# compute per-element resistances/UA values before aggregation
helplist = therm_zone.outer_walls + therm_zone.rooftops +\
therm_zone.ground_floors + therm_zone.inner_walls +\
therm_zone.ceilings + therm_zone.floors + therm_zone.windows
for element in helplist:
element.calc_equivalent_res()
element.calc_ua_value()
calc_attr._sum_outer_wall_elements()
calc_attr._sum_ground_floor_elements()
calc_attr._sum_rooftop_elements()
calc_attr._sum_inner_wall_elements()
calc_attr._sum_window_elements()
# innerwall
assert round(calc_attr.ua_value_iw, 16) == 14.286493860845841
assert round(calc_attr.area_iw, 1) == 34.0
assert round(calc_attr.r_conv_inner_iw, 18) == 0.010893246187363833
assert round(calc_attr.r_rad_inner_iw, 19) == 0.0058823529411764705
assert round(calc_attr.r_comb_inner_iw, 19) == 0.003819709702062643
assert round(calc_attr.alpha_conv_inner_iw, 1) == 2.7
assert round(calc_attr.alpha_rad_inner_iw, 1) == 5.0
assert round(calc_attr.alpha_comb_inner_iw, 1) == 7.7
# outerwall
assert round(calc_attr.ua_value_ow, 16) == 19.83577523748189
assert round(calc_attr.area_ow, 1) == 48.0
assert round(calc_attr.r_conv_inner_ow, 19) == 0.007716049382716048
assert round(calc_attr.r_rad_inner_ow, 18) == 0.004166666666666667
assert round(calc_attr.r_comb_inner_ow, 20) == 0.0027056277056277055
assert round(calc_attr.r_conv_outer_ow, 20) == 0.0010416666666666667
assert round(calc_attr.r_rad_outer_ow, 18) == 0.004166666666666667
assert round(calc_attr.r_comb_outer_ow, 20) == 0.0008333333333333334
assert round(calc_attr.alpha_conv_inner_ow, 5) == 2.7
assert round(calc_attr.alpha_rad_inner_ow, 5) == 5.0
assert round(calc_attr.alpha_comb_inner_ow, 5) == 7.7
assert round(calc_attr.alpha_conv_outer_ow, 1) == 20.0
assert round(calc_attr.alpha_rad_outer_ow, 5) == 5.0
assert round(calc_attr.alpha_comb_outer_ow, 1) == 25.0
# groundfloor
assert round(calc_attr.ua_value_gf, 16) == 58.351477449455686
assert round(calc_attr.area_gf, 1) == 140.0
assert round(calc_attr.r_conv_inner_gf, 19) == 0.004201680672268907
assert round(calc_attr.r_rad_inner_gf, 18) == 0.001428571428571429
assert round(calc_attr.r_comb_inner_gf, 20) == 0.0010660980810234541
assert round(calc_attr.alpha_conv_inner_gf, 5) == 1.7
assert round(calc_attr.alpha_rad_inner_gf, 5) == 5.0
assert round(calc_attr.alpha_comb_inner_gf, 5) == 6.7
# rooftop (attributes below are *_rt from _sum_rooftop_elements)
assert round(calc_attr.ua_value_rt, 16) == 57.394603194028036
assert round(calc_attr.area_rt, 1) == 140.0
assert round(calc_attr.r_conv_inner_rt, 19) == 0.004201680672268907
assert round(calc_attr.r_rad_inner_rt, 18) == 0.001428571428571429
assert round(calc_attr.r_comb_inner_rt, 20) == 0.0010660980810234541
assert round(calc_attr.r_conv_outer_rt, 20) == 0.00035714285714285714
assert round(calc_attr.r_rad_outer_rt, 18) == 0.001428571428571429
assert round(calc_attr.r_comb_outer_rt, 20) == 0.00028571428571428574
assert round(calc_attr.alpha_conv_inner_rt, 5) == 1.7
assert round(calc_attr.alpha_rad_inner_rt, 5) == 5.0
assert round(calc_attr.alpha_comb_inner_rt, 5) == 6.7
assert round(calc_attr.alpha_conv_outer_rt, 1) == 20.0
| |
!HawCyDkL.:,.nlD!jQFrq:-G'UBTs/,"
"LPNf'?nlD,;G'UBTs,HawCyDkL;-.??LPNf';HawCyDkL,?:?;G'UBTs___ "
"jQFrq._!:;HawCyDkL!:_jnjG!:G'UBTs:/;.-tLJKKVH!_-_nlD.LPNf' ,"
"::tdFdQVKM?jQFrq_!;jnjG,:!:-G'UBTs,tdFdQVKM_! "
"HawCyDkL-;tdFdQVKM;?jQFrq,G'UBTs;rPfpL/.?-jnjG,:,"
"-:tLJKKVH.;.nlD_jQFrq_!/;tdFdQVKM;//!tdFdQVKM!.zuPwI_!,,"
"G'UBTs_: "
"_nlD/.?TlVstiy/_:.,tLJKKVH:tdFdQVKM._.tLJKKVH rPfpL : "
"jQFrq!:jnjG/ ,jQFrq/_/ HawCyDkL.HawCyDkL?:-tdFdQVKM_; "
"G'UBTs!-lSnCvsO-./_tdFdQVKM,:tdFdQVKM!.;-,TlVstiy.,"
"tLJKKVH!?nlD_nlD /;lSnCvsO!.;tdFdQVKM?,!,"
"lSnCvsO;TlVstiy?:G'UBTs "
" .jQFrq;tdFdQVKM.HawCyDkL: .nlD_/tdFdQVKM/_?tLJKKVH/ "
".:G'UBTs.?,::lSnCvsO.TlVstiy,"
"!:!nlD;;::tdFdQVKM!.rPfpL;;_!TlVstiy /:tdFdQVKM. "
"-?.jnjG-HawCyDkL-?LPNf'.!-: HawCyDkL?/,"
"_TlVstiy_TlVstiy-.!.tdFdQVKM;.,"
".?TlVstiy.!:?!tdFdQVKM?HawCyDkL:. "
"!!jnjG/?/HawCyDkL-__:G'UBTs,.? tLJKKVH!- _,HawCyDkL.: _jnjG "
";LPNf'?HawCyDkL??;-G'UBTs___!HawCyDkL/TlVstiy - "
"tdFdQVKM;jQFrq,"
"?jnjG.!tLJKKVH/!!_,"
"G'UBTs?__lSnCvsO??:;jnjG;tdFdQVKM._/;jnjG!TlVstiy!G'UBTs"
"/-:lSnCvsO;G'UBTs?-_nlD??:?/tLJKKVH:rPfpL:!;:!nlD;?/,;G'UBTs,"
"-?;HawCyDkL;? ?tLJKKVH,..;tdFdQVKM/ , _lSnCvsO./, "
"rPfpL:jnjG?.?.TlVstiy!;?,?tLJKKVH:,"
"LPNf'/:.?!rPfpL_nlD?_:/-jnjG,"
"rPfpL:: ;;G'UBTs/,tLJKKVH !G'UBTs:..LPNf'.,;-/tLJKKVH;_: "
"jnjG:?_.:jQFrq- ,G'UBTs-..,-jQFrq;/_?!TlVstiy:!_HawCyDkL "
".;tLJKKVH-?-,;"),
["g'ubts", 'tdfdqvkm', 'hawcydkl'])
def test_top_3_words_038(self):
    """Generated regression case; expects ["znldl'scu", 'dad', 'byrzfrw']."""
self.assertEqual(top_3_words(
"DAD /-,.bYRzfRW_:ZNLDl'SCu_DAD,;!_.UBmhzEMFQl://?,"
"ZNLDl'SCu!_:?.bYRzfRW/ ZNLDl'SCu;- ;.ZNLDl'SCu;/ZNLDl'SCu,"
"bYRzfRW_?__DAD,?.;ZNLDl'SCu?DAD:/:!ZNLDl'SCu;bYRzfRW,"
";!DXSYmSufvz_Xbfn.;-/.ZNLDl'SCu-,"
"?ZNLDl'SCu.:!;Xbfn;?DXSYmSufvz. "
".,bYRzfRW/ ,; DAD,bYRzfRW:: /YsqUiZV-?DAD.bYRzfRW_,"
"_ DAD-;ZNLDl'SCu..;._igVOM;DXSYmSufvz.?:DAD,ZNLDl'SCu/,"
".UBmhzEMFQl.DXSYmSufvz/;.?bYRzfRW-,_;.Xbfn/;;_bYRzfRW/,"
"_!ZNLDl'SCu/_./-DAD?!!Xbfn/_DAD/ZNLDl'SCu,:ZNLDl'SCu-._/ "
"DAD--.ZNLDl'SCu! .ZNLDl'SCu?,-DAD !:?DAD _DAD:, "
"_UBmhzEMFQl-DXSYmSufvz__/_-ZNLDl'SCu_-.-!bYRzfRW/DXSYmSufvz"
"!!UBmhzEMFQl /Xbfn-DAD.UBmhzEMFQl._UBmhzEMFQl.-igVOM-,"
"DXSYmSufvz.:::-DAD:;,bYRzfRW,;_?UBmhzEMFQl?./ DXSYmSufvz-,,"
"DXSYmSufvz/?: DXSYmSufvz-:.bYRzfRW .DAD,"
"_Xbfn!/Xbfn-/_:-bYRzfRW.;Xbfn,:igVOM_?Xbfn:DAD .;,"
"DAD;:ZNLDl'SCu/-Xbfn-?.bYRzfRW-:_?DXSYmSufvz / Xbfn- : "
";ZNLDl'SCu.:;.,bYRzfRW. "
"_-ZNLDl'SCu:_/Xbfn?;.;?Xbfn/-DXSYmSufvz?/_ZNLDl'SCu_,,"
"!ZNLDl'SCu/ .igVOM:; ;,DAD;-ZNLDl'SCu_: "
"/DXSYmSufvz/!/-/bYRzfRW- "
"_,DXSYmSufvz?UBmhzEMFQl;_?:bYRzfRW--!DXSYmSufvz.UBmhzEMFQl "
"!/_ZNLDl'SCu-ZNLDl'SCu!-!,ZNLDl'SCu-./:.igVOM_UBmhzEMFQl, "
"/UBmhzEMFQl,-:?Xbfn/,--igVOM-_/,"
"ZNLDl'SCu;?DXSYmSufvz!bYRzfRW,,,"
"-:ZNLDl'SCu?_.igVOM,/,!ZNLDl'SCu_?bYRzfRW/ UBmhzEMFQl!DAD,"
"?_/ZNLDl'SCu!"),
["znldl'scu", 'dad', 'byrzfrw'])
def test_top_3_words_039(self):
    """Generated regression case; expects ['pux', 'wpq', 'snpvbpirvm']."""
self.assertEqual(top_3_words(
"lAjRcv ._?kblOVnek'..snPvBpirVm_-,"
"Vgu!snPvBpirVm!aThhNjAKEv?!/VDbDwJhz-mHx /puX./.puX "
"TxGJcCKtu?__ "
"mHx,?:LYhPncUbH_LYhPncUbH:!:_dpJCctE.;GTYOXavD _;PLCEnVsz:,"
"wPq -/_jyTxnpqRHK;/Vgu/?-.kblOVnek'!wPq,! lAjRcv !? "
"aThhNjAKEv,"
"-aThhNjAKEv,?/?lAjRcv_wPq//:Vgu Vgu,'LjFAl "
"!lAjRcv/Vgu_?LYhPncUbH::-!,wPq,: ?!TxGJcCKtu;;wPq?Vgu_,"
".aThhNjAKEv,snPvBpirVm-!Vgu!_..,puX-:lAjRcv!:;/;snPvBpirVm "
".:TxGJcCKtu:;:-puX,!'LjFAl..- aThhNjAKEv/kblOVnek',?lAjRcv; "
"_?LYhPncUbH,/wPq,-,'LjFAl; /_'LjFAl.-!,"
"cNQOQUAaNb/.aThhNjAKEv??_aThhNjAKEv?-!puX!.wPq. "
"!?-aThhNjAKEv_/_lAjRcv-;::TxGJcCKtu,._.,"
"TxGJcCKtu_XPAfaEyQqt: ,"
"dpJCctE;;wPq - -lAjRcv;aThhNjAKEv;wPq_,LYhPncUbH//_,"
":wPq/.LYhPncUbH?-LYhPncUbH,"
";LYhPncUbH;.?::wPq:rTeJFn.lAjRcv--puX/!:;dpJCctE! "
"-aThhNjAKEv;/!Vgu,"
"TxGJcCKtu:?mHx:snPvBpirVm!aThhNjAKEv:?LYhPncUbH,"
":;?:cNQOQUAaNb_.?;-'LjFAl!.;;XPAfaEyQqt/XPAfaEyQqt "
"aThhNjAKEv:Vgu? -dpJCctE/??-snPvBpirVm?!snPvBpirVm:lAjRcv?:/ "
"lAjRcv:-'LjFAl-/wPq?,;,.'LjFAl wPq.LYhPncUbH.?wPq,"
"?!LYhPncUbH,"
"!. ;dpJCctE.TxGJcCKtu?;puX-?TxGJcCKtu_:: "
"?snPvBpirVm/mHx!-.puX!/,!puX_,;kblOVnek': "
"-puX:/!cNQOQUAaNb!dpJCctE;-,rTeJFn_/:?cNQOQUAaNb.: "
"kblOVnek'!:,"
".snPvBpirVm. ?-:cNQOQUAaNb!:'LjFAl/'LjFAl;!.puX/wPq: ; "
";kblOVnek'!/! /mHx:/-rTeJFn-: ?dpJCctE:!snPvBpirVm "
".-snPvBpirVm,"
";_?:aThhNjAKEv//.LYhPncUbH/;:/cNQOQUAaNb!'LjFAl_;!lAjRcv// ;,"
"PLCEnVsz ,-?aThhNjAKEv/:wPq! cNQOQUAaNb ,!/mHx-?;lAjRcv,"
"/!jyTxnpqRHK- mHx:.,!-Vgu-!..mHx,"
"snPvBpirVm;?!/_puX?!LYhPncUbH,"
":_ snPvBpirVm:;_Vgu_'LjFAl,. puX.:;mHx!wPq ,"
":/Vgu:!_snPvBpirVm/;puX_-kblOVnek'?!. 'LjFAl-Vgu!/;TxGJcCKtu,"
"lAjRcv kblOVnek'; "
"XPAfaEyQqt;:/puX;lAjRcv?:snPvBpirVm_.snPvBpirVm-?snPvBpirVm"
"/:!;-XPAfaEyQqt!,kblOVnek'?-?:_Vgu.GTYOXavD!!Vgu.-,"
".!mHx-;:LYhPncUbH :,/!mHx?:-Vgu__PLCEnVsz./!_ lAjRcv/ "
"XPAfaEyQqt,___puX_...;kblOVnek'/?,,XPAfaEyQqt_,,"
"-dpJCctE?kblOVnek'?kblOVnek',- wPq-/snPvBpirVm._!-wPq- "
";?aThhNjAKEv_lAjRcv_, :,LYhPncUbH- ;puX,"
".jyTxnpqRHK-snPvBpirVm/?:,!GTYOXavD;dpJCctE -:?GTYOXavD:?;!,"
"aThhNjAKEv./:LYhPncUbH.aThhNjAKEv/?,?mHx?-kblOVnek' ,./puX "
"-jyTxnpqRHK ,,?snPvBpirVm?,"
"-Vgu.cNQOQUAaNb--.?wPq!_mHx/_;/puX-;./puX;/ ,"
"kblOVnek'!/LYhPncUbH:-TxGJcCKtu_/,"
":;lAjRcv_-LYhPncUbH?!.-GTYOXavD !-puX__/wPq/,"
":_snPvBpirVm.-.;wPq "
"//?-kblOVnek'?:aThhNjAKEv;-!,'LjFAl_ -_?mHx :, "
"kblOVnek';?aThhNjAKEv, "
"puX?Vgu?._.puX??_XPAfaEyQqt_:kblOVnek'!??cNQOQUAaNb;'LjFAl"
"!/;mHx_-:puX- ,puX__kblOVnek'-- !Vgu;?_?_wPq ,? wPq;_,"
"kblOVnek'_wPq:? TxGJcCKtu.puX;/aThhNjAKEv :;,lAjRcv :. "
"/cNQOQUAaNb_puX!,snPvBpirVm?/ /'LjFAl;!,"
"puX:;:/_cNQOQUAaNb?.-aThhNjAKEv!,,Vgu_-_-,jyTxnpqRHK.?!,"
"-puX//dpJCctE:? / Vgu/,-/snPvBpirVm;:?,lAjRcv,; !.jyTxnpqRHK "
"cNQOQUAaNb,;dpJCctE_:snPvBpirVm?.-;?dpJCctE "
":wPq_;aThhNjAKEv!-/!_LYhPncUbH_aThhNjAKEv-/:;-kblOVnek"
"'--LYhPncUbH__ !;wPq,_!,VDbDwJhz,.?aThhNjAKEv.!:,rTeJFn/.-,"
"snPvBpirVm- GTYOXavD!!kblOVnek' ?/_,snPvBpirVm "),
['pux', 'wpq', 'snpvbpirvm'])
def test_top_3_words_040(self):
    """Generated regression case; expects ['xfuushlbb', 'vyerxykl', 'pyltwtwsl']."""
self.assertEqual(top_3_words(
"pPirXoK ?_XFUUShLbB,, .?pPirXoK_dXvy:;.dXvy/vyerXYKl!: "
"!TfIlhlF-:_! PyltWTwsl !OZxX//?:TfIlhlF!-_,"
";pPirXoK!:/dXvy?.TfIlhlF .,vyerXYKl PyltWTwsl-_ .!pPirXoK-? "
"XFUUShLbB.?,/.vyerXYKl_;_PyltWTwsl PyltWTwsl "
"dXvy?/!OZxX-?vyerXYKl -,/?XFUUShLbB.-!/pPirXoK/:-_hnBMh'hWe,"
";;/,"
"hnBMh'hWe ?lcZ,!.?XFUUShLbB:TfIlhlF- . TfIlhlF "
".pPirXoK-hnBMh'hWe--; .vyerXYKl;:TfIlhlF.;_-vyerXYKl?..;,"
"XFUUShLbB-!,--XFUUShLbB?XFUUShLbB _!/:XFUUShLbB?XFUUShLbB,._,,"
"vyerXYKl!.;pPirXoK?pPirXoK-? !lcZ?_.:_vyerXYKl//?!OZxX "
"pPirXoK./? OZxX/. TfIlhlF,!,"
"XFUUShLbB__:pPirXoK??;/vyerXYKl-?-;!vyerXYKl:..hnBMh'hWe"
"!/-!hnBMh'hWe///:hnBMh'hWe_:?!;pPirXoK/_.-;vyerXYKl, "
"vyerXYKl:;-PyltWTwsl!!:hnBMh'hWe,"
"?pPirXoK--!.vyerXYKl:?!pPirXoK!PyltWTwsl-?!:.TfIlhlF/dXvy"
"!??-/hnBMh'hWe?!.PyltWTwsl -:dXvy/XFUUShLbB:dXvy,"
"_/_XFUUShLbB: "
"PyltWTwsl!!.PyltWTwsl:,?/dXvy:PyltWTwsl?vyerXYKl !-/TfIlhlF,"
"vyerXYKl!:hnBMh'hWe_!:,,XFUUShLbB.,dXvy,dXvy,_,! PyltWTwsl,"
";-hnBMh'hWe:-!:-hnBMh'hWe;!_dXvy/PyltWTwsl?,"
"lcZ/:.dXvy_OZxX?--PyltWTwsl;.:OZxX?!..;XFUUShLbB?OZxX,?,"
"vyerXYKl "
".. vyerXYKl/-:-pPirXoK.-_cLflisYD -TfIlhlF__dXvy!,"
"PyltWTwsl?:_ "
"PyltWTwsl. PyltWTwsl. .PyltWTwsl,;vyerXYKl?! "
":.XFUUShLbB?RIAJou.XFUUShLbB!/_,PyltWTwsl ?!;_XFUUShLbB_,"
"._?vyerXYKl,:__vyerXYKl-.dXvy:,vyerXYKl: ?;/PyltWTwsl;, "
"_/pPirXoK;?!/:XFUUShLbB,/_.!vyerXYKl,,;? vyerXYKl/.XFUUShLbB/ "
"PyltWTwsl,_-XFUUShLbB!XFUUShLbB-XFUUShLbB.;?OZxX-PyltWTwsl"
"?://,"
"XFUUShLbB.XFUUShLbB:-_?!TfIlhlF.-/XFUUShLbB ;vyerXYKl,"
"XFUUShLbB "
"-OZxX,?"),
['xfuushlbb', 'vyerxykl', 'pyltwtwsl'])
def test_top_3_words_041(self):
    """Generated regression case: only one distinct word; expects ['tyb']."""
self.assertEqual(top_3_words(
"TYb!..,_TYb-TYb_!__TYb_/ TYb : TYb:!/.TYb,;!TYb,.TYb?TYb.; "
"!/TYb;/!TYb.?_TYb !-!TYb_ TYb! ,;TYb TYb_TYb,!TYb..TYb :!? "
"TYb: "
"_/TYb!!TYb,TYb;_/TYb: ,?TYb_-;_/"),
['tyb'])
def test_top_3_words_042(self):
    """Generated regression case; expects ['liazxofgu', 'rza', 'krwqu']."""
self.assertEqual(top_3_words(
"RZa_;,KrWQU:-KrWQU?!,--liAZXoFgu_:. -liAZXoFgu/- "
";;RZa.:liAZXoFgu! BNLaeEi .-.KrWQU-BNLaeEi!//BNLaeEi,"
":!;.liAZXoFgu_KrWQU?RZa;:liAZXoFgu; ,"
".?RZa!!?liAZXoFgu::;liAZXoFgu. liAZXoFgu,:-RZa__?RZa/ ?; "
"BNLaeEi/,;. liAZXoFgu; liAZXoFgu liAZXoFgu ? "
";liAZXoFgu/!-.liAZXoFgu_-.-RZa-?.;;RZa/ ;?RZa "
"liAZXoFgu-liAZXoFgu?-:/RZa:..KrWQU!;_;RZa:!-;!RZa/! -_RZa, "
":RZa-.,!_SOj -liAZXoFgu:liAZXoFgu:/_:!KrWQU ;;,"
"liAZXoFgu:liAZXoFgu:liAZXoFgu/:/,RZa! liAZXoFgu!-BNLaeEi;.,"
"SOj./liAZXoFgu:liAZXoFgu:liAZXoFgu,/liAZXoFgu/liAZXoFgu:,RZa,"
"liAZXoFgu?-._KrWQU;/RZa/ RZa?;.??RZa;-,!RZa ,"
"KrWQU./liAZXoFgu_RZa;-::KrWQU-RZa__._RZa/ KrWQU-.liAZXoFgu.? "
"RZa..liAZXoFgu!?,RZa. :."),
['liazxofgu', 'rza', 'krwqu'])
def test_top_3_words_043(self):
    """Generated regression case; expects ['fmvn', 'uqvwipikbb', "r'hscyf"]."""
self.assertEqual(top_3_words(
"r'HScYF-,!FmVN ,.,.r'HScYF.r'HScYF/._? FmVN, "
"/!;r'HScYF;!?UQvWIpIkbb.--?-UQvWIpIkbb//.LwlxPU:! "
"!.UQvWIpIkbb "
".r'HScYF;FmVN ?,-FmVN./:UQvWIpIkbb./. r'HScYF,?UQvWIpIkbb_,"
"FmVN-UQvWIpIkbb_ / .FmVN__:?FmVN,;r'HScYF!?UQvWIpIkbb/;.FmVN,"
"r'HScYF:UQvWIpIkbb!!/FmVN?,.;/r'HScYF--FmVN.::?/UQvWIpIkbb,"
"r'HScYF?!!:FmVN?!FmVN/.,,/LwlxPU;_LwlxPU/FmVN-FmVN:FmVN "
"!-UQvWIpIkbb?:FmVN?_LwlxPU?.?--UQvWIpIkbb -/LwlxPU-/_ ,"
"FmVN . _ "
"UQvWIpIkbb. :: r'HScYF;!?!_FmVN?/ ,FmVN,?,"
";FmVN!!UQvWIpIkbb-/:UQvWIpIkbb:!:;UQvWIpIkbb.;,-,"
"UQvWIpIkbb_//;FmVN:r'HScYF,"
"UQvWIpIkbb?FmVN-:?UQvWIpIkbb??:UQvWIpIkbb--/:"),
['fmvn', 'uqvwipikbb', "r'hscyf"])
def test_top_3_words_044(self):
    """Generated regression case; expects ['bzyoqlyi', 'xoipmezaf', "hxhptma'j"]."""
self.assertEqual(top_3_words(
"fXYs_-hxhPtmA'j:/XOipMEZaf-/_./hxhPtmA'j:?HvAf/XOipMEZaf!;,"
";bZyOQLyi??/-_bZyOQLyi.;-hxhPtmA'j//,/,XOipMEZaf.; bZyOQLyi?,"
";;/HvAf!, !fXYs--?HvAf??!bZyOQLyi?bZyOQLyi .XOipMEZaf:? "
"!:bZyOQLyi.bZyOQLyi/ ?_hxhPtmA'j; "
"HvAf::?XOipMEZaf:;bZyOQLyi.,;,"
"-XOipMEZaf ?!HvAf.!_!:fXYs/hxhPtmA'j;. "
"!;UiF__fXYs;.:-/AzHZqqqZd:?:!.UiF!bZyOQLyi-AzHZqqqZd!: "
"??bZyOQLyi;bZyOQLyi--: /bZyOQLyi,"
":---UiF/;hxhPtmA'j!fXYs;;;?fXYs!-hxhPtmA'j_!XOipMEZaf,UiF;,"
"hxhPtmA'j_. XOipMEZaf;hxhPtmA'j;?/.XOipMEZaf__//hxhPtmA'j-?, "
"?bZyOQLyi,/AzHZqqqZd .fXYs/_bZyOQLyi/.,::UiF.UiF_UiF-;HvAf:,"
"!?-bZyOQLyi:;bZyOQLyi,;/,!hxhPtmA'j,hxhPtmA'j? "
"-?!XOipMEZaf:?/: "
"UiF:/_ HvAf/!bZyOQLyi:?.:!HvAf_.bZyOQLyi;,"
"!_ XOipMEZaf.:!_!XOipMEZaf! UiF?-_bZyOQLyi:?AzHZqqqZd,"
";hxhPtmA'j!.::-fXYs UiF fXYs.:HvAf UiF.:;bZyOQLyi HvAf,,"
"XOipMEZaf/hxhPtmA'j;--bZyOQLyi?;bZyOQLyi?/bZyOQLyi,,,"
"?bZyOQLyi: "
"-.-UiF?;- bZyOQLyi -XOipMEZaf,"
"!hxhPtmA'j!;AzHZqqqZd?:bZyOQLyi!XOipMEZaf_XOipMEZaf,HvAf_"),
['bzyoqlyi', 'xoipmezaf', "hxhptma'j"])
def test_top_3_words_045(self):
    """Generated regression case; expects ["yhiyz'va", "cfbb'ywtl", 'shzkx']."""
self.assertEqual(top_3_words(
"yhIYz'vA?? _:igUmN-/:/!MXCLQAqej,vpUQj,"
".?-/yhIYz'vA;:MXCLQAqej,,"
"?shZkx_?,__shZkx!,NekJH /:MXCLQAqej_yhIYz'vA/shZkx,"
"/:_.MXCLQAqej:,// Cfbb'ywTl!/ Cfbb'ywTl::!/ "
"yhIYz'vA_yhIYz'vA_!shZkx_?-shZkx!; -;yhIYz'vA,"
"-.MXCLQAqej--shZkx;;?/yhIYz'vA!-NekJH!yhIYz'vA/?,"
"yhIYz'vA!-!shZkx:yhIYz'vA "
"igUmN_yhIYz'vA/!/?vpUQj?:_:?NekJH.yhIYz'vA;-::!shZkx/;/?:shZkx"
"?.-.Cfbb'ywTl,?: Cfbb'ywTl "
"!?:shZkx!!shZkx//?-shZkx:;vpUQj_;_shZkx!:::/Cfbb'ywTl"
"!..-shZkx "
"?yhIYz'vA- ,yhIYz'vA? :yhIYz'vA.!__ vpUQj yhIYz'vA vpUQj;? "
"/MXCLQAqej/Cfbb'ywTl/.?/MXCLQAqej:./:,shZkx.?shZkx_ "
".??yhIYz'vA;;_!:igUmN? ::;Cfbb'ywTl:yhIYz'vA!-. "
".Cfbb'ywTl-/yhIYz'vA;-!;/pRr'esj?;_/yhIYz'vA_igUmN?.:NekJH"
"!MXCLQAqej-/vpUQj.?_Cfbb'ywTl/; ;!vpUQj_:,"
"Cfbb'ywTl!-_-/Cfbb'ywTl_: pRr'esj-?:yhIYz'vA??-MXCLQAqej "
"shZkx_!//,MXCLQAqej:!/;NekJH??,"
"Cfbb'ywTl/_:Cfbb'ywTl:Cfbb'ywTl "
".,:shZkx-Cfbb'ywTl-yhIYz'vA;?.NekJH.vpUQj-! vpUQj?_-MXCLQAqej "
";?-!MXCLQAqej_Cfbb'ywTl:Cfbb'ywTl,-NekJH?-/Cfbb'ywTl,,"
":/_yhIYz'vA, "
"MXCLQAqej-.!.:NekJH?;!_Cfbb'ywTl!Cfbb'ywTl;/!shZkx, "
"vpUQj:/Cfbb'ywTl?MXCLQAqej_!!Cfbb'ywTl_,igUmN_;.;NekJH?, "
"?_pRr'esj;MXCLQAqej,-?-_vpUQj -igUmN-./-:Cfbb'ywTl:.,,NekJH/ "
".yhIYz'vA/ ;/Cfbb'ywTl ;NekJH/!/yhIYz'vA/ _ NekJH "
"Cfbb'ywTl__/!/vpUQj-:MXCLQAqej??_shZkx?-igUmN "
"NekJH./.-NekJH;/Cfbb'ywTl:/! !shZkx- .yhIYz'vA: "
"?MXCLQAqej;_shZkx/-,,NekJH?;."),
["yhiyz'va", "cfbb'ywtl", 'shzkx'])
def test_top_3_words_046(self):
    """Generated regression case; expects ['qkmkv', "jmb'", 'uos']."""
self.assertEqual(top_3_words(
"nZCcjxE- uOs ;:eevU._JrmCj:-:qkMkv-dMRRMbLV:,"
";nZCcjxE?:/:nZCcjxE!/: wrdfURff; :_MUzNwr jmB'. ?FjACIaTR!_ "
"!_aDILObzfsI_eevU:qkMkv -JrmCj/,qkMkv?uOs :JrmCj;_qkMkv:.,"
":/JrmCj-/nZCcjxE? .CJBueRhCm?-.jmB'-;:-:jNVeNYWYQ.?-gfU''?_ "
"uOs;gfU''?! qkMkv;-;.FjACIaTR.-jNVeNYWYQ;-!,"
"JrmCj.JrmCj/_.:?gfU''-/_nZCcjxE,"
"!- jmB'_jmB'?nZCcjxE.FjACIaTR:jNVeNYWYQ;;!?_eevU/.eevU!nZCcjxE"
";./FjACIaTR_._!qkMkv;; ;,aDILObzfsI .jmB'._JrmCj? "
".?qkMkv!-gfU''-,eevU! ;;gfU''!,"
"jNVeNYWYQ_jmB';cVz'c:_/-FjACIaTR?.:._KZl;cVz'c,FjACIaTR_ "
"-.FjACIaTR/;,_-jmB':jNVeNYWYQ.,"
".cVz'c-_gfU''-?/!FjACIaTR-:_-jNVeNYWYQ??FjACIaTR?jNVeNYWYQ ! "
"_,"
"nZCcjxE!!?qkMkv;-.?/eevU.FjACIaTR?;::zVSFRApHJT,,JrmCj.? ,,"
"jmB'-/-./zVSFRApHJT!qkMkv._ "
";FjACIaTR!_uOs._?jNVeNYWYQ?-;.?jmB':;cVz'c/!,:;aDILObzfsI, "
"qkMkv! jmB'!,_gfU''_ uOs_-jNVeNYWYQ?!MUzNwr_!,/_MUzNwr__,"
"jmB':;,! MUzNwr .?-qkMkv,__aDILObzfsI :!;.jmB',"
"????qkMkv?/-;JrmCj;eevU/!!,"
":aDILObzfsI_-/-eevU:?.;nZCcjxE;gfU''!/!_eevU!:.gfU''__jmB"
"'/--:JrmCj -!jNVeNYWYQ: gfU''__?qkMkv?//,.KZl_? -dMRRMbLV; "
"-qkMkv: /FjACIaTR:/_-?zVSFRApHJT:?-!gfU''//:uOs;!-!JrmCj,-,"
";uOs:??;FjACIaTR./?FjACIaTR;,aDILObzfsI:-;._eevU,"
"aDILObzfsI-.!??jmB':,uOs,!?jNVeNYWYQ:!!CJBueRhCm??_?liUQEzn?-,"
"?:qkMkv;-/?.CJBueRhCm !!/!zVSFRApHJT: "
"::jNVeNYWYQ_jNVeNYWYQ!eevU.aDILObzfsI:nZCcjxE.:MUzNwr_! "
"uOs_;FjACIaTR-_aDILObzfsI-:_eevU_!?:qkMkv;?qkMkv "
"/?//MUzNwr.;!dMRRMbLV_,:.aPCZ-?;-gfU'';?,;MUzNwr.?,.,"
"jmB'.uOs/_:!gfU'':.cVz'c/.JrmCj qkMkv /uOs__?FjACIaTR,"
"_nZCcjxE_,"
";!qkMkv,,_:.cVz'c - _!MUzNwr_: qkMkv,"
"?.dMRRMbLV-FjACIaTR?-!!jmB'_//jmB'-;!?JrmCj,? ,;jmB'_.- "
"JrmCj,,,"
":aDILObzfsI?;MUzNwr- zVSFRApHJT. ,-?MUzNwr uOs_qkMkv-_!-,"
"aPCZ__ :gfU''/?!uOs !.jNVeNYWYQ/._aDILObzfsI?_:!;MUzNwr "
"FjACIaTR "
"!_:dMRRMbLV/,?/!liUQEzn.uOs nZCcjxE;.uOs/gfU'',"
"cVz'c:uOs?uOs!jmB'--!qkMkv . ? qkMkv;-uOs.?,FjACIaTR-JrmCj,,"
"dMRRMbLV;nZCcjxE!,aDILObzfsI;MUzNwr "
"/uOs-?qkMkv?;;liUQEzn.!._JrmCj/.eevU.?-!:cVz'c_;?jmB';MUzNwr,"
"/: /liUQEzn?- nZCcjxE; ,gfU''. jmB'/eevU:CJBueRhCm //.,"
"nZCcjxE?gfU'':_;uOs? !_dMRRMbLV/jNVeNYWYQ,"
":;.-jNVeNYWYQ!MUzNwr?-;:CJBueRhCm? JrmCj-?: -JrmCj; ? "
";uOs;-?!jNVeNYWYQ,uOs _qkMkv?;-FjACIaTR; ?jmB' "
"?dMRRMbLV!!./MUzNwr zVSFRApHJT!. :gfU'';:;!;KZl! qkMkv.?,"
"/ jmB'!;.uOs:.!!?zVSFRApHJT,:-jmB'.jNVeNYWYQ-_:!!zVSFRApHJT,"
"_jmB'/qkMkv//_./eevU-gfU''/_.aPCZ;_ :/gfU'' ? "
"/;zVSFRApHJT_/gfU''!-.-nZCcjxE:?!_eevU:;,/!jNVeNYWYQ??,"
"MUzNwr/,"
"_jNVeNYWYQ!;MUzNwr.!jmB',?!.?jNVeNYWYQ!-nZCcjxE.qkMkv,-;,"
"gfU'',"
"gfU''!//.?MUzNwr_qkMkv-JrmCj-jNVeNYWYQ/:jNVeNYWYQ.?dMRRMbLV!- "
"-qkMkv_CJBueRhCm:;!JrmCj::"),
['qkmkv', "jmb'", 'uos'])
def test_top_3_words_047(self):
    """Generated regression case; expects ['xjyuykibes', 'wsrybzt', 'tsb']."""
self.assertEqual(top_3_words(
"wsrYbzt:.lTQI-?vufUlJhsVF!;,?_Vfjf. _;lTQI?-;,?TtAAkuFglJ,"
"-lTQI/yIOdWF'o-lTQI.!!.yOmciNMdH/_?-wavZ:wsrYbzt_.__wsrYbzt_ "
"Tsb!/XJyUykIbEs_ ;.Vfjf? -TtAAkuFglJ!yRXaASiJ:.;?wavZ_,"
"_yRXaASiJ;;/.wsrYbzt_./yIOdWF'o;.,,"
"yOmciNMdH?;-;yOmciNMdH-KNUdCaFn?:!-,yOmciNMdH!'mEc'sA'U._:,"
"XJyUykIbEs!, XJyUykIbEs?:?yIOdWF'o!:Tsb .,,XJyUykIbEs:wsrYbzt,"
"XJyUykIbEs.__,_XJyUykIbEs !TtAAkuFglJ!-wsrYbzt,,lTQI!:/ "
"!yOmciNMdH./! Vfjf;.,yRXaASiJ _.wavZ_?nUhVBtvY: "
"!/?Tsb;_!nUhVBtvY!?-/TtAAkuFglJ_wavZ wavZ: "
".wavZ-:!/XJyUykIbEs?? "
"XJyUykIbEs:Vfjf;, _ Vfjf.wavZ/?wsrYbzt-!:wavZ;!;,"
"?TtAAkuFglJ./;:wqKcPRspiD/?.?.Tsb:yOmciNMdH?,Tsb_ yOmciNMdH_ "
"TtAAkuFglJ.__,lTQI.Tsb!::!yRXaASiJ: -?,wavZ:XJyUykIbEs:; "
"XJyUykIbEs:-yIOdWF'o?;,_wsrYbzt-yOmciNMdH "
"?_.:Tsb//:?!wqKcPRspiD?TtAAkuFglJ_lTQI-?vufUlJhsVF "
":-wqKcPRspiD! "
" ,:TtAAkuFglJ! /_yIOdWF'o_,.?wsrYbzt//;!/Tsb_wsrYbzt/,"
"yIOdWF'o-_Vfjf__!Vfjf! "
"_!yOmciNMdH_XJyUykIbEs?-/_TtAAkuFglJ?lTQI;?XJyUykIbEs"
";;!XJyUykIbEs- "
";Tsb?TtAAkuFglJ--/_XJyUykIbEs/-/wsrYbzt:TtAAkuFglJ,"
";Tsb__!!.Tsb!?_lTQI-/wsrYbzt/./nUhVBtvY;:lTQI?-_; "
"Vfjf_:wsrYbzt "
"yIOdWF'o:::.!wavZ_;!.XJyUykIbEs:.-wsrYbzt. ,"
"/:yOmciNMdH;;/:XJyUykIbEs,;!?wsrYbzt_!/lTQI:_Vfjf;,/,"
"-Tsb..;wsrYbzt///, Vfjf,-TtAAkuFglJ; yOmciNMdH;Tsb_:.-?wavZ/,"
";:-XJyUykIbEs,:wavZ:-TtAAkuFglJ,-:;_Tsb-.,Tsb-, ?XJyUykIbEs "
":.wsrYbzt?_,XJyUykIbEs,.::/TtAAkuFglJ,-lTQI,"
"-_-yIOdWF'o:;._/Tsb "
",!/Vfjf:,vufUlJhsVF!/ _wavZ,,../Tsb-;.!/wavZ__Vfjf,"
"?;_wavZ_XJyUykIbEs_-,"
"Vfjf:wavZ;?XJyUykIbEs;:-!XJyUykIbEs??/!wavZ::_lTQI,TtAAkuFglJ,"
"-KNUdCaFn_?/. Tsb/-:wsrYbzt :,-yOmciNMdH wsrYbzt,"
"!?:yOmciNMdH;??_TtAAkuFglJ?;,-nUhVBtvY,?:..TtAAkuFglJ?- "
"yRXaASiJ:Tsb wqKcPRspiD.!_/Vfjf:/.,TtAAkuFglJ!_?XJyUykIbEs,"
".;./yOmciNMdH/-TtAAkuFglJ?lTQI?.Vfjf,:/yRXaASiJ, ,"
"yRXaASiJ:;- ,"
"wsrYbzt;!?vufUlJhsVF ??, XJyUykIbEs,.vufUlJhsVF-. "
";'mEc'sA'U/, "
":.yOmciNMdH;/:,:XJyUykIbEs/wqKcPRspiD!?yRXaASiJ_ lTQI;:;?Tsb- "
"_-.lTQI:wsrYbzt__Tsb?.Tsb_!;-!Vfjf//.wsrYbzt:/:XJyUykIbEs"
";/..yOmciNMdH "
"Vfjf_.XJyUykIbEs;TtAAkuFglJ!_;yOmciNMdH:!:?-yOmciNMdH-,"
"?;yIOdWF'o-_lTQI:;yIOdWF'o,"
"_/wsrYbzt_/XJyUykIbEs:;TtAAkuFglJ!lTQI:!,,;TtAAkuFglJ?,!, "
"wsrYbzt;,!Tsb!?//wsrYbzt Tsb; _-yIOdWF'o- .-Vfjf/KNUdCaFn/!,"
"?XJyUykIbEs!_Tsb_!?yIOdWF'o_/"),
['xjyuykibes', 'wsrybzt', 'tsb'])
def test_top_3_words_048(self):
    """Generated regression case; expects ['ewimalxqt', 'mzfdtim', "'pwenimn"]."""
self.assertEqual(top_3_words(
"EwImALXqt!,:?EwImALXqt_ 'pWenIMN.-mzFDTIM;_EwImALXqt,"
"mzFDTIM-.;-:mzFDTIM!mzFDTIM__?:.EwImALXqt,"
"--_:'pWenIMN.'pWenIMN!?/EwImALXqt!-,!mzFDTIM/-./!'pWenIMN "
":'pWenIMN!!'pWenIMN.?::?EwImALXqt-__EwImALXqt!'pWenIMN "
"__/-'pWenIMN, _mzFDTIM ,mzFDTIM,"
".:mzFDTIM:;?/'pWenIMN::?EwImALXqt?/mzFDTIM_mzFDTIM,:,"
"EwImALXqt!:mzFDTIM_-'pWenIMN.'pWenIMN;/EwImALXqt!EwImALXqt./,"
";!mzFDTIM_!.:;mzFDTIM?_;?'pWenIMN:mzFDTIM;_-?mzFDTIM"
"!;/;:mzFDTIM"
".??EwImALXqt: :'pWenIMN;;mzFDTIM ??:mzFDTIM?-/_,"
"EwImALXqt/EwImALXqt_?;'pWenIMN; "
"EwImALXqt_/?--EwImALXqt?;/;'pWenIMN/-!??'pWenIMN_-,"
"/ 'pWenIMN!mzFDTIM? : ;EwImALXqt_?- .EwImALXqt "
"EwImALXqt:EwImALXqt;;_EwImALXqt:_-;,EwImALXqt,"
";-EwImALXqt!-EwImALXqt,"
"'pWenIMN!/:-mzFDTIM??!EwImALXqt_;/?mzFDTIM :'pWenIMN/!,"
"mzFDTIM, "
";,'pWenIMN ,mzFDTIM!'pWenIMN?:._EwImALXqt-,'pWenIMN-!/ "
"'pWenIMN!!EwImALXqt ,_;EwImALXqt-/;; "
"EwImALXqt-!-/_mzFDTIM?mzFDTIM:EwImALXqt,.? mzFDTIM "
"/mzFDTIM!__/,"
"'pWenIMN!!/!/mzFDTIM:/?;mzFDTIM.,:"),
['ewimalxqt', 'mzfdtim', "'pwenimn"])
def test_top_3_words_049(self):
    """Generated regression case; expects ['ljrcm', 'tgjlpphwfq', 'vschnvehc']."""
self.assertEqual(top_3_words(
"ebuZ,!:,:lJrCM/?zOV,kgU ,lJrCM;-:/JistV_,:JistV "
"VschNVeHC?/!oZK "
"_:ebuZ?./,VschNVeHC ,DkHJ, /::kgU/ :_,kgU _LjMio! "
"?.LjMio;/.;!zOV.JistV_/_!bCdNl;/TcayBDKdE.;ebuZ /VschNVeHC?,,"
"-_bCdNl rgITudlh;lJrCM?FSWztfhAe!_zOV.;hSML! "
"xbEkltQrS?FSWztfhAe-CHmhfhMK;!!.rgITudlh._!hSML ::_;ebuZ,"
":!-Qnc-::/FSWztfhAe!_ xbEkltQrS.,JistV,;,??LjMio,;-oZK-,"
"_VschNVeHC,.oZK!?JistV:!/:VschNVeHC,?,FSWztfhAe.:hSML/,oZK_,"
"_??kgU/;JistV_-/zOV!:Qnc::!,!tGjLPphwFQ -:hSML_,TcayBDKdE:,"
"lJrCM;/:tGjLPphwFQ.?!:JistV?: :kgU-,!kgU;!//tGjLPphwFQ!!? "
".FSWztfhAe/?_.:TcayBDKdE..._Qnc./ oZK!_qNNflG,,?!;JistV,"
"_JistV! "
"!!xbEkltQrS:/Qnc!bCdNl /:-,bCdNl?rgITudlh_TcayBDKdE!rgITudlh.,"
"tGjLPphwFQ --_.CHmhfhMK/.tGjLPphwFQ_,?.;LjMio ???hSML!? "
"_?CHmhfhMK -;tGjLPphwFQ//-zOV/_:-.CHmhfhMK;bCdNl;,"
"zOV: ?Qnc-_../DkHJ. / ebuZ,!_bCdNl??_/:rgITudlh "
"/??Qnc:.;Qnc_?;.LjMio "
"qNNflG;Qnc!???tGjLPphwFQ?.VschNVeHC?:/;:TcayBDKdE.:..JistV"
":.lJrCM.,:-JistV//VschNVeHC ,VschNVeHC ;!JistV?. VschNVeHC,"
"!!ebuZ,--?rgITudlh- / _JistV;/;?/oZK,bCdNl/??,JistV?LjMio,"
";?-:ebuZ, ebuZ_?;tGjLPphwFQ.:__,xbEkltQrS? ,?tGjLPphwFQ,"
"!TcayBDKdE_zOV!:;TcayBDKdE:, ?zOV?__hSML;?,"
"tGjLPphwFQ:;?_:FSWztfhAe_?kgU;:kgU-rgITudlh?zOV:_oZK:TcayBDKdE"
";!TcayBDKdE,;:zOV/?_.ebuZ!_-!oZK:,"
";:TcayBDKdE.qNNflG_lJrCM.;!Qnc//-FSWztfhAe.,!:bCdNl//JistV,!,"
"JistV.;,-VschNVeHC;/lJrCM./lJrCM/!!.,"
"xbEkltQrS!_:/xbEkltQrS!;_?.TcayBDKdE!CHmhfhMK_/!. "
"JistV/xbEkltQrS ?JistV,.-TcayBDKdE-?.rgITudlh /xbEkltQrS "
"/qNNflG!-?VschNVeHC;._/,lJrCM_xbEkltQrS!?-VschNVeHC !:,"
"TcayBDKdE-_-Qnc-,?;LjMio-!.VschNVeHC:!_TcayBDKdE; "
"ebuZ./.!zOV-/ "
"!LjMio!/kgU.TcayBDKdE,:,LjMio-!:rgITudlh?- ./FSWztfhAe/,zOV-_,"
"JistV bCdNl-? !oZK-_/ebuZ.,?.kgU,"
"xbEkltQrS/_.:FSWztfhAe.:;-ebuZ?_;/_CHmhfhMK.;:/!tGjLPphwFQ"
"?--VschNVeHC. ;,ebuZ,-;ebuZ:,zOV / ??lJrCM?VschNVeHC.?/:oZK,"
"_: -JistV_tGjLPphwFQ;/;oZK?Qnc- ?,"
"zOV/Qnc:/LjMio/??;ebuZ;!qNNflG. !--rgITudlh-oZK/! "
"hSML?/-_/xbEkltQrS.:?/.lJrCM, -..rgITudlh: qNNflG?:.,"
"LjMio?DkHJ;;_kgU.TcayBDKdE/xbEkltQrS /-,,zOV?!; lJrCM__; "
"!VschNVeHC.LjMio!:?!FSWztfhAe? "
"/xbEkltQrS_;hSML._LjMio/_bCdNl/;_bCdNl:.?,"
"kgU? :-lJrCM-/./DkHJ/;??;zOV_oZK--..ebuZ!,VschNVeHC,"
"TcayBDKdE;?VschNVeHC_;!?-qNNflG:/oZK;-;:LjMio/.lJrCM!/zOV"
"!:ebuZ"
"?,qNNflG? zOV_:tGjLPphwFQ-? FSWztfhAe_.:_?lJrCM,"
";/_:xbEkltQrS?LjMio,/ !,TcayBDKdE-LjMio.:,!oZK_-rgITudlh..-? "
"rgITudlh ,zOV,, CHmhfhMK,,/!xbEkltQrS-tGjLPphwFQ!!:tGjLPphwFQ "
"TcayBDKdE?-/:_zOV.oZK,,xbEkltQrS,,tGjLPphwFQ!TcayBDKdE, "
";/LjMio "
"kgU.:;ebuZ!::VschNVeHC!VschNVeHC__?,/bCdNl:-?- Qnc "
"-!?;bCdNl;.,"
"xbEkltQrS::?VschNVeHC_tGjLPphwFQ?;;VschNVeHC;:xbEkltQrS:qNNflG"
".LjMio;/, lJrCM;-CHmhfhMK FSWztfhAe?,"
"; TcayBDKdE.;_.ebuZ:_zOV.;/LjMio_ ,"
"_hSML?::tGjLPphwFQ;..FSWztfhAe;.JistV-LjMio xbEkltQrS,"
"TcayBDKdE_LjMio-/ tGjLPphwFQ?/,!hSML oZK/?: VschNVeHC "
"rgITudlh;tGjLPphwFQ/;!lJrCM,ebuZ::/.lJrCM_/_.tGjLPphwFQ/,"
"/TcayBDKdE/_,/,JistV:?/lJrCM?;ebuZ,_.zOV:-:bCdNl; /?zOV.;? "
"ebuZ "
"hSML:-..tGjLPphwFQ-!bCdNl,,VschNVeHC!/tGjLPphwFQ;,,,"
"/TcayBDKdE_!;.bCdNl;-!/_bCdNl,_,/?FSWztfhAe. "
":TcayBDKdE:-::-FSWztfhAe::lJrCM:,. !rgITudlh,"
": -/rgITudlh:ebuZ:LjMio, "
"-JistV:_!kgU?:FSWztfhAe;lJrCM__-tGjLPphwFQ; ebuZ/kgU . "
"_lJrCM!, "
"_!zOV:_VschNVeHC,:-!xbEkltQrS?,-:.tGjLPphwFQ_VschNVeHC_ "
".!?qNNflG!!;;hSML.,?lJrCM?_ ?,"
"ebuZ.;LjMio::?VschNVeHC/_ebuZ!lJrCM:/.kgU,:!bCdNl,!,,"
"VschNVeHC; "
"_:kgU/!;rgITudlh??;LjMio--hSML/;.?hSML--./ebuZ-ebuZ!.;/oZK"
".::/rgITudlh?__LjMio?::?,lJrCM,"
"bCdNl!tGjLPphwFQ.?-/FSWztfhAe.--lJrCM?:JistV- _- zOV-,"
"-!.xbEkltQrS-//.!LjMio! "
":bCdNl!kgU?Qnc__qNNflG!xbEkltQrS:!-._qNNflG;-__zOV! hSML "
";!._rgITudlh?.,!_LjMio:.oZK bCdNl-tGjLPphwFQ "
":.lJrCM:!lJrCM./?oZK?!,oZK_TcayBDKdE. rgITudlh/:bCdNl.? "
"_xbEkltQrS?-: ,JistV_:-.-lJrCM?;;:-xbEkltQrS/ "
"!?FSWztfhAe-!/bCdNl-?_-_bCdNl--,VschNVeHC-,"
"._zOV.:!LjMio/_rgITudlh!:TcayBDKdE ;,qNNflG;/-.:lJrCM:?; "
"hSML?/?;_tGjLPphwFQ:.:?tGjLPphwFQ!/.?;LjMio,-?kgU "
"/::rgITudlh?rgITudlh_!-zOV--tGjLPphwFQ;_.oZK:-JistV/_._"),
['ljrcm', 'tgjlpphwfq', 'vschnvehc'])
def test_top_3_words_050(self):
    """Generated regression case; expects ['negzem', 'xbvhxjxnh', 'ameuj']."""
self.assertEqual(top_3_words(
"amEuj?.:,XBVhxjXnH..XBVhxjXnH:./XBVhxjXnH;.NegzEM ; NegzEM-_,"
".NegzEM?;?_/NegzEM-NegzEM-?NegzEM?. "
"?_NegzEM-;XBVhxjXnH:/XBVhxjXnH!/-.-NegzEM:NegzEM,"
"!XBVhxjXnH_NegzEM.NegzEM-NegzEM /XBVhxjXnH,:!-_NegzEM "
"NegzEM;NegzEM:;NegzEM?NegzEM/.?!NegzEM;? _NegzEM?,"
":amEuj-!NegzEM-;/NegzEM_ ;?!XBVhxjXnH,!.amEuj,"
"/NegzEM;;/:;NegzEM..? XBVhxjXnH_NegzEM_/XBVhxjXnH.- -"),
['negzem', 'xbvhxjxnh', 'ameuj'])
def test_top_3_words_051(self):
    """Generated regression case; expects ['bbryyier', 'zwxau', 'gpj']."""
self.assertEqual(top_3_words(
"zwXau/?_gPJ :rTPycRemS!;_:/faVj.nBMCetefvg:.::gPJ?QUoHCyP./ "
"nBMCetefvg_;/qnqnVKz,,;!.bBRYyIER . ?_qyZYyNzaJ,"
":gPJ_ _::yzylD "
"/_faVj : ?rTPycRemS ;gPJ:nBMCetefvg!gPJ/mDkwQV "
"!;//nBMCetefvg_?!qnqnVKz_?gPJ /rTPycRemS qnqnVKz;:_/rTPycRemS "
".;:faVj.:?,"
"bBRYyIER!?/?-UoQUbSmN/!;-:bBRYyIER:;.qnqnVKz!bBRYyIER"
"- faVj.?UoQUbSmN!bBRYyIER_?faVj__bBRYyIER:gPJ.;!:gPJ_rTPycRemS"
"-lkSMoKhFoA.!/LNHwJr,,-!,UoQUbSmN!?!/-qnqnVKz?!..,faVj?,-faVj,"
"!!,,faVj.bBRYyIER-,"
";!bBRYyIER.::-UoQUbSmN!?.bBRYyIER:/UoQUbSmN:zwXau- !,"
"_LNHwJr;zwXau zwXau;!_!mDkwQV!qyZYyNzaJ ;._LNHwJr.nBMCetefvg ,"
"?!.gPJ!:?_rTPycRemS;zwXau;;;.:zwXau!.?faVj._?;zwXau;/?zwXau"
"//LNHwJr-_:bBRYyIER!-;.gPJ,.! bBRYyIER?qnqnVKz,"
"_?YkXsRHprr_!UoQUbSmN,.,gPJ.?_bBRYyIER_,gPJ-;.!nBMCetefvg "
";c'Xx.mDkwQV: ?bBRYyIER!_gPJ:_,_zwXau;UoQUbSmN;; "
"LNHwJr?/._QUoHCyP_.; "
"zwXau!;?LNHwJr;.:LNHwJr;;/rTPycRemS/.bBRYyIER "
"rTPycRemS???QUoHCyP.?qnqnVKz;qyZYyNzaJ;??faVj:;,rTPycRemS!;:? "
"cBpUSKAK,__.:LNHwJr qnqnVKz! ,rTPycRemS?.-QUoHCyP?--,zwXau,"
"_rTPycRemS!zwXau.;:,;faVj?. ?;nBMCetefvg; "
"lkSMoKhFoA?ahc'ZP?_._LNHwJr?nBMCetefvg!,!faVj!-,"
"yzylD:bBRYyIER?:;;YkXsRHprr_ -UoQUbSmN_-! bBRYyIER?:,_faVj ,"
"UoQUbSmN_ rTPycRemS;// lkSMoKhFoA?qyZYyNzaJ/?LNHwJr "
"-UoQUbSmN.?zwXau, /mDkwQV?;;?zwXau "
".UoQUbSmN-/:UoQUbSmN;_UoQUbSmN;.!!;bBRYyIER ;:cBpUSKAK_,"
"?..faVj_:gPJ:,bBRYyIER yzylD/zwXau,,"
"LNHwJr-lkSMoKhFoA:qnqnVKz/!LNHwJr_./._qnqnVKz,mDkwQV_:,,"
"/zwXau:: "
".mDkwQV??nBMCetefvg !.?.gPJ,"
"YkXsRHprr?_faVj-zwXau:UoQUbSmN!;!?_qyZYyNzaJ?!//lkSMoKhFoA -/,"
"qnqnVKz_:-:_bBRYyIER/.__bBRYyIER./.- LNHwJr:?/;;nBMCetefvg "
"!c'Xx-,faVj,"
";bBRYyIER;._..bBRYyIER:LNHwJr;_:?nBMCetefvg:?zwXau:_"
"!:!gPJ_-_: c'Xx-UoQUbSmN.-,?-zwXau?_?,gPJ-;_ "
"qnqnVKz_.:qnqnVKz? "
":faVj :;:nBMCetefvg-,,!:gPJ--,,,qyZYyNzaJ,;-.zwXau:,-,,"
"nBMCetefvg?.?qyZYyNzaJ!/qyZYyNzaJ/faVj_! ,LNHwJr.--bBRYyIER:!,"
"gPJ;.;!/bBRYyIER nBMCetefvg.faVj!.;-c'Xx_?zwXau,"
"_-mDkwQV!-QUoHCyP_ qnqnVKz,"
"/.bBRYyIER!rTPycRemS.-;;/lkSMoKhFoA/;; ,zwXau,;?!:qyZYyNzaJ,"
"; yzylD? -gPJ!zwXau,"
";-UoQUbSmN?:/?:qyZYyNzaJ.;!dxNRTJVC_!//-bBRYyIER_,"
"._ UoQUbSmN?:_;qyZYyNzaJ/,_;;bBRYyIER,"
". LNHwJr/QUoHCyP!-nBMCetefvg-yzylD,- UoQUbSmN;LNHwJr-?? "
"-qnqnVKz?-gPJ!? LNHwJr.:_rTPycRemS,:.;nBMCetefvg.;,,"
"/LNHwJr.;;qnqnVKz?;nBMCetefvg!/.qnqnVKz/:.; mDkwQV,qnqnVKz- "
";;;gPJ-.;LNHwJr.; ?-rTPycRemS-LNHwJr! rTPycRemS;.:faVj;-- "
":cBpUSKAK-!,nBMCetefvg_,?qnqnVKz -nBMCetefvg -gPJ!;,"
"yzylD -gPJ,"
"?/zwXau-:?;qyZYyNzaJ_:,qnqnVKz:.-:/UoQUbSmN,,bBRYyIER, "
"/nBMCetefvg!?,/ QUoHCyP_.nBMCetefvg?qnqnVKz "
"_LNHwJr/.gPJ._gPJ-qnqnVKz:bBRYyIER?://_faVj,LNHwJr "
"zwXau/_.yzylD!zwXau:-,-rTPycRemS./"),
['bbryyier', 'zwxau', 'gpj'])
def test_top_3_words_052(self):
self.assertEqual(top_3_words(
"LNfFtuLAhW,-vVmIe.-;tIa!:- vVmIe_ZTDRPOaMe. !/ vVmIe:,;/:JKg "
"?tIa?.vVmIe:/_ vVmIe!?-,;UAItLonUS_ "
";/UAItLonUS-;tIa-vVmIe?;!vVmIe ZTDRPOaMe/,:tIa,"
"! !!UAItLonUS:!:?_tIa-,; -LPZaSDDjr? ,,vVmIe/;JKg; "
".tIa?vVmIe-.!?vVmIe!:_tIa?.?vVmIe?_/ _LPZaSDDjr /tIa/!,"
"_tIa:-/BOLbk .:vVmIe;-:!UAItLonUS-/?.;ZTDRPOaMe.? UAItLonUS "
";tIa,-BOLbk:/_:vVmIe-:vVmIe_..!JKg?-!JKg!;vVmIe__JKg.?_;:vVmIe"
"/:///JKg/LPZaSDDjr-?_-?BOLbk;;ZTDRPOaMe?;:LPZaSDDjr!-!!BOLbk_"
"//tIa?;_ttAdLPxXZ??.-ttAdLPxXZ:ttAdLPxXZ?//BOLbk!!.JKg"
"?:ttAdLPxXZ-;,:_vVmIe!vVmIe,.!-?vVmIe.ttAdLPxXZ?,.UAItLonUS// "
"UAItLonUS,ttAdLPxXZ!JKg_ _UAItLonUS_ ,;,tIa ; "
".vVmIe;:ZTDRPOaMe,"
": JKg/--/ttAdLPxXZ_vVmIe.ZTDRPOaMe JKg,tIa..:-,"
"UAItLonUS_?/-.ttAdLPxXZ?:ZTDRPOaMe!_?.;ZTDRPOaMe::: | |
self.center = center
self.width = width
self.lower = center - width / 2.0
self.upper = center + width / 2.0
self.flux = flux
class SpectralGrid(object):
    """
    A grid of spectra for quick interpolation.
    Attributes
    ----------
    teff_bds : iterable
        The lower and upper bounds of the model temperatures to load.
    logg_bds : iterable
        The lower and upper bounds of the model logg values to load.
    feh_bds : iterable
        The lower and upper bounds of the model [Fe/H] to load.
    wavelength : `~astropy.units.Quantity`
        Wavelengths of the interpolated spectrum.
    fluxes : dict
        The fluxes of the model grid. Sorted by fluxes[teff][logg][feh].
    model_grid : str
        Name of the model grid. Only `phoenix` is currently supported.
    Methods
    -------
    get_spectrum(teff, logg, feh)
        Returns a binned spectrum for the given teff, logg, and feh.
    """
    def __init__(
        self,
        teff_bds,
        logg_bds,
        feh_bds,
        wavelength=None,
        spectral_resolution=None,
        model_grid="phoenix",
        **kwargs,
    ):
        """
        Parameters
        ----------
        teff_bds : iterable
            The lower and upper bounds of the model temperatures to load.
        logg_bds : iterable
            The lower and upper bounds of the model logg values to load.
        feh_bds : iterable
            The lower and upper bounds of the model [Fe/H] to load.
        wavelength : `~astropy.units.Quantity`, optional
            Wavelengths of the interpolated spectrum.
        spectral_resolution : `~astropy.units.Quantity`
            The spectral resolution.
        model_grid : str, optional
            Name of the model grid. Only `phoenix` is currently supported.
        """
        # First check that the model_grid is valid.
        self.model_grid = model_grid.lower()
        if self.model_grid == "phoenix":
            # Grid of effective temperatures
            # (100 K steps to 7000 K, then 200 K steps to 12000 K)
            grid_teffs = np.append(
                np.arange(2300, 7100, 100), np.arange(7200, 12200, 200)
            )
            # Grid of surface gravities
            grid_loggs = np.arange(0.0, 6.5, 0.5)
            # Grid of metallicities
            grid_fehs = np.array([-4.0, -3.0, -2.0, -1.5, -1.0, -0.5, -0.0, +0.5, +1.0])
        else:
            raise NotImplementedError(
                f'"{model_grid}" model grid not found. '
                + "Only PHOENIX models are currently supported."
            )
        # Then ensure that the bounds given are valid.
        # Each requested bound is snapped *outward* to the nearest grid node,
        # so the loaded subgrid always brackets the requested range.
        teff_bds = np.array(teff_bds)
        teff_bds = (
            grid_teffs[grid_teffs <= teff_bds.min()].max(),
            grid_teffs[grid_teffs >= teff_bds.max()].min(),
        )
        self.teff_bds = teff_bds
        logg_bds = np.array(logg_bds)
        logg_bds = (
            grid_loggs[grid_loggs <= logg_bds.min()].max(),
            grid_loggs[grid_loggs >= logg_bds.max()].min(),
        )
        self.logg_bds = logg_bds
        feh_bds = np.array(feh_bds)
        feh_bds = (
            grid_fehs[grid_fehs <= feh_bds.min()].max(),
            grid_fehs[grid_fehs >= feh_bds.max()].min(),
        )
        self.feh_bds = feh_bds
        # Define the values covered in the grid
        subset = np.logical_and(
            grid_teffs >= self.teff_bds[0], grid_teffs <= self.teff_bds[1]
        )
        self.teffs = grid_teffs[subset]
        subset = np.logical_and(
            grid_loggs >= self.logg_bds[0], grid_loggs <= self.logg_bds[1]
        )
        self.loggs = grid_loggs[subset]
        subset = np.logical_and(
            grid_fehs >= self.feh_bds[0], grid_fehs <= self.feh_bds[1]
        )
        self.fehs = grid_fehs[subset]
        # Load the fluxes
        # Nested dict keyed as fluxes[teff][logg][feh] -> flux array.
        fluxes = {}
        for teff in self.teffs:
            fluxes[teff] = {}
            for logg in self.loggs:
                fluxes[teff][logg] = {}
                for feh in self.fehs:
                    spec = Spectrum.from_grid(teff, logg, feh, **kwargs)
                    # Set spectral resolution if specified
                    if spectral_resolution is not None:
                        spec = spec.regularize()
                        spec = spec.set_spectral_resolution(spectral_resolution)
                    # Resample the spectrum to the desired wavelength array
                    if wavelength is not None:
                        spec = spec.resample(wavelength)
                    fluxes[teff][logg][feh] = spec.flux
        self.fluxes = fluxes
        # Save the wavelength array
        # NOTE(review): uses the *last* `spec` from the loop — assumes every
        # grid spectrum shares one wavelength array (guaranteed when
        # `wavelength` is given); confirm for the unresampled case.
        self.wavelength = spec.wavelength
    def get_spectrum(self, teff, logg, feh):
        """
        Parameters
        ----------
        teff : float
            Effective temperature of the model in Kelvin.
        logg : float
            Surface gravity of the model in cgs units.
        feh : float
            [Fe/H] of the model.
        Returns
        -------
        flux : `~astropy.units.Quantity`
            The interpolated flux array.
        """
        # First check that the values are within the grid
        teff_in_grid = self.teff_bds[0] <= teff <= self.teff_bds[1]
        logg_in_grid = self.logg_bds[0] <= logg <= self.logg_bds[1]
        feh_in_grid = self.feh_bds[0] <= feh <= self.feh_bds[1]
        booleans = [teff_in_grid, logg_in_grid, feh_in_grid]
        params = ["teff", "logg", "feh"]
        inputs = [teff, logg, feh]
        ranges = [self.teff_bds, self.logg_bds, self.feh_bds]
        if not all(booleans):
            # build one message listing every out-of-range parameter
            message = "Input values are out of grid range.\n\n"
            for b, p, i, r in zip(booleans, params, inputs, ranges):
                if not b:
                    message += f"\tInput {p}: {i}. Valid range: {r}\n"
            raise ValueError(message)
        # Identify nearest values in grid
        # (flanking pair is (node below-or-equal, node above-or-equal);
        # both entries coincide when the input sits exactly on a grid node)
        flanking_teffs = (
            self.teffs[self.teffs <= teff].max(),
            self.teffs[self.teffs >= teff].min(),
        )
        flanking_loggs = (
            self.loggs[self.loggs <= logg].max(),
            self.loggs[self.loggs >= logg].min(),
        )
        flanking_fehs = (
            self.fehs[self.fehs <= feh].max(),
            self.fehs[self.fehs >= feh].min(),
        )
        # Define the points for interpolation
        # Corner naming: params<t><g><f>, where 0/1 selects the low/high
        # flanking value of teff, logg, feh respectively.
        params000 = (flanking_teffs[0], flanking_loggs[0], flanking_fehs[0])
        params100 = (flanking_teffs[1], flanking_loggs[0], flanking_fehs[0])
        params010 = (flanking_teffs[0], flanking_loggs[1], flanking_fehs[0])
        params110 = (flanking_teffs[1], flanking_loggs[1], flanking_fehs[0])
        params001 = (flanking_teffs[0], flanking_loggs[0], flanking_fehs[1])
        params101 = (flanking_teffs[1], flanking_loggs[0], flanking_fehs[1])
        params011 = (flanking_teffs[0], flanking_loggs[1], flanking_fehs[1])
        params111 = (flanking_teffs[1], flanking_loggs[1], flanking_fehs[1])
        # Interpolate trilinearly
        # https://en.wikipedia.org/wiki/Trilinear_interpolation
        # NOTE(review): `interpolate` is assumed to be a module-level linear
        # interpolation helper defined elsewhere in this file, with signature
        # (value_pair, flanking_pair, target) — confirm (it is NOT
        # scipy.interpolate). The `params.. == params..` guards skip an axis
        # when the target lies exactly on a grid node, avoiding a degenerate
        # (zero-width) interpolation interval.
        if not params000 == params100:
            c000 = self.fluxes[params000[0]][params000[1]][params000[2]]
            c100 = self.fluxes[params100[0]][params100[1]][params100[2]]
            c00 = interpolate([c000, c100], flanking_teffs, teff)
        else:
            c00 = self.fluxes[params000[0]][params000[1]][params000[2]]
        if not params010 == params110:
            c010 = self.fluxes[params010[0]][params010[1]][params010[2]]
            c110 = self.fluxes[params110[0]][params110[1]][params110[2]]
            c10 = interpolate([c010, c110], flanking_teffs, teff)
        else:
            c10 = self.fluxes[params010[0]][params010[1]][params010[2]]
        if not params001 == params101:
            c001 = self.fluxes[params001[0]][params001[1]][params001[2]]
            c101 = self.fluxes[params101[0]][params101[1]][params101[2]]
            c01 = interpolate([c001, c101], flanking_teffs, teff)
        else:
            c01 = self.fluxes[params001[0]][params001[1]][params001[2]]
        if not params011 == params111:
            c011 = self.fluxes[params011[0]][params011[1]][params011[2]]
            c111 = self.fluxes[params111[0]][params111[1]][params111[2]]
            c11 = interpolate([c011, c111], flanking_teffs, teff)
        else:
            c11 = self.fluxes[params011[0]][params011[1]][params011[2]]
        # collapse the logg axis (skip when logg sits on a grid node)
        if not params000 == params010:
            c0 = interpolate([c00, c10], flanking_loggs, logg)
            c1 = interpolate([c01, c11], flanking_loggs, logg)
        else:
            c0 = c00
            c1 = c01
        # collapse the feh axis (skip when feh sits on a grid node)
        if not params000 == params001:
            flux = interpolate([c0, c1], flanking_fehs, feh)
        else:
            flux = c0
        return flux
class BinnedSpectralGrid(object):
"""
A grid of binned spectra for quick interpolation.
Attributes
----------
teff_bds : iterable
The lower and upper bounds of the model temperatures to load.
logg_bds : iterable
The lower and upper bounds of the model logg values to load.
feh_bds : iterable
The lower and upper bounds of the model [Fe/H] to load.
center : `~astropy.units.Quantity`
The centers of the wavelength bins.
width : `~astropy.units.Quantity`
The widths of the wavelength bins.
lower : `~astropy.units.Quantity`
The lower bounds of the wavelength bins.
upper : `~astropy.units.Quantity`
The upper bounds of the wavelength bins.
fluxes : dict
The fluxes of the model grid. Sorted by fluxes[teff][logg][feh].
model_grid : str
Name of the model grid. Only `phoenix` is currently supported.
Methods
-------
get_spectrum(teff, logg, feh)
Returns a binned spectrum for the given teff, logg, and feh.
"""
    def __init__(
        self, teff_bds, logg_bds, feh_bds, center, width, model_grid="phoenix", **kwargs
    ):
        """
        Parameters
        ----------
        teff_bds : iterable
            The lower and upper bounds of the model temperatures to load.
        logg_bds : iterable
            The lower and upper bounds of the model logg values to load.
        feh_bds : iterable
            The lower and upper bounds of the model [Fe/H] to load.
        center : `~astropy.units.Quantity`
            The centers of the wavelength bins.
        width : `~astropy.units.Quantity`
            The widths of the wavelength bins.
        model_grid : str, optional
            Name of the model grid. Only `phoenix` is currently supported.
        """
        # First check that the model_grid is valid.
        self.model_grid = model_grid.lower()
        if self.model_grid == "phoenix":
            # Grid of effective temperatures
            # (100 K steps to 7000 K, then 200 K steps to 12000 K)
            grid_teffs = np.append(
                np.arange(2300, 7100, 100), np.arange(7200, 12200, 200)
            )
            # Grid of surface gravities
            grid_loggs = np.arange(0.0, 6.5, 0.5)
            # Grid of metallicities
            grid_fehs = np.array([-4.0, -3.0, -2.0, -1.5, -1.0, -0.5, -0.0, +0.5, +1.0])
        else:
            raise NotImplementedError(
                f'"{model_grid}" model grid not found. '
                + "Only PHOENIX models are currently supported."
            )
        # Then ensure that the bounds given are valid.
        # Requested bounds are snapped *outward* to the nearest grid nodes so
        # the loaded models always bracket the requested range.
        teff_bds = np.array(teff_bds)
        teff_bds = (
            grid_teffs[grid_teffs <= teff_bds.min()].max(),
            grid_teffs[grid_teffs >= teff_bds.max()].min(),
        )
        self.teff_bds = teff_bds
        logg_bds = np.array(logg_bds)
        logg_bds = (
            grid_loggs[grid_loggs <= logg_bds.min()].max(),
            grid_loggs[grid_loggs >= logg_bds.max()].min(),
        )
        self.logg_bds = logg_bds
        feh_bds = np.array(feh_bds)
        feh_bds = (
            grid_fehs[grid_fehs <= feh_bds.min()].max(),
            grid_fehs[grid_fehs >= feh_bds.max()].min(),
        )
        self.feh_bds = feh_bds
        # Define the values covered in the grid
        subset = np.logical_and(
            grid_teffs >= self.teff_bds[0], grid_teffs <= self.teff_bds[1]
        )
        self.teffs = grid_teffs[subset]
        subset = np.logical_and(
            grid_loggs >= self.logg_bds[0], grid_loggs <= self.logg_bds[1]
        )
        self.loggs = grid_loggs[subset]
        subset = np.logical_and(
            grid_fehs >= self.feh_bds[0], grid_fehs <= self.feh_bds[1]
        )
        self.fehs = grid_fehs[subset]
        # Load the fluxes
        # Bin edges derived once from the requested centers/widths.
        self.center = center
        self.width = width
        self.lower = center - width / 2.0
        self.upper = center + width / 2.0
        fluxes = {}
        for teff in self.teffs:
            fluxes[teff] = {}
            for logg in self.loggs:
                fluxes[teff][logg] = {}
                for feh in self.fehs:
                    # bin each grid spectrum onto the requested wavelength
                    # bins; only the binned flux is stored
                    bs = Spectrum.from_grid(teff, logg, feh, **kwargs).bin(
                        center, width
                    )
                    fluxes[teff][logg][feh] = bs.flux
        self.fluxes = fluxes
def get_spectrum(self, teff, logg, feh):
"""
| |
import dgl
import torch
import math
import time
import numpy as np
import dgl.nn.pytorch as dglnn
import torch.nn as nn
from utils import *
from metrics import *
class Perceptron(torch.nn.Module):
    """Single linear layer with input dropout and optional ReLU / BatchNorm.

    Computation order: dropout -> x @ W + b -> (optional) ReLU ->
    (optional) BatchNorm1d.

    Parameters
    ----------
    in_dim : int
        Input feature dimension.
    out_dim : int
        Output feature dimension.
    dropout : float, optional
        Dropout probability applied to the *input*, default 0.
    norm : bool, optional
        If True, apply BatchNorm1d to the output, default False.
    act : bool, optional
        If True, apply ReLU before the (optional) norm, default True.
    """
    def __init__(self, in_dim, out_dim, dropout=0, norm=False, act=True):
        super(Perceptron, self).__init__()
        self.weight = torch.nn.Parameter(torch.empty(in_dim, out_dim))
        torch.nn.init.xavier_uniform_(self.weight.data)
        self.bias = torch.nn.Parameter(torch.empty(out_dim))
        torch.nn.init.zeros_(self.bias.data)
        # self.norm is either False (disabled) or the BatchNorm1d module;
        # `if self.norm:` in forward() works because a Module is truthy.
        self.norm = norm
        if norm:
            self.norm = torch.nn.BatchNorm1d(out_dim, eps=1e-9, track_running_stats=True)
        self.dropout = torch.nn.Dropout(dropout)
        self.act = act
    def forward(self, f_in):
        """Map (batch, in_dim) -> (batch, out_dim)."""
        f_in = self.dropout(f_in)
        f_in = torch.mm(f_in, self.weight) + self.bias
        if self.act:
            f_in = torch.nn.functional.relu(f_in)
        if self.norm:
            f_in = self.norm(f_in)
        return f_in
    def reset_parameters(self):
        """Re-initialize weight (Xavier uniform) and bias (zeros).

        Bug fix: the original definition was missing ``self`` and raised
        ``TypeError`` whenever called on an instance.
        """
        torch.nn.init.xavier_uniform_(self.weight.data)
        torch.nn.init.zeros_(self.bias.data)
class TimeEnc(nn.Module):
    """Functional time encoding (TGAT-style).

    The time offset is recovered from a synthetic node id: nid encodes
    `entity + timestamp * nume`, so `nid // nume` yields the timestamp.
    The offset is projected onto `dim_t` learned frequencies and passed
    through cosine.
    """
    def __init__(self, dim_t, nume):
        super(TimeEnc, self).__init__()
        self.dim_t = dim_t
        self.nume = nume
        # log-spaced frequencies 10^0 .. 10^-9, trainable
        freqs = 1.0 / np.power(10.0, np.linspace(0, 9, dim_t))
        self.basis_freq = torch.nn.Parameter(torch.from_numpy(freqs).float())
        self.phase = torch.nn.Parameter(torch.zeros(dim_t).float())
    def forward(self, nid, ts):
        # time delta between query time and the timestamp baked into nid
        delta_t = ts - nid // self.nume
        enc = delta_t.view(-1, 1) * self.basis_freq + self.phase
        return torch.cos(enc)
class EmbModule(nn.Module):
    """Temporal relational GNN producing embeddings for (entity, timestamp) pairs.

    Stacks `deepth` layers of LayerNorm + relation-typed 4-head GAT convs
    (one conv per relation direction plus a self conv) over DGL-sampled
    neighborhood blocks, with a learned time encoding concatenated at each
    layer. Runs on 'cuda:0' (hard-coded below).
    """
    def __init__(self, dim_in, dim_out, dim_t, numr, nume, g, dropout=0, deepth=2, sampling=None, granularity=1, r_limit=None):
        super(EmbModule, self).__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dim_t = dim_t  # time-encoding dimension
        self.numr = numr    # number of relations
        self.nume = nume    # number of entities
        self.deepth = deepth
        self.g = g          # full DGL graph sampled from at forward time
        self.granularity = granularity  # timestamp bucketing for synthetic node ids
        mods = dict()
        mods['time_enc'] = TimeEnc(dim_t, nume)
        mods['entity_emb'] = nn.Embedding(nume, dim_in)
        if r_limit is None:
            r_limit = numr
        for l in range(self.deepth):
            mods['norm' + str(l)] = nn.LayerNorm(dim_in + dim_t)
            # mods['dropout' + str(l)] = nn.Dropout(dropout)
            conv_dict = dict()
            # one GAT per relation direction: 'r*' forward, '-r*' inverse;
            # 4 heads of (dim_out // 4) concatenate back to dim_out
            for r in range(r_limit):
                conv_dict['r' + str(r)] = dglnn.GATConv(dim_in + dim_t, dim_out // 4, 4, feat_drop=dropout, attn_drop=dropout, residual=False)
                conv_dict['-r' + str(r)] = dglnn.GATConv(dim_in + dim_t, dim_out // 4, 4, feat_drop=dropout, attn_drop=dropout, residual=False)
                conv_dict['self'] = dglnn.GATConv(dim_in + dim_t, dim_out // 4, 4, feat_drop=dropout, attn_drop=dropout, residual=False)
            # conv_dict['r' + str(r)] = dglnn.GraphConv(dim_in + dim_t, dim_out)
            # conv_dict['-r' + str(r)] = dglnn.GraphConv(dim_in + dim_t, dim_out)
            # conv_dict['self'] = dglnn.GraphConv(dim_in + dim_t, dim_out)
            mods['conv' + str(l)] = dglnn.HeteroGraphConv(conv_dict, aggregate='mean')
            mods['act' + str(l)] = nn.ReLU()
            dim_in = dim_out  # deeper layers consume the previous layer's output
        self.mods = nn.ModuleDict(mods)
        if sampling is not None:
            # e.g. sampling='10/10' -> per-layer neighbor fanouts
            fanouts = [int(d) for d in sampling.split('/')]
            self.sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts = fanouts)
        else:
            self.sampler = dgl.dataloading.MultiLayerFullNeighborSampler(self.deepth)
    def forward(self, ent, hist_ts, ts, log=True, phi_offset=0):
        """Embed each entity in `ent` at every history timestamp in `hist_ts`.

        Returns shape (len(ent), len(hist_ts) * dim_out) after flattening.
        """
        tss = time.time()
        # synthetic node id = entity + (timestamp // granularity) * nume
        offset = (hist_ts // self.granularity) * self.nume
        ent = ent.repeat_interleave(offset.shape[0]).view(ent.shape[0], -1).cpu()
        root = torch.flatten(ent + offset)
        # return self.mods['entity_emb'](torch.remainder(root.cuda(), self.nume))
        # dgl sampler need input to be unique
        root, root_idx = torch.unique(root, sorted=True, return_inverse=True)
        blocks = self.sampler.sample_blocks(self.g, root)
        blk = [blk.to('cuda:0') for blk in blocks]
        if log:
            get_writer().add_scalar('time_sampling', time.time() - tss, get_global_step('time_sampling'))
            tss = time.time()
        # print(root.shape[0], blk[0].srcdata['_ID'].shape[0])
        # initial features: entity embedding of the underlying entity id
        h = self.mods['entity_emb'](torch.remainder(blk[0].srcdata['_ID'], self.nume))
        for l in range(self.deepth):
            # concat per-node time encoding before each conv layer
            phi = self.mods['time_enc'](blk[l].srcdata['_ID'], ts + phi_offset)
            h = torch.cat([h, phi], dim=1)
            h = self.mods['norm' + str(l)](h)
            # h = self.mods['dropout' + str(l)](h)
            h = self.mods['conv' + str(l)](blk[l], {'entity': h})['entity']
            h = h.view(h.shape[0], -1)  # flatten the 4 attention heads
            h = self.mods['act' + str(l)](h)
        # scatter unique-node outputs back to the (entity, hist_ts) layout
        h = h[root_idx].view(-1, offset.shape[0], h.shape[-1])
        if log:
            get_writer().add_scalar('time_emb', time.time() - tss, get_global_step('time_emb'))
        return h.view(h.shape[0], -1)
class AttentionLayer(torch.nn.Module):
    """Apply pre-computed multi-head attention weights to value projections.

    Each head has its own dropout + value projection (out_dim // h_att wide);
    head outputs are concatenated and passed through ReLU.
    """
    def __init__(self, in_dim, out_dim, dropout=0, h_att=8):
        super(AttentionLayer, self).__init__()
        self.h_att = h_att
        mods = dict()
        for h in range(h_att):
            mods[f'dropout{h}'] = nn.Dropout(p=dropout)
            mods[f'w_v_{h}'] = nn.Linear(in_dim, out_dim // h_att)
        self.mods = nn.ModuleDict(mods)
    def forward(self, hid, adj):
        # adj: one (n, n) attention matrix per head
        heads = []
        for h in range(self.h_att):
            v = self.mods[f'w_v_{h}'](self.mods[f'dropout{h}'](hid))
            heads.append(torch.matmul(adj[h], v))
        return torch.nn.functional.relu(torch.cat(heads, dim=1))
class Copy(torch.nn.Module):
    """Copy module used in AAAI'21 CyGNet.

    Scores subject/object candidates from (embedding, relation-embedding)
    pairs, then masks out entities not present in the copy vocabulary with a
    large negative constant (-100).
    """
    def __init__(self, in_dim, dim_r, nume, numr, dropout=0):
        super(Copy, self).__init__()
        # insertion order preserved so parameter creation matches the
        # original layout
        self.mods = nn.ModuleDict({
            'subject_relation_emb': nn.Embedding(numr, dim_r),
            'object_relation_emb': nn.Embedding(numr, dim_r),
            'object_classifier': Perceptron(in_dim + dim_r, nume, act=False, dropout=dropout),
            'subject_classifier': Perceptron(in_dim + dim_r, nume, act=False, dropout=dropout),
        })
    def forward(self, sub_emb, obj_emb, rel, copy_mask):
        sub_rel = self.mods['subject_relation_emb'](rel)
        obj_rel = self.mods['object_relation_emb'](rel)
        # subjects are predicted from the object side and vice versa
        sub_scores = self.mods['subject_classifier'](torch.cat([obj_emb, obj_rel], 1))
        obj_scores = self.mods['object_classifier'](torch.cat([sub_emb, sub_rel], 1))
        raw = torch.cat([sub_scores, obj_scores], dim=0)
        # entities outside the copy mask keep a -100 score
        masked = torch.tensor([-100.0]).cuda().repeat(raw.shape[0], raw.shape[1])
        masked[copy_mask] = raw[copy_mask]
        half = masked.shape[0] // 2
        return masked[:half], masked[half:]
class Attention(torch.nn.Module):
    """Compute per-head attention weight matrices (no value projection).

    Each head projects the input to queries and keys, scores q·kᵀ through a
    leaky ReLU, and row-normalizes with softmax. Returns a list of one
    (n, n) attention matrix per head.
    """
    def __init__(self, in_dim, out_dim, h_att=8):
        super(Attention, self).__init__()
        self.h_att = h_att
        mods = dict()
        for h in range(h_att):
            mods[f'w_q_{h}'] = nn.Linear(in_dim, out_dim // h_att)
            mods[f'w_k_{h}'] = nn.Linear(in_dim, out_dim // h_att)
            mods[f'softmax{h}'] = nn.Softmax(dim=1)
        self.mods = nn.ModuleDict(mods)
    def forward(self, hid):
        # trick from Transformer paper: to avoid gradient vanishing.
        # var_norm = math.sqrt(self.mods['w_k_0'].weight.shape[-1])
        scores = []
        for h in range(self.h_att):
            q = self.mods[f'w_q_{h}'](hid)
            k = self.mods[f'w_k_{h}'](hid)
            logits = torch.nn.functional.leaky_relu(q @ k.transpose(-1, -2), negative_slope=0.1)
            scores.append(self.mods[f'softmax{h}'](logits))
        return scores
class SelfAttention(torch.nn.Module):
    """Multi-head self-attention over a fixed-length history axis.

    Input (batch, in_dim) is viewed as (batch, hist_l, in_dim // hist_l);
    attention matrices come from the sibling `Attention` module, values get
    per-head dropout + linear projection, heads are concatenated, passed
    through ReLU and re-flattened.
    """
    def __init__(self, in_dim, hist_l, emb_dim, h_att=8, dropout=0):
        super(SelfAttention, self).__init__()
        self.emb_dim = emb_dim
        self.in_dim = in_dim
        self.h_att = h_att
        self.hist_l = hist_l
        mods = {'attention': Attention(in_dim // hist_l, emb_dim * h_att, h_att=h_att)}
        for h in range(h_att):
            mods['dropout' + str(h)] = nn.Dropout(p=dropout)
            mods['w_v_' + str(h)] = nn.Linear(in_dim // hist_l, emb_dim // h_att)
        self.mods = nn.ModuleDict(mods)
    def forward(self, hid):
        seq = hid.view(hid.shape[0], self.hist_l, -1)
        att = self.mods['attention'](seq)
        heads = [
            torch.matmul(att[h], self.mods['w_v_' + str(h)](self.mods['dropout' + str(h)](seq)))
            for h in range(self.h_att)
        ]
        out = torch.cat(heads, dim=-1)
        return torch.nn.functional.relu(out).view(out.shape[0], -1)
class Conv(torch.nn.Module):
    """Summarize history embeddings with a single 3x3 Conv2d.

    Input (batch, in_dim) is viewed as a 1-channel image of shape
    (steps, emb_dim); dropout, then conv (no padding, so each spatial dim
    shrinks by 2), flatten, ReLU.
    """
    def __init__(self, in_dim, emb_dim, dropout=0):
        super(Conv, self).__init__()
        self.emb_dim = emb_dim
        self.mods = nn.ModuleDict({
            'conv': torch.nn.Conv2d(1, 1, kernel_size=3, stride=1),
            'dropout': torch.nn.Dropout(dropout),
        })
    def forward(self, hid):
        x = hid.view(hid.shape[0], 1, -1, self.emb_dim)
        x = self.mods['conv'](self.mods['dropout'](x))
        return torch.nn.functional.relu(x.view(x.shape[0], -1))
class RNN(nn.Module):
    """2-layer vanilla RNN summarizer: returns the last-step output.

    Input (batch, in_dim) is viewed as (batch, steps, emb_dim).
    """
    def __init__(self, in_dim, emb_dim, out_dim, dropout=0):
        super(RNN, self).__init__()
        self.emb_dim = emb_dim
        self.mods = nn.ModuleDict({
            'rnn': torch.nn.RNN(emb_dim, hidden_size=out_dim, num_layers=2, batch_first=True, dropout=dropout),
        })
    def forward(self, hid):
        seq = hid.view(hid.shape[0], -1, self.emb_dim)
        out, _hn = self.mods['rnn'](seq)
        return out[:, -1, :]
class LSTM(nn.Module):
    """2-layer LSTM summarizer: returns the last-step output.

    Input (batch, in_dim) is viewed as (batch, steps, emb_dim).
    """
    def __init__(self, in_dim, emb_dim, out_dim, dropout=0):
        super(LSTM, self).__init__()
        self.emb_dim = emb_dim
        self.mods = nn.ModuleDict({
            'lstm': torch.nn.LSTM(emb_dim, hidden_size=out_dim, num_layers=2, batch_first=True, dropout=dropout),
        })
    def forward(self, hid):
        seq = hid.view(hid.shape[0], -1, self.emb_dim)
        out, _state = self.mods['lstm'](seq)
        return out[:, -1, :]
class GRU(nn.Module):
    """2-layer GRU summarizer: returns the last-step output.

    Input (batch, in_dim) is viewed as (batch, steps, emb_dim).
    """
    def __init__(self, in_dim, emb_dim, out_dim, dropout=0):
        super(GRU, self).__init__()
        self.emb_dim = emb_dim
        self.mods = nn.ModuleDict({
            'gru': torch.nn.GRU(emb_dim, hidden_size=out_dim, num_layers=2, batch_first=True, dropout=dropout),
        })
    def forward(self, hid):
        seq = hid.view(hid.shape[0], -1, self.emb_dim)
        out, _hn = self.mods['gru'](seq)
        return out[:, -1, :]
class FixStepModel(torch.nn.Module):
    def __init__(self, emb_conf, gen_conf, train_conf, g, nume, numr, step, s_dist=None, o_dist=None):
        """Build the embedding module, copy module and generation stack.

        Config dicts: `emb_conf` configures EmbModule, `gen_conf` the
        generation layers (dims/arch/att_head/history are '-' or space
        separated strings), `train_conf` optimizer/dropout settings.
        Runs on CUDA (`.cuda()` below) — GPU required.
        """
        super(FixStepModel, self).__init__()
        # self.copy = gen_conf['copy']
        self.emb_dim = emb_conf['dim']
        self.gen_dim = [int(d) for d in gen_conf['dim'].split('-')]
        self.gen_arch = gen_conf['arch'].split('-')
        self.gen_att_h = [int(h) for h in gen_conf['att_head'].split('-')]
        self.gen_l = len(self.gen_dim)
        self.gen_hist = torch.tensor([int(x) for x in gen_conf['history'].split()]).cpu()
        self.inf_step = step
        self.train_conf = train_conf
        # optional loss normalization by subject/object frequency distributions
        self.norm_loss = False
        if 'norm_loss' in train_conf:
            if train_conf['norm_loss']:
                self.norm_loss = True
                self.s_dist = torch.from_numpy(s_dist).cuda()
                self.o_dist = torch.from_numpy(o_dist).cuda()
        mods = dict()
        # optional (frozen) time embedding vector appended to classifier input
        self.time_emb = False
        if 'dim_t' in gen_conf:
            self.time_emb = True
            self.time_emb_vec = torch.nn.Parameter(torch.Tensor(1, gen_conf['dim_t']), requires_grad=False)
            torch.nn.init.xavier_uniform_(self.time_emb_vec, gain=torch.nn.init.calculate_gain('relu'))
        r_limit = None if not 'r_limit' in emb_conf else emb_conf['r_limit']
        mods['emb'] = EmbModule(emb_conf['dim_e'], emb_conf['dim'], emb_conf['dim_t'], numr, nume, g, train_conf['dropout'], emb_conf['layer'], sampling=emb_conf['sample'], granularity=emb_conf['granularity'], r_limit=r_limit)
        # if self.copy > 0:
        mods['copy'] = Copy(self.emb_dim, gen_conf['dim_r'], nume, numr, dropout=train_conf['dropout'])
        mods['subject_relation_emb'] = nn.Embedding(numr, gen_conf['dim_r'])
        mods['object_relation_emb'] = nn.Embedding(numr, gen_conf['dim_r'])
        # generation stack: one norm + configurable layer per entry in gen_dim
        in_dim = emb_conf['dim'] * self.gen_hist.shape[0]
        for arch, out_dim, att_h, l in zip(self.gen_arch, self.gen_dim, self.gen_att_h, list(range(self.gen_l))):
            mods['norm_' + str(l)] = nn.LayerNorm(in_dim)
            if arch == 'dense':
                mods['layer_' + str(l)] = Perceptron(in_dim, out_dim, dropout=train_conf['dropout'])
            elif arch == 'selfatt':
                mods['layer_' + str(l)] = SelfAttention(in_dim, self.gen_hist.shape[0], out_dim // self.gen_hist.shape[0], att_h, dropout=train_conf['dropout'])
            elif arch == 'conv':
                mods['layer_' + str(l)] = Conv(in_dim, emb_conf['dim'], dropout=train_conf['dropout'])
            elif arch == 'rnn':
                mods['layer_' + str(l)] = RNN(in_dim, emb_conf['dim'], out_dim, dropout=train_conf['dropout'])
            elif arch == 'lstm':
                mods['layer_' + str(l)] = LSTM(in_dim, emb_conf['dim'], out_dim, dropout=train_conf['dropout'])
            elif arch == 'gru':
                mods['layer_' + str(l)] = GRU(in_dim, emb_conf['dim'], out_dim, dropout=train_conf['dropout'])
            else:
                raise NotImplementedError
            in_dim = out_dim
        # classifiers consume all layer outputs (residual concat) + relation
        # embedding (+ optional time embedding); name 'rediual' is a typo for
        # 'residual', kept to avoid touching code in this doc pass
        rediual_dim = sum(self.gen_dim)
        dim_t = 0 if not self.time_emb else gen_conf['dim_t']
        mods['object_classifier'] = Perceptron(rediual_dim + gen_conf['dim_r'] + dim_t, nume, act=False, dropout=train_conf['dropout'])
        mods['subject_classifier'] = Perceptron(rediual_dim + gen_conf['dim_r'] + dim_t, nume, act=False, dropout=train_conf['dropout'])
        self.mods = nn.ModuleDict(mods)
        self.loss_fn = nn.CrossEntropyLoss(reduction='none')
        self.copy_loss_fn = nn.CrossEntropyLoss(reduction='none')
        # separate optimizer for the copy sub-network (shares lr/decay)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=train_conf['lr'], weight_decay=train_conf['weight_decay'], amsgrad=False)
        self.copy_optimizer = torch.optim.Adam(self.mods['copy'].parameters(), lr=train_conf['lr'], weight_decay=train_conf['weight_decay'], amsgrad=False)
        self._deb=0
def reset_gen_parameters(self):
# reset parameters for generation network and optimizers
for l in range(self.gen_l):
for m in [self.mods['norm_' + str(l)]]:
if hasattr(m, 'reset_parameters'):
m.reset_parameters()
for | |
tbl = DialogTableWidget(
df=pd.DataFrame(columns=cols),
col_widths=col_widths,
name=name,
scroll_bar=True,
**kw)
else:
# use full TableView for filters/line highlighting etc
from smseventlog.gui import tables as tbls
if query is None:
raise RuntimeError('Must pass query obj if not using simple table')
self.query = query
tbl = tbls.TableView(parent=self, default_headers=cols, editable=editable)
self.v_layout.addWidget(tbl)
add_okay_cancel(dlg=self, layout=self.v_layout)
f.set_self(vars())
if _show:
self.load_table(df=df)
if maximized:
self.showMaximized()
def load_table(self, df):
tbl = self.tbl
tbl.display_data(df, resize_cols=self.resize_cols)
tbl.setFocus()
self.adjustSize()
    def add_buttons(self, *btns, func=None):
        """Add button widgets to top of table
        Parameters
        ----------
        *btns : ff.FormFields
            button widgets to add
        func : callable, optional
            function to trigger on value changed, default None
            - NOTE could make this a dict of name : func
        """
        btn_controls = QHBoxLayout()
        for item in btns:
            # add trigger function
            if not func is None:
                item.changed.connect(func)
            btn_controls.addWidget(item)
        btn_controls.addStretch(1)  # force buttons to right side
        self.v_layout.insertLayout(0, btn_controls)
        # NOTE(review): f.set_self(vars()) presumably copies locals onto self,
        # so local names here (btn_controls etc.) are load-bearing — confirm
        # before renaming anything in this method.
        f.set_self(vars())
class ACInspectionsDialog(TableDialog):
    """Maximized dialog listing AC motor inspections (full TableView)."""
    name = 'ACInspections'
    def __init__(self, parent=None, **kw):
        query = qr.ACMotorInspections(theme='dark')
        df = query.get_df()
        super().__init__(
            parent=parent,
            df=df,
            query=query,
            simple_table=False,
            window_title='AC Motor Inspections',
            maximized=True,
            **kw)
        # cols need to change when sorted
        # NOTE this isn't super dry, could define this somewhere else?
        self.tbl.mcols['sort_filter'] = ('Unit', 'fc_number_next')
        self.load_table(df=df)
class UnitSMRDialog(TableDialog):
    """Show SMR history per unit"""
    def __init__(self, parent=None, unit=None, **kw):
        cols = dict(Unit=60, DateSMR=90, SMR=90)
        super().__init__(parent=parent, cols=cols, simple_table=True, window_title='Unit SMR History', **kw)
        df_unit = db.get_df_unit()
        minesite = gbl.get_minesite()
        # unit picker restricted to the current minesite
        cb_unit = ff.ComboBox()
        items = f.clean_series(df_unit[df_unit.MineSite == minesite].Unit)
        cb_unit.set_items(items)
        # date range defaults to the last 60 days
        d = dt.now()
        de_lower = ff.DateEdit(date=d + delta(days=-60))
        de_upper = ff.DateEdit(date=d)
        self.add_buttons(cb_unit, de_lower, de_upper, func=self.load_table)
        # NOTE(review): set_self(vars()) presumably exposes locals as
        # attributes (self.cb_unit etc. used in load_table) — local names
        # here are load-bearing
        f.set_self(vars())
        if not unit is None:
            cb_unit.val = unit
            self.load_table()
    def load_table(self):
        """Reload table data when unit or dates change"""
        query = qr.UnitSMR(
            unit=self.cb_unit.val,
            d_rng=(self.de_lower.val, self.de_upper.val))
        df = query.get_df() \
            .sort_values(by='DateSMR', ascending=False)
        super().load_table(df=df)
class UnitOpenFC(TableDialog):
    """Show table widget of unit's open FCs"""
    def __init__(self, unit, parent=None):
        cols = {
            'FC Number': 80,
            'Type': 40,
            'Subject': 300,
            'ReleaseDate': 90,
            'ExpiryDate': 90,
            'Age': 60,
            'Remaining': 70}
        super().__init__(parent=parent, cols=cols, name='fc_table', window_title='Open FCs')
        query = qr.FCOpen()
        df_fc = db.get_df_fc()
        df_unit = db.get_df_unit()
        minesite = gbl.get_minesite()
        # btn_controls = QHBoxLayout()
        cb_minesite = ff.ComboBox(items=db.get_list_minesite(), default=minesite)
        cb_unit = ff.ComboBox()
        self.add_buttons(cb_minesite, cb_unit)
        # NOTE(review): set_self(vars()) presumably exposes locals as
        # attributes (self.cb_unit, self.df_fc etc. used below) — confirm
        f.set_self(vars())
        self.load_table(unit=unit)
        self.set_units_list()
        cb_unit.val = unit
        # signals connected *after* initial load to avoid double-loading
        cb_minesite.currentIndexChanged.connect(self.set_units_list)
        cb_unit.currentIndexChanged.connect(self._load_table)
    def set_units_list(self):
        """Change units list when minesite changes"""
        cb_unit, cb_minesite, df = self.cb_unit, self.cb_minesite, self.df_unit
        items = f.clean_series(df[df.MineSite == cb_minesite.val].Unit)
        cb_unit.set_items(items)
    def _load_table(self):
        """Load table from cb_unit value"""
        self.load_table(unit=self.cb_unit.val)
    def load_table(self, unit):
        """Reload table data when unit changes"""
        tbl, df, query, layout = self.tbl, self.df_fc, self.query, self.v_layout
        df = df.pipe(query.df_open_fc_unit, unit=unit)
        super().load_table(df=df)
class Parts(TableDialog):
    """Dialog listing parts, optionally filtered to a unit's model base."""
    def __init__(self, unit: str, parent=None):
        cols = {
            'PartNo': 100,
            'PartName': 400,
            'Model': 80}
        query = qr.Parts()
        super().__init__(
            parent=parent,
            cols=cols,
            name='parts',
            window_title='Parts',
            query=query,
            simple_table=False)
        df = db.get_df_parts()
        # restrict to the unit's model base when a unit is given
        if not unit is None:
            model_base = db.get_unit_val(unit=unit, field='ModelBase')
            df = df.query('ModelBase == @model_base')
        # cb_part_no = ff.ComboBox(items=f.clean_series(df.PartNo))
        # cb_part_name = ff.ComboBox(items=f.clean_series(df.PartName))
        # cb_model = ff.ComboBox(items=f.clean_series(df.Model))
        # self.add_buttons(cb_part_no, cb_part_name, cb_model)
        # NOTE(review): set_self(vars()) presumably copies locals onto self
        f.set_self(vars())
        self.load_table(df=df)
class DetailsView(QDialog):
    """Read/write key-value details view of a single record.

    Renders *df* (one value column, field names as index) in a QTableWidget;
    cell edits are written straight back to the database.
    """
    def __init__(self, parent=None, df=None):
        super().__init__(parent)
        self.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)
        self.setWindowTitle('Details View')
        self.setMinimumSize(QSize(800, 1000))
        tbl = self.create_table(df=df)
        v_layout = QVBoxLayout(self)
        v_layout.addWidget(tbl)
        add_okay_cancel(dlg=self, layout=v_layout)
        # NOTE(review): set_self(vars()) presumably exposes locals as
        # attributes (self.df, self.tbl, self.parent used in onCellChanged)
        f.set_self(vars())
    def create_table(self, df):
        """Build and return the QTableWidget populated from *df*."""
        tbl = QTableWidget()
        # tbl.setSizeAdjustPolicy(QAbstractScrollArea.SizeAdjustPolicy.AdjustToContents)
        tbl.setFixedSize(QSize(800, 1000))
        tbl.setColumnWidth(0, 200)
        tbl.setRowCount(df.shape[0])
        tbl.setColumnCount(df.shape[1])
        tbl.setHorizontalHeaderLabels(list(df.columns))
        tbl.setVerticalHeaderLabels(list(df.index))
        tbl.horizontalHeader().setStretchLastSection(True)
        df_array = df.values
        for row in range(df.shape[0]):
            for col in range(df.shape[1]):
                val = df_array[row, col]
                # None renders as an empty cell
                val = str(val) if not val is None else ''
                tbl.setItem(row, col, QTableWidgetItem(val))
        tbl.resizeRowsToContents()
        # connected after population so initial setItem calls don't trigger
        # database writes? NOTE(review): cellChanged is connected *after*
        # filling, which avoids writes during setup — confirm intent
        tbl.cellChanged.connect(self.onCellChanged)
        return tbl
    @pyqtSlot(int, int)
    def onCellChanged(self, irow, icol):
        """Persist an edited cell value to the database (no-op in read-only mode)."""
        if gbl.check_read_only():
            return
        df, parent = self.df, self.parent
        val = self.tbl.item(irow, icol).text()
        row, col = df.index[irow], df.columns[icol]
        # update database
        dbtable = parent.get_dbtable(header=row)  # transposed table
        db_row = dbt.Row(df=df, col='Value', dbtable=dbtable, title=parent.title)
        # numeric strings are stored as floats
        if f.isnum(val):
            val = float(val)
        db_row.update_single(val=val, header=row)
class FailureReport(BaseDialog):
"""
Dialog to select pictures, and set cause/correction text to create pdf failure report.
"""
    def __init__(
            self,
            parent: QWidget = None,
            p_start: Path = None,
            text: dict = None,
            unit: str = None,
            e: dbt.SQLAQuery = None):
        """Build the failure-report dialog.

        Parameters
        ----------
        parent : QWidget, optional
            Parent widget.
        p_start : Path, optional
            Starting directory for the picture file dialog; falls back to
            the desktop when missing or nonexistent.
        text : dict, optional
            Default text per textbox name (complaint/cause/correction/details).
        unit : str, optional
            Unit number (used for oil-sample component lookup).
        e : dbt.SQLAQuery, optional
            Event row backing the report.
        """
        super().__init__(parent=parent, window_title='Create Failure Report')
        # self.resize(QSize(800, 1000))
        self.setSizeGripEnabled(True)
        v_layout = self.v_layout
        self.parent = parent
        text_fields = {}
        if text is None:
            text = {}  # default text for text fields
        if p_start is None:
            p_start = cf.desktop
        elif not p_start.exists():
            # warn but continue with the desktop as a safe fallback
            self.update_statusbar(f'Couldn\'t find event images path: {p_start}', warn=True)
            p_start = cf.desktop
        # create file dialog to select images
        file_dlg = FileDialogPreview(directory=p_start, standalone=False, parent=self)
        v_layout.addWidget(file_dlg)
        add_linesep(v_layout)
        # word/pdf radio buttons
        bg1 = QButtonGroup(self)
        btn_pdf = QRadioButton('PDF', self)
        btn_pdf.setChecked(True)  # PDF is the default report type
        btn_word = QRadioButton('Word', self)
        bg1.addButton(btn_pdf)
        bg1.addButton(btn_word)
        # NOTE(review): set_self(vars()) presumably copies locals onto self;
        # called here so add_textbox below can use self.text/self.text_fields
        f.set_self(vars())
        names = ['complaint', 'cause', 'correction', 'details']
        self.add_textbox(names=names)
        add_linesep(v_layout)
        # oil samples
        oil_form = QFormLayout()
        oil_form.setLabelAlignment(Qt.AlignmentFlag.AlignLeft)
        oil_form.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.AllNonFixedFieldsGrow)
        # oil_form.setMax
        oil_layout_comp = QHBoxLayout()
        oil_box = ff.ComboBox()
        oil_box.setFixedSize(QSize(300, oil_box.sizeHint().height()))
        oil_cb = ff.CheckBox(checked=False)
        oil_cb.stateChanged.connect(self.toggle_oil_components)
        oil_box.setEnabled(False)  # disabled until the checkbox is ticked
        oil_layout_comp.addWidget(oil_cb)
        oil_layout_comp.addWidget(oil_box)
        # oil_layout_comp.addStretch(1)
        oil_form.addRow(QLabel('Component:'), oil_layout_comp)
        # oil date
        oil_layout_date = QHBoxLayout()
        d = dt.now() + delta(days=-365)  # default lower bound: one year back
        oil_date = ff.DateEdit(date=d, enabled=False)
        oil_date_cb = ff.CheckBox(checked=False, enabled=False)
        oil_date_cb.stateChanged.connect(self.toggle_oil_date)
        oil_layout_date.addWidget(oil_date_cb)
        oil_layout_date.addWidget(oil_date)
        # oil_layout_date.addStretch(1)
        oil_form.addRow(QLabel('Date Lower:'), oil_layout_date)
        oil_h_layout = QHBoxLayout()
        oil_h_layout.addLayout(oil_form)
        oil_h_layout.addStretch(1)
        v_layout.addWidget(QLabel('Oil Samples'))
        v_layout.addLayout(oil_h_layout)
        # v_layout.addLayout(oil_layout)
        # v_layout.addLayout(oil_layout_date)
        add_linesep(v_layout)
        # PLM - NOTE lots of duplication here (I'm lazy)
        plm_form = QFormLayout()
        plm_form.setLabelAlignment(Qt.AlignmentFlag.AlignLeft)
        plm_form.setFieldGrowthPolicy(QFormLayout.FieldGrowthPolicy.AllNonFixedFieldsGrow)
        plm_cb = ff.CheckBox(checked=False)
        plm_form.addRow(QLabel('PLM Report:'), plm_cb)
        v_layout.addLayout(plm_form)
        add_linesep(v_layout)
        btn_layout = QHBoxLayout()
        btn_layout.addWidget(QLabel('Report type:'))
        btn_layout.addWidget(btn_pdf)
        btn_layout.addWidget(btn_word)
        btn_layout.addStretch(1)
        v_layout.addLayout(btn_layout)
        add_okay_cancel(dlg=self, layout=v_layout)
        # second set_self picks up widgets created since the first call
        f.set_self(vars())
        # TODO Faults?
    @classmethod
    def example(cls, uid: int = 163093319526):
        """Build and show a FailureReport from an example event row (dev helper)."""
        from smseventlog import eventfolders as efl
        e = dbt.Row.example(uid=uid)
        ef = efl.EventFolder.example(e=e)
        dlg = cls(p_start=ef.p_pics, unit=e.Unit, e=e)
        dlg.exec()
        return dlg
def toggle_oil_components(self, state):
    """Enable/disable the oil component combobox to follow the oil checkbox."""
    box = self.oil_box
    is_checked = Qt.CheckState(state) == Qt.CheckState.Checked
    if is_checked:
        box.setEnabled(True)
        self.oil_date_cb.setEnabled(True)
        # populate the combobox with this unit's oil components
        df_comp = db.get_df_oil_components(unit=self.unit)
        box.set_items(f.clean_series(df_comp.combined))
        box.select_all()
    else:
        box.setEnabled(False)
        self.oil_date_cb.setEnabled(False)
        self.oil_date_cb.setChecked(False)
def toggle_oil_date(self, state):
    """Enable the oil date edit only while its checkbox is checked."""
    is_checked = Qt.CheckState(state) == Qt.CheckState.Checked
    self.oil_date.setEnabled(is_checked)
def add_textbox(self, names):
    """
    Add one labelled QTextEdit per name, register it in self.text_fields,
    and append it to the dialog's vertical layout.

    names -- a single field name or a list of field names
    """
    if not isinstance(names, list):
        names = [names]
    for field_name in names:
        column = QVBoxLayout()
        column.addWidget(QLabel(f'{field_name.title()}:'))
        box = QTextEdit()
        box.setText(self.text.get(field_name, ''))
        setattr(self, field_name, box)
        self.text_fields[field_name] = box
        column.addWidget(box)
        self.v_layout.addLayout(column)
def accept(self):
    """
    Collect all dialog state (selected pictures, text fields, oil-sample and
    PLM options) into instance attributes via f.set_self(vars()), optionally
    refresh PLM haul data, then close the dialog.

    NOTE(review): every local variable here becomes an instance attribute
    through f.set_self(vars()) — do not rename locals casually.
    """
    pics = self.file_dlg.selectedFiles()
    word_report = self.btn_word.isChecked()
    # convert dict of textbox objects to their plaintext (could also use html)
    for name, textbox in self.text_fields.items():
        self.text[name] = textbox.toPlainText()
    # save oil sample component/modifier
    oil_samples = False
    if self.oil_cb.isChecked():
        oil_samples = True
        val = self.oil_box.val
        # combobox value format is "component - modifier"
        lst = val.split(' - ')
        component = lst[0]
        # may or may not have modifier
        if len(lst) > 1:
            modifier = lst[1]
        else:
            modifier = None
    if self.oil_date_cb.isChecked():
        d_lower = self.oil_date.val
    else:
        d_lower = None
    plm_report = False
    if self.plm_cb.isChecked():
        plm_report = True
        # check if PLM needs to be updated first
        from smseventlog.data.internal import plm
        unit = self.unit
        maxdate = plm.max_date_plm(unit=unit)
        # refresh when the db's newest PLM record is older than ~5 days before the event
        if (maxdate + delta(days=-5)).date() < self.e.DateAdded:
            msg = f'Max date in db: {maxdate:%Y-%m-%d}. ' \
                + 'Importing haul cylce files from network drive, this may take a few minutes...'
            self.update_statusbar(msg=msg)
            plm.update_plm_single_unit(unit=unit, maxdate=maxdate)
    f.set_self(vars())
    super().accept()
class FileDialogPreview(QFileDialog):
"""
Create QFileDialog with image preview
"""
def __init__(
        self,
        parent: QWidget = None,
        caption: str = '',
        directory: Union[Path, str] = None,
        filter: str = None,
        standalone: bool = True,
        options=QFileDialog.Option.DontUseNativeDialog,
        **kw):
    """
    QFileDialog with a 400px image preview pane added to its grid layout.

    parent     -- optional owning widget; when given, rejecting this dialog
                  also closes the parent (used embedded in another dialog)
    standalone -- False when embedded inside another dialog (hides this
                  dialog's own buttons)

    NOTE(review): source indentation was lost; only disable_buttons() is
    treated as conditional on `standalone` here — confirm whether the
    resize/window-flag calls below were also inside that branch.
    """
    super().__init__(parent, caption, str(directory), filter, options=options, **kw)
    box = QHBoxLayout()
    if not standalone:
        self.disable_buttons()
    # widen to make room for the preview pane; allow multi-file selection
    self.setFixedSize(self.width() + 400, self.height() - 100)
    self.setFileMode(QFileDialog.FileMode.ExistingFiles)
    self.setViewMode(QFileDialog.ViewMode.Detail)
    self.setWindowFlags(self.windowFlags() & ~Qt.WindowType.Dialog)  # needed to use inside other dialog
    self.setSizeGripEnabled(False)
    mpPreview = QLabel('Preview', self)
    mpPreview.setFixedSize(400, 400)
    mpPreview.setAlignment(Qt.AlignmentFlag.AlignCenter)
    mpPreview.setObjectName('labelPreview')
    box.addWidget(mpPreview)
    box.addStretch()  # not sure if necessary
    # Add extra column to FileDialog's gridLayout for image preview
    # row=0, column=3, rowSpan=4, colSpan=1
    self.layout().addLayout(box, 0, 3, 4, 1)
    self.currentChanged.connect(self.onChange)
    self.fileSelected.connect(self.onFileSelected)
    self.filesSelected.connect(self.onFilesSelected)
    # used to change picture on hover changed, too messy, dont need
    # for view in self.findChildren(QTreeView):
    #     if isinstance(view.data_model, QFileSystemModel):
    #         tree_view = view
    #         break
    # tree_view.setMouseTracking(True)
    # tree_view.entered.connect(self.onChange)
    self._fileSelected = None
    self._filesSelected = None
    f.set_self(vars())  # locals become instance attributes
    # close FailureReport dialog when esc pressed
    if not parent is None:
        self.rejected.connect(parent.close)
    self.select_files()
def select_files(self, expr: str = r'^\d+$') -> None:
"""Select files where file name (excluding .ext) match regex pattern
Parameters
----------
expr : str, optional
regex expression, by default r'^\d+$' (match only digits)
"""
_p = Path(self.directory)
sel_files = [p for p in _p.iterdir() if re.match(expr, p.stem)]
# get list view
file_view = self.findChild(QListView, 'listView')
# get selection model
sel_model = file_view.selectionModel()
for | |
a numpy array.
:param inplace:
:return:
"""
obs_set = self if inplace else self.copy()
select_indices = obs_set.search_names(names)
if select_indices.size == 0:
obs_set.obs = np.asarray([])
obs_set.length_vector = np.asarray([])
obs_set.names_vector = np.asarray([])
return obs_set
return obs_set.select_observations(select_indices, inplace=True)
def axis1d(self, axis):
    """
    Flatten the observations along the given axes into one 1-D array,
    dropping the padded (non-existent, -1) entries beyond each
    observation's valid length.

    :param axis: Axis (or sequence of axes) to keep.
    :return: 1-D array with the observations in axis.
    """
    axes = np.atleast_1d(np.asarray(axis))
    n_axes = axes.shape[0]
    bounds = np.hstack((0, np.cumsum(self.length_vector))) * n_axes
    flat = np.empty(self.length_vector.sum() * n_axes)
    for idx in range(self.num_observations()):
        lo, hi = bounds[idx], bounds[idx + 1]
        flat[lo:hi] = self.obs[idx, axes, :self.length_vector[idx]].reshape(-1)
    return flat
def _window_limits(self):
"""
Return the frame number limit for each window. The very first windows can
have 1 more frame because of the remaining frames.
:param nWindow: Number of desired window.
:return: ndarray where result[0] is the first frame of the second window.
"""
n_frames_window = int(self.min_length() / self.n_window)
remaining = self.min_length() - n_frames_window * self.n_window
limits = np.empty((self.n_window,), dtype=np.int32)
current_limit = 0
for i in range(0, remaining):
current_limit += n_frames_window + 1
limits[i] = current_limit
for i in range(remaining, self.n_window):
current_limit += n_frames_window
limits[i] = current_limit
return limits
def _update_window_limits(self):
"""
Updates the window limits when there are some changes in the length of observations.
:return: Updates window limits.
"""
self.window_limits = self._window_limits()
self.window_start = np.empty(self.window_limits.shape, self.window_limits.dtype)
self.window_start[0] = 0
self.window_start[1:] = self.window_limits[:-1]
def _invalidate_window(self):
"""
Invalidates the execution of temporal windows because of some change in the ObservationSet incompatible with
the temporal window model.
:return:
"""
self.n_window = -1
self.window_limits = None
self.window_start = None
def get_window_observation(self, index, inplace=False):
    """
    Build an ObservationSet restricted to the index-th temporal window.

    :param index: Index of the temporal window.
    :param inplace: Make inplace changes.
    :return: Cropped ObservationSet.
    """
    start, end = self.window_start[index], self.window_limits[index]
    window = self._crop_between_indices(start, end, inplace)
    window.n_window = 1
    window._update_window_limits()
    return window
def get_selection(self, min_col, max_col, min_row, max_row):
    """
    Find the workpiece indices and temporal moments whose position lies in
    the bounding box [min_col, max_col) x [min_row, max_row).

    :param min_col: Minimum value (inclusive) for the columns axis.
    :param max_col: Maximum value (exclusive) for the columns axis.
    :param min_row: Minimum value (inclusive) for the rows axis.
    :param max_row: Maximum value (exclusive) for the rows axis.
    :return: Indices of the workpieces, indices of the temporal moments.
    """
    cols = self.obs[:, 0, :]
    rows = self.obs[:, 1, :]
    inside_cols = (cols >= min_col) & (cols < max_col)
    inside_rows = (rows >= min_row) & (rows < max_row)
    return np.where(inside_cols & inside_rows)
def get_valid_selection(self, min_col, max_col, min_row, max_row):
    """
    Like get_selection, but restricted to *valid* observations: entries on
    the last frame of an observation are discarded, because they have no
    following frame.

    :param min_col: Minimum value (inclusive) for the columns axis.
    :param max_col: Maximum value (exclusive) for the columns axis.
    :param min_row: Minimum value (inclusive) for the rows axis.
    :param max_row: Maximum value (exclusive) for the rows axis.
    :return: Indices of the workpieces, indices of the temporal moments.
    """
    wp_idx, frame_idx = self.get_selection(min_col, max_col, min_row, max_row)
    # a selection is only valid when the next frame still exists
    next_frame = frame_idx + 1
    out_of_range = np.where(next_frame >= self.length_vector[wp_idx])[0]
    if out_of_range.size:
        wp_idx = np.delete(wp_idx, out_of_range)
        frame_idx = np.delete(frame_idx, out_of_range)
    return wp_idx, frame_idx
def get_valid_window_array(self):
    """
    Collapse the observations of a single-window ObservationSet into one
    2-D [2, N] array, omitting everything outside the window.

    :return: 2-D array with the observations of the current window.
    :raises ValueError: if more than one window is defined.
    """
    if self.n_window > 1:
        raise ValueError(
            "The ObservationSet contains more than 1 window. Select a window before calling get_valid_window_array()")
    start = self.window_start[0]
    end = self.window_limits[0]
    return self.obs[:, :, start:end].swapaxes(0, 1).reshape(2, -1)
def space_bounds(self):
    """
    Return the space bounds for this ObservationSet. The first returned
    array holds the columns-axis [min, max] values; the second holds the
    rows-axis [min, max] values.

    :return: [min_col, max_col], [min_row, max_row] values.
    """
    min_col = np.inf
    max_col = -np.inf
    min_row = np.inf
    max_row = -np.inf
    for n in range(0, self.num_observations()):
        len_obs = self.length_vector[n]
        # hoist the slices and extrema: the original recomputed each
        # min()/max() twice per bound (once in the test, once in the
        # assignment), i.e. up to 8 redundant array scans per observation
        cols = self[n, 0, :len_obs]
        rows = self[n, 1, :len_obs]
        min_col = min(min_col, cols.min())
        max_col = max(max_col, cols.max())
        min_row = min(min_row, rows.min())
        max_row = max(max_row, rows.max())
    return np.asarray([min_col, max_col]), \
           np.asarray([min_row, max_row])
def num_observations(self):
    """Return the number of stored observation sequences (first axis of obs)."""
    return len(self.obs)
def num_windows(self):
    """
    Returns the number of defined temporal windows (-1 when the window
    model has been invalidated by _invalidate_window).

    :return: Number of temporal windows.
    """
    return self.n_window
def copy(self):
    """Return a new ObservationSet holding copies of all data arrays."""
    obs_copy = self.obs.copy()
    lengths_copy = self.length_vector.copy()
    names_copy = self.names_vector.copy()
    return ObservationSet(obs_copy, lengths_copy, names_copy, n_window=self.n_window)
class ObservationROISet(ObservationSet):
"""
This class implements an ObservationSet with a ROI (region of interest applied). It implements the following
additional attributes:
- ini_obstacle[1/2]: 1-D arrays with the initial frame for the [first/second] obstacle. If there is no such obstacle, nan values
are used.
- end_obstacle[1/2]: 1-D arrays with the last frame for the [first/second] obstacle. If there is no such obstacle, nan values
are used.
- has_obstacle[1/2]: A boolean indicating if the ObservationROISet has the [first/second] obstacle.
"""
def __init__(self, observations, length_vector, names_vector, ini_obstacle1, end_obstacle1, ini_obstacle2, end_obstacle2):
    """
    Build a ROI observation set. Obstacle start/end frame arrays are all-NaN
    when the corresponding obstacle does not exist.
    """
    super(ObservationROISet, self).__init__(observations, length_vector, names_vector, n_window=0)
    self.ini_obstacle1 = ini_obstacle1
    self.end_obstacle1 = end_obstacle1
    self.ini_obstacle2 = ini_obstacle2
    self.end_obstacle2 = end_obstacle2
    # an obstacle exists when its ini/end arrays contain no NaN at all;
    # each present obstacle contributes 2 extra temporal windows
    self.has_obstacle1 = (not np.isnan(ini_obstacle1).sum()) and (not np.isnan(end_obstacle1).sum())
    self.has_obstacle2 = (not np.isnan(ini_obstacle2).sum()) and (not np.isnan(end_obstacle2).sum())
    self.n_window = 1 + 2 * int(self.has_obstacle1) + 2 * int(self.has_obstacle2)
    self.valid_start = np.full((self.num_observations(),), 0)
    self.valid_end = self.length_vector
    self._update_window_limits()
@classmethod
def fromfolder(cls, data_folder):
    """
    Load the data from a folder name. The data is a collection of npz files
    with the differentiation videos and a metadata.pkl file with the
    metadata. From the differentiation videos, the laser spot positions are
    computed and an ObservationROISet is initialized.

    :param data_folder: Name of the folder containing the data.
    :return: ObservationROISet of the data.
    """
    with open(os.path.join(data_folder, 'metadata.pkl'), 'rb') as f:
        metadata = pickle.load(f)
    max_lines = max(metadata[file]['SizeInFrames'] for file in metadata.keys())
    n_files = len(metadata)
    # BUG FIX: np.float / np.int were removed in NumPy >= 1.24 and raise
    # AttributeError; the builtin float / int are the exact equivalents.
    observations = np.full((n_files, 2, max_lines), -1, dtype=float)
    length_vector = np.empty((n_files,), dtype=int)
    names_vector = np.empty((n_files,), dtype='object')
    ini_obstacle1 = np.full((n_files,), np.nan)
    end_obstacle1 = np.full((n_files,), np.nan)
    ini_obstacle2 = np.full((n_files,), np.nan)
    end_obstacle2 = np.full((n_files,), np.nan)
    # Reads each file: laser spot positions = weighted gravity centers.
    for n, file_key in enumerate(metadata.keys()):
        file_info = metadata[file_key]
        diff_video = np.load(data_folder + "/" + file_key + ".npz")['image']
        x, y = misc.weighted_gravity_centers(diff_video, threshold=0)
        x = misc.fix_nan(x)
        y = misc.fix_nan(y)
        observations[n, 0, 0:x.shape[0]] = x
        observations[n, 1, 0:y.shape[0]] = y
        length_vector[n] = file_info['SizeInFrames']
        names_vector[n] = os.path.basename(file_key)
        # -1 in the metadata marks "no obstacle annotation"
        if file_info['KeyfIniObstacle1'] != -1 and file_info['KeyfEndObstacle1'] != -1:
            ini_obstacle1[n] = file_info['KeyfIniObstacle1']
            end_obstacle1[n] = file_info['KeyfEndObstacle1']
        if file_info['KeyfIniObstacle2'] != -1 and file_info['KeyfEndObstacle2'] != -1:
            ini_obstacle2[n] = file_info['KeyfIniObstacle2']
            end_obstacle2[n] = file_info['KeyfEndObstacle2']
    for obstacle_frames in (ini_obstacle1, end_obstacle1, ini_obstacle2, end_obstacle2):
        cls._check_nan_obstacles(obstacle_frames)
    return cls(observations, length_vector, names_vector, ini_obstacle1, end_obstacle1, ini_obstacle2, end_obstacle2)
@classmethod
def _check_nan_obstacles(cls, obstacle_frames):
"""
Checks that the obstacle frames array have nan values on every position of the array or a concrete value for the
obstacle frame. A mix of concrete values and nan values are not allowed.
:param obstacle_frames: 1-D array containing information about the start and end of the obstacle.
:return: A warning is returned if an error is found.
"""
sum_nans = np.isnan(obstacle_frames).sum()
if sum_nans > 0 and sum_nans < sum_nans.size:
warnings.warn("Some obstacle frames has nan values while other obstacle frames have a correct value.")
sys.exit("An error ocurred while processing obstacle info.")
def crop_to_min_index(self, inplace=True):
    """
    Crop every observation down to the shortest one's length and refresh
    the valid-end markers. Destructive operation.

    :param inplace:
    :return:
    """
    cropped = super(ObservationROISet, self).crop_to_min_index(inplace)
    cropped.valid_end = cropped.length_vector
    return cropped
def crop_between_indices(self, low, high, inplace=False):
    """
    Crop the time axis of the observations to [low, high) and shift the
    obstacle frame limits accordingly.

    :param low: Lower bound (inclusive) of the cropped range.
    :param high: Upper bound (exclusive) of the cropped range.
    :param inplace:
    :return:
    """
    cropped = super(ObservationROISet, self).crop_between_indices(low, high, inplace)
    shift = ObservationROISet._update_obstacle_limits
    cropped.ini_obstacle1 = shift(low, high, cropped.ini_obstacle1)
    cropped.end_obstacle1 = shift(low, high, cropped.end_obstacle1)
    cropped.ini_obstacle2 = shift(low, high, cropped.ini_obstacle2)
    cropped.end_obstacle2 = shift(low, high, cropped.end_obstacle2)
    cropped.valid_start[:] = 0
    cropped.valid_end = cropped.length_vector
    return cropped
@classmethod
def | |
0:
#Download file completed successfully
self.playlist_src.remove(0)
#self.playlist_src.save(RootDir + downloads_queue)
self.playlist_src.save(datapaths + downloads_queue)
counter += 1
elif self.state == -1:
#Downlaod failed
dialog = xbmcgui.Dialog()
if dialog.yesno("Error",str(self.playlist_src.list[0].name),"Download failed. Retry?") == False:
self.playlist_src.remove(0)
#self.playlist_src.save(RootDir + downloads_queue)
self.playlist_src.save(datapaths + downloads_queue)
counter += 1
#Display the updated Queue playlist
if (self.MainWindow.pl_focus == self.MainWindow.downloadqueue) or \
(self.MainWindow.pl_focus == self.MainWindow.incompletelist) or \
(self.MainWindow.pl_focus == self.MainWindow.downloadslist):
self.MainWindow.ParsePlaylist(reload=False) #display download list
if (self.shutdown == True) and (self.killed == False) and (self.running == True):
self.MainWindow.onSaveSettings()
self.MainWindow.delFiles(cacheDir) #clear the cache first
self.MainWindow.bkgndloadertask.kill()
self.MainWindow.bkgndloadertask.join(10) #timeout after 10 seconds
xbmc.shutdown() #shutdown XBMC
self.running = False #disable downloading
self.MainWindow.dlinfotekst.setVisible(0)
self.MainWindow.download_logo.setVisible(0)
######################################################################
# Description: Downloads a URL to local disk
# Parameters : entry = mediaitem to download
# header = header to display (1 of x)
# Return : -
######################################################################
def download_file(self, entry, header=""):
    """
    Download an HTTP(S) mediaitem to local disk, resuming a partially
    downloaded file when the server supports byte ranges. FTP URLs are
    delegated to download_fileFTP. On success the item is added to the
    completed list and removed from the incomplete-downloads playlist.
    Sets self.state to 0 on success and -1 on any failure.

    entry  -- mediaitem to download (uses entry.URL and entry.DLloc)
    header -- progress header to display ("1 of x")
    """
    self.state = 0 #success
    URL = entry.URL
    localfile = entry.DLloc
    #download of FTP file is handled in a separate function
    if URL[:3] == 'ftp':
        self.download_fileFTP(entry, header)
        return
    if URL[:4] != 'http':
        self.state = -1 #URL does not point to internet file.
        return
    #Continue with HTTP download
    self.MainWindow.dlinfotekst.setLabel('(' + header + ')' + " Retrieving file info...")
    # set custom headers if specified
    URL, headers=parse_headers(URL, entry)
    try:
        cookies = ''
        # requests to the nx server get platform/version/session cookies
        if URL.find(nxserver_URL) != -1:
            cookies='platform='+platform+'; version='+Version+'.'+SubVersion
            cookies=cookies+'; nxid='+nxserver.user_id
            headers={'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.4) Gecko/2008102920 Firefox/3.0.4','Cookie':cookies}
            #headers={'User-Agent':'Mozilla/4.0 (compatible;MSIE 7.0;Windows NT 6.0)','Cookie':cookies}
    except Exception,e:
        print('ERROR line 397 cookies ' +str(e))
    #print 'headers = ' + str(headers)
    #Get the direct URL to the mediaitem given URL
    urlopener = CURLLoader()
    self.processed=urlopener.processed #### needed or will fault the next line at times
    entry.processed=self.processed
    try:
        result = urlopener.urlopen(URL, entry)
        if result["code"] != 0:
            self.state = -1 #failed to open the file
            print("urlopener.urlopen failed line 408 " + str(result))
            line2 = '%s %s' % ('failed to open', str(entry.name))
            self.MainWindow.dlinfotekst.setLabel(line2)
            return
    except Exception,e:
        self.state = -1 #failed to open the file
        print("urlopener.urlopen failed line 414 " + str(e))
        line2 = '%s %s' % ('failed to open', str(entry.name))
        self.MainWindow.dlinfotekst.setLabel(line2)
        return
    URL = urlopener.loc_url
    # oldtimeout=socket_getdefaulttimeout()
    # socket_setdefaulttimeout(url_open_timeout)
    existSize=0 #existing size = 0 Bytes
    if os.path.exists(localfile):
        existSize = os.path.getsize(localfile)
        #Message("Exist size: " + str(existSize))
        #If the file exists, then only download the remainder
        NoRangeEntry = headers
        # probe both 'Ranges'/'Range' header spellings; '' sentinel means no range support
        for RangeEntry in 'Ranges','Range','':
            if RangeEntry != '':
                try: #### test for range support
                    headers[RangeEntry] = 'bytes=%s-' % existSize
                    req = urllib2.Request(URL, None, headers)
                    f = urllib2.urlopen(req)
                    break
                except: pass #Expected error: HTTP Error 416: Requested Range Not Satisfiable'
            else: #### if ranges are not supported
                try:
                    req = urllib2.Request(URL, None, NoRangeEntry)
                    f = urllib2.urlopen(req)
                except Exception as e:
                    self.state = -1; print('ERROR URL= ' + str(URL)); print('failed to open the URL file line 444 '+ str(e))
                    line2 = '%s %s' % ('failed to open', str(entry.name))
                    self.MainWindow.dlinfotekst.setLabel(line2); return
    else: # if the file does not exist
        #print('URL = ' +str(URL)); print('headers = ' + str(headers))
        try:
            req = urllib2.Request(URL, None, headers)
            f = urllib2.urlopen(req)
        except Exception as e:
            self.state = -1; print ('failed to open the URL file line 454', str(e))
            line2 = '%s %s' % ('failed to download', str(entry.name))
            self.MainWindow.dlinfotekst.setLabel(line2); return
    try: size_string,size_raw = self.file_size(0,req) #### gets size of remote URL file or sets size_string = Unknown and size_raw = 0
    except Exception as e:
        self.state = -1; print ('failed to open the URL file line460', str(e))
        line2 = '%s %s' % ('failed to download', str(entry.name))
        self.MainWindow.dlinfotekst.setLabel(line2); return
    #If the file exists, but we already have the whole thing, don't download again
    size = size_raw #The remaining bytes
    file = open(localfile,'ab+') #### opens and/or creates the destination file
    #Message("Remaining: " + str(size))
    if ((size > 0) and (size != existSize)) or size == 0:
        bytes = existSize #bytes downloaded already
        size = int(size) + int(existSize) #total size
        #Message("Total: " + str(size))
        total_chunks = 0
        #DL-speed calculation
        starttime=time.time()
        startSize = bytes
        deltatime = 0
        deltasize = 0
        dlspeed = 0
        self.add_list(entry,'incdl') #### add to incomplete downloads, removing existing duplicate entries
        try:
            self.MainWindow.dlinfotekst.setLabel('(' + header + ')' + " Downloading file...")
            #download in chunks of 100kBytes
            while ((bytes < size) or (size == 0) or (size_string == 'Unknown')) and (self.killed == False) and (self.running == True):
                chunk = 100 * 1024 #100kBytes chunks
                total_chunks += chunk #### total chunks read
                if ((bytes + chunk) > size and size!=0) and (size_string != 'Unknown'):
                    chunk = size-bytes #remainder
                data = f.read(chunk)
                #### if total_chunks <= whats already downloaded dont write it for unknown file size (append issue)
                if data !='' and (size_string == 'Unknown') and (total_chunks > os.path.getsize(localfile)): file.write(data)
                elif data !='' and (size_string != 'Unknown'): file.write(data) #### write statement for files of known size
                bytes = bytes + chunk
                if size == 0 or size_string == 'Unknown' : percent = 'Unknown %'
                else: percent = str(100 * bytes / size) + '%'
                size_string,r_size = self.file_size(size,req)
                done,r_size = self.file_size(bytes,'')
                deltatime = time.time() - starttime
                if deltatime >=5: #update every 5 seconds
                    #calculate the download speed
                    deltasize = bytes - startSize
                    dlspeed = (deltasize / 1024) / deltatime
                    starttime = time.time()
                    startSize = bytes
                line2 = '(%s) %s of %s - %s - %dkB/s' % (header, done, size_string, percent, dlspeed)
                self.MainWindow.dlinfotekst.setLabel(line2)
                if (size >= 0 or size_string == 'Unknown') and data == '': break
            f.close() #close the URL
        except Exception as e:
            self.state = -1; print ('failed to download the file CDLline 517', str(e))
            line2 = '%s %s' % ('failed to download', str(entry.name))
            self.MainWindow.dlinfotekst.setLabel(line2)
    # NOTE(review): reconstructed nesting — this kill/stop check and the
    # completed-list bookkeeping below run whether or not bytes were
    # transferred this call; confirm against the original indentation.
    if (self.killed == True) or (self.running == False):
        self.state = -1 #failed to download the file
    file.close() #close the destination file
    # socket_setdefaulttimeout(oldtimeout)
    #add the downloaded file to the download list
    if self.state == 0:
        self.add_list(entry,'cmpdl')
        #### remove from Incomplete Downloads
        #pos = 0; incdl = RootDir + incomplete_downloads
        pos = 0; incdl = datapaths + incomplete_downloads
        for line in open(incdl,'r'):
            if line == '#\n' : pos+=1
            elif entry.DLloc in line: self.playlist_inc.remove(pos-1)
        self.playlist_inc.save(incdl)
    #end of function
######################################################################
# Description: Downloads an FTP URL to local disk
# Parameters : entry = mediaitem to download
#              header = header to display (1 of x)
#              (note: no shutdown flag is passed here; shutdown after
#               download is handled by the calling download loop)
# Return : -
######################################################################
def download_fileFTP(self, entry, header=""):
self.state = 0 #success
URL = entry.URL
localfile = entry.DLloc
self.header = header
self.MainWindow.dlinfotekst.setLabel('(' + header + ')')
#@todo: move URLparse to another function.
########################
#Parse URL according RFC 1738: ftp://user:password@host:port/path
#There is no standard Python funcion to split these URL's.
username=''
password=''
port=21
#check for username, password
index = URL.find('@')
if index != -1:
index2 = URL.find(':',6,index)
if index2 != -1:
username = URL[6:index2]
print ('user: ' + username)
password = URL[index2+1:index]
print ('password: ' + password)
URL = URL[index+1:]
else:
URL = URL[6:]
#check for host
index = URL.find('/')
if index != -1:
host = URL[:index]
path = URL[index:]
else:
host = URL
path = ''
#retrieve the port
index = host.find(':')
if index != -1:
port = int(host[index+1:])
host = host[:index]
print ('host: ' + host)
print ('port: ' + str(port))
#split path and file
index = path.rfind('/')
if index != -1:
file = path[index+1:]
path = path[:index]
else:
file = ''
print ('path: ' + path)
print ('file: ' + file)
########################
try:
self.f = ftplib.FTP()
self.f.connect(host,port)
except (socket.error, socket.gaierror) as e:
print ('ERROR: cannot reach "%s"' % host)
self.state = -1 #failed to download the file
return
print ('*** Connected to host "%s"' % host)
try:
if username != '':
self.f.login(username, password)
else:
self.f.login()
except ftplib.error_perm:
print ('ERROR: cannot login anonymously')
self.f.quit()
self.state = -1 #failed to download the file
return
print ('*** Logged in as "anonymous"')
try:
self.f.cwd(path)
except ftplib.error_perm:
print ('ERROR: cannot CD to "%s"' | |
2579, 2580, 2586, 2585)
# Generated mesh definition: each call registers one element as
# (element_id, node1..node8). NOTE(review): the 8-node connectivity and the
# regular node-id strides suggest a structured hexahedral/quad8 grid produced
# by a mesh generator — do not hand-edit individual entries; confirm the
# element type against the createElement API.
model.createElement(2640, 4700, 4701, 4707, 4706, 2580, 2581, 2587, 2586)
model.createElement(2641, 4701, 4702, 4708, 4707, 2581, 2582, 2588, 2587)
model.createElement(2642, 4702, 2927, 2928, 4708, 2582, 563, 564, 2588)
model.createElement(2643, 2233, 4703, 4709, 2232, 428, 2583, 2589, 429)
model.createElement(2644, 4703, 4704, 4710, 4709, 2583, 2584, 2590, 2589)
model.createElement(2645, 4704, 4705, 4711, 4710, 2584, 2585, 2591, 2590)
model.createElement(2646, 4705, 4706, 4712, 4711, 2585, 2586, 2592, 2591)
model.createElement(2647, 4706, 4707, 4713, 4712, 2586, 2587, 2593, 2592)
model.createElement(2648, 4707, 4708, 4714, 4713, 2587, 2588, 2594, 2593)
model.createElement(2649, 4708, 2928, 2929, 4714, 2588, 564, 565, 2594)
model.createElement(2650, 2232, 4709, 4715, 2231, 429, 2589, 2595, 430)
model.createElement(2651, 4709, 4710, 4716, 4715, 2589, 2590, 2596, 2595)
model.createElement(2652, 4710, 4711, 4717, 4716, 2590, 2591, 2597, 2596)
model.createElement(2653, 4711, 4712, 4718, 4717, 2591, 2592, 2598, 2597)
model.createElement(2654, 4712, 4713, 4719, 4718, 2592, 2593, 2599, 2598)
model.createElement(2655, 4713, 4714, 4720, 4719, 2593, 2594, 2600, 2599)
model.createElement(2656, 4714, 2929, 2930, 4720, 2594, 565, 566, 2600)
model.createElement(2657, 2231, 4715, 4721, 2230, 430, 2595, 2601, 431)
model.createElement(2658, 4715, 4716, 4722, 4721, 2595, 2596, 2602, 2601)
model.createElement(2659, 4716, 4717, 4723, 4722, 2596, 2597, 2603, 2602)
model.createElement(2660, 4717, 4718, 4724, 4723, 2597, 2598, 2604, 2603)
model.createElement(2661, 4718, 4719, 4725, 4724, 2598, 2599, 2605, 2604)
model.createElement(2662, 4719, 4720, 4726, 4725, 2599, 2600, 2606, 2605)
model.createElement(2663, 4720, 2930, 2931, 4726, 2600, 566, 567, 2606)
model.createElement(2664, 2230, 4721, 4727, 2229, 431, 2601, 2607, 432)
model.createElement(2665, 4721, 4722, 4728, 4727, 2601, 2602, 2608, 2607)
model.createElement(2666, 4722, 4723, 4729, 4728, 2602, 2603, 2609, 2608)
model.createElement(2667, 4723, 4724, 4730, 4729, 2603, 2604, 2610, 2609)
model.createElement(2668, 4724, 4725, 4731, 4730, 2604, 2605, 2611, 2610)
model.createElement(2669, 4725, 4726, 4732, 4731, 2605, 2606, 2612, 2611)
model.createElement(2670, 4726, 2931, 2932, 4732, 2606, 567, 568, 2612)
model.createElement(2671, 2229, 4727, 4733, 2228, 432, 2607, 2613, 433)
model.createElement(2672, 4727, 4728, 4734, 4733, 2607, 2608, 2614, 2613)
model.createElement(2673, 4728, 4729, 4735, 4734, 2608, 2609, 2615, 2614)
model.createElement(2674, 4729, 4730, 4736, 4735, 2609, 2610, 2616, 2615)
model.createElement(2675, 4730, 4731, 4737, 4736, 2610, 2611, 2617, 2616)
model.createElement(2676, 4731, 4732, 4738, 4737, 2611, 2612, 2618, 2617)
model.createElement(2677, 4732, 2932, 2933, 4738, 2612, 568, 569, 2618)
model.createElement(2678, 2228, 4733, 4739, 2227, 433, 2613, 2619, 434)
model.createElement(2679, 4733, 4734, 4740, 4739, 2613, 2614, 2620, 2619)
model.createElement(2680, 4734, 4735, 4741, 4740, 2614, 2615, 2621, 2620)
model.createElement(2681, 4735, 4736, 4742, 4741, 2615, 2616, 2622, 2621)
model.createElement(2682, 4736, 4737, 4743, 4742, 2616, 2617, 2623, 2622)
model.createElement(2683, 4737, 4738, 4744, 4743, 2617, 2618, 2624, 2623)
model.createElement(2684, 4738, 2933, 2934, 4744, 2618, 569, 570, 2624)
model.createElement(2685, 2227, 4739, 4745, 2226, 434, 2619, 2625, 435)
model.createElement(2686, 4739, 4740, 4746, 4745, 2619, 2620, 2626, 2625)
model.createElement(2687, 4740, 4741, 4747, 4746, 2620, 2621, 2627, 2626)
model.createElement(2688, 4741, 4742, 4748, 4747, 2621, 2622, 2628, 2627)
model.createElement(2689, 4742, 4743, 4749, 4748, 2622, 2623, 2629, 2628)
model.createElement(2690, 4743, 4744, 4750, 4749, 2623, 2624, 2630, 2629)
model.createElement(2691, 4744, 2934, 2935, 4750, 2624, 570, 571, 2630)
model.createElement(2692, 2226, 4745, 4751, 2225, 435, 2625, 2631, 436)
model.createElement(2693, 4745, 4746, 4752, 4751, 2625, 2626, 2632, 2631)
model.createElement(2694, 4746, 4747, 4753, 4752, 2626, 2627, 2633, 2632)
model.createElement(2695, 4747, 4748, 4754, 4753, 2627, 2628, 2634, 2633)
model.createElement(2696, 4748, 4749, 4755, 4754, 2628, 2629, 2635, 2634)
model.createElement(2697, 4749, 4750, 4756, 4755, 2629, 2630, 2636, 2635)
model.createElement(2698, 4750, 2935, 2936, 4756, 2630, 571, 572, 2636)
model.createElement(2699, 2225, 4751, 4757, 2224, 436, 2631, 2637, 437)
model.createElement(2700, 4751, 4752, 4758, 4757, 2631, 2632, 2638, 2637)
model.createElement(2701, 4752, 4753, 4759, 4758, 2632, 2633, 2639, 2638)
model.createElement(2702, 4753, 4754, 4760, 4759, 2633, 2634, 2640, 2639)
model.createElement(2703, 4754, 4755, 4761, 4760, 2634, 2635, 2641, 2640)
model.createElement(2704, 4755, 4756, 4762, 4761, 2635, 2636, 2642, 2641)
model.createElement(2705, 4756, 2936, 2937, 4762, 2636, 572, 573, 2642)
model.createElement(2706, 2224, 4757, 4763, 2223, 437, 2637, 2643, 438)
model.createElement(2707, 4757, 4758, 4764, 4763, 2637, 2638, 2644, 2643)
model.createElement(2708, 4758, 4759, 4765, 4764, 2638, 2639, 2645, 2644)
model.createElement(2709, 4759, 4760, 4766, 4765, 2639, 2640, 2646, 2645)
model.createElement(2710, 4760, 4761, 4767, 4766, 2640, 2641, 2647, 2646)
model.createElement(2711, 4761, 4762, 4768, 4767, 2641, 2642, 2648, 2647)
model.createElement(2712, 4762, 2937, 2938, 4768, 2642, 573, 574, 2648)
model.createElement(2713, 2223, 4763, 4769, 2222, 438, 2643, 2649, 439)
model.createElement(2714, 4763, 4764, 4770, 4769, 2643, 2644, 2650, 2649)
model.createElement(2715, 4764, 4765, 4771, 4770, 2644, 2645, 2651, 2650)
model.createElement(2716, 4765, 4766, 4772, 4771, 2645, 2646, 2652, 2651)
model.createElement(2717, 4766, 4767, 4773, 4772, 2646, 2647, 2653, 2652)
model.createElement(2718, 4767, 4768, 4774, 4773, 2647, 2648, 2654, 2653)
model.createElement(2719, 4768, 2938, 2939, 4774, 2648, 574, 575, 2654)
model.createElement(2720, 2222, 4769, 4775, 2221, 439, 2649, 2655, 440)
model.createElement(2721, 4769, 4770, 4776, 4775, 2649, 2650, 2656, 2655)
model.createElement(2722, 4770, 4771, 4777, 4776, 2650, 2651, 2657, 2656)
model.createElement(2723, 4771, 4772, 4778, 4777, 2651, 2652, 2658, 2657)
model.createElement(2724, 4772, 4773, 4779, 4778, 2652, 2653, 2659, 2658)
model.createElement(2725, 4773, 4774, 4780, 4779, 2653, 2654, 2660, 2659)
model.createElement(2726, 4774, 2939, 2940, 4780, 2654, 575, 576, 2660)
model.createElement(2727, 2221, 4775, 1094, 37, 440, 2655, 220, 7)
model.createElement(2728, 4775, 4776, 1095, 1094, 2655, 2656, 219, 220)
model.createElement(2729, 4776, 4777, 1096, 1095, 2656, 2657, 218, 219)
model.createElement(2730, 4777, 4778, 1097, 1096, 2657, 2658, 217, 218)
model.createElement(2731, 4778, 4779, 1098, 1097, 2658, 2659, 216, 217)
model.createElement(2732, 4779, 4780, 1099, 1098, 2659, 2660, 215, 216)
model.createElement(2733, 4780, 2940, 225, 1099, 2660, 576, 11, 215)
model.createElement(2734, 524, 3107, 4781, 2300, 525, 3101, 4541, 2260)
model.createElement(2735, 3107, 3108, 4782, 4781, 3101, 3102, 4542, 4541)
model.createElement(2736, 3108, 3109, 4783, 4782, 3102, 3103, 4543, 4542)
model.createElement(2737, 3109, 3110, 4784, 4783, 3103, 3104, 4544, 4543)
model.createElement(2738, 3110, 3111, 4785, 4784, 3104, 3105, 4545, 4544)
model.createElement(2739, 3111, 3112, 4786, 4785, 3105, 3106, 4546, 4545)
model.createElement(2740, 3112, 624, 2941, 4786, 3106, 623, 2901, 4546)
model.createElement(2741, 2300, 4781, 4787, 2299, 2260, 4541, 4547, 2259)
model.createElement(2742, 4781, 4782, 4788, 4787, 4541, 4542, 4548, 4547)
model.createElement(2743, 4782, 4783, 4789, 4788, 4542, 4543, 4549, 4548)
model.createElement(2744, 4783, 4784, 4790, 4789, 4543, 4544, 4550, 4549)
model.createElement(2745, 4784, 4785, 4791, 4790, 4544, 4545, 4551, 4550)
model.createElement(2746, 4785, 4786, 4792, 4791, 4545, 4546, 4552, 4551)
model.createElement(2747, 4786, 2941, 2942, 4792, 4546, 2901, 2902, 4552)
model.createElement(2748, 2299, 4787, 4793, 2298, 2259, 4547, 4553, 2258)
model.createElement(2749, 4787, 4788, 4794, 4793, 4547, 4548, 4554, 4553)
model.createElement(2750, 4788, 4789, 4795, 4794, 4548, 4549, 4555, 4554)
model.createElement(2751, 4789, 4790, 4796, 4795, 4549, 4550, 4556, 4555)
model.createElement(2752, 4790, 4791, 4797, 4796, 4550, 4551, 4557, 4556)
model.createElement(2753, 4791, 4792, 4798, 4797, 4551, 4552, 4558, 4557)
model.createElement(2754, 4792, 2942, 2943, 4798, 4552, 2902, 2903, 4558)
model.createElement(2755, 2298, 4793, 4799, 2297, 2258, 4553, 4559, 2257)
model.createElement(2756, 4793, 4794, 4800, 4799, 4553, 4554, 4560, 4559)
model.createElement(2757, 4794, 4795, 4801, 4800, 4554, 4555, 4561, 4560)
model.createElement(2758, 4795, 4796, 4802, 4801, 4555, 4556, 4562, 4561)
model.createElement(2759, 4796, 4797, 4803, 4802, 4556, 4557, 4563, 4562)
model.createElement(2760, 4797, 4798, 4804, 4803, 4557, 4558, 4564, 4563)
model.createElement(2761, 4798, 2943, 2944, 4804, 4558, 2903, 2904, 4564)
model.createElement(2762, 2297, 4799, 4805, 2296, 2257, 4559, 4565, 2256)
model.createElement(2763, 4799, 4800, 4806, 4805, 4559, 4560, 4566, 4565)
model.createElement(2764, 4800, 4801, 4807, 4806, 4560, 4561, 4567, 4566)
model.createElement(2765, 4801, 4802, 4808, 4807, 4561, 4562, 4568, 4567)
model.createElement(2766, 4802, 4803, 4809, 4808, 4562, 4563, 4569, 4568)
model.createElement(2767, 4803, 4804, 4810, 4809, 4563, 4564, 4570, 4569)
model.createElement(2768, 4804, 2944, 2945, 4810, 4564, 2904, 2905, 4570)
model.createElement(2769, 2296, 4805, 4811, 2295, 2256, 4565, 4571, 2255)
model.createElement(2770, 4805, 4806, 4812, 4811, 4565, 4566, 4572, 4571)
model.createElement(2771, 4806, 4807, 4813, 4812, 4566, 4567, 4573, 4572)
model.createElement(2772, 4807, 4808, 4814, 4813, 4567, 4568, 4574, 4573)
model.createElement(2773, 4808, 4809, 4815, 4814, 4568, 4569, 4575, 4574)
model.createElement(2774, 4809, 4810, 4816, 4815, 4569, 4570, 4576, 4575)
model.createElement(2775, 4810, 2945, 2946, 4816, 4570, 2905, 2906, 4576)
model.createElement(2776, 2295, 4811, 4817, 2294, 2255, 4571, 4577, 2254)
model.createElement(2777, 4811, 4812, 4818, 4817, 4571, 4572, 4578, 4577)
model.createElement(2778, 4812, 4813, 4819, 4818, 4572, 4573, 4579, 4578)
model.createElement(2779, 4813, 4814, 4820, 4819, 4573, 4574, 4580, 4579)
model.createElement(2780, 4814, 4815, 4821, 4820, 4574, 4575, 4581, 4580)
model.createElement(2781, 4815, 4816, 4822, 4821, 4575, 4576, 4582, 4581)
model.createElement(2782, 4816, 2946, 2947, 4822, 4576, 2906, 2907, 4582)
model.createElement(2783, 2294, 4817, 4823, 2293, 2254, 4577, 4583, 2253)
model.createElement(2784, 4817, 4818, 4824, 4823, 4577, 4578, 4584, 4583)
model.createElement(2785, 4818, 4819, 4825, 4824, 4578, 4579, 4585, 4584)
model.createElement(2786, 4819, 4820, 4826, 4825, 4579, 4580, 4586, 4585)
model.createElement(2787, 4820, 4821, 4827, 4826, 4580, 4581, 4587, 4586)
model.createElement(2788, 4821, 4822, 4828, 4827, 4581, 4582, 4588, 4587)
model.createElement(2789, 4822, 2947, 2948, 4828, 4582, 2907, 2908, 4588)
model.createElement(2790, 2293, 4823, 4829, 2292, 2253, 4583, 4589, 2252)
model.createElement(2791, 4823, 4824, 4830, 4829, 4583, 4584, 4590, 4589)
model.createElement(2792, 4824, 4825, 4831, 4830, 4584, 4585, 4591, 4590)
model.createElement(2793, 4825, 4826, 4832, 4831, 4585, 4586, 4592, 4591)
model.createElement(2794, 4826, 4827, 4833, 4832, 4586, 4587, 4593, 4592)
model.createElement(2795, 4827, 4828, 4834, 4833, 4587, 4588, 4594, 4593)
model.createElement(2796, 4828, 2948, 2949, 4834, 4588, 2908, 2909, 4594)
model.createElement(2797, 2292, 4829, 4835, 2291, 2252, 4589, 4595, 2251)
model.createElement(2798, 4829, 4830, 4836, 4835, 4589, 4590, 4596, 4595)
model.createElement(2799, 4830, 4831, 4837, 4836, | |
:
### existing units
vPowerOutput_Ex = 0
# dispatchable output
for TechDisp in model.setProcBaseDisp_TCD:
if (sZone + "/") in TechDisp:
vPowerOutput_Ex = vPowerOutput_Ex + model.vExProcDispPwOutNetTest_TCD_TS[TechDisp, sTimeSlice]
# storage output
for TechStor in model.setProcBaseStor_TCS:
if (sZone + "/") in TechStor:
vPowerOutput_Ex = vPowerOutput_Ex + model.vExProcStorPwOutTest_TCS_TS[TechStor, sTimeSlice]
# hydropower output
for TechHydro in model.setProcBaseHydr_TCH:
if (sZone + "/") in TechHydro:
vPowerOutput_Ex = vPowerOutput_Ex + model.vExProcHydrPwOutTest_TCH_TS[TechHydro, sTimeSlice]
# non-dispatchable renewables
vPowerOutput_Ex = vPowerOutput_Ex + model.pNonDispGenTest_ZNL_TS[sZone, sTimeSlice]
### new units
vPowerOutput_New = 0
# dispatchable output
for TechDisp in model.setProcNewDisp_TCD:
if (sZone + "/") in TechDisp:
vPowerOutput_New = vPowerOutput_New + model.vNewProcDispPwOutNetTest_TCD_TS[TechDisp, sTimeSlice]
# storage output
for TechStor in model.setProcNewStor_TCS:
if (sZone + "/") in TechStor:
vPowerOutput_New = vPowerOutput_New + model.vNewProcStorPwOutTest_TCS_TS[TechStor, sTimeSlice]
# hydropower output
for TechHydro in model.setProcNewHydr_TCH:
if (sZone + "/") in TechHydro:
vPowerOutput_New = vPowerOutput_New + model.vNewProcHydrPwOutTest_TCH_TS[TechHydro, sTimeSlice]
# non-dispatchable renewables
for TechRenew in model.setProcNewRE_TCR:
if (sZone + "/") in TechRenew:
vPowerOutput_New = vPowerOutput_New + model.vNewProcRenewPwOutTest_TCR_TS[TechRenew, sTimeSlice]
return model.vSupplyZoneTest_ZNL_TS[sZone, sTimeSlice] == vPowerOutput_Ex + vPowerOutput_New
setattr(model, "conLDZPowerSupplyTest_ZNL_TS", pe.Constraint(model.setLDZone_ZNL, \
model.setTSRT_TS, rule = ruleLDZProcPowerSupply))
### power balance of land zones
def rulePowerBalanceLandZone(model, sZone, sTimeSlice) :
# transmission input (into lines, export)
vTransZoneInput = 0
for TransLine in model.setTransLDZ_TRL:
if (sZone + "/") in TransLine:
vTransZoneInput = vTransZoneInput + model.vTransLDZInTest_TRL_TS[TransLine, sTimeSlice]
# transmission output (from transmission lines, import)
vTransZoneOutput = 0
for TransLine in model.setTransLDZ_TRL:
TragetZone = str(TransLine).split("/")[1]
if sZone == TragetZone:
vTransZoneOutput = vTransZoneOutput + model.vTransLDZOutTest_TRL_TS[TransLine, sTimeSlice]
# transmission output from offshore zones (from transmission lines)
vTransOffshoreOutput = 0
for TransLine in model.setTransOFZ_TRF:
TragetZone = str(TransLine).split("/")[1]
if sZone == TragetZone:
vTransOffshoreOutput = vTransOffshoreOutput + model.vTransOFZOutTest_TRF_TS[TransLine, sTimeSlice]
# supply - spill + line output - line input = demand
return model.vSupplyZoneTest_ZNL_TS[sZone, sTimeSlice] - model.vSpillZoneTest_ZNL_TS[sZone, sTimeSlice] \
+ vTransZoneOutput + vTransOffshoreOutput - vTransZoneInput \
== model.pDemandTest_ZNL_TS[sZone, sTimeSlice]
setattr(model, "conPowerBalanceZoneTest_ZNL_TS", pe.Constraint(model.setLDZone_ZNL, \
model.setTSRT_TS, rule = rulePowerBalanceLandZone))
# --- offshore zones ------------------------------
### power supply of offshore zones
    def ruleOFZProcPowerSupply(model, sZone, sTimeSlice) :
        # Offshore-zone supply: exogenous generation of existing units plus
        # the output of newly installed offshore renewables whose process
        # identifier is prefixed with this zone's name.
        vPowerOutput = 0
        # existing units
        vPowerOutput = vPowerOutput + model.pNonDispGenOffTest_ZNF_TS[sZone, sTimeSlice]
        # new installation non-dispatchable renewables
        for TechRenew in model.setProcNewRE_Offs_TCR:
            if (sZone + "/") in TechRenew:
                vPowerOutput = vPowerOutput + model.vNewProcRenewPwOutOffsTest_TCR_TS[TechRenew, sTimeSlice]
        # only non-dispatchable generation
        return model.vSupplyOffsTest_ZNF_TS[sZone, sTimeSlice] == vPowerOutput
    setattr(model, "conOFZPowerSupplyTest_ZNF_TS", pe.Constraint(model.setOFZone_ZNF, model.setTSRT_TS, rule = ruleOFZProcPowerSupply))
### all transmission(line) input + spill = total supply
    def ruleTransOFZInput(model, sZone, sTimeSlice) :
        # Offshore-zone balance: everything supplied is either injected into
        # outgoing transmission lines (zone-prefixed identifiers) or spilled.
        vAllTransOffshoreInput_TS = 0
        for TransLine in model.setTransOFZ_TRF:
            if (sZone + "/") in TransLine:
                vAllTransOffshoreInput_TS = vAllTransOffshoreInput_TS + model.vTransOFZInTest_TRF_TS[TransLine, sTimeSlice]
        return model.vSupplyOffsTest_ZNF_TS[sZone, sTimeSlice] == vAllTransOffshoreInput_TS + model.vSpillOffsTest_ZNF_TS[sZone, sTimeSlice]
    setattr(model, "conOFZBalanceZoneTest_ZNF_TS", pe.Constraint(model.setOFZone_ZNF, model.setTSRT_TS, rule = ruleTransOFZInput))
    return
##### ---- transmission ------------------------------------------------ #####
def constTransOpr(model, objMarket):
    ''' transmission constraints

    For every land-zone (TRL) and offshore (TRF) line and every normal time
    slice: injected flow is capped by existing plus newly built capacity, and
    delivered flow equals injected flow reduced by the line's loss factor.
    '''
    # transmission(line) input limited by capacity
    def ruleTransZoneInputCap(model, TransZone, sTimeSlice) :
        return model.vTransLDZIn_TRL_TS[TransZone, sTimeSlice] \
            <= model.pExTransLDZCap_TRL[TransZone] + model.vNewProcTransCap_TRL[TransZone]
    setattr(model, "conTransZoneInputCap_TRL_TS", pe.Constraint(model.setTransLDZ_TRL, \
            model.setTimeSlice_TS, rule = ruleTransZoneInputCap))
    # transmission(line) output consider losses
    def ruleTransZoneOutput(model, TransZone, sTimeSlice) :
        fLineLoss = model.pTransLDZLoss_TRL[TransZone]
        return model.vTransLDZOut_TRL_TS[TransZone, sTimeSlice] \
            == model.vTransLDZIn_TRL_TS[TransZone, sTimeSlice] * (1-fLineLoss)
    setattr(model, "conTransZoneOutput_TRL_TS", pe.Constraint(model.setTransLDZ_TRL, \
            model.setTimeSlice_TS, rule = ruleTransZoneOutput))
    # transmission(line) input offshore limited by capacity
    def ruleTransOffshoreInputCap(model, TransZone, sTimeSlice) :
        return model.vTransOFZIn_TRF_TS[TransZone, sTimeSlice] \
            <= model.pExTransOFZCap_TRF[TransZone] + model.vNewProcTransOffCap_TRF[TransZone]
    setattr(model, "conTransOffsInputCap_TRF_TS", pe.Constraint(model.setTransOFZ_TRF, \
            model.setTimeSlice_TS, rule = ruleTransOffshoreInputCap))
    # transmission(line) output offshore consider losses
    def ruleTransOffshoreOutput(model, TransZone, sTimeSlice) :
        fLineLoss = model.pTransOFZLoss_TRF[TransZone]
        return model.vTransOFZOut_TRF_TS[TransZone, sTimeSlice] \
            == model.vTransOFZIn_TRF_TS[TransZone, sTimeSlice] * (1-fLineLoss)
    setattr(model, "conTransOffsOutput_TRF_TS", pe.Constraint(model.setTransOFZ_TRF, \
            model.setTimeSlice_TS, rule = ruleTransOffshoreOutput))
    return
##### ---- transmission (Testing TS)------------------------------------ #####
def constTransOpr_RT(model, objMarket):
    ''' transmission constraints in testing TS

    Mirror of constTransOpr, applied to the reliability-testing time slices
    (setTSRT_TS) and the *Test* flow variables.
    '''
    # flow into a land-zone line is bounded by existing + new capacity
    def ruleInputCapLand(model, sLine, sTS):
        fCapacity = model.pExTransLDZCap_TRL[sLine] + model.vNewProcTransCap_TRL[sLine]
        return model.vTransLDZInTest_TRL_TS[sLine, sTS] <= fCapacity
    setattr(model, "conTransZoneInputCapTest_TRL_TS", pe.Constraint(model.setTransLDZ_TRL, \
            model.setTSRT_TS, rule = ruleInputCapLand))
    # delivered land-zone power equals injected power less line losses
    def ruleOutputLand(model, sLine, sTS):
        return model.vTransLDZOutTest_TRL_TS[sLine, sTS] \
            == model.vTransLDZInTest_TRL_TS[sLine, sTS] * (1 - model.pTransLDZLoss_TRL[sLine])
    setattr(model, "conTransZoneOutputTest_TRL_TS", pe.Constraint(model.setTransLDZ_TRL, \
            model.setTSRT_TS, rule = ruleOutputLand))
    # flow into an offshore line is bounded by existing + new capacity
    def ruleInputCapOffshore(model, sLine, sTS):
        fCapacity = model.pExTransOFZCap_TRF[sLine] + model.vNewProcTransOffCap_TRF[sLine]
        return model.vTransOFZInTest_TRF_TS[sLine, sTS] <= fCapacity
    setattr(model, "conTransOffsInputCapTest_TRF_TS", pe.Constraint(model.setTransOFZ_TRF, \
            model.setTSRT_TS, rule = ruleInputCapOffshore))
    # delivered offshore power accounts for line losses
    def ruleOutputOffshore(model, sLine, sTS):
        return model.vTransOFZOutTest_TRF_TS[sLine, sTS] \
            == model.vTransOFZInTest_TRF_TS[sLine, sTS] * (1 - model.pTransOFZLoss_TRF[sLine])
    setattr(model, "conTransOffsOutputTest_TRF_TS", pe.Constraint(model.setTransOFZ_TRF, \
            model.setTSRT_TS, rule = ruleOutputOffshore))
    return
##### ----- minimal CF of existing units ------------------------------- #####
def constMinBaseUnitGen(model, objMarket, fMinBaseGen):
    ''' constraints on minimum generation

    For every existing dispatchable unit with a non-zero base capacity factor
    and capacity, annual gross generation must reach at least fMinBaseGen
    times the base-year output (capacity * CF * represented hours).  Biomass
    technologies are exempt (their output is limited by the fuel-supply
    constraint in constMaxBiomassSupply).
    '''
    def ruleProcBaseMinGen_Disp(model, sProcDisp):
        BaseCF = model.pExProcBaseGenCF_TCD[sProcDisp]
        if BaseCF == 0:
            return pe.Constraint.Skip
        elif model.pExProcDispCap_TCD[sProcDisp] == 0:
            return pe.Constraint.Skip
        elif sProcDisp.split("/")[1] in ["BIO_ST", "BIGCC_CCS"]:
            # biomass: generation is governed by the fuel-supply limit
            return pe.Constraint.Skip
        else:
            BaseCF = BaseCF * fMinBaseGen
            # total represented hours per year (hoisted: capacity * CF is
            # loop-invariant, so the target is a single multiplication)
            fYearHours = sum(model.pTSRepHourYear_TS[sTSIndex] \
                for sTSIndex in model.setTimeSlice_TS)
            # target generation
            targetGen = model.pExProcDispCap_TCD[sProcDisp] * BaseCF * fYearHours
            # annual generation
            dayGen = 0
            for sTSIndex in model.setTimeSlice_TS:
                dayGen = dayGen + model.vExProcDispPwOutGrs_TCD_TS[sProcDisp, sTSIndex] \
                    * model.pTSRepHourYear_TS[sTSIndex]
            return dayGen >= targetGen
    setattr(model, "conProcBaseAnnualMinGen_TCD", \
            pe.Constraint(model.setProcBaseDisp_TCD, rule = ruleProcBaseMinGen_Disp))
    return
##### ----- max biomass supply limit ----------------------------------- #####
def constMaxBiomassSupply(model, objMarket):
    ''' constraints on maximum biomass fuel supply

    Per country (first three characters of the process identifier), the
    annual fuel consumption of all biomass units (existing and new) must not
    exceed the country's biomass supply parameter (TJ).
    '''
    def ruleProcBiomassMaxSupply(model, sCountry):
        fTotalBiomassDemand = 0
        # existing biomass units
        for TechDisp in model.setProcBaseDisp_TCD:
            if TechDisp[0:3] == sCountry:
                sTech = str(TechDisp).split("/")[1]
                if sTech in ["BIO_ST", "BIGCC_CCS"]:
                    for sTS in model.setTimeSlice_TS:
                        fTotalBiomassDemand += model.vExProcDispPwOutGrs_TCD_TS[TechDisp, sTS] \
                            * model.pTSRepHourYear_TS[sTS] / model.pExProcDispEff_TCD[TechDisp] * 0.0036
                        # MW * (TS) / Eff * 0.0036 = TJ
        # newly built biomass units
        for TechDisp in model.setProcNewDisp_TCD:
            if TechDisp[0:3] == sCountry:
                sTech = str(TechDisp).split("/")[1]
                if sTech in ["BIO_ST", "BIGCC_CCS"]:
                    for sTS in model.setTimeSlice_TS:
                        fTotalBiomassDemand += model.vNewProcDispPwOutGrs_TCD_TS[TechDisp, sTS] \
                            * model.pTSRepHourYear_TS[sTS] / model.pNewProcDispEff_TCD[TechDisp] * 0.0036
                        # MW * (TS) / Eff * 0.0036 = TJ
        return fTotalBiomassDemand <= model.pBiomassSupply_CN[sCountry]
    setattr(model, "conProcBiomassMaxSupply_CN", pe.Constraint(model.setCountryCode_CN, rule = ruleProcBiomassMaxSupply))
    return
##### ----- fixed new build of dispathcable units ---------------------- #####
def constFixedNewBuildDisp(instance, model, iYear):
    ''' constraints on addition of dispatchable process

    When a country's process assumptions prescribe a fixed new-build capacity
    for this year (dicProcDispFixedNewBuild), force the total new capacity of
    that technology in the country to equal the prescribed value; otherwise
    the constraint is skipped.
    '''
    def ruleFixedNewBuildDisp(model, sCountry, sProcDisp):
        for objCountry in instance.lsCountry:
            if objCountry.sCountry == sCountry:
                for objProcCountry in objCountry.lsProcessAssump:
                    if objProcCountry.sProcessName == sProcDisp:
                        if str(iYear) in objProcCountry.dicProcDispFixedNewBuild:
                            # sum new capacity over every zone-level instance
                            # of this technology in the country
                            TotalDispCap = 0
                            for sProc in model.setProcNewDisp_TCD:
                                if sProc[0:3] == sCountry and sProc.split("/")[1] == sProcDisp:
                                    TotalDispCap += model.vNewProcDispCap_TCD[sProc]
                            return TotalDispCap == objProcCountry.dicProcDispFixedNewBuild[str(iYear)]
                        else:
                            return pe.Constraint.Skip
        # country/process combination not found in the assumptions
        return pe.Constraint.Skip
    setattr(model, "conFixedNewBuildDisp_CN_TCD", pe.Constraint(model.setCountryCode_CN, \
            model.setDispatchableProc, rule = ruleFixedNewBuildDisp))
    return
##### ----- new renewable -------------------------------------------- #####
def constRenewAddMax(model, instance, objMarket):
''' constraints on addition of renewable process '''
def ruleRenewAddMax(model, sCountry, sProcRenew):
for objCountry in instance.lsCountry:
if objCountry.sCountry == sCountry:
# [ "WND_ON", "WND_OFF", "PV", "CSP", "HYD", "GEO_hydro", "BIO_ST" ]
if sProcRenew not in objCountry.dicRenewMaxCapAdd:
return pe.Constraint.Skip
else:
### bio mass
if sProcRenew in ["BIO_ST"]:
if objCountry.dicRenewMaxCapAdd[sProcRenew] >= 0:
totalNewCap = 0
bProcAvailable = False
for sProc in model.setProcNewDisp_TCD:
if sProc[0:3] == sCountry and sProc.split("/")[1] == sProcRenew:
totalNewCap += model.vNewProcDispCap_TCD[sProc]
bProcAvailable = True
if bProcAvailable == True:
return totalNewCap <= objCountry.dicRenewMaxCapAdd[sProcRenew]
else:
return pe.Constraint.Skip
else:
return pe.Constraint.Skip
### hydropower
elif sProcRenew in ["HYD"]:
if objCountry.dicRenewMaxCapAdd[sProcRenew] >= 0:
totalNewCap = 0
bProcAvailable = False
for sProc in model.setProcNewHydr_TCH:
if sProc[0:3] == sCountry:
totalNewCap += model.vNewProcHydrCap_TCH[sProc]
bProcAvailable = True
if bProcAvailable == True:
return totalNewCap <= objCountry.dicRenewMaxCapAdd[sProcRenew]
else:
return pe.Constraint.Skip
else:
return pe.Constraint.Skip
### terrestrial renewables
elif sProcRenew in ["WND_ON", "PV", "CSP", "GEO_hydro"]:
if objCountry.dicRenewMaxCapAdd[sProcRenew] >= 0:
totalNewCap = 0
bProcAvailable = False
for sProc in model.setProcNewRE_TCR:
if sProc[0:3] == sCountry:
sProcessName = sProc.split("/")[1]
if sProcessName[0:len(sProcRenew)] == sProcRenew:
totalNewCap += model.vNewProcRenewCap_TCR[sProc]
bProcAvailable = True
if bProcAvailable == True:
return totalNewCap <= objCountry.dicRenewMaxCapAdd[sProcRenew]
else:
return pe.Constraint.Skip
else:
return pe.Constraint.Skip
### offshore renewables
elif sProcRenew in ["WND_OFF"]:
if objCountry.dicRenewMaxCapAdd[sProcRenew] >= 0:
totalNewCap = 0
bProcAvailable = False
for sProc in model.setProcNewRE_Offs_TCR:
if sProc[0:3] == sCountry:
totalNewCap += model.vNewProcRenewCapOffs_TCR[sProc]
bProcAvailable = True
if bProcAvailable == True:
return totalNewCap <= objCountry.dicRenewMaxCapAdd[sProcRenew]
else:
return pe.Constraint.Skip
else:
return pe.Constraint.Skip
return pe.Constraint.Skip
setattr(model, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# NetApp HANA Integration Script
#
# This script allows an SAP HANA administrator to take advantage
# of the data management features offered by the Azure NetApp Files Service.
#
# These include application-consistent instant snapshots, restore, and
# cloning.
#
# This is sample code, provided as-is without any maintenance or support, and
# subject to the Apache License 2.0.
#
# © 2019, 2020 NetApp, Inc. All Rights Reserved. NETAPP, the NETAPP logo, and
# the marks listed at http://www.netapp.com/TM are trademarks of NetApp, Inc.
# in the U.S. and/or other countries. Other company and product names may be
# trademarks of their respective owners.
#
#
# Azure Installation
#
# 1) pip
#
# pip3 --version
#
# if you don't have pip installed, install it
#
# python3 -m ensurepip --default-pip
#
# to learn more, see:
#
# https://packaging.python.org/tutorials/installing-packages/
#
# 2) install the ANF components of the Azure SDK
#
# pip3 install azure-mgmt-netapp==0.3
#
# 3) copy this script to your host and ensure it is executable by root
#
# python3 is required
#
# 4) when onboarded to Azure, you received credentials with the following or
# similar format:
#
# {
# "appId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
# "displayName": "SAP",
# "name": "http://SAP",
# "password": "<PASSWORD>",
# "tenant": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
# }
#
# save these credentials in a file called "key.json"
#
# 5) ensure that the userstore contains a key with the permissions required by
# the script
#
# for example, as <SID>adm:
#
# hdbuserstore set BACKUP "<hostname>:30013" System "<password>"
#
# 6) configuration file
#
# optionally, create a config file such as:
#
# {
# "SID": "<SID>",
# "userstore_key": "BACKUP",
# "cloud_volumes": ["hana-data", "hana-shared"],
# "subscription_id": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
# }
#
# and save it as config.json or <SID>_config.json
#
#
# HANA and OS Interface Functions
#
import os, sys
import argparse
import datetime, time
import subprocess
from subprocess import check_call, check_output, CalledProcessError
#
# Dynamically detect the platform we're running on by looking for the
# proper libraries
#
try:
    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.subscription import SubscriptionClient
    from azure.mgmt.netapp import AzureNetAppFilesManagementClient
    from azure.mgmt.resource import ResourceManagementClient
    from azure.mgmt.netapp.models import Snapshot
    from azure.mgmt.netapp.models import Volume
except ImportError:
    # Narrowed from a bare 'except:': only a missing Azure SDK should produce
    # the install hint; any other error should surface as a normal traceback.
    print("Error - expected libraries not found, see installation instructions")
    sys.exit(2)
#
# Function for running commands
#
# We run commands as root and as the hdbuser, depending on the context.
#
# Sentinel returned by run_command when suppress_error is set and the
# command fails; callers use it to detect a stopped HANA instance.
HANA_NOT_RUNNING = "HANA not running"
def run_command(command, verbose, return_result=False, suppress_error=False,
                system_id=False):
    """Run an OS command, optionally as the <sid>adm user.

    command        : list of command tokens
    verbose        : when running as <sid>adm, echo the command (and output)
    return_result  : capture and return the command's decoded stdout
    suppress_error : on failure return HANA_NOT_RUNNING instead of exiting
    system_id      : SAP SID; when set, wrap the command in
                     su - <sid>adm -c "..."
    Exits the process with status 2 on command failure unless
    suppress_error is set.
    """
    try:
        if system_id:
            # run as the HANA administration user for this SID
            hdbuser = system_id.lower() + "adm"
            command = ['su', '-', hdbuser, '-c'] + \
                [" ".join(str(x) for x in command)]
            if verbose:
                print("calling: " + " ".join(str(x) for x in command))
            if return_result:
                bytestring = check_output(command)
                output = bytestring.decode('utf-8')
                print(output)
                return output
            else:
                check_call(command)
        else:
            # run as the current (root) user, discarding output streams
            with open(os.devnull, 'w') as DEVNULL:
                if return_result:
                    bytestring = check_output(command, stderr=DEVNULL)
                    output = bytestring.decode('utf-8')
                    return output
                else:
                    check_call(command, stdout=DEVNULL, stderr=DEVNULL)
    except CalledProcessError as ex:
        if suppress_error:
            # best-effort mode: report failure via sentinel instead of exiting
            return HANA_NOT_RUNNING
        print("Error code: " + str(ex.returncode))
        sys.exit(2)
#
# Define SQL prefix
#
# hdbsql invocation prefix; the userstore key is appended after -U
HDBSQL = ['hdbsql', '-U']
def is_hana_running(system_id, userstore_key, verbose):
    """Return True when the system database reports ACTIVE_STATUS "YES".

    Returns False when HANA is not running; exits with status 2 on any
    unexpected status value.
    """
    GET_HANA_STATUS = "SELECT ACTIVE_STATUS FROM SYS.M_DATABASES"
    result = run_command(HDBSQL + [userstore_key] + [GET_HANA_STATUS],
                         verbose, True, True, system_id=system_id)
    if result == HANA_NOT_RUNNING:
        return False
    status = result.split()[1]
    if status == '"YES"':
        return True
    print("Error - database in unexpected state: " + status)
    sys.exit(2)
def is_tenant_running(system_id, userstore_key, verbose):
    """Return the tenant database's active state as a boolean.

    True for ACTIVE_STATUS "YES", False for "NO" or when HANA itself is not
    running; exits with status 2 on any other value.
    """
    query = ("SELECT ACTIVE_STATUS FROM SYS.M_DATABASES WHERE " +
             "DATABASE_NAME = \"'" + system_id + "'\"")
    result = run_command(HDBSQL + [userstore_key] + [query],
                         verbose, True, True, system_id=system_id)
    if result == HANA_NOT_RUNNING:
        return False
    status = result.split()[1]
    if status == '"YES"':
        return True
    if status == '"NO"':
        return False
    print("Error - tenant database in unexpected state: " + status)
    sys.exit(2)
#
# To close an open backup, we first need to have the backup id
#
def get_backup_id(system_id, userstore_key, verbose):
    """Return the id of the currently open ('prepared') snapshot backup.

    Exits with status 2 when no open snapshot exists.
    """
    GET_BACKUP_ID = "SELECT BACKUP_ID FROM M_BACKUP_CATALOG WHERE " + \
        "ENTRY_TYPE_NAME = \"'data snapshot'\" AND STATE_NAME = \"'prepared'\""
    output = run_command(HDBSQL + [userstore_key] + [GET_BACKUP_ID], verbose,
                         return_result=True, system_id=system_id)
    # split()[1]: skip the first token of hdbsql's output -- presumably the
    # column header precedes the value (same pattern as is_hana_running)
    backup_id = output.split()[1]
    if int(backup_id) == 0:
        print("Error: failed to find open snapshot")
        sys.exit(2)
    return backup_id
#
# Open a HANA backup
#
# We use this helper function so it can be called by other functions
#
def open_backup_internal(ebid, system_id, userstore_key, verbose):
    """Open a HANA storage-snapshot backup (leaves it in 'prepared' state).

    ebid : optional external backup id recorded as the snapshot comment;
           when absent a timestamp-based name is generated.
    """
    OPEN_BACKUP = "BACKUP DATA FOR FULL SYSTEM CREATE SNAPSHOT COMMENT"
    if ebid:
        comment = "'" + ebid + "'"
    else:
        comment = "'" + create_snapshot_name() + "'"
    # run_command is called without return_result, so it returns None; the
    # original bound that None to an unused local, which is dropped here
    run_command(HDBSQL + [userstore_key] + [OPEN_BACKUP + " \"" + \
        comment + "\""], verbose, system_id=system_id)
#
# This is the entry point for the command line option
#
def open_backup(ebid, system_id, userstore_key, verbose):
    """Command-line entry point: open a snapshot backup and report its id."""
    if not system_id:
        print("Error - no SID specified, specify with " + \
            "--SID or in configuration file")
        sys.exit(2)
    open_backup_internal(ebid, system_id, userstore_key, verbose)
    opened_id = get_backup_id(system_id, userstore_key, verbose)
    print("Opened backup: " + opened_id)
#
# Close a HANA backup
#
# We use this helper function so it can be called by other functions
#
def close_backup_internal(ebid, system_id, userstore_key, successful, verbose):
    """Close the currently open snapshot backup and return its id.

    successful : mark the backup SUCCESSFUL (with ebid or a default comment)
                 or UNSUCCESSFUL (snapshot creation timed out).
    """
    CLOSE_BACKUP = "BACKUP DATA FOR FULL SYSTEM CLOSE SNAPSHOT BACKUP_ID"
    backup_id = get_backup_id(system_id, userstore_key, verbose)
    if successful:
        if ebid:
            comment = "\"'" + ebid + "'\""
        else:
            comment = "\"'NetApp snapshot successful'\""
        run_command(HDBSQL + [userstore_key] + [CLOSE_BACKUP] + [backup_id] + \
            ["SUCCESSFUL"] + [comment], verbose, system_id=system_id)
    else:
        comment = "\"'NetApp snapshot creation timed out'\""
        run_command(HDBSQL + [userstore_key] + [CLOSE_BACKUP] + [backup_id] + \
            ["UNSUCCESSFUL"] + [comment], verbose, system_id=system_id)
    return backup_id
#
# This is the entry point for the command line option
#
def close_backup(ebid, system_id, userstore_key, verbose):
    """Command-line entry point: close the open backup as successful."""
    if not system_id:
        print("Error - no SID specified, specify with " + \
            "--SID or in configuration file")
        sys.exit(2)
    closed_id = close_backup_internal(ebid, system_id, userstore_key, True, \
        verbose)
    print("Closed backup: " + closed_id)
#
# This helper function handles the OS-related restore steps
#
def restore_internal(mount_point, snapshot, verbose):
    """Roll a volume back by rsync'ing a snapshot over its active contents.

    mount_point : local mount point of the cloud volume (falsy when the
                  volume was not found on this host)
    snapshot    : name of the snapshot directory under <mount>/.snapshot/
    Exits with status 2 when the mount point or snapshot is missing.
    """
    RSYNC = ["rsync", "-axhv", "--delete", "--progress"]
    if not mount_point:
        # BUG FIX: the original message referenced an undefined name
        # 'cloud_volume', raising NameError instead of printing the error.
        print("Error - volume not found: " +
              "ensure the volume is mounted on the host where this command is " +
              "executed")
        sys.exit(2)
    source = mount_point + "/.snapshot/" + snapshot + "/"
    destination = mount_point + "/"
    if not os.path.exists(source):
        print("Error - snapshot '" + snapshot + "' not found")
        sys.exit(2)
    run_command(RSYNC + [source] + [destination], verbose)
    print("Restore complete")
#
# Generate a default snapshot name
#
def create_snapshot_name():
    """Build a timestamp-based snapshot name.

    Snapshot names may not contain ':' or '.' characters, so every occurrence
    in the ISO-8601 timestamp is replaced with '-'.
    """
    stamp = datetime.datetime.now().isoformat()
    return stamp.translate(str.maketrans({":": "-", ".": "-"}))
import json
# Fallback settings used when values are not supplied on the command line or
# in the configuration file.
DEFAULT_SERVICE_ACCOUNT_FILE_NAME = 'key.json'
DEFAULT_CONFIG_FILE_NAME = 'config.json'
DEFAULT_USERSTORE_KEY = 'SYSTEM'
# Units of this timeout are not determinable from this excerpt -- it is
# consumed by code outside the visible region.
DEFAULT_TIMEOUT = 5
#
# Azure API Integration
#
# We implement snapshot, restore and clone.
#
class ANF():
#
# Get the credentials from the key file and construct the
# ServicePrincipalCredentials
#
def get_auth(self, key_file, verbose):
if not key_file:
key_file = DEFAULT_SERVICE_ACCOUNT_FILE_NAME
try:
with open(key_file) as file:
service_principal = json.load(file)
except:
print("File '" + key_file + "' not found or failed to load")
return ""
credentials = ServicePrincipalCredentials(
client_id = service_principal.get("appId"),
secret = service_principal.get("password"),
tenant = service_principal.get("tenant")
)
return credentials
#
# Lookup the subscription id
# - if there are more than one, warn the user and use the first
#
    def get_subscription_id(self, key_file, verbose):
        """Return the first subscription id visible to the service principal.

        When more than one subscription is available, the first one returned
        by the API is used and (in verbose mode) a warning is printed.
        NOTE(review): if get_auth failed it returns "" -- presumably
        SubscriptionClient("") then fails; confirm the intended behavior.
        """
        credentials = self.get_auth(key_file, verbose)
        subscription_id = ""
        subscription_client = SubscriptionClient(credentials)
        for item in subscription_client.subscriptions.list():
            if not subscription_id:
                subscription_id = item.subscription_id
            elif verbose:
                print("You have more than one subscription id, " + \
                    "using the first one returned; consider setting " + \
                    "subscription_id in configuration file")
        return subscription_id
#
# Read parameters out of the config file
#
def get_config(self, config_file, key_file, system_id, userstore_key,
cloud_volumes, verbose):
# command line argument takes precedence over file name based on SID
# which takes precedence over the default file name
if not config_file:
if system_id:
config_file = system_id + "_" + DEFAULT_CONFIG_FILE_NAME
else:
config_file = DEFAULT_CONFIG_FILE_NAME
if verbose:
print("Loading configuration from '" + config_file + "'")
try:
with open(config_file) as file:
config = json.load(file)
except:
if verbose:
print("File '" + config_file + "' not found or failed to load")
subscription_id = self.get_subscription_id(key_file, verbose)
if not userstore_key:
userstore_key = DEFAULT_USERSTORE_KEY
if cloud_volumes:
cloud_volumes = cloud_volumes.split(",")
return subscription_id, system_id, userstore_key, cloud_volumes, ""
subscription_id = config.get("subscription_id")
if not subscription_id:
subscription_id = self.get_subscription_id(key_file, verbose)
if not system_id:
system_id = config.get("SID")
if not userstore_key:
userstore_key = config.get("userstore_key")
if not userstore_key:
userstore_key = DEFAULT_USERSTORE_KEY
if not cloud_volumes:
cloud_volumes = config.get("cloud_volumes")
else:
cloud_volumes = cloud_volumes.split(",")
return subscription_id, system_id, userstore_key, cloud_volumes, ""
#
# examine an untyped member of a resource group and return true if it is a
# volume
#
| |
}
resource = f'/networks/{networkId}/switch/dscpToCosMappings'
return await self._session.get(metadata, resource)
async def updateNetworkSwitchDscpToCosMappings(self, networkId: str, mappings: list):
"""
**Update the DSCP to CoS mappings**
https://developer.cisco.com/docs/meraki-api-v1/#!update-network-switch-dscp-to-cos-mappings
- networkId (string)
- mappings (array): An array of DSCP to CoS mappings. An empty array will reset the mappings to default.
"""
kwargs = locals()
metadata = {
'tags': ['switch', 'configure', 'dscpToCosMappings'],
'operation': 'updateNetworkSwitchDscpToCosMappings',
}
resource = f'/networks/{networkId}/switch/dscpToCosMappings'
body_params = ['mappings']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return await self._session.put(metadata, resource, payload)
    async def getNetworkSwitchLinkAggregations(self, networkId: str):
        """
        **List link aggregation groups**
        https://developer.cisco.com/docs/meraki-api-v1/#!get-network-switch-link-aggregations

        - networkId (string)
        """

        # metadata identifies the operation for the session layer (logging /
        # error reporting); the GET carries no body or query parameters
        metadata = {
            'tags': ['switch', 'configure', 'linkAggregations'],
            'operation': 'getNetworkSwitchLinkAggregations',
        }
        resource = f'/networks/{networkId}/switch/linkAggregations'

        return await self._session.get(metadata, resource)
async def createNetworkSwitchLinkAggregation(self, networkId: str, **kwargs):
"""
**Create a link aggregation group**
https://developer.cisco.com/docs/meraki-api-v1/#!create-network-switch-link-aggregation
- networkId (string)
- switchPorts (array): Array of switch or stack ports for creating aggregation group. Minimum 2 and maximum 8 ports are supported.
- switchProfilePorts (array): Array of switch profile ports for creating aggregation group. Minimum 2 and maximum 8 ports are supported.
"""
kwargs.update(locals())
metadata = {
'tags': ['switch', 'configure', 'linkAggregations'],
'operation': 'createNetworkSwitchLinkAggregation',
}
resource = f'/networks/{networkId}/switch/linkAggregations'
body_params = ['switchPorts', 'switchProfilePorts']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return await self._session.post(metadata, resource, payload)
async def updateNetworkSwitchLinkAggregation(self, networkId: str, linkAggregationId: str, **kwargs):
    """
    **Update a link aggregation group**
    https://developer.cisco.com/docs/meraki-api-v1/#!update-network-switch-link-aggregation

    - networkId (string)
    - linkAggregationId (string)
    - switchPorts (array): Array of switch or stack ports for updating aggregation group. Minimum 2 and maximum 8 ports are supported.
    - switchProfilePorts (array): Array of switch profile ports for updating aggregation group. Minimum 2 and maximum 8 ports are supported.
    """
    op_meta = {
        'tags': ['switch', 'configure', 'linkAggregations'],
        'operation': 'updateNetworkSwitchLinkAggregation',
    }
    endpoint = f'/networks/{networkId}/switch/linkAggregations/{linkAggregationId}'
    allowed = ('switchPorts', 'switchProfilePorts')
    # Drop any keyword argument that is not a documented body parameter.
    body = {key: value for key, value in kwargs.items() if key in allowed}
    return await self._session.put(op_meta, endpoint, body)
async def deleteNetworkSwitchLinkAggregation(self, networkId: str, linkAggregationId: str):
    """
    **Split a link aggregation group into separate ports**
    https://developer.cisco.com/docs/meraki-api-v1/#!delete-network-switch-link-aggregation

    - networkId (string)
    - linkAggregationId (string)
    """
    op_meta = {
        'tags': ['switch', 'configure', 'linkAggregations'],
        'operation': 'deleteNetworkSwitchLinkAggregation',
    }
    endpoint = f'/networks/{networkId}/switch/linkAggregations/{linkAggregationId}'
    return await self._session.delete(op_meta, endpoint)
async def getNetworkSwitchMtu(self, networkId: str):
    """
    **Return the MTU configuration**
    https://developer.cisco.com/docs/meraki-api-v1/#!get-network-switch-mtu

    - networkId (string)
    """
    op_meta = {
        'tags': ['switch', 'configure', 'mtu'],
        'operation': 'getNetworkSwitchMtu',
    }
    endpoint = f'/networks/{networkId}/switch/mtu'
    return await self._session.get(op_meta, endpoint)
async def updateNetworkSwitchMtu(self, networkId: str, **kwargs):
    """
    **Update the MTU configuration**
    https://developer.cisco.com/docs/meraki-api-v1/#!update-network-switch-mtu

    - networkId (string)
    - defaultMtuSize (integer): MTU size for the entire network. Default value is 9578.
    - overrides (array): Override MTU size for individual switches or switch profiles. An empty array will clear overrides.
    """
    op_meta = {
        'tags': ['switch', 'configure', 'mtu'],
        'operation': 'updateNetworkSwitchMtu',
    }
    endpoint = f'/networks/{networkId}/switch/mtu'
    allowed = ('defaultMtuSize', 'overrides')
    # Drop any keyword argument that is not a documented body parameter.
    body = {key: value for key, value in kwargs.items() if key in allowed}
    return await self._session.put(op_meta, endpoint, body)
async def getNetworkSwitchPortSchedules(self, networkId: str):
    """
    **List switch port schedules**
    https://developer.cisco.com/docs/meraki-api-v1/#!get-network-switch-port-schedules

    - networkId (string)
    """
    op_meta = {
        'tags': ['switch', 'configure', 'portSchedules'],
        'operation': 'getNetworkSwitchPortSchedules',
    }
    endpoint = f'/networks/{networkId}/switch/portSchedules'
    return await self._session.get(op_meta, endpoint)
async def createNetworkSwitchPortSchedule(self, networkId: str, name: str, **kwargs):
    """
    **Add a switch port schedule**
    https://developer.cisco.com/docs/meraki-api-v1/#!create-network-switch-port-schedule

    - networkId (string)
    - name (string): The name for your port schedule. Required
    - portSchedule (object): The schedule for switch port scheduling. Schedules are applied to days of the week.
    When it's empty, default schedule with all days of a week are configured.
    Any unspecified day in the schedule is added as a default schedule configuration of the day.
    """
    # Fold the required positional parameter into the body parameters.
    kwargs['name'] = name
    op_meta = {
        'tags': ['switch', 'configure', 'portSchedules'],
        'operation': 'createNetworkSwitchPortSchedule',
    }
    endpoint = f'/networks/{networkId}/switch/portSchedules'
    allowed = ('name', 'portSchedule')
    body = {key: value for key, value in kwargs.items() if key in allowed}
    return await self._session.post(op_meta, endpoint, body)
async def deleteNetworkSwitchPortSchedule(self, networkId: str, portScheduleId: str):
    """
    **Delete a switch port schedule**
    https://developer.cisco.com/docs/meraki-api-v1/#!delete-network-switch-port-schedule

    - networkId (string)
    - portScheduleId (string)
    """
    op_meta = {
        'tags': ['switch', 'configure', 'portSchedules'],
        'operation': 'deleteNetworkSwitchPortSchedule',
    }
    endpoint = f'/networks/{networkId}/switch/portSchedules/{portScheduleId}'
    return await self._session.delete(op_meta, endpoint)
async def updateNetworkSwitchPortSchedule(self, networkId: str, portScheduleId: str, **kwargs):
    """
    **Update a switch port schedule**
    https://developer.cisco.com/docs/meraki-api-v1/#!update-network-switch-port-schedule

    - networkId (string)
    - portScheduleId (string)
    - name (string): The name for your port schedule.
    - portSchedule (object): The schedule for switch port scheduling. Schedules are applied to days of the week.
    When it's empty, default schedule with all days of a week are configured.
    Any unspecified day in the schedule is added as a default schedule configuration of the day.
    """
    op_meta = {
        'tags': ['switch', 'configure', 'portSchedules'],
        'operation': 'updateNetworkSwitchPortSchedule',
    }
    endpoint = f'/networks/{networkId}/switch/portSchedules/{portScheduleId}'
    allowed = ('name', 'portSchedule')
    # Drop any keyword argument that is not a documented body parameter.
    body = {key: value for key, value in kwargs.items() if key in allowed}
    return await self._session.put(op_meta, endpoint, body)
async def getNetworkSwitchQosRules(self, networkId: str):
    """
    **List quality of service rules**
    https://developer.cisco.com/docs/meraki-api-v1/#!get-network-switch-qos-rules

    - networkId (string)
    """
    op_meta = {
        'tags': ['switch', 'configure', 'qosRules'],
        'operation': 'getNetworkSwitchQosRules',
    }
    endpoint = f'/networks/{networkId}/switch/qosRules'
    return await self._session.get(op_meta, endpoint)
async def createNetworkSwitchQosRule(self, networkId: str, vlan: int, **kwargs):
    """
    **Add a quality of service rule**
    https://developer.cisco.com/docs/meraki-api-v1/#!create-network-switch-qos-rule

    - networkId (string)
    - vlan (integer): The VLAN of the incoming packet. A null value will match any VLAN.
    - protocol (string): The protocol of the incoming packet. Can be one of "ANY", "TCP" or "UDP". Default value is "ANY"
    - srcPort (integer): The source port of the incoming packet. Applicable only if protocol is TCP or UDP.
    - srcPortRange (string): The source port range of the incoming packet. Applicable only if protocol is set to TCP or UDP. Example: 70-80
    - dstPort (integer): The destination port of the incoming packet. Applicable only if protocol is TCP or UDP.
    - dstPortRange (string): The destination port range of the incoming packet. Applicable only if protocol is set to TCP or UDP. Example: 70-80
    - dscp (integer): DSCP tag. Set this to -1 to trust incoming DSCP. Default value is 0
    """
    # Fold the required positional parameter into the body parameters.
    kwargs['vlan'] = vlan
    # Validate the enum-style parameter before issuing the request.
    if 'protocol' in kwargs:
        options = ['ANY', 'TCP', 'UDP']
        assert kwargs['protocol'] in options, f'''"protocol" cannot be "{kwargs['protocol']}", & must be set to one of: {options}'''
    op_meta = {
        'tags': ['switch', 'configure', 'qosRules'],
        'operation': 'createNetworkSwitchQosRule',
    }
    endpoint = f'/networks/{networkId}/switch/qosRules'
    allowed = ('vlan', 'protocol', 'srcPort', 'srcPortRange', 'dstPort', 'dstPortRange', 'dscp')
    body = {key: value for key, value in kwargs.items() if key in allowed}
    return await self._session.post(op_meta, endpoint, body)
async def getNetworkSwitchQosRulesOrder(self, networkId: str):
    """
    **Return the quality of service rule IDs by order in which they will be processed by the switch**
    https://developer.cisco.com/docs/meraki-api-v1/#!get-network-switch-qos-rules-order

    - networkId (string)
    """
    op_meta = {
        'tags': ['switch', 'configure', 'qosRules', 'order'],
        'operation': 'getNetworkSwitchQosRulesOrder',
    }
    endpoint = f'/networks/{networkId}/switch/qosRules/order'
    return await self._session.get(op_meta, endpoint)
async def updateNetworkSwitchQosRulesOrder(self, networkId: str, ruleIds: list):
    """
    **Update the order in which the rules should be processed by the switch**
    https://developer.cisco.com/docs/meraki-api-v1/#!update-network-switch-qos-rules-order

    - networkId (string)
    - ruleIds (array): A list of quality of service rule IDs arranged in order in which they should be processed by the switch.
    """
    op_meta = {
        'tags': ['switch', 'configure', 'qosRules', 'order'],
        'operation': 'updateNetworkSwitchQosRulesOrder',
    }
    endpoint = f'/networks/{networkId}/switch/qosRules/order'
    # Only `ruleIds` is a body parameter for this endpoint.
    body = {'ruleIds': ruleIds}
    return await self._session.put(op_meta, endpoint, body)
async def getNetworkSwitchQosRule(self, networkId: str, qosRuleId: str):
    """
    **Return a quality of service rule**
    https://developer.cisco.com/docs/meraki-api-v1/#!get-network-switch-qos-rule

    - networkId (string)
    - qosRuleId (string)
    """
    op_meta = {
        'tags': ['switch', 'configure', 'qosRules'],
        'operation': 'getNetworkSwitchQosRule',
    }
    endpoint = f'/networks/{networkId}/switch/qosRules/{qosRuleId}'
    return await self._session.get(op_meta, endpoint)
async def deleteNetworkSwitchQosRule(self, networkId: str, qosRuleId: str):
    """
    **Delete a quality of service rule**
    https://developer.cisco.com/docs/meraki-api-v1/#!delete-network-switch-qos-rule

    - networkId (string)
    - qosRuleId (string)
    """
    op_meta = {
        'tags': ['switch', 'configure', 'qosRules'],
        'operation': 'deleteNetworkSwitchQosRule',
    }
    endpoint = f'/networks/{networkId}/switch/qosRules/{qosRuleId}'
    return await self._session.delete(op_meta, endpoint)
async def updateNetworkSwitchQosRule(self, networkId: str, qosRuleId: str, **kwargs):
"""
**Update a quality of service rule**
https://developer.cisco.com/docs/meraki-api-v1/#!update-network-switch-qos-rule
- networkId (string)
- qosRuleId (string)
- vlan (integer): The VLAN of the incoming packet. A null value will match any | |
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.run import job_pb2
from google3.cloud.graphite.mmv2.services.google.run import job_pb2_grpc
from typing import List
class Job(object):
    """Declarative client wrapper for a Cloud Run (alpha) Job resource.

    apply() upserts the resource through the DCL gRPC service and refreshes
    every local field from the server response; delete() removes the
    resource; list() enumerates jobs in a project/location.
    """

    def __init__(
        self,
        name: str = None,
        uid: str = None,
        generation: int = None,
        labels: dict = None,
        annotations: dict = None,
        create_time: str = None,
        update_time: str = None,
        delete_time: str = None,
        expire_time: str = None,
        creator: str = None,
        last_modifier: str = None,
        client: str = None,
        client_version: str = None,
        launch_stage: str = None,
        binary_authorization: dict = None,
        template: dict = None,
        observed_generation: int = None,
        terminal_condition: dict = None,
        conditions: list = None,
        execution_count: int = None,
        latest_succeeded_execution: dict = None,
        latest_created_execution: dict = None,
        reconciling: bool = None,
        etag: str = None,
        project: str = None,
        location: str = None,
        service_account_file: str = "",
    ):
        # Only the settable / identifying fields are stored here. The other
        # accepted parameters (uid, generation, labels, timestamps, ...) are
        # not assigned in __init__; they are populated from the server
        # response inside apply() -- presumably output-only fields of the
        # resource (NOTE(review): confirm against the DCL resource spec).
        channel.initialize()
        self.name = name
        self.annotations = annotations
        self.client = client
        self.client_version = client_version
        self.launch_stage = launch_stage
        self.binary_authorization = binary_authorization
        self.template = template
        self.project = project
        self.location = location
        self.service_account_file = service_account_file

    def apply(self):
        """Create or update this job server-side and sync all local fields.

        Builds an ApplyRunAlphaJobRequest from the settable fields, issues
        the RPC, then overwrites every instance attribute (including
        output-only ones) from the response. Returns None.
        """
        stub = job_pb2_grpc.RunAlphaJobServiceStub(channel.Channel())
        request = job_pb2.ApplyRunAlphaJobRequest()
        # Scalar fields: assigned only when their converted value is truthy.
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.annotations):
            request.resource.annotations = Primitive.to_proto(self.annotations)
        if Primitive.to_proto(self.client):
            request.resource.client = Primitive.to_proto(self.client)
        if Primitive.to_proto(self.client_version):
            request.resource.client_version = Primitive.to_proto(self.client_version)
        if JobLaunchStageEnum.to_proto(self.launch_stage):
            request.resource.launch_stage = JobLaunchStageEnum.to_proto(
                self.launch_stage
            )
        # Message-typed fields: CopyFrom when set, ClearField to mark unset.
        if JobBinaryAuthorization.to_proto(self.binary_authorization):
            request.resource.binary_authorization.CopyFrom(
                JobBinaryAuthorization.to_proto(self.binary_authorization)
            )
        else:
            request.resource.ClearField("binary_authorization")
        if JobTemplate.to_proto(self.template):
            request.resource.template.CopyFrom(JobTemplate.to_proto(self.template))
        else:
            request.resource.ClearField("template")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)
        request.service_account_file = self.service_account_file
        response = stub.ApplyRunAlphaJob(request)
        # Mirror the full response back onto this instance, including the
        # output-only fields that __init__ does not store.
        self.name = Primitive.from_proto(response.name)
        self.uid = Primitive.from_proto(response.uid)
        self.generation = Primitive.from_proto(response.generation)
        self.labels = Primitive.from_proto(response.labels)
        self.annotations = Primitive.from_proto(response.annotations)
        self.create_time = Primitive.from_proto(response.create_time)
        self.update_time = Primitive.from_proto(response.update_time)
        self.delete_time = Primitive.from_proto(response.delete_time)
        self.expire_time = Primitive.from_proto(response.expire_time)
        self.creator = Primitive.from_proto(response.creator)
        self.last_modifier = Primitive.from_proto(response.last_modifier)
        self.client = Primitive.from_proto(response.client)
        self.client_version = Primitive.from_proto(response.client_version)
        self.launch_stage = JobLaunchStageEnum.from_proto(response.launch_stage)
        self.binary_authorization = JobBinaryAuthorization.from_proto(
            response.binary_authorization
        )
        self.template = JobTemplate.from_proto(response.template)
        self.observed_generation = Primitive.from_proto(response.observed_generation)
        self.terminal_condition = JobTerminalCondition.from_proto(
            response.terminal_condition
        )
        self.conditions = JobConditionsArray.from_proto(response.conditions)
        self.execution_count = Primitive.from_proto(response.execution_count)
        self.latest_succeeded_execution = JobLatestSucceededExecution.from_proto(
            response.latest_succeeded_execution
        )
        self.latest_created_execution = JobLatestCreatedExecution.from_proto(
            response.latest_created_execution
        )
        self.reconciling = Primitive.from_proto(response.reconciling)
        self.etag = Primitive.from_proto(response.etag)
        self.project = Primitive.from_proto(response.project)
        self.location = Primitive.from_proto(response.location)

    def delete(self):
        """Delete this job server-side.

        The request mirrors apply()'s resource construction; the response
        is discarded and local state is left untouched.
        """
        stub = job_pb2_grpc.RunAlphaJobServiceStub(channel.Channel())
        request = job_pb2.DeleteRunAlphaJobRequest()
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.annotations):
            request.resource.annotations = Primitive.to_proto(self.annotations)
        if Primitive.to_proto(self.client):
            request.resource.client = Primitive.to_proto(self.client)
        if Primitive.to_proto(self.client_version):
            request.resource.client_version = Primitive.to_proto(self.client_version)
        if JobLaunchStageEnum.to_proto(self.launch_stage):
            request.resource.launch_stage = JobLaunchStageEnum.to_proto(
                self.launch_stage
            )
        if JobBinaryAuthorization.to_proto(self.binary_authorization):
            request.resource.binary_authorization.CopyFrom(
                JobBinaryAuthorization.to_proto(self.binary_authorization)
            )
        else:
            request.resource.ClearField("binary_authorization")
        if JobTemplate.to_proto(self.template):
            request.resource.template.CopyFrom(JobTemplate.to_proto(self.template))
        else:
            request.resource.ClearField("template")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)
        response = stub.DeleteRunAlphaJob(request)

    @classmethod
    def list(self, project, location, service_account_file=""):
        """Return the service's list of jobs for *project*/*location*."""
        stub = job_pb2_grpc.RunAlphaJobServiceStub(channel.Channel())
        request = job_pb2.ListRunAlphaJobRequest()
        request.service_account_file = service_account_file
        request.Project = project
        request.Location = location
        return stub.ListRunAlphaJob(request).items

    def to_proto(self):
        """Serialize the settable fields into a RunAlphaJob proto."""
        resource = job_pb2.RunAlphaJob()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.annotations):
            resource.annotations = Primitive.to_proto(self.annotations)
        if Primitive.to_proto(self.client):
            resource.client = Primitive.to_proto(self.client)
        if Primitive.to_proto(self.client_version):
            resource.client_version = Primitive.to_proto(self.client_version)
        if JobLaunchStageEnum.to_proto(self.launch_stage):
            resource.launch_stage = JobLaunchStageEnum.to_proto(self.launch_stage)
        if JobBinaryAuthorization.to_proto(self.binary_authorization):
            resource.binary_authorization.CopyFrom(
                JobBinaryAuthorization.to_proto(self.binary_authorization)
            )
        else:
            resource.ClearField("binary_authorization")
        if JobTemplate.to_proto(self.template):
            resource.template.CopyFrom(JobTemplate.to_proto(self.template))
        else:
            resource.ClearField("template")
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.location):
            resource.location = Primitive.to_proto(self.location)
        return resource
class JobBinaryAuthorization(object):
    """Binary-authorization settings block of a Job."""

    def __init__(self, use_default: bool = None, breakglass_justification: str = None):
        self.use_default = use_default
        self.breakglass_justification = breakglass_justification

    @classmethod
    def to_proto(cls, resource):
        """Serialize to RunAlphaJobBinaryAuthorization; None for falsy input."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobBinaryAuthorization()
        use_default = Primitive.to_proto(resource.use_default)
        if use_default:
            proto.use_default = use_default
        justification = Primitive.to_proto(resource.breakglass_justification)
        if justification:
            proto.breakglass_justification = justification
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a JobBinaryAuthorization from its proto; None-safe."""
        if not resource:
            return None
        return JobBinaryAuthorization(
            use_default=Primitive.from_proto(resource.use_default),
            breakglass_justification=Primitive.from_proto(
                resource.breakglass_justification
            ),
        )
class JobBinaryAuthorizationArray(object):
    """List <-> proto adapter for JobBinaryAuthorization items."""

    @classmethod
    def to_proto(cls, resources):
        if not resources:
            return resources
        return list(map(JobBinaryAuthorization.to_proto, resources))

    @classmethod
    def from_proto(cls, resources):
        return list(map(JobBinaryAuthorization.from_proto, resources))
class JobTemplate(object):
    """Execution template of a Job (RunAlphaJobTemplate)."""

    def __init__(
        self,
        labels: dict = None,
        annotations: dict = None,
        parallelism: int = None,
        task_count: int = None,
        template: dict = None,
    ):
        self.labels = labels
        self.annotations = annotations
        self.parallelism = parallelism
        self.task_count = task_count
        self.template = template

    @classmethod
    def to_proto(cls, resource):
        """Serialize to RunAlphaJobTemplate; None for falsy input."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobTemplate()
        labels = Primitive.to_proto(resource.labels)
        if labels:
            proto.labels = labels
        annotations = Primitive.to_proto(resource.annotations)
        if annotations:
            proto.annotations = annotations
        parallelism = Primitive.to_proto(resource.parallelism)
        if parallelism:
            proto.parallelism = parallelism
        task_count = Primitive.to_proto(resource.task_count)
        if task_count:
            proto.task_count = task_count
        # Message field: CopyFrom when present, ClearField to mark unset.
        inner = JobTemplateTemplate.to_proto(resource.template)
        if inner:
            proto.template.CopyFrom(inner)
        else:
            proto.ClearField("template")
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild a JobTemplate from its proto; None-safe."""
        if not resource:
            return None
        return JobTemplate(
            labels=Primitive.from_proto(resource.labels),
            annotations=Primitive.from_proto(resource.annotations),
            parallelism=Primitive.from_proto(resource.parallelism),
            task_count=Primitive.from_proto(resource.task_count),
            template=JobTemplateTemplate.from_proto(resource.template),
        )
class JobTemplateArray(object):
    """List <-> proto adapter for JobTemplate items."""

    @classmethod
    def to_proto(cls, resources):
        if not resources:
            return resources
        return list(map(JobTemplate.to_proto, resources))

    @classmethod
    def from_proto(cls, resources):
        return list(map(JobTemplate.from_proto, resources))
class JobTemplateTemplate(object):
    """Task template nested inside JobTemplate (RunAlphaJobTemplateTemplate).

    Describes how each task runs: containers, volumes, retry/timeout
    policy, service account, execution environment, CMEK key and VPC
    access settings.
    """

    def __init__(
        self,
        containers: list = None,
        volumes: list = None,
        max_retries: int = None,
        timeout: str = None,
        service_account: str = None,
        execution_environment: str = None,
        encryption_key: str = None,
        vpc_access: dict = None,
    ):
        self.containers = containers
        self.volumes = volumes
        self.max_retries = max_retries
        self.timeout = timeout
        self.service_account = service_account
        self.execution_environment = execution_environment
        self.encryption_key = encryption_key
        self.vpc_access = vpc_access

    @classmethod
    def to_proto(self, resource):
        """Serialize *resource* to its proto form; returns None for falsy input."""
        if not resource:
            return None
        res = job_pb2.RunAlphaJobTemplateTemplate()
        # Repeated message fields are extended only when the converted list
        # is non-empty.
        if JobTemplateTemplateContainersArray.to_proto(resource.containers):
            res.containers.extend(
                JobTemplateTemplateContainersArray.to_proto(resource.containers)
            )
        if JobTemplateTemplateVolumesArray.to_proto(resource.volumes):
            res.volumes.extend(
                JobTemplateTemplateVolumesArray.to_proto(resource.volumes)
            )
        if Primitive.to_proto(resource.max_retries):
            res.max_retries = Primitive.to_proto(resource.max_retries)
        if Primitive.to_proto(resource.timeout):
            res.timeout = Primitive.to_proto(resource.timeout)
        if Primitive.to_proto(resource.service_account):
            res.service_account = Primitive.to_proto(resource.service_account)
        if JobTemplateTemplateExecutionEnvironmentEnum.to_proto(
            resource.execution_environment
        ):
            res.execution_environment = (
                JobTemplateTemplateExecutionEnvironmentEnum.to_proto(
                    resource.execution_environment
                )
            )
        if Primitive.to_proto(resource.encryption_key):
            res.encryption_key = Primitive.to_proto(resource.encryption_key)
        # Message field: CopyFrom when present, ClearField to mark unset.
        if JobTemplateTemplateVPCAccess.to_proto(resource.vpc_access):
            res.vpc_access.CopyFrom(
                JobTemplateTemplateVPCAccess.to_proto(resource.vpc_access)
            )
        else:
            res.ClearField("vpc_access")
        return res

    @classmethod
    def from_proto(self, resource):
        """Rebuild a JobTemplateTemplate from its proto; None-safe."""
        if not resource:
            return None
        return JobTemplateTemplate(
            containers=JobTemplateTemplateContainersArray.from_proto(
                resource.containers
            ),
            volumes=JobTemplateTemplateVolumesArray.from_proto(resource.volumes),
            max_retries=Primitive.from_proto(resource.max_retries),
            timeout=Primitive.from_proto(resource.timeout),
            service_account=Primitive.from_proto(resource.service_account),
            execution_environment=JobTemplateTemplateExecutionEnvironmentEnum.from_proto(
                resource.execution_environment
            ),
            encryption_key=Primitive.from_proto(resource.encryption_key),
            vpc_access=JobTemplateTemplateVPCAccess.from_proto(resource.vpc_access),
        )
class JobTemplateTemplateArray(object):
    """List <-> proto adapter for JobTemplateTemplate items."""

    @classmethod
    def to_proto(cls, resources):
        if not resources:
            return resources
        return list(map(JobTemplateTemplate.to_proto, resources))

    @classmethod
    def from_proto(cls, resources):
        return list(map(JobTemplateTemplate.from_proto, resources))
class JobTemplateTemplateContainers(object):
    """Container spec inside a task template
    (RunAlphaJobTemplateTemplateContainers): image, entrypoint, env,
    resources, ports and volume mounts.
    """

    def __init__(
        self,
        name: str = None,
        image: str = None,
        command: list = None,
        args: list = None,
        env: list = None,
        resources: dict = None,
        ports: list = None,
        volume_mounts: list = None,
    ):
        self.name = name
        self.image = image
        self.command = command
        self.args = args
        self.env = env
        self.resources = resources
        self.ports = ports
        self.volume_mounts = volume_mounts

    @classmethod
    def to_proto(self, resource):
        """Serialize *resource* to its proto form; returns None for falsy input."""
        if not resource:
            return None
        res = job_pb2.RunAlphaJobTemplateTemplateContainers()
        if Primitive.to_proto(resource.name):
            res.name = Primitive.to_proto(resource.name)
        if Primitive.to_proto(resource.image):
            res.image = Primitive.to_proto(resource.image)
        # Repeated scalar fields (command/args) use extend, not assignment.
        if Primitive.to_proto(resource.command):
            res.command.extend(Primitive.to_proto(resource.command))
        if Primitive.to_proto(resource.args):
            res.args.extend(Primitive.to_proto(resource.args))
        if JobTemplateTemplateContainersEnvArray.to_proto(resource.env):
            res.env.extend(JobTemplateTemplateContainersEnvArray.to_proto(resource.env))
        # Message field: CopyFrom when present, ClearField to mark unset.
        if JobTemplateTemplateContainersResources.to_proto(resource.resources):
            res.resources.CopyFrom(
                JobTemplateTemplateContainersResources.to_proto(resource.resources)
            )
        else:
            res.ClearField("resources")
        if JobTemplateTemplateContainersPortsArray.to_proto(resource.ports):
            res.ports.extend(
                JobTemplateTemplateContainersPortsArray.to_proto(resource.ports)
            )
        if JobTemplateTemplateContainersVolumeMountsArray.to_proto(
            resource.volume_mounts
        ):
            res.volume_mounts.extend(
                JobTemplateTemplateContainersVolumeMountsArray.to_proto(
                    resource.volume_mounts
                )
            )
        return res

    @classmethod
    def from_proto(self, resource):
        """Rebuild a JobTemplateTemplateContainers from its proto; None-safe."""
        if not resource:
            return None
        return JobTemplateTemplateContainers(
            name=Primitive.from_proto(resource.name),
            image=Primitive.from_proto(resource.image),
            command=Primitive.from_proto(resource.command),
            args=Primitive.from_proto(resource.args),
            env=JobTemplateTemplateContainersEnvArray.from_proto(resource.env),
            resources=JobTemplateTemplateContainersResources.from_proto(
                resource.resources
            ),
            ports=JobTemplateTemplateContainersPortsArray.from_proto(resource.ports),
            volume_mounts=JobTemplateTemplateContainersVolumeMountsArray.from_proto(
                resource.volume_mounts
            ),
        )
class JobTemplateTemplateContainersArray(object):
    """List <-> proto adapter for JobTemplateTemplateContainers items."""

    @classmethod
    def to_proto(cls, resources):
        if not resources:
            return resources
        return list(map(JobTemplateTemplateContainers.to_proto, resources))

    @classmethod
    def from_proto(cls, resources):
        return list(map(JobTemplateTemplateContainers.from_proto, resources))
class JobTemplateTemplateContainersEnv(object):
    """Single environment-variable entry of a container spec."""

    def __init__(self, name: str = None, value: str = None, value_source: dict = None):
        self.name = name
        self.value = value
        self.value_source = value_source

    @classmethod
    def to_proto(cls, resource):
        """Serialize to RunAlphaJobTemplateTemplateContainersEnv; None-safe."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobTemplateTemplateContainersEnv()
        name = Primitive.to_proto(resource.name)
        if name:
            proto.name = name
        value = Primitive.to_proto(resource.value)
        if value:
            proto.value = value
        # Message field: CopyFrom when present, ClearField to mark unset.
        source = JobTemplateTemplateContainersEnvValueSource.to_proto(
            resource.value_source
        )
        if source:
            proto.value_source.CopyFrom(source)
        else:
            proto.ClearField("value_source")
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild from proto; None-safe."""
        if not resource:
            return None
        return JobTemplateTemplateContainersEnv(
            name=Primitive.from_proto(resource.name),
            value=Primitive.from_proto(resource.value),
            value_source=JobTemplateTemplateContainersEnvValueSource.from_proto(
                resource.value_source
            ),
        )
class JobTemplateTemplateContainersEnvArray(object):
    """List <-> proto adapter for JobTemplateTemplateContainersEnv items."""

    @classmethod
    def to_proto(cls, resources):
        if not resources:
            return resources
        return list(map(JobTemplateTemplateContainersEnv.to_proto, resources))

    @classmethod
    def from_proto(cls, resources):
        return list(map(JobTemplateTemplateContainersEnv.from_proto, resources))
class JobTemplateTemplateContainersEnvValueSource(object):
    """Indirect env-var value: a reference resolved via a secret key."""

    def __init__(self, secret_key_ref: dict = None):
        self.secret_key_ref = secret_key_ref

    @classmethod
    def to_proto(cls, resource):
        """Serialize to proto; None for falsy input."""
        if not resource:
            return None
        proto = job_pb2.RunAlphaJobTemplateTemplateContainersEnvValueSource()
        # Message field: CopyFrom when present, ClearField to mark unset.
        ref = JobTemplateTemplateContainersEnvValueSourceSecretKeyRef.to_proto(
            resource.secret_key_ref
        )
        if ref:
            proto.secret_key_ref.CopyFrom(ref)
        else:
            proto.ClearField("secret_key_ref")
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Rebuild from proto; None-safe."""
        if not resource:
            return None
        return JobTemplateTemplateContainersEnvValueSource(
            secret_key_ref=JobTemplateTemplateContainersEnvValueSourceSecretKeyRef.from_proto(
                resource.secret_key_ref
            ),
        )
class JobTemplateTemplateContainersEnvValueSourceArray(object):
    """List <-> proto adapter for env value-source items."""

    @classmethod
    def to_proto(cls, resources):
        if not resources:
            return resources
        return list(map(JobTemplateTemplateContainersEnvValueSource.to_proto, resources))

    @classmethod
    def from_proto(cls, resources):
        return list(map(JobTemplateTemplateContainersEnvValueSource.from_proto, resources))
class JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(object):
def __init__(self, secret: str = None, version: str = None):
self.secret = secret
self.version = version
@classmethod
def to_proto(self, resource):
| |
self.L
class LCESN(LayeredESN):
    """Layered (chained) ESN: reservoir i+1 is driven by reservoir i's state."""

    def __reservoir_input_size_rule__(self, reservoir_sizes, echo_params, activation):
        """
        Set up the reservoirs so that the first takes the input signal as input,
        and the rest take the previous reservoir's state as input.
        """
        # NOTE(review): the first reservoir is built without the `activation`
        # argument (Reservoir's default is used) -- confirm this is intended.
        self.reservoirs.append(Reservoir(self.K, reservoir_sizes[0], echo_params[0],
                                         idx=0, debug=self.debug))
        # BUG FIX: in Python 3, zip() returns an iterator which cannot be
        # sliced with [1:]; materialize it as a list first.
        layers = list(zip(reservoir_sizes, echo_params))
        for i, (size, echo_prm) in enumerate(layers[1:]):
            # Reservoir i+1 reads the N-dimensional state of reservoir i.
            self.reservoirs.append(Reservoir(
                input_size=self.reservoirs[i].N, num_units=size, echo_param=echo_prm,
                idx=i + 1, activation=activation, debug=self.debug
            ))

    def __forward_routing_rule__(self, u_n):
        """Chain u_n through every reservoir; return all states concatenated."""
        x_n = np.zeros(0)
        for reservoir in self.reservoirs:
            # Output of each layer becomes the input of the next.
            u_n = reservoir.forward(u_n)
            x_n = np.append(x_n, u_n)
        return x_n
class EESN(LayeredESN):
    """Ensemble ESN: every reservoir is driven directly by the input signal."""

    def __reservoir_input_size_rule__(self, reservoir_sizes, echo_params, activation):
        """Build the reservoirs so that each one takes the raw input signal."""
        for position, (units, echo) in enumerate(zip(reservoir_sizes, echo_params)):
            self.reservoirs.append(Reservoir(
                input_size=self.K, num_units=units, echo_param=echo,
                idx=position, activation=activation, debug=self.debug,
            ))

    def __forward_routing_rule__(self, u_n):
        """Feed u_n to every reservoir and concatenate all their states."""
        pieces = [np.ravel(reservoir.forward(u_n)) for reservoir in self.reservoirs]
        # Seed with an empty float array so the result dtype matches the
        # original np.append-based accumulation.
        return np.concatenate([np.zeros(0)] + pieces)
class EESN_ENCODED(LayeredESN):
def __init__(self, *args, **kwargs):
    """EESN whose reservoir states are compressed by per-reservoir VAEs.

    Keyword arguments consumed here (the rest go to LayeredESN):
    - dims_reduce (list): required; latent size for each reservoir's encoder.
    - train_epochs (int): VAE training epochs, default 2.
    """
    # BUG FIX: the original check was
    #   'dims_reduce' in kwargs.keys() or kwargs['dims_reduce'] is list
    # which raised KeyError when the key was missing and never actually
    # validated the type (`is list` compares identity with the type object).
    assert 'dims_reduce' in kwargs and isinstance(kwargs['dims_reduce'], list), \
        "must include 'dims_reduce' as a list"
    self.dims_reduce = kwargs.pop('dims_reduce')
    # should make this specific to only VAEs but being quick for now
    self.train_epochs = kwargs.pop('train_epochs', 2)
    super(EESN_ENCODED, self).__init__(*args, **kwargs)
    self.data_mean = None
    # normalisation data for reservoir outputs
    self.reservoir_means = [
        np.zeros(N_i) for N_i in self.reservoir_sizes
    ]
    self.reservoir_stds = [
        np.zeros(N_i) for N_i in self.reservoir_sizes
    ]
    # normalisation data for encoder outputs
    self.encoder_means = [
        np.zeros(N_i) for N_i in self.dims_reduce
    ]
    self.encoder_stds = [
        np.zeros(N_i) for N_i in self.dims_reduce
    ]
    # One VAE per reservoir, compressing N_i states down to dims_reduce[i].
    self.encoders = [
        VAE(input_size=self.reservoir_sizes[j],
            latent_variable_size=self.dims_reduce[j],
            epochs=self.train_epochs, batch_size=64)
        for j in range(self.num_reservoirs)
    ]
    # signals of the encoders (encoded state history, one list per reservoir)
    self.encoder_signals = [[] for _ in range(self.num_reservoirs)]
def __reservoir_input_size_rule__(self, reservoir_sizes, echo_params, activation):
    """Build the reservoirs so that each one takes the raw input signal
    (ensemble layout, identical to EESN)."""
    for position, (units, echo) in enumerate(zip(reservoir_sizes, echo_params)):
        self.reservoirs.append(Reservoir(
            input_size=self.K, num_units=units, echo_param=echo,
            idx=position, activation=activation, debug=self.debug,
        ))
def __forward_routing_rule__(self, u_n):
    """Feed u_n to every reservoir, z-score its state, encode it with the
    matching VAE, log the code, and return all codes concatenated.
    """
    x_n = np.zeros(0)
    for i, (reservoir, encoder) in enumerate(zip(self.reservoirs, self.encoders)):
        output = np.array(reservoir.forward(u_n))
        # Normalise with statistics gathered in train(); the stds computed
        # there carry a +1e-8 floor, so this division is safe after training.
        output -= self.reservoir_means[i]
        output /= self.reservoir_stds[i]
        # encode() returns a tuple; element [0] is used here -- presumably
        # the latent mean (NOTE(review): confirm against the VAE class).
        output = np.array(encoder.encode(Variable(th.FloatTensor(output)))[0].data.numpy())
        # store the encoded signals of each encoder
        self.encoder_signals[i].append(output.tolist())
        x_n = np.append(x_n, output)
    return x_n
def train(self, X, y, debug_info=False, add_bias=True):
""" (needs different train() because reservoirs+encoders have to be warmed up+trained one at a time."""
assert X.shape[1] == self.K, "Training data has unexpected dimensionality (%s). K = %d." % (X.shape, self.K)
X = X.reshape(-1, self.K)
y = y.reshape(-1, self.L)
self.data_mean = np.mean(X, axis=0)[0]
T = len(X) - self.init_echo_timesteps
S = np.zeros((T, np.sum(self.dims_reduce)+self.K))
S[:, -self.K:] = X[self.init_echo_timesteps:]
delim = np.array([0]+self.dims_reduce)
for i in range(1, len(delim)):
delim[i] += delim[i-1]
burn_in = X[:self.init_echo_timesteps] # feed a unique input set to all reservoirs
inputs = X[self.init_echo_timesteps:]
# Now send data into each reservoir one at a time,
# and train each encoder one at a time
for i in range(self.num_reservoirs):
reservoir = self.reservoirs[i]
# burn-in period (init echo timesteps) ===============================================
for u_n in burn_in:
_ = reservoir.forward(u_n)
# ==================
N_i = reservoir.N
S_i = np.zeros((np.shape(inputs)[0], N_i)) # reservoir i's states over T timesteps
# Now collect the real state data for encoders to train on
for n, u_n in enumerate(inputs):
S_i[n, :] = reservoir.forward(u_n)
# All reservoirs except the last output into an autoencoder
encoder = self.encoders[i]
res_mean = np.mean(S_i, axis=0)
res_std = np.std(S_i, axis=0) + 1e-8
self.reservoir_means[i] = res_mean
self.reservoir_stds[i] = res_std
S_i -= res_mean
S_i /= res_std
S_i_train = np.array(S_i[:-1000, :])
S_i_test = np.array(S_i[-1000:, :])
encoder.train_full(th.FloatTensor(S_i_train), th.FloatTensor(S_i_test))
#encoder.train_full(th.FloatTensor(S_i))
S_i = np.array(encoder.encode(Variable(th.FloatTensor(S_i)))[0].data.numpy())
enc_mean = np.mean(S_i, axis=0)
enc_std = np.std(S_i, axis=0)+1e-8
self.encoder_means[i] = np.array(enc_mean) # this would be ~0 anyway because we normalise prior to encoding (but still...)
self.encoder_stds[i] = np.array(enc_std)
lb, ub = delim[i], delim[i+1]
S[:, lb:ub] = np.array(S_i)
# inputs = S_i
if debug_info:
print('res %d mean state magnitude: %.4f' % (i, np.mean(np.abs(S_i))))
if add_bias:
S = np.hstack([S, np.ones((S.shape[0], 1))])
D = y[self.init_echo_timesteps:]
# Solve linear system
T1 = np.dot(D.T, S)
# T2 = la.inv(np.dot(S.T, S) + self.regulariser * np.eye(self.K + self.N))
T2 = la.inv(np.dot(S.T, S) + self.regulariser * np.eye(np.sum(self.dims_reduce)+self.K+1))
self.W_out = np.dot(T1, T2)
class ESN2(object):
"""
Echo state network -------------OLD ONE-----------------.
N = reservoir_size; K = input_size; L = output_size
Dimensions, notation guide:
W_in: (N x K) (inputs-to-reservoir weight matrix)
W: (N x N) (reservoir-to-reservoir weight matrix)
W_out: (L x (K+N)) (reservoir-to-output weight matrix)
u(n): K-dimensional input signal at time n.
x(n): N-dimensional reservoir states at time n.
y(n): L-dimensional output signal at time n.
d(n): L-dimensional TRUE output signal at time n.
z(n): (N+K)-dimensional extended system states at time n, [x(n); u(n)].
f: Activation function for the reservoir units.
g: Activation function for the output layer (possibly identity).
"""
    def __init__(self, input_size, output_size, reservoir_size=100, echo_param=0.6,
                 spectral_scale=1.0, init_echo_timesteps=100,
                 regulariser=1e-8, input_weights_scale=(1/100.),
                 debug_mode=False):
        """
        Legacy echo state network.

        :param input_size: K, dimensionality of the input signal u(n)
        :param output_size: L, dimensionality of the output signal y(n)
        :param reservoir_size: N, number of reservoir units
        :param echo_param: leak rate E in the reservoir update
        :param spectral_scale: target spectral radius of W_reservoir
        :param init_echo_timesteps: number of initial inputs fed in before training
        :param regulariser: ridge-regression regularisation strength
        :param input_weights_scale: scale applied to the input weight matrix
        :param debug_mode: if True, print the weight matrices at construction
        """
        # np.random.seed(42)
        # ARCHITECTURE PARAMS
        self.input_size = input_size
        self.reservoir_size = reservoir_size
        self.output_size = output_size
        self.activation_function = np.tanh
        self.input_weights_scale = input_weights_scale
        # RESERVOIR PARAMS
        self.spectral_scale = spectral_scale
        self.reservoir_state = np.zeros((1, self.reservoir_size))
        self.echo_param = echo_param
        self.init_echo_timesteps = init_echo_timesteps  # number of initial runs before training
        self.regulariser = regulariser
        # WEIGHTS
        #self.W_in = (np.random.randn(input_size, reservoir_size) - 0.5)*(1/1000.)
        # binary {-0.5, +0.5} input weights, scaled by input_weights_scale
        self.W_in = ((np.random.rand(input_size, reservoir_size) > 0.5).astype(int) - 0.5) *self.input_weights_scale
        #self.W_in = (np.random.rand(input_size, reservoir_size) - 0.5) *self.input_weights_scale
        # Reservoir-to-reservoir weights (N x N), spectrally normalised below
        self.W_reservoir = []
        # self.__reservoir_norm_spectral_radius_norm_weights__()
        self.__reservoir_norm_spectral_radius_uniform_weights__()
        #self.W_reservoir = np.random.rand(self.reservoir_size, self.reservoir_size)-0.5
        # Reservoir-to-output weights (L x (K+N)); filled in by train()
        self.W_out = []
        self.debug = debug_mode
        if self.debug: print("W_in[:10]: {}".format(self.W_in[:10]))
        if self.debug: print("W_res: {}".format(self.W_reservoir))
        # SOME EXTRA STORED DATA
        self.training_signals = []  # reservoir state over time during training
def copy(self):
return ESN2(self.input_size, self.output_size, self.reservoir_size, self.echo_param,
self.spectral_scale, self.init_echo_timesteps,
self.regulariser, self.input_weights_scale, self.debug)
def reset_reservoir(self):
""" Reset reservoir states to zeros (does not reset W_out weights). """
self.reservoir_state = np.zeros((1, self.reservoir_size))
def __reservoir_norm_spectral_radius_norm_weights__(self):
""" Initialize reservoir weights using standard normal Gaussian. """
return self.__reservoir_norm_spectral_radius__(np.random.randn)
def __reservoir_norm_spectral_radius_uniform_weights__(self):
""" Initialize reservoir weights using uniform [0, 1]. """
return self.__reservoir_norm_spectral_radius__(np.random.rand)
def __reservoir_norm_spectral_radius_binary_weights__(self):
""" Initialize reservoir weights u.a.r. from {0, 1}. """
def binary_distr(d0, d1):
return (np.random.rand(d0, d1) + 0.5).astype(int)
return self.__reservoir_norm_spectral_radius__(binary_distr)
def __reservoir_norm_spectral_radius__(self, weight_distribution_function, offset=0.5):
"""
Initializes the reservoir weights according to some initialization strategy
(e.g. uniform in [0, 1], standard normal).
Then, sets its spectral radius = desired value.
"""
# self.W_reservoir = np.random.rand(reservoir_size, reservoir_size)
self.W_reservoir = weight_distribution_function(self.reservoir_size, self.reservoir_size) - offset
# make the spectral radius < 1 by dividing by the absolute value of the largest eigenvalue.
self.W_reservoir /= max(abs(np.linalg.eig(self.W_reservoir)[0]))
self.W_reservoir *= self.spectral_scale
def __forward_to_res__(self, x_in):
""" x_in = u(n). Puts input signal u(n) into reservoir, returns reservoir states x(n). """
assert np.shape(x_in)[1] == np.shape(self.W_in)[0], "input of {} does not match input weights of {}".format(np.shape(x_in)[1], np.shape(self.W_in)[0])
# in_to_res = W_in u(n+1)
in_to_res = np.dot(x_in, self.W_in)
# res_to_res = W x(n)
res_to_res = np.dot(self.reservoir_state, self.W_reservoir)
assert np.shape(in_to_res) == np.shape(res_to_res), "in-to-res input is {} whereas res-to-res input is {}".format(np.shape(in_to_res), np.shape(res_to_res))
# E = echo parameter; f = activation function
# x(n+1) = (1 - E) x(n) + E f(W x(n) + W_in u(n+1))
self.reservoir_state = (1.0 - self.echo_param)*self.reservoir_state + self.echo_param*self.activation_function(in_to_res + res_to_res)
#res_to_out = np.dot(self.reservoir_state, self.W_out)
return self.reservoir_state.squeeze()
def forward_to_out(self, x_in):
"""
x_in = u(n).
Puts input signal u(n) into reservoir; gets updated reservoir states x(n).
Gets z(n) = [x(n); u(n)]. Returns y(n) = z(n) W_out.T
"""
assert len(self.W_out) > 0, "ESN has not been trained yet!"
assert len(np.shape(x_in)) == 1, "input should have only 1 dimension. Dimension is: {}".format(np.shape(x_in))
# print(np.shape(x_in))
res_out = np.array(self.__forward_to_res__(np.array([x_in])))
x_n = res_out
res_out = np.hstack((res_out, x_in)) # augment the data with the reservoir data
# print(np.shape(res_out))
assert np.shape(res_out)[0] == np.shape(self.W_out)[0], "res output is {}, whereas expected weights are {}".format(np.shape(res_out), np.shape(self.W_out))
# z(n): (N+K); W_out.T: ((N+K)xL); y(n) = z(n) W_out.T
res_to_out = np.dot(res_out, self.W_out)
return res_to_out
def train(self, data_X, data_y):
# check that the data dimensions are the same as the input
assert np.shape(data_X)[1] == self.input_size, "input data is {}; expected input size is {}".format(np.shape(data_X)[1], self.input_size)
assert len(np.shape(data_X[0])) == 1, "input should have only 1 dimension"
# first we run the ESN for a few inputs so that the reservoir starts | |
consecutive value filtering" : tuple,
},
"quantity: profiles/protein groups" : df - number of protein groups | number of profiles | data completeness of profiles
"Unique Proteins": list,
"Analysis parameters" : {
"acquisition" : str,
"filename" : str,
##SILAC##
"Ratio H/L count 1 (>= X)" : int,
"Ratio H/L count 2 (>=Y, var<Z)" : int,
"Ratio variability (<Z, count>=Y)" : int,
##LFQ/spectronaut##
"consecutive data points" : int,
"summed MS/MS counts" : int,
},
"0/1 normalized data - mean" : df - mean of all datapoints,
"0/1 normalized data" : df - individual cluster,
"Distances to the median profile" : df - individual cluster,
"Manhattan distances" : df - individual cluster,
"Dynamic Range": df - individual cluster,
"Overview table" : df - individual cluster,
    ##if the user performs the Misclassification Analysis before downloading the dictionary AnalysedDatasets.json##
{"Misclassification Analysis": {
"True: ER" : {
"Recall": int,
"FDR": int,
"Precision": int,
"F1": int
}
"True: NPC" : {...}
...
"Summary": {
"Total - Recall": int,
"Membrane - Recall" : int,
"Av per organelle - Recall": int,
"Median per organelle - Recall" : int,
"Av precision organelles" : int,
"Av F1 organelles" : int,
"Av F1 all clusters" : int,
}
}
}
}
Returns:
self:
df_01_filtered_combined: df, "Fraction" is unstacked; "Experiment", "Gene names", "Map", "Exp_Map" are stacked
df_distance_comp: df, no index, column names: "Gene names", "Cluster", "Protein IDs", "Compartment", "Experiment", "Map", "Exp_Map", "distance"
"distance": Manhattan distances for each individual protein of the specified clusters (see self.markerproteins) are stored
df_quantity_pr_pg_combined: df, no index, column names: "filtering", "type", "number of protein groups", "number of profiles",
"data completeness of profiles", "Experiment"
df_dynamicRange_combined: df, no index, column names: "Max", "Min", "Dynamic Range", "Cluster", "Experiment"
unique_proteins_total: dict, key: Experiment name, value: unique protein (groups)
exp_map_names: list of unique Exp_Map - fusions e.g. LFQ_Map1
exp_names: list of unique Experiment names - e.g. LFQ
"""
json_dict = self.json_dict
        #add experiments that are not stored in AnalysedDatasets.json for comparison
#try:
#if len(SpatialDataSet.analysed_datasets_dict.keys())>=1:
# json_dict.update(SpatialDataSet.analysed_datasets_dict)
##except:
#else:
# pass
self.analysis_parameters_total = {}
unique_proteins_total = {}
df_01_combined = pd.DataFrame()
for exp_name in json_dict.keys():
for data_type in json_dict[exp_name].keys():
if data_type == "0/1 normalized data":
df_01_toadd = pd.read_json(json_dict[exp_name][data_type])
df_01_toadd.set_index(["Gene names", "Protein IDs", "Compartment"], inplace=True)
if "Sequence" in df_01_toadd.columns:
df_01_toadd.set_index(["Sequence"], inplace=True, append=True)
df_01_toadd.drop([col for col in df_01_toadd.columns if not col.startswith("normalized profile")], inplace=True)
df_01_toadd.columns = pd.MultiIndex.from_tuples([el.split("?") for el in df_01_toadd.columns], names=["Set", "Map", "Fraction"])
df_01_toadd.rename(columns = {"normalized profile":exp_name}, inplace=True)
df_01_toadd.set_index(pd.Series(["?".join([str(i) for i in el]) for el in df_01_toadd.index.values], name="join"), append=True, inplace=True)
if len(df_01_combined) == 0:
df_01_combined = df_01_toadd.copy()
else:
df_01_combined = pd.concat([df_01_combined,df_01_toadd], sort=False, axis=1)
elif data_type == "quantity: profiles/protein groups" and exp_name == list(json_dict.keys())[0]:
df_quantity_pr_pg_combined = pd.read_json(json_dict[exp_name][data_type])
df_quantity_pr_pg_combined["Experiment"] = exp_name
elif data_type == "quantity: profiles/protein groups" and exp_name != list(json_dict.keys())[0]:
df_quantity_pr_pg_toadd = pd.read_json(json_dict[exp_name][data_type])
df_quantity_pr_pg_toadd["Experiment"] = exp_name
df_quantity_pr_pg_combined = pd.concat([df_quantity_pr_pg_combined, df_quantity_pr_pg_toadd])
elif data_type == "Manhattan distances" and exp_name == list(json_dict.keys())[0]:
df_distances_combined = pd.read_json(json_dict[exp_name][data_type])
df_distances_combined = df_distances_combined.set_index(["Map", "Gene names", "Cluster", "Protein IDs", "Compartment"]).copy()
if "Sequence" in df_distances_combined.columns:
df_distances_combined.set_index(["Sequence"], inplace=True, append=True)
df_distances_combined = df_distances_combined[["distance"]].unstack(["Map"])
df_distances_combined.rename(columns = {"distance":exp_name}, inplace=True)
elif data_type == "Manhattan distances" and exp_name != list(json_dict.keys())[0]:
df_distances_toadd = pd.read_json(json_dict[exp_name][data_type])
df_distances_toadd = df_distances_toadd.set_index(["Map", "Gene names", "Cluster", "Protein IDs", "Compartment"]).copy()
if "Sequence" in df_distances_toadd.columns:
df_distances_toadd.set_index(["Sequence"], inplace=True, append=True)
df_distances_toadd = df_distances_toadd[["distance"]].unstack(["Map"])
df_distances_toadd.rename(columns = {"distance":exp_name}, inplace=True)
df_distances_combined = pd.concat([df_distances_combined, df_distances_toadd], axis=1)#, join="inner")
elif data_type == "Dynamic Range" and exp_name == list(json_dict.keys())[0]:
df_dynamicRange_combined = pd.read_json(json_dict[exp_name][data_type])
df_dynamicRange_combined["Experiment"] = exp_name
elif data_type == "Dynamic Range" and exp_name != list(json_dict.keys())[0]:
df_dynamicRange_toadd = pd.read_json(json_dict[exp_name][data_type])
df_dynamicRange_toadd["Experiment"] = exp_name
df_dynamicRange_combined = pd.concat([df_dynamicRange_combined, df_dynamicRange_toadd])
# if data_type == "Overview table" and exp_name == list(json_dict.keys())[0]:
# #convert into dataframe
# df_distanceOverview_combined = pd.read_json(json_dict[exp_name][data_type])
# df_distanceOverview_combined["Experiment"] = exp_name
# df_distanceOverview_combined = df_distanceOverview_combined.set_index(["Map", "Cluster", "Experiment"]).unstack(["Cluster"])
#
# elif data_type == "Overview table" and exp_name != list(json_dict.keys())[0]:
# df_distanceOverview_toadd = pd.read_json(json_dict[exp_name][data_type])
# df_distanceOverview_toadd["Experiment"] = exp_name
# df_distanceOverview_toadd = df_distanceOverview_toadd.set_index(["Map", "Cluster", "Experiment"]).unstack(["Cluster"])
# #dataframes will be concatenated, only proteins/Profiles that are in both df will be retained
# df_distanceOverview_combined = pd.concat([df_distanceOverview_combined, df_distanceOverview_toadd])
elif data_type == "Unique Proteins":
unique_proteins_total[exp_name] = json_dict[exp_name][data_type]
elif data_type == "Analysis parameters":
self.analysis_parameters_total[exp_name] = json_dict[exp_name][data_type]
#try:
# for paramters in json_dict[exp_name][data_type].keys():
# if paramters=="acquisition":
# acquisition_loaded.append(json_dict[exp_name][data_type][paramters])
# #elif parameters=="Non valid profiles":
#except:
# continue
#
df_01_combined = df_01_combined.droplevel("join", axis=0)
#filter for consistently quantified proteins (they have to be in all fractions and all maps)
#df_01_filtered_combined = df_01_mean_combined.dropna()
df_01_combined.columns.names = ["Experiment", "Map", "Fraction"]
#reframe it to make it ready for PCA
df_01_filtered_combined = df_01_combined.stack(["Experiment", "Map"]).dropna(axis=0)
#df_01_filtered_combined = df_01_combined.stack(["Experiment"]).dropna(axis=1)
df_01_filtered_combined = df_01_filtered_combined.div(df_01_filtered_combined.sum(axis=1), axis=0)
#df_01_filtered_combined = df_01_combined.copy()
#df_01_filtered_combined.columns.names = ["Experiment", "Fraction", "Map"]
## Replace protein IDs by the unifying protein ID across experiments
#comparison_IDs = pd.Series([split_ids_uniprot(el) for el in df_01_filtered_combined.index.get_level_values("Protein IDs")],
# name="Protein IDs")
#df_01_filtered_combined.index = df_01_filtered_combined.index.droplevel("Protein IDs")
#df_01_filtered_combined.set_index(comparison_IDs, append=True, inplace=True)
##reframe it to make it ready for PCA | dropna: to make sure, that you do consider only fractions that are in all experiments
#df_01_filtered_combined = df_01_filtered_combined.stack(["Experiment", "Map"]).swaplevel(0,1, axis=0).dropna(axis=1)
index_ExpMap = df_01_filtered_combined.index.get_level_values("Experiment")+"_"+df_01_filtered_combined.index.get_level_values("Map")
index_ExpMap.name = "Exp_Map"
df_01_filtered_combined.set_index(index_ExpMap, append=True, inplace=True)
df_distances_combined.columns.names = ["Experiment", "Map"]
series = df_distances_combined.stack(["Experiment", "Map"])
series.name = "distance"
df_distance_comp = series.to_frame()
#fuse Experiment and Map into one column = "Exp_Map"
index_dist_ExpMap = df_distance_comp.index.get_level_values("Experiment")+"_"+df_distance_comp.index.get_level_values("Map")
index_dist_ExpMap.name = "Exp_Map"
df_distance_comp.set_index(index_dist_ExpMap, append=True, inplace=True)
#new
#self.df_distance_comp2 = df_distance_comp.copy()
df_distance_comp.reset_index(level=['Protein IDs'], inplace=True)
df_distance_comp["Protein IDs"] = df_distance_comp["Protein IDs"].str.split(";", expand=True)[0]
df_distance_comp = df_distance_comp.set_index("Protein IDs", append=True).unstack(["Experiment", "Exp_Map", "Map"]).dropna().stack(["Experiment", "Exp_Map", "Map"]).reset_index()
#df_distance_comp.reset_index(inplace=True)
self.unique_proteins_total = unique_proteins_total
self.exp_names = list(df_01_filtered_combined.index.get_level_values("Experiment").unique())
self.exp_map_names = list(index_dist_ExpMap.unique())
self.df_01_filtered_combined = df_01_filtered_combined
#self.df_01_mean_filtered_combined = df_01_mean_filtered_combined
self.df_quantity_pr_pg_combined = df_quantity_pr_pg_combined
self.df_dynamicRange_combined = df_dynamicRange_combined
self.df_distance_comp = df_distance_comp
try:
organism = json_dict[list(json_dict.keys())[0]]["Analysis parameters"]['organism']
except:
organism = "Homo sapiens - Uniprot"
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(organism)))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
self.clusters_for_ranking = self.markerproteins.keys()
def perform_pca_comparison(self):
"""
PCA will be performed, using logarithmized data.
Args:
self:
df_01_filtered_combined: df, which contains 0/1 normalized data for each map - for all experiments
columns: Fractions, e.g. "03K", "06K", "12K", "24K", "80K"
index: "Protein IDs", "Gene names", "Compartment", "Experiment", "Map", "Exp_Map"
df_01_mean_filtered_combined: df, which contains (global) 0/1 normalized data across all maps (mean) - for all experiments and for all protein IDs,
that are consistent throughout all experiments
columns: Fractions, e.g. "03K", "06K", "12K", "24K", "80K"
index: "Gene names", "Protein IDs", "Compartment", "Experiment"
Returns:
self:
df_pca_for_plotting: PCA processed dataframe
index: "Experiment", "Gene names", "Map", "Exp_Map"
columns: "PC1", "PC2", "PC3"
contains only marker genes, that are consistent throughout all maps / experiments
df_global_pca: PCA processed dataframe
index: "Gene names", "Protein IDs", "Compartment", "Experiment",
columns: "PC1", "PC2", "PC3"
contains all protein IDs, that are consistent throughout all experiments
"""
markerproteins = self.markerproteins.copy()
#df_01_filtered_combined = self.df_01_filtered_combined
#df_01_filtered_combined = self.df_01_filtered_combined
df_mean = pd.DataFrame()
for exp in self.exp_names:
df_exp = self.df_01_filtered_combined.stack("Fraction").unstack(["Experiment", "Map","Exp_Map"])[exp].mean(axis=1).to_frame(name=exp)
df_mean = pd.concat([df_mean, df_exp], axis=1)
df_mean = df_mean.rename_axis("Experiment", axis="columns").stack("Experiment").unstack("Fraction")
pca = PCA(n_components=3)
df_pca = pd.DataFrame(pca.fit_transform(df_mean))
df_pca.columns = ["PC1", "PC2", "PC3"]
df_pca.index = df_mean.index
try:
markerproteins["PSMA subunits"] = [item for sublist in [re.findall("PSMA.*",p) for p in markerproteins["Proteasome"]] for item in sublist]
markerproteins["PSMB subunits"] = [item for sublist in [re.findall("PSMB.*",p) for p in markerproteins["Proteasome"]] for item in sublist]
del markerproteins["Proteasome"]
except:
pass
###only one df, make annotation at that time
df_cluster = pd.DataFrame([(k, i) for k, l in markerproteins.items() for i in l], columns=["Cluster", "Gene names"])
df_global_pca = df_pca.reset_index().merge(df_cluster, how="left", on="Gene names")
df_global_pca.Cluster.replace(np.NaN, "Undefined", inplace=True)
self.markerproteins_splitProteasome = markerproteins
self.df_pca = df_pca
self.df_global_pca = df_global_pca
def plot_pca_comparison(self, cluster_of_interest_comparison="Proteasome", multi_choice=["Exp1", "Exp2"]):
"""
A PCA plot for desired experiments (multi_choice) and 1 desired cluster is generated.
Either the maps for every single experiment are displayed individually or in a combined manner
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
multi_choice: list of experiment | |
'3d', '61', '62', '63', '64', '43', '44', '45', '56', '57', '58', '59', '39', '30', '3a', '6e', '6f', '71', '78', '37', '38', '26', '79', '3e', '3f', '41', '42', '49', '21', '22', '5c'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '79', '3e', '3f', '41', '42', '49', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b', '3c', '3d', '61', '62', '63', '64', '43', '44', '45', '56', '57', '68', '69', '60', '6a', '6b', '6c', '6d', '74', '75', '76', '77', '54', '55', '5d', '33', '34', '35', '36', '50', '5a', '31', '58', '59', '39', '30', '3a', '6e', '6f', '71', '78', '37', '38', '26', '5b', '46', '47', '27', '28', '29', '70', '7a', '7b', '7c', '7d', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '2f'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '79', '3e', '3f', '41', '42', '49', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b', '3c', '3d', '61', '62', '63', '7b', '7c', '7d', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '2f', '31', '58', '59', '39', '30', '3a', '6e', '6f', '71', '64', '43', '44', '45', '56', '57', '68', '69', '60', '6a', '6b', '6c', '6d', '74', '75', '76', '77', '54', '55', '5d', '33', '34', '35', '36', '50', '5a', '5b', '46', '47', '27', '28', '29', '70', '7a', '78', '37', '38', '26'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '79', '3e', '3f', '41', '5d', '33', '34', '35', '36', '50', '5a', '5b', '46', '47', '27', '28', '29', '70', '7a', '78', '37', '38', '26', '42', '49', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '2f', '31', '58', '59', '39', '30', '3a', '6e', '6f', '71', '64', '43', '44', '45', '56', '57', '68', '69', '60', '6a', '6b', '6c', '6d', '74', '75', '76', '77', '54', '55', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b', '3c', '3d', '61'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '29', '70', '7a', '78', '37', '38', '26', '42', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '79', '3e', '3f', '41', '5d', '33', '34', '35', '36', '50', '5a', '5b', '46', '47', '27', '28', '6a', '6b', '6c', '6d', '74', '75', '76', '77', '54', '55', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b', '3c', '3d', '61', '2f', '31', '58', '59', '39', '30', '3a', '6e', '6f', '71', '64', '43', '44', '45', '56', '49', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '56', '49', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '46', '47', '27', '28', '6a', '29', '70', '7a', '78', '37', '38', '26', '42', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '79', '3e', '3f', '41', '5d', '33', '3c', '3d', '61', '2f', '31', '58', '59', '39', '30', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c', '6d', '74', '75', '76', '77', '54', '55', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '46', '47', '27', '28', '6a', '29', '70', '7a', '78', '37', '38', '26', '42', '7e', '7f', '20', '2a', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c', '6d', '74', '75', '76', '56', '49', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '55', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '79', '3e', '3f', '41', '5d', '33', '3c', '3d', '61', '2f', '31', '58', '59', '39', '30'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '7f', '20', '2a', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c', '6d', '74', '75', '76', '56', '49', '79', '3e', '3f', '41', '5d', '33', '3c', '3d', '61', '2f', '31', '58', '59', '39', '30', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '55', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '46', '47', '27', '28', '6a', '29', '70', '7a', '78', '37', '38', '26', '42', '7e'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '49', '79', '3e', '3f', '41', '5d', '33', '3c', '3d', '61', '2f', '31', '58', '59', '39', '30', '21', '22', '5c', '67', '32', '3b', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '55', '52', '53', '72', '73', '5e', '5f', '65', '66', '46', '47', '27', '28', '6a', '29', '70', '7a', '78', '37', '38', '26', '42', '7e', '7f', '20', '2a', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c', '6d', '74', '75', '76', '56'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '39', '30', '21', '22', '5c', '67', '32', '3b', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '55', '6d', '74', '75', '76', '49', '79', '3e', '3f', '41', '5d', '33', '3c', '3d', '61', '2f', '31', '58', '59', '56', '52', '53', '72', '73', '5e', '5f', '65', '66', '46', '47', '27', '28', '6a', '29', '70', '7a', '78', '37', '38', '26', '42', '7e', '7f', '20', '2a', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '55', '6d', '74', '75', '76', '49', '79', '3e', '3f', '41', '5d', '7e', '7f', '20', '2a', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c', '33', '3c', '39', '30', '21', '22', '5c', '67', '32', '3b', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '48', '23', '24', '25', | |
timeout=60, ignore_sighup=False, options=login_ssh_options
)
self._second_ssh = CustomSSH(
timeout=60,
ignore_sighup=False,
options={"PubkeyAuthentication": "yes" if self.use_pubkey else "no"},
)
# perform close() on exit or term() on interrupt
atexit.register(self.close)
self.flag_exit = False
for sig in (SIGABRT, SIGINT, SIGTERM):
signal(sig, self.term)
def run(self):
"""
Run the standard JupyterO2 sequence
"""
if (
not self.use_pubkey
): # If using PubKey authentication, we don't need to use a password
self.ask_for_pin()
if self.connect() or self.keep_alive:
self.logger.debug("Starting pexpect interactive mode.")
self.interact()
def ask_for_pin(self):
"""
Prompt for an O2 password
"""
self.__pass = self._pinentry.ask(
prompt="Enter your passphrase: ",
description=f"Connect to O2 server for jupyter {self.subcommand}",
error="No password entered",
validator=lambda x: x is not None and len(x) > 0,
)
self._pinentry.close()
    def connect(self):
        """
        Connect to Jupyter
        First SSH into an interactive node and run jupyter.
        Then SSH into that node to set up forwarding.
        Finally, open the jupyter notebook page in the browser.
        :return: True if connection is successful
        """
        # start login ssh
        self.logger.info(f"Connecting to {self.user}@{self.host}")
        # first 2FA code, if any were supplied
        code = self.codes_2fa[0] if self.codes_2fa else None
        if not self._login_ssh.login(
            self.host,
            self.user,
            self.__pass,
            code,
            login_timeout=self._login_ssh.timeout,
        ):
            return False
        self.logger.debug("Connected.")
        # get the login hostname (needed to reconnect to the same login node later)
        jp_login_host = self._login_ssh.get_hostname()
        self.logger.info(f"Hostname: {jp_login_host}\n")
        if self.run_internal_session:
            # start an interactive session and get the name of the interactive node
            jp_interactive_host = self.start_interactive_session(self._login_ssh)
        else:
            jp_interactive_host = None
        # start jupyter and get the URL
        jp_site = self.start_jupyter(self._login_ssh)
        if self.run_internal_session and self.run_second_ssh:
            # log in to the second ssh
            self.logger.info("\nStarting a second connection to the login node.")
            # second 2FA code if two were provided, otherwise reuse the first
            code = self.codes_2fa[:2][-1] if self.codes_2fa else None
            if not self._second_ssh.login(
                jp_login_host,
                self.user,
                self.__pass,
                code,
                login_timeout=self._second_ssh.timeout,
            ):
                return False
            self.logger.debug("Connected.")
            # ssh into the running interactive node
            # (presumably sets up port forwarding to it — see ssh_into_interactive_node)
            if not self.ssh_into_interactive_node(
                self._second_ssh, jp_interactive_host
            ):
                return False
            self._second_ssh.logfile_read = (
                STDOUT_BUFFER  # print any errors/output from self._second_ssh to stdout
            )
        # password is not needed anymore
        self.clear_pass()
        print(f"\nJupyter is ready! Access at:\n{jp_site}")
        # open Jupyter in browser
        if not self.no_browser:
            self.logger.info("Opening in browser...")
            if not self.open_in_browser(jp_site):
                self.logger.error("Please open the Jupyter page manually.")
        # quit XQuartz because the application is not necessary
        # to keep the connection open.
        if not self.keep_xquartz:
            try_quit_xquartz()
        return True
def start_jupyter(self, s):
    """
    Start Jupyter in the given CustomSSH instance.

    :param s: an active CustomSSH
    :return: the site where Jupyter can be accessed
    :raises JupyterO2Error: if jupyter fails to launch or launch is not detected
    """
    # start jupyter
    self.logger.info(f"Starting Jupyter {self.subcommand}.")
    for command in self.init_jupyter_commands:
        s.sendlineprompt(command, silence=False, check_exit_status=True)
    s.sendline(self.jp_call, silence=False)
    s.logfile_read = STDOUT_BUFFER
    # get the address jupyter is running at: build a pattern that matches
    # a URL containing the forwarded port
    site_pat = re.compile(
        JP_SITE_PATTERN_FORMAT.format(port=self.jp_port).encode("utf-8")
    )
    # temporarily substitute the URL pattern for the shell prompt so that
    # s.prompt() returns as soon as jupyter prints its URL
    prompt = s.PROMPT
    s.PROMPT = site_pat
    if not s.prompt():  # timed out; failed to launch jupyter
        raise JupyterO2Error(
            "Failed to launch jupyter, or launch not detected. "
            f"(timed out, {s.timeout})"
        )
    s.PROMPT = prompt
    # s.after holds the text matched by site_pat (the URL)
    jp_site = s.after.decode("utf-8").strip()
    self.logger.debug(f"Jupyter {self.subcommand} started.")
    return jp_site
def start_interactive_session(self, s, sendpass=False):
    """
    Start an interactive session in the given CustomSSH instance.

    :param s: an active CustomSSH
    :param sendpass: when connecting, wait for password request and then
        send password
    :return: the name of the interactive node
    :raises JupyterO2Error: if the session could not be started
    """
    # enter an interactive session
    self.logger.info("Starting an interactive session.")
    if self.logger.isEnabledFor(logging.DEBUG):
        s.logfile_read = STDOUT_BUFFER
    else:
        # outside debug mode only surface srun/ssh diagnostics, and react to
        # known failure messages by logging an explanation and shutting down
        s.logfile_read = FilteredOut(
            STDOUT_BUFFER,
            [b"srun:", b"authenticity", b"unavailable"],
            reactions={
                b"authenticity": self.close_on_known_hosts_error,
                b"No DISPLAY variable set": self.log_x11_error,
                b"srun: error:": self.close_on_srun_error,
                b"in use or unavailable": self.close_on_port_unavailable,
            },
        )
    # Effective timeout: fall back to the session default when srun_timeout
    # is -1 (pexpect's "use the spawn default" sentinel).
    timeout = s.timeout if self.srun_timeout == -1 else self.srun_timeout
    if sendpass:
        s.PROMPT = self.password_request_pattern
        # FIX: pass the computed effective timeout (previously the raw
        # self.srun_timeout was passed while `timeout` was only used in the
        # error message; with pexpect's -1-means-default convention the
        # behavior is the same, but now the reported value is the one used).
        if not s.sendlineprompt(
            self.srun_call, silence=False, timeout=timeout
        )[1]:
            raise JupyterO2Error(
                f"The timeout ({timeout}) was reached without receiving "
                "a password request."
            )
        s.sendpass(self.__pass)  # automatically silences all logfiles in s
    else:
        s.PROMPT = "\\$"  # TODO allow customization if user has different prompt
        if not s.sendlineprompt(
            self.srun_call, silence=False, timeout=timeout
        )[1]:
            raise JupyterO2Error(
                f"The timeout ({timeout}) was reached without receiving a prompt."
            )
    s.logfile_read = None
    # within interactive session: get the name of the interactive node
    s.PROMPT = s.UNIQUE_PROMPT
    s.sendlineprompt("unset PROMPT_COMMAND; PS1='[PEXPECT]\\$ '")
    jp_interactive_host = s.get_hostname().split(".")[0]
    self.logger.debug("Interactive session started.")
    self.logger.info(f"Node: {jp_interactive_host}\n")
    if "login" in jp_interactive_host:
        self.logger.warning("WARNING: jupyter will run on login node!")
    return jp_interactive_host
def close_on_known_hosts_error(self):
    """
    Print a known_hosts error message and close.

    Registered as the reaction to "authenticity" output in
    start_interactive_session.
    """
    self.logger.critical(
        "Could not connect to interactive session.\n"
        "For some reason, the requested node is not recognized "
        "in ssh_known_hosts.\n"
        "If on O2, check with HMS RC."
    )
    self.term()
def log_x11_error(self):
    """
    Print an error message for an X11 error.

    The suggested remedy depends on the platform: macOS users are pointed
    at XQuartz, everyone else at their X11 installation / DISPLAY variable.
    """
    if sys.platform == "darwin":
        message = (
            "X11 error. "
            "You may need to reinstall XQuartz. "
            "Download from https://www.xquartz.org or use brew reinstall xquartz"
        )
    else:
        message = (
            "X11 error. "
            "You may need to reinstall X11 or export the DISPLAY variable."
        )
    self.logger.critical(message)
def close_on_srun_error(self):
    """
    Print a SLURM (srun) error message and close.

    Registered as the reaction to "srun: error:" output in
    start_interactive_session.
    """
    self.logger.critical("Could not start interactive session due to SLURM error.")
    self.term()
def close_on_port_unavailable(self):
    """
    Print a port-unavailable error message and close.

    Registered as the reaction to "in use or unavailable" output in
    start_interactive_session.
    """
    self.logger.critical("The selected port appears to be unavailable.")
    self.term()
def ssh_into_interactive_node(self, s, interactive_host, sendpass=False):
    """
    SSH into an interactive node from within the server and forward its
    connection.

    :param s: an active CustomSSH
    :param interactive_host: the name of the interactive node
    :param sendpass: when connecting, wait for password request and then
        send password
    :return: True if the connection is successful
    :raises JupyterO2Error: if the connection is not successful
    """
    self.logger.info("Connecting to the interactive node.")
    # forward the jupyter port from the interactive node (-N: no remote command)
    jp_interactive_command = (
        f"ssh -N -L {self.jp_port}:127.0.0.1:{self.jp_port} {interactive_host}"
    )
    if not sendpass:
        s.sendline(jp_interactive_command, silence=False)
    else:
        # temporarily match the password request instead of the shell prompt
        saved_prompt = s.PROMPT
        s.PROMPT = self.password_request_pattern
        if not s.sendlineprompt(jp_interactive_command, silence=False)[1]:
            raise JupyterO2Error(f"The timeout ({s.timeout}) was reached.")
        s.PROMPT = saved_prompt
        s.sendpass(self.__pass)
    self.logger.debug("Connected.")
    return True
def open_in_browser(self, site):
    """
    Open ``site`` in the default web browser.

    :param site: URL to open; anything that is not str or bytes is rejected
    :return: True if the browser accepted the URL, False otherwise
    """
    if isinstance(site, (str, bytes)):
        try:
            webbrowser.open(site, new=2)  # new=2: prefer a new browser tab
        except webbrowser.Error as error:
            self.logger.error(f"Error: {error}")
        else:
            return True
    return False
def interact(self):
    """
    Keep the ssh session alive and allow input such as Ctrl-C to close Jupyter.
    """
    self._login_ssh.silence_logs()
    # Choose the output marker that ends the interactive session:
    # the logout message when keeping the shell alive, otherwise the
    # [PEXPECT]$ prompt that reappears once jupyter exits.
    if self.keep_alive:
        marker = b"[PEXPECT]$ logout"
    else:
        marker = b"[PEXPECT]$ "
    interact_filter = FilteredOut(None, marker)
    self._login_ssh.interact(output_filter=interact_filter.exit_on_find)
def clear_pass(self):
    """Drop the stored password and return the result of ``zero()`` on it."""
    # NOTE(review): `zero` appears to scrub the password from memory before
    # the reference is released — confirm against its definition.
    cleared = zero(self.__pass)
    self.__pass = None
    return cleared
def close(self, print_func=print, *__):
    """
    Close JupyterO2.

    Print messages if used in logging.DEBUG mode.

    :param print_func: the function to use to print, allows printing to be
        disabled if necessary,
        using `print_func=lambda x, end=None, flush=None: None`.
    """

    def _print(*args, **kwargs):
        # only emit cleanup chatter when debug logging is enabled
        if self.logger.isEnabledFor(logging.DEBUG):
            print_func(*args, **kwargs)

    _print("Cleaning up\r\n", end="", flush=True)
    # scrub the password and close the pinentry helper first
    self.clear_pass()
    self._pinentry.close()
    # force-close both SSH sessions if they are still open
    if not self._login_ssh.closed:
        _print("Closing login_ssh\n", end="", flush=True)
        self._login_ssh.close(force=True)
    if not self._second_ssh.closed:
        _print("Closing second_ssh\n", end="", flush=True)
        self._second_ssh.close(force=True)
def term(self, *__):
    """
    Terminate JupyterO2 and exit.

    Extra positional arguments are accepted (and ignored) so this can be
    used directly as a signal handler.
    """
    # flag_exit guards against re-entry (e.g. repeated signals)
    if not self.flag_exit:
        self.flag_exit = True
        try:
            self.close()
        except RuntimeError:
            # printing from signal can cause RuntimeError: reentrant call
            self.close(print_func=lambda x, end=None, flush=None: None)
        # close the standard streams and their file descriptors, then exit
        # immediately; os._exit skips atexit handlers
        sys.stdout.close()
        sys.stderr.close()
        sys.stdin.close()
        os.closerange(0, 3)
        os._exit(1)
def main():
# load the config file
config_mgr = ConfigManager()
cfg_locations = config_mgr.cfg_locations
config = config_mgr.config
# parse the command line arguments
pargs = config_mgr.get_arg_parser().parse_args()
pargs = vars(pargs)
# print the current version and exit
if pargs.pop("version"):
print(__version__)
return 0
# generate the config file and exit
gen_config = pargs.pop("generate_config")
if gen_config:
cfg_path = generate_config_file(gen_config)
print(f"Generated config file at:\n {cfg_path}")
return 0
# print the paths where config files are located,
# in descending order of precedence, and exit
if pargs.pop("paths"):
print(
"\n ".join(
["Searching for config file in:"] + CFG_SEARCH_LOCATIONS[::-1]
)
)
print("\n ".join(["Found config file in:"] + cfg_locations[::-1]))
return 0
# configure the logging level
logging.basicConfig(level=logging.INFO, format="%(msg)s")
if pargs.pop("verbose"):
logging.getLogger().setLevel(logging.DEBUG) # set root logger level
logger = logging.getLogger(__name__)
new_version = check_for_updates()
if new_version:
logger.info(f"A new version of jupyter-o2 is available ({new_version})")
if not cfg_locations:
logger.warning("Config file could not be read. Using internal defaults.")
else:
logger.debug(
"Config file(s) read from (in decreasing priority):\n{}\n".format(
"\n".join(cfg_locations[::-1])
)
)
if not pargs["subcommand"]:
default_jp_subcommand = config.get("Defaults", "DEFAULT_JP_SUBCOMMAND")
# # removed error message so that program will use the default subcommand
# JO2_ARG_PARSER.error("the following arguments are required: subcommand")
logger.warning(
f"Jupyter subcommand not provided. Using default: {default_jp_subcommand}"
)
pargs["subcommand"] = default_jp_subcommand
| |
[ P.h02, P.h02, P.su6__tilde__, P.su6 ],
color = [ 'Identity(3,4)' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1954})
# Vertex definitions in UFO (Universal FeynRules Output) style: each Vertex
# pairs a particle content list with color structures, Lorentz structures and
# coupling constants (P.*, L.*, C.* refer to the model's particles, lorentz
# structures and couplings modules).
# NOTE(review): this block appears to be auto-generated model output —
# prefer regenerating from the model over editing by hand.
V_1083 = Vertex(name = 'V_1083',
                particles = [ P.A0, P.A0, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_1145})

V_1084 = Vertex(name = 'V_1084',
                particles = [ P.G0, P.G0, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_2253})

V_1085 = Vertex(name = 'V_1085',
                particles = [ P.G__minus__, P.G__plus__, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_2253})

V_1086 = Vertex(name = 'V_1086',
                particles = [ P.H__minus__, P.H__plus__, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_1145})

V_1087 = Vertex(name = 'V_1087',
                particles = [ P.sl1__plus__, P.sl1__minus__, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_835})

V_1088 = Vertex(name = 'V_1088',
                particles = [ P.sl2__plus__, P.sl2__minus__, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_836})

V_1089 = Vertex(name = 'V_1089',
                particles = [ P.sl3__plus__, P.sl3__minus__, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_837})

V_1090 = Vertex(name = 'V_1090',
                particles = [ P.sl4__plus__, P.sl4__minus__, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_838})

V_1091 = Vertex(name = 'V_1091',
                particles = [ P.sl5__plus__, P.sl5__minus__, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_839})

V_1092 = Vertex(name = 'V_1092',
                particles = [ P.sl6__plus__, P.sl6__minus__, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_840})

V_1093 = Vertex(name = 'V_1093',
                particles = [ P.h01, P.h02, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_1968})

V_1094 = Vertex(name = 'V_1094',
                particles = [ P.A0, P.G0, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_2270})

V_1095 = Vertex(name = 'V_1095',
                particles = [ P.G__plus__, P.H__minus__, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_2270})

V_1096 = Vertex(name = 'V_1096',
                particles = [ P.G__minus__, P.H__plus__, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(3,4)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_2270})

V_1097 = Vertex(name = 'V_1097',
                particles = [ P.su4__tilde__, P.su4, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_844,(1,0):C.GC_856})

V_1098 = Vertex(name = 'V_1098',
                particles = [ P.su5__tilde__, P.su5, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_845,(1,0):C.GC_857})

V_1099 = Vertex(name = 'V_1099',
                particles = [ P.sd1__tilde__, P.sd1, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_829,(1,0):C.GC_847})

V_1100 = Vertex(name = 'V_1100',
                particles = [ P.sd2__tilde__, P.sd2, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_830,(1,0):C.GC_848})

V_1101 = Vertex(name = 'V_1101',
                particles = [ P.sd3__tilde__, P.sd3, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(1,2)*Identity(3,4)', 'Identity(1,4)*Identity(2,3)', 'T(-1,2,1)*T(-1,4,3)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(1,0):C.GC_975,(0,0):C.GC_831,(2,0):C.GC_849})

V_1102 = Vertex(name = 'V_1102',
                particles = [ P.sd4__tilde__, P.sd4, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_832,(1,0):C.GC_850})

V_1103 = Vertex(name = 'V_1103',
                particles = [ P.sd5__tilde__, P.sd5, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_833,(1,0):C.GC_851})

V_1104 = Vertex(name = 'V_1104',
                particles = [ P.sd6__tilde__, P.sd6, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_834,(1,0):C.GC_852})

V_1105 = Vertex(name = 'V_1105',
                particles = [ P.su1__tilde__, P.su1, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_841,(1,0):C.GC_853})

V_1106 = Vertex(name = 'V_1106',
                particles = [ P.su2__tilde__, P.su2, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(1,2)*Identity(3,4)', 'T(-1,2,1)*T(-1,4,3)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(0,0):C.GC_842,(1,0):C.GC_854})

V_1107 = Vertex(name = 'V_1107',
                particles = [ P.su3__tilde__, P.su3, P.su6__tilde__, P.su6 ],
                color = [ 'Identity(1,2)*Identity(3,4)', 'Identity(1,4)*Identity(2,3)', 'T(-1,2,1)*T(-1,4,3)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(1,0):C.GC_976,(0,0):C.GC_843,(2,0):C.GC_855})

V_1108 = Vertex(name = 'V_1108',
                particles = [ P.su6__tilde__, P.su6__tilde__, P.su6, P.su6 ],
                color = [ 'Identity(1,3)*Identity(2,4)', 'Identity(1,4)*Identity(2,3)', 'T(-1,3,1)*T(-1,4,2)', 'T(-1,3,2)*T(-1,4,1)' ],
                lorentz = [ L.SSSS1 ],
                couplings = {(1,0):C.GC_846,(0,0):C.GC_846,(3,0):C.GC_858,(2,0):C.GC_858})

V_1109 = Vertex(name = 'V_1109',
                particles = [ P.go, P.d, P.sd1__tilde__ ],
                color = [ 'T(1,2,3)' ],
                lorentz = [ L.FFS4 ],
                couplings = {(0,0):C.GC_189})

V_1110 = Vertex(name = 'V_1110',
                particles = [ P.g, P.sd1__tilde__, P.sd1 ],
                color = [ 'T(1,3,2)' ],
                lorentz = [ L.VSS1, L.VSS3 ],
                couplings = {(0,0):C.GC_192,(0,1):C.GC_193})

V_1111 = Vertex(name = 'V_1111',
                particles = [ P.go, P.s, P.sd2__tilde__ ],
                color = [ 'T(1,2,3)' ],
                lorentz = [ L.FFS4 ],
                couplings = {(0,0):C.GC_216})

V_1112 = Vertex(name = 'V_1112',
                particles = [ P.g, P.sd2__tilde__, P.sd2 ],
                color = [ 'T(1,3,2)' ],
                lorentz = [ L.VSS1, L.VSS3 ],
                couplings = {(0,0):C.GC_219,(0,1):C.GC_220})

V_1113 = Vertex(name = 'V_1113',
                particles = [ P.go, P.b, P.sd3__tilde__ ],
                color = [ 'T(1,2,3)' ],
                lorentz = [ L.FFS4 ],
                couplings = {(0,0):C.GC_245})

V_1114 = Vertex(name = 'V_1114',
                particles = [ P.g, P.sd3__tilde__, P.sd3 ],
                color = [ 'T(1,3,2)' ],
                lorentz = [ L.VSS1, L.VSS3 ],
                couplings = {(0,0):C.GC_248,(0,1):C.GC_249})

V_1115 = Vertex(name = 'V_1115',
                particles = [ P.go, P.d, P.sd4__tilde__ ],
                color = [ 'T(1,2,3)' ],
                lorentz = [ L.FFS3 ],
                couplings = {(0,0):C.GC_280})

V_1116 = Vertex(name = 'V_1116',
                particles = [ P.g, P.sd4__tilde__, P.sd4 ],
                color = [ 'T(1,3,2)' ],
                lorentz = [ L.VSS1, L.VSS3 ],
                couplings = {(0,0):C.GC_283,(0,1):C.GC_284})

V_1117 = Vertex(name = 'V_1117',
                particles = [ P.go, P.s, P.sd5__tilde__ ],
                color = [ 'T(1,2,3)' ],
                lorentz = [ L.FFS3 ],
                couplings = {(0,0):C.GC_305})

V_1118 = Vertex(name = 'V_1118',
                particles = [ P.g, P.sd5__tilde__, P.sd5 ],
                color = [ 'T(1,3,2)' ],
                lorentz = [ L.VSS1, L.VSS3 ],
                couplings = {(0,0):C.GC_308,(0,1):C.GC_309})

V_1119 = Vertex(name = 'V_1119',
                particles = [ P.go, P.b, P.sd6__tilde__ ],
                color = [ 'T(1,2,3)' ],
                lorentz = [ L.FFS3 ],
                couplings = {(0,0):C.GC_332})

V_1120 = Vertex(name = 'V_1120',
                particles = [ P.g, P.sd6__tilde__, P.sd6 ],
                color = [ 'T(1,3,2)' ],
                lorentz = [ L.VSS1, L.VSS3 ],
                couplings = {(0,0):C.GC_335,(0,1):C.GC_336})

V_1121 = Vertex(name = 'V_1121',
                particles = [ P.go, P.u, P.su1__tilde__ ],
                color = [ 'T(1,2,3)' ],
                lorentz = [ L.FFS4 ],
                couplings = {(0,0):C.GC_556})

V_1122 = Vertex(name = 'V_1122',
                particles = [ P.g, P.su1__tilde__, P.su1 ],
                color = [ 'T(1,3,2)' ],
                lorentz = [ L.VSS1, L.VSS3 ],
                couplings = {(0,0):C.GC_559,(0,1):C.GC_560})

V_1123 = Vertex(name = 'V_1123',
                particles = [ P.go, P.c, P.su2__tilde__ ],
                color = [ 'T(1,2,3)' ],
                lorentz = [ L.FFS4 ],
                couplings = {(0,0):C.GC_607})

V_1124 = Vertex(name = 'V_1124',
                particles = [ P.g, P.su2__tilde__, P.su2 ],
                color = [ 'T(1,3,2)' ],
                lorentz = [ L.VSS1, L.VSS3 ],
                couplings = {(0,0):C.GC_611,(0,1):C.GC_612})

V_1125 = Vertex(name = 'V_1125',
                particles = [ P.go, P.t, P.su3__tilde__ ],
                color = [ 'T(1,2,3)' ],
                lorentz = [ L.FFS4 ],
                couplings = {(0,0):C.GC_661})

V_1126 = Vertex(name = 'V_1126',
                particles = [ P.g, P.su3__tilde__, P.su3 ],
                color = [ 'T(1,3,2)' ],
                lorentz = [ L.VSS1, L.VSS3 ],
                couplings = {(0,0):C.GC_665,(0,1):C.GC_666})

V_1127 = Vertex(name = 'V_1127',
                particles = [ P.go, P.u, P.su4__tilde__ ],
                color = [ 'T(1,2,3)' ],
                lorentz = [ L.FFS3 ],
                couplings = {(0,0):C.GC_721})

V_1128 = Vertex(name = 'V_1128',
                particles = [ P.g, P.su4__tilde__, P.su4 ],
                color = [ 'T(1,3,2)' ],
                lorentz = [ L.VSS1, L.VSS3 ],
                couplings = {(0,0):C.GC_725,(0,1):C.GC_726})

V_1129 = Vertex(name = 'V_1129',
                particles = [ P.go, P.c, P.su5__tilde__ ],
                color = [ 'T(1,2,3)' ],
                lorentz = [ L.FFS3 ],
                couplings = {(0,0):C.GC_765})

V_1130 = Vertex(name = 'V_1130',
                particles = [ P.g, P.su5__tilde__, P.su5 ],
                color = [ 'T(1,3,2)' ],
                lorentz = [ L.VSS1, L.VSS3 ],
                couplings = {(0,0):C.GC_769,(0,1):C.GC_770})

V_1131 = Vertex(name = 'V_1131',
                particles = [ P.go, P.t, P.su6__tilde__ ],
                color = [ 'T(1,2,3)' ],
                lorentz = [ L.FFS3 ],
                couplings = {(0,0):C.GC_811})

V_1132 = Vertex(name = 'V_1132',
                particles = [ P.g, P.su6__tilde__, P.su6 ],
                color = [ 'T(1,3,2)' ],
                lorentz = [ L.VSS1, L.VSS3 ],
                couplings = {(0,0):C.GC_815,(0,1):C.GC_816})
V_1133 = Vertex(name = 'V_1133',
particles = [ | |
corresponding element of data.
'''
r = data[:, :, red].astype(float)
if len(r.shape) == 3 and r.shape[2] > 1:
r = sum(r, 2) / r.shape[2]
n = data[:, :, nir].astype(float)
if len(n.shape) == 3 and n.shape[2] > 1:
n = sum(n, 2) / n.shape[2]
return (n - r) / (n + r)
def bdist(class1, class2):
    '''
    Calculates the Bhattacharyya distance between two classes.

    USAGE:  bd = bdist(class1, class2)

    Arguments:

        `class1`, `class2` (:class:`~spectral.algorithms.algorithms.TrainingClass`)

    Returns:

        A float value for the Bhattacharyya Distance between the classes. This
        function is aliased to :func:`~spectral.algorithms.algorithms.bDistance`.

    References:

        <NAME>. & <NAME>. Remote Sensing Digital Image Analysis: An
        Introduction. (Springer: Berlin, 1999).
    '''
    # The distance is simply the sum of the linear (mean) and quadratic
    # (covariance) terms.
    lin_term, quad_term = bdist_terms(class1, class2)
    return lin_term + quad_term


bDistance = bdist
def bdist_terms(a, b):
    '''
    Calculate the linear and quadratic terms of the Bhattacharyya distance
    between two classes.

    USAGE:  (linTerm, quadTerm) = bDistanceTerms(a, b)

    ARGUMENTS:
        (a, b)              The classes for which to determine the
                            B-distance; each must provide `stats.mean`,
                            `stats.cov` and `stats.log_det_cov`.
    RETURN VALUE:
                            A 2-tuple of the linear and quadratic terms
    '''
    m = a.stats.mean - b.stats.mean
    avgCov = (a.stats.cov + b.stats.cov) / 2
    # Linear (mean-difference) term: (1/8) * m^T * avgCov^{-1} * m.
    # BUG FIX: numpy has no `np.inv`; the matrix inverse is `np.linalg.inv`.
    lin_term = (1 / 8.) * np.dot(np.transpose(m),
                                 np.dot(np.linalg.inv(avgCov), m))
    # Quadratic (covariance) term from the log-determinants.
    quad_term = 0.5 * (log_det(avgCov)
                       - 0.5 * a.stats.log_det_cov
                       - 0.5 * b.stats.log_det_cov)

    return (lin_term, float(quad_term))
def transform_image(matrix, image):
    '''
    Performs linear transformation on all pixels in an image.

    Arguments:

        matrix (:class:`numpy.ndarray`):

            A `CxB` linear transform to apply.

        image (:class:`numpy.ndarray` or :class:`spectral.Image`):

            Image data to transform

    Returns:

        If `image` is an `MxNxB` :class:`numpy.ndarray`, the return will be a
        transformed :class:`numpy.ndarray` with shape `MxNxC`. If `image` is
        :class:`spectral.Image`, the returned object will be a
        :class:`spectral.TransformedImage` object and no transformation of data
        will occur until elements of the object are accessed.

    Raises:

        TypeError: If `image` is neither an ndarray nor a SpyFile.
    '''
    if isinstance(image, np.ndarray):
        # Apply the transform to all pixels at once: flatten the spatial
        # dimensions, right-multiply by matrix^T, restore the shape.
        # Equivalent to (but much faster than) a per-pixel np.dot loop.
        (M, N, B) = image.shape
        return image.reshape(-1, B).astype(float).dot(matrix.T).reshape(M, N, -1)
    elif isinstance(image, SpyFile):
        return TransformedImage(matrix, image)
    else:
        # BUG FIX: raising a plain string is invalid in Python 3
        # ("exceptions must derive from BaseException").
        raise TypeError('Unrecognized image type passed to transform_image.')
def orthogonalize(vecs, start=0):
    '''
    Performs Gram-Schmidt Orthogonalization on a set of vectors.

    Arguments:

        `vecs` (:class:`numpy.ndarray`):
            The set of vectors for which an orthonormal basis will be created.
            If there are `C` vectors of length `B`, `vecs` should be `CxB`.

        `start` (int) [default 0]:
            If `start` > 0, then `vecs[start]` will be assumed to already be
            orthonormal.

    Returns:

        A new `CxB` containing an orthonormal basis for the given vectors.
    '''
    (num_vecs, dim) = vecs.shape
    # Work on a copy of the transpose so each basis vector is a column.
    basis = np.array(np.transpose(vecs))
    eye = np.identity(dim).astype(float)
    for col in range(start, num_vecs):
        if col == 0:
            # The first vector only needs to be normalized.
            basis[:, 0] /= np.linalg.norm(basis[:, 0])
        else:
            # Project the normalized column onto the orthogonal complement
            # of the span of the previous columns, then re-normalize.
            v = basis[:, col] / np.linalg.norm(basis[:, col])
            prev = basis[:, :col]
            proj = eye - prev.dot(np.linalg.inv(prev.T.dot(prev)).dot(prev.T))
            residual = proj.dot(v)
            basis[:, col] = residual / np.linalg.norm(residual)
    return np.transpose(basis)
def unmix(data, members):
    '''
    Perform linear unmixing on image data.

    USAGE: mix = unmix(data, members)

    ARGUMENTS:
        data                The MxNxB image data to be unmixed
        members             An CxB array of C endmembers
    RETURN VALUE:
        mix                 An MxNxC array of endmember fractions.

    unmix performs linear unmixing on the image data.  After calling the
    function, mix[:,:,i] will then represent the fractional abundances
    for the i'th endmember. If the result of unmix is returned into 'mix',
    then an array of indices of greatest fractional endmembers is obtained
    by argmax(mix).

    Note that depending on endmembers given, fractional abundances for
    endmembers may be negative.
    '''
    assert members.shape[1] == data.shape[2], \
        'Matrix dimensions are not aligned.'

    members = members.astype(float)
    # Least-squares unmixing matrix inv(M M^T) M, so that for a pixel x the
    # abundances are f = inv(M M^T) M x.
    # BUG FIX: numpy has no `np.inv`; the matrix inverse is `np.linalg.inv`.
    pi = np.dot(members, np.transpose(members))
    pi = np.dot(np.linalg.inv(pi), members)
    (M, N, B) = data.shape
    unmixed = np.zeros((M, N, members.shape[0]), float)
    for i in range(M):
        for j in range(N):
            unmixed[i, j] = np.dot(pi, data[i, j].astype(float))
    return unmixed
def spectral_angles(data, members):
    '''Calculates spectral angles with respect to given set of spectra.

    Arguments:

        `data` (:class:`numpy.ndarray` or :class:`spectral.Image`):
            An `MxNxB` image for which spectral angles will be calculated.

        `members` (:class:`numpy.ndarray`):
            `CxB` array of spectral endmembers.

    Returns:

        `MxNxC` array of spectral angles.

    Calculates the spectral angles between each vector in data and each of the
    endmembers.  The output of this function (angles) can be used to classify
    the data by minimum spectral angle by calling argmin(angles).
    '''
    assert members.shape[1] == data.shape[2], \
        'Matrix dimensions are not aligned.'

    # Normalize each endmember to unit length.
    endmembers = np.array(members, np.float64)
    member_norms = np.sqrt(np.einsum('ij,ij->i', endmembers, endmembers))
    endmembers /= member_norms[:, np.newaxis]

    # Pixel magnitudes and pixel-vs-endmember dot products; the clipped
    # ratio is the cosine of the spectral angle.
    pixel_norms = np.sqrt(np.einsum('ijk,ijk->ij', data, data))
    cosines = np.einsum('ijk,mk->ijm', data, endmembers)
    cosines = np.clip(cosines / pixel_norms[:, :, np.newaxis], -1, 1)
    return np.arccos(cosines)
def msam(data, members):
    '''Modified SAM scores according to Oshigami, et al [1]. Endmembers are
    mean-subtracted prior to spectral angle calculation. Results are
    normalized such that the maximum value of 1 corresponds to a perfect match
    (zero spectral angle).

    Arguments:

        `data` (:class:`numpy.ndarray` or :class:`spectral.Image`):
            An `MxNxB` image for which spectral angles will be calculated.

        `members` (:class:`numpy.ndarray`):
            `CxB` array of spectral endmembers.

    Returns:

        `MxNxC` array of MSAM scores with maximum value of 1 corresponding
        to a perfect match (zero spectral angle).

    Calculates the spectral angles between each vector in data and each of the
    endmembers.  The output of this function (angles) can be used to classify
    the data by minimum spectral angle by calling argmax(angles).

    References:

    [1] <NAME>, <NAME>, <NAME>, <NAME>,
    <NAME>, <NAME>, <NAME>, <NAME>, and
    <NAME>. 2013. Mineralogical mapping of southern Namibia by application
    of continuum-removal MSAM method to the HyMap data. Int. J. Remote Sens.
    34, 15 (August 2013), 5282-5295.
    '''
    # The modifications to the `spectral_angles` function were contributed by
    # <NAME>.
    assert members.shape[1] == data.shape[2], \
        'Matrix dimensions are not aligned.'

    (nrows, ncols, nbands) = data.shape
    endmembers = np.array(members, np.float64)
    n_members = endmembers.shape[0]

    # Center each endmember (Fisher z trafo type operation) and scale it to
    # unit length.
    for k in range(n_members):
        endmembers[k] -= np.mean(endmembers[k])
        endmembers[k] /= np.sqrt(endmembers[k].dot(endmembers[k]))

    scores = np.zeros((nrows, ncols, n_members), np.float64)
    for r in range(nrows):
        for c in range(ncols):
            # Center and normalize the pixel spectrum the same way.
            pixel = data[r, c] - np.mean(data[r, c])
            pixel /= np.sqrt(pixel.dot(pixel))
            pixel = np.clip(pixel, -1, 1)
            for k in range(n_members):
                # Mineral Index according to Oshigami et al.
                # (Intnl. J. of Remote Sens. 2013): 1 - angle / (pi/2),
                # so 1 corresponds to a perfect match.
                cos_angle = np.clip(pixel.dot(endmembers[k]), -1, 1)
                scores[r, c, k] = 1.0 - np.arccos(cos_angle) / (math.pi / 2)
    return scores
def noise_from_diffs(X, direction='lowerright'):
    '''Estimates noise statistics by taking differences of adjacent pixels.

    Arguments:

        `X` (np.ndarray):
            The data from which to estimate noise statistics. `X` should have
            shape `(nrows, ncols, nbands)`.

        `direction` (str, default "lowerright"):
            The pixel direction along which to calculate pixel differences.
            Matched case-insensitively; must be one of the following:

            'lowerright':
                Take difference with pixel diagonally to lower right
            'lowerleft':
                Take difference with pixel diagonally to lower left
            'right':
                Take difference with pixel to the right
            'lower':
                Take difference with pixel below

    Returns a :class:`~spectral.algorithms.algorithms.GaussianStats` object.
    '''
    # BUG FIX: the value was validated case-insensitively but then compared
    # case-sensitively, so e.g. "LowerRight" passed validation and silently
    # fell through to the "lower" branch. Normalize once and use that.
    direction = direction.lower()
    if direction not in ['lowerright', 'lowerleft', 'right', 'lower']:
        raise ValueError('Invalid `direction` value.')
    if direction == 'lowerright':
        deltas = X[:-1, :-1, :] - X[1:, 1:, :]
    elif direction == 'lowerleft':
        deltas = X[:-1, 1:, :] - X[1:, :-1, :]
    elif direction == 'right':
        deltas = X[:, :-1, :] - X[:, 1:, :]
    else:
        deltas = X[:-1, :, :] - X[1:, :, :]

    # Differences of two equally noisy pixels carry twice the noise
    # variance of a single pixel, so halve the covariance.
    stats = calc_stats(deltas)
    stats.cov /= 2.0
    return stats
class MNFResult(object):
'''Result object returned by :func:`~spectral.algorithms.algorithms.mnf`.
This object contains data associates with a Minimum Noise Fraction
calculation, including signal and noise statistics, as well as the
Noise-Adjusted Principal Components (NAPC). This object can be used to
denoise image data or to reduce its dimensionality.
'''
def __init__(self, signal, noise, napc):
    '''
    Arguments:

        `signal` (:class:`~spectral.GaussianStats`):
            Signal statistics

        `noise` (:class:`~spectral.GaussianStats`):
            Noise statistics

        `napc` (:class:`~spectral.PrincipalComponents`):
            Noise-Adjusted Principal Components
    '''
    # Stored as-is; all derived quantities are computed on demand by the
    # other MNFResult methods.
    self.signal = signal
    self.noise = noise
    self.napc = napc
def _num_from_kwargs(self, **kwargs):
    '''Returns number of components to retain for the given kwargs.

    Exactly one of the keywords must be given: `num` (an explicit component
    count) or `snr` (a minimum SNR, converted via `num_with_snr`).
    '''
    for key in kwargs:
        if key not in ('num', 'snr'):
            raise Exception('Keyword not recognized.')
    num = kwargs.get('num', None)
    snr = kwargs.get('snr', None)
    # Idiom fix: compare against None with `is`/`is not` (PEP 8) instead of
    # the chained `num == snr == None` / `None not in (num, snr)` forms,
    # which rely on equality comparisons with None.
    if num is None and snr is None:
        raise Exception('Must specify either `num` or `snr` keyword.')
    if num is not None and snr is not None:
        raise Exception('Can not specify both `num` and `snr` keywords.')
    if snr is not None:
        num = self.num_with_snr(snr)
    return num
def denoise(self, X, **kwargs):
'''Returns a de-noised version | |
from typing import Tuple
from typing import Union
import numpy as np
from sklearn.utils.validation import check_is_fitted, check_array
from . import LVQBaseClass
from ..objectives import GeneralizedLearningObjective
# The model's learnable parameters as a pair of ndarrays (per the GMLVQ
# docstring these are the prototypes and the omega relevance matrix).
ModelParamsType = Tuple[np.ndarray, np.ndarray]

# Distance functions accepted by GMLVQ (relevance-matrix-aware variants only).
DISTANCES = [
    "adaptive-squared-euclidean",
]

# Solvers accepted by GMLVQ; short aliases ("sgd", "adam", ...) are listed in
# the GMLVQ class docstring.
SOLVERS = [
    "adaptive-moment-estimation",
    "broyden-fletcher-goldfarb-shanno",
    "limited-memory-bfgs",
    "steepest-gradient-descent",
    "waypoint-gradient-descent",
]
class GMLVQ(LVQBaseClass):
r"""Generalized Matrix Learning Vector Quantization
This model uses the :class:`sklvq.objectives.GeneralizedLearningObjective` as its objective
function `[1]`_. In addition to learning the positions of the prototypes it learns a relevance
matrix that is used in the distance functions `[2]`_.
Parameters
----------
distance_type : {"adaptive-squared-euclidean"} or Class, default="squared-euclidean"
Distance function that employs a relevance matrix in its calculation.
- "adaptive-squared-euclidean"
See :class:`sklvq.distances.AdaptiveSquaredEuclidean`
distance_params : Dict, default=None
Parameters passed to init of distance callable
activation_type : {"identity", "sigmoid", "soft+", "swish"} or Class, default="sigmoid"
Parameters passed to init of activation function. See the documentation of the activation
functions for parameters and defaults.
- "identity"
See :class:`sklvq.activations.Identity`
- "sigmoid"
See :class:`sklvq.activations.Sigmoid`
- "soft+"
See :class:`sklvq.activations.SoftPlus`
- "swish"
See :class:`sklvq.activations.Swish`
activation_params : Dict, default=None
Parameters passed to init of activation function. See the documentation of activation
functions for function dependent parameters and defaults.
discriminant_type : {"relative-distance"} or Class, default = "relative-distance"
The discriminant function. Note that different discriminant type may require to rewrite
the ``decision_function`` and ``predict_proba`` methods.
- "relative-distance"
See :class:`sklvq.discriminants.RelativeDistance`
discriminant_params : Dict, default=None
Parameters passed to init of discriminant callable. See the documentation of the
discriminant functions for parameters and defaults.
solver_type : {"sgd", "wgd", "adam", "lbfgs", "bfgs"},
The solver used for optimization
- "sgd" or "steepest-gradient-descent"
See :class:`sklvq.solvers.SteepestGradientDescent`.
- "wgd" or "waypoint-gradient-descent"
See :class:`sklvq.solvers.WaypointGradientDescent`.
- "adam" or "adaptive-moment-estimation"
See :class:`sklvq.solvers.AdaptiveMomentEstimation`.
- "bfgs" or "broyden-fletcher-goldfarb-shanno"
Implementation from scipy package.
- "lbfgs" or "limited-memory-bfgs"
Implementation from scipy package.
solver_params : dict, default=None
Parameters passed to init of solvers. See the documentation of the solvers relevant
parameters and defaults.
prototype_init: "class-conditional-mean" or ndarray, default="class-conditional-mean"
Default will initiate the prototypes to the class conditional mean with a small random
offset. Custom numpy array can be passed to change the initial positions of the prototypes.
prototype_n_per_class: int or np.ndarray, optional, default=1
Default will generate single prototype per class. In the case of unequal number of
prototypes per class is needed, provide this as np.ndarray. For example,
prototype_n_per_class = np.array([1, 6, 3]) this will result in one prototype for the first class,
six for the second, and three for the third. Note that the order needs to be the same as the on in the
classes\_ attribute, which is equal to calling np.unique(labels).
relevance_init : {"identity", "random"} or np.ndarray, default="identity"
Default will initiate the omega matrices to be the identity matrix. The rank of the matrix can be reduced by
setting the ``relevance_n_components`` attribute `[3]`_.
relevance_normalization: bool, optional, default=True
Flag to indicate whether to normalize omega, whenever it is updated, such that the trace of the relevance matrix
is equal to 1.
relevance_n_components: str {"all"} or int, optional, default="all"
For a square relevance matrix use the string "all" (default). For a rectangular relevance matrix use set the
number of components explicitly by providing it as an int.
random_state : int, RandomState instance, default=None
Set the random number generation for reproducibility purposes. Used in random offset of prototypes and
shuffling of the data in the solvers. Potentially, also used in the random generation of relevance matrix.
force_all_finite : {True, "allow-nan"}, default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The possibilities are:
- True: Force all values of array to be finite.
- "allow-nan": accepts only np.nan and pd.NA values in array. Values cannot be infinite.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Class labels for each output.
prototypes_ : ndarray of shape (n_protoypes, n_features)
Positions of the prototypes after ``fit(X, labels)`` has been called.
prototypes_labels_ : ndarray of shape (n_prototypes)
Labels for each prototypes. Labels are indexes to ``classes_``
omega_: ndarray with size depending on initialization, default (n_features, n_features)
Omega matrix that was found during training and defines the relevance matrix ``lambda_``.
lambda_: ndarray of size (n_features, n_features)
The relevance matrix ``omega_.T.dot(omega_)``
omega_hat_: ndarray
The omega matrix found by the eigenvalue decomposition of the relevance matrix ``lambda_``.
The eigenvectors (columns of ``omega_hat_``) can be used to transform the X `[3]`_.
eigenvalues_: ndarray
The corresponding eigenvalues to ``omega_hat_`` found by the eigenvalue decomposition of
the relevance matrix ``lambda_``
References
----------
_`[1]` <NAME>., and <NAME>. (1996) "Generalized Learning Vector Quantization."
Advances in Neural Network Information Processing Systems, 423–429, 1996.
_`[2]` <NAME>., <NAME>., & <NAME>. (2009). "Adaptive Relevance Matrices in
Learning Vector Quantization" Neural Computation, 21(12), 3532–3561, 2009.
_`[3]` <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2012).
"Limited Rank Matrix Learning, discriminative dimension reduction and visualization." Neural
Networks, 26, 159–173, 2012."""
classes_: np.ndarray
prototypes_: np.ndarray
prototypes_labels_: np.ndarray
omega_: np.ndarray
lambda_: np.ndarray
omega_hat_: np.ndarray
eigenvalues_: np.ndarray
    def __init__(
        self,
        distance_type: Union[str, type] = "adaptive-squared-euclidean",
        distance_params: dict = None,
        activation_type: Union[str, type] = "sigmoid",
        activation_params: dict = None,
        discriminant_type: Union[str, type] = "relative-distance",
        discriminant_params: dict = None,
        solver_type: Union[str, type] = "steepest-gradient-descent",
        solver_params: dict = None,
        prototype_init: Union[str, np.ndarray] = "class-conditional-mean",
        prototype_n_per_class: Union[int, np.ndarray] = 1,
        relevance_init="identity",
        relevance_normalization: bool = True,
        relevance_n_components: Union[str, int] = "all",
        relevance_regularization: Union[int, float] = 0,
        random_state: Union[int, np.random.RandomState] = None,
        force_all_finite: Union[str, bool] = True,
    ):
        """Store the hyperparameters; no validation or computation happens here.

        Following the scikit-learn convention, each constructor argument is
        assigned verbatim to an attribute of the same name so that
        ``get_params()``/``set_params()`` keep working.
        """
        # GMLVQ-specific hyperparameters are kept on this instance.
        self.activation_type = activation_type
        self.activation_params = activation_params
        self.discriminant_type = discriminant_type
        self.discriminant_params = discriminant_params
        self.relevance_init = relevance_init
        self.relevance_normalization = relevance_normalization
        self.relevance_n_components = relevance_n_components
        self.relevance_regularization = relevance_regularization
        # The shared LVQ machinery (distance, solver, prototype and random
        # state handling) is initialised by the base class.
        super(GMLVQ, self).__init__(
            distance_type,
            distance_params,
            DISTANCES,
            solver_type,
            solver_params,
            SOLVERS,
            prototype_init,
            prototype_n_per_class,
            random_state,
            force_all_finite,
        )
###########################################################################################
# The "Getter" and "Setter" that are used by the solvers to set and get model params.
###########################################################################################
def set_variables(self, new_variables: np.ndarray) -> None:
"""
Modifies the ``self._variables`` by copying the values of ``new_variables`` into the
memory of ``self._variables``.
Parameters
----------
new_variables : ndarray
1d numpy array that contains all the model parameters in continuous memory
"""
np.copyto(self._variables, new_variables)
if self.relevance_normalization:
GMLVQ._normalise_omega(self.omega_)
def set_model_params(self, new_model_params: ModelParamsType):
"""
Changes the model's internal parameters. Copies the values of model_params into
``self.prototypes_`` and ``self.omega_`` therefor updating the ``self.variables_``
array.
Also normalized the relevance matrix if necessary.
Parameters
----------
new_model_params : tuple of ndarrays
Shapes depend on initialization but in the case of a square relevance matrix:
tuple((n_prototypes, n_features), (n_features, n_features))
"""
new_prototypes, new_omega = new_model_params
self.set_prototypes(new_prototypes)
self.set_omega(new_omega)
if self.relevance_normalization:
GMLVQ._normalise_omega(self.omega_)
def get_model_params(self) -> ModelParamsType:
"""
Returns a tuple of all model parameters. In this case the prototypes and omega matrix.
Returns
-------
ndarray
Returns a tuple of views, i.e., the prototypes and omega matrix.
"""
return self.prototypes_, self.omega_
###########################################################################################
# Specific "getters" and "setters" for GMLVQ
###########################################################################################
def get_omega(self):
"""
Convenience function to return ``self.omega_``
Returns
-------
ndarray, with shape depending on initialization of omega.
"""
return self.omega_
def set_omega(self, omega):
"""
Convenience function that makes sure to copy the value to ``self.omega_`` and not overwrite
it.
Parameters
----------
omega : ndarray with same shape as ``self.omega_``
"""
np.copyto(self.omega_, omega)
###########################################################################################
# Functions to transform the 1D variables array to model parameters and back
###########################################################################################
def to_model_params_view(self, var_buffer: np.ndarray) -> ModelParamsType:
"""
Parameters
----------
var_buffer : ndarray
Array with the same size as the model's variables array as returned
by ``get_variables()``.
Returns
-------
tuple
Returns a tuple with the prototypes and omega matrix as ndarrays.
"""
return (
self.to_prototypes_view(var_buffer),
self.to_omega(var_buffer),
)
def to_prototypes_view(self, var_buffer: np.ndarray) -> np.ndarray:
"""
Returns a view (of the shape of the model's prototypes) into the provided variables
buffer of the same size as the model's variables array.
Parameters
----------
var_buffer : ndarray
Array with the same size as the model's variables array as returned
by ``get_variables()``.
Returns
-------
ndarray of shape (n_prototypes, n_features)
Prototype view into the var_buffer.
"""
return var_buffer[: self._prototypes_size].reshape(self._prototypes_shape)
def to_omega(self, var_buffer: np.ndarray) -> np.ndarray:
"""
Returns a view (of the shape of the model's omega) into the provided variables
buffer of the same size as the model's variables array.
Parameters
----------
var_buffer : ndarray
Array with the same size as | |
<gh_stars>0
import setuptools
import distutils.command.build
from distutils.errors import DistutilsSetupError
import distutils.sysconfig
import distutils.spawn
import hashlib
import os
import re
import shutil
import StringIO
import sys
import tarfile
import tempfile
import urllib2
import urlparse
import zipfile
#
# Things that need to be built
#
# Ilastik
# vigra-numpy
# libhdf5 - so that CMake has an installation of it
# zlib
# szip
# boost
#
# Things that need to be installed
# QT
#
# Platform-dependent library naming and toolchain selection.
is_win = sys.platform.startswith('win')
if is_win:
    from distutils.msvc9compiler import get_build_version
    # MSVC: import libraries are .lib, shared libraries are .dll, and the
    # Boost-style toolset tag is derived from the compiler version
    # (e.g. build version 9.0 -> "vc90").
    lib_ext = "lib"
    dll_ext = "dll"
    build_version = get_build_version()
    toolset = "vc%d" % (int(build_version) * 10)
else:
    # Unix: shared objects serve both roles; no MSVC toolset tag applies.
    lib_ext = "so"
    dll_ext = "so"
    toolset = None
class BuildWithCMake(setuptools.Command):
    """Configure a fetched source tree with CMake, build it with the native
    make tool and (optionally) install it into a staging directory.

    Subclasses set ``src_command`` to the fetch command providing the source
    tree and may append to ``extra_cmake_options``.
    """

    # NOTE(review): distutils options that take a value conventionally end
    # with "=" (e.g. "cmake="); as written these parse as boolean switches —
    # confirm against how the command line is actually used.
    user_options = [
        ("cmake", None, "Location of CMake executables"),
        ("install-dir", None, "Package install directory")
    ]

    def initialize_options(self):
        # All options start unset; finalize_options() fills them in.
        self.build_lib = None
        self.cmake = None
        self.source_dir = None
        self.target_dir = None
        self.src_command = None
        self.extra_cmake_options = []
        self.install_dir = None
        self.install_root = None
        self.do_install = True

    def finalize_options(self):
        """Resolve the CMake executable and derive all directory defaults."""
        self.set_undefined_options(
            'build', ('build_lib', 'build_lib'))
        self.set_undefined_options('build', ('cmake', 'cmake'))
        if self.cmake is None and is_win:
            # Try CMake's default install location first, then scan PATH.
            path = r"C:\Program Files (x86)\CMake\bin"
            if os.path.exists(path):
                self.cmake = os.path.join(path, "cmake")
            else:
                for path in os.environ["PATH"].split(";"):
                    cmake_path = os.path.join(path, "cmake.exe")
                    if os.path.exists(cmake_path):
                        self.cmake = cmake_path
                        break
                else:
                    raise distutils.command.build.DistutilsOptionError(
                        "CMake is not installed in the default location and --cmake not specified")
        elif self.cmake is None:
            # On Unix, assume "cmake" is on the PATH.
            self.cmake = "cmake"
        if self.source_dir is None:
            self.set_undefined_options(
                self.src_command, ("source_dir", "source_dir"))
        root, leaf = os.path.split(self.source_dir)
        if self.target_dir is None:
            # Out-of-tree build directory placed alongside the source tree.
            self.target_dir = os.path.join(root, "tmp", leaf)
        if self.install_root is None:
            self.install_root = os.path.abspath(
                os.path.join(root, "install", leaf))
        if self.install_dir is None:
            if is_win:
                self.install_dir = self.install_root
            else:
                # "make install DESTDIR=install_root" nests the usual
                # /usr/local prefix below install_root.
                self.install_dir = os.path.join(
                    self.install_root, "usr", "local")

    def get_sub_commands(self):
        # Fetch the source first unless it is already present on disk.
        if os.path.exists(self.source_dir):
            return []
        return [self.src_command]

    def get_cmake_generator(self):
        # CMake generator matching the platform's native make tool.
        if is_win:
            return "NMake Makefiles"
        else:
            return "Unix Makefiles"

    def get_make_program(self):
        if is_win:
            return "nmake"
        return "make"

    def run(self):
        """Run cmake, make and (optionally) make install in target_dir."""
        cmake_args = [self.cmake]
        cmake_args += ["-G", self.get_cmake_generator()]
        if self.do_install and is_win:
            cmake_args.append(
                '"-DCMAKE_INSTALL_PREFIX:PATH=%s"' %
                os.path.abspath(self.install_root))
        target_dir = os.path.abspath(self.target_dir)
        if is_win:
            cmake_args.append('-DCMAKE_BUILD_TYPE:STRING="Release"')
        cmake_args += self.extra_cmake_options
        if not os.path.exists(self.target_dir):
            os.makedirs(self.target_dir)
        # I don't like changing directories. I can't see any way to make
        # cmake build its makefiles in another directory
        old_dir = os.path.abspath(os.curdir)
        source_dir = os.path.abspath(self.source_dir)
        cmake_args.append(source_dir)
        os.chdir(target_dir)
        try:
            try:
                self.spawn(cmake_args)
            except SystemExit:
                # Surface CMake's error log before propagating the failure.
                logfile = os.path.join("CMakeFiles", "CMakeError.log")
                with open(logfile, "r") as fd:
                    for line in fd:
                        self.announce(line)
                raise
            os.chdir(target_dir)
            self.spawn([self.get_make_program()])
            if self.do_install:
                if is_win:
                    self.spawn([self.get_make_program(), "install"])
                else:
                    # Stage the installation below install_root via DESTDIR.
                    self.spawn([self.get_make_program(),
                                "DESTDIR=%s" % os.path.abspath(self.install_root),
                                "install"])
        finally:
            os.chdir(old_dir)
class BuildWithNMake(setuptools.Command):
    """Build a fetched source tree in place by running nmake on its makefile."""

    user_options = []

    def initialize_options(self):
        self.source_dir = None
        self.src_command = None
        self.makefile = None

    def finalize_options(self):
        # Default the source directory from the associated fetch command and
        # fall back to the conventional makefile name.
        if self.source_dir is None:
            self.set_undefined_options(self.src_command,
                                       ('source_dir', 'source_dir'))
        if self.makefile is None:
            self.makefile = "Makefile"

    def run(self):
        # nmake must run from inside the source tree; always restore the
        # previous working directory afterwards.
        previous_dir = os.getcwd()
        os.chdir(self.source_dir)
        try:
            self.spawn(["nmake", "-f", self.makefile])
        finally:
            os.chdir(previous_dir)
class FetchSource(setuptools.Command, object):
    '''Download and untar a tarball or zipfile
    interesting configurable attributes:
    package_name - the name of the package, used to provide defaults for
                   other stuff. Defaults to
                   self.get_command_name().rpartition("_")[-1] (e.g.
                   "fetch_foo" has a default package name of "foo")
    version - the version of the package to be fetched.
    full_name - the full name of the source, defaults to
                "{package_name}-{version}"
    url - the download source. FetchSource untars based on the extension.
        The url is parameterizable using .format(d) where d is a dictionary
        containing the package name, full name and version. For instance,
        "http://my.org/package-{version}.tar.gz" will be parameterizable
        by the version attribute. The default URL assumes that the
        package name is both the owner and repo name of a Github repo
        and that the version is tagged.
    unpack_dir - where to unpack the tarball, relative to the build library
                 directory. Defaults to package name
    source_dir - where the source unpacks to. Defaults to fullname.
    tarball_source_dir - where the tarball unpacks the source. This defaults
                         to the source directory, but FetchSource will move it
                         if not.
    post_fetch - a callable object to be run after the source has been downloaded
                 and untarred, e.g. to apply a patch. Called with the command
                 as the single argument
    member_filter - a function that evaluates a path in the tarball and returns
                    True only if the associated member should be untarred.
    '''
    user_options = [
        ( 'package-name', None, 'Name of the package being fetched' ),
        ( 'github-owner', None, 'Name of the Github owner organization for the repo'),
        ( 'full-name', None, "Package name + version" ),
        ( 'version' , None, 'Revision # of the package' ),
        ( 'url', None, 'URL to download the package' ),
        ( 'unpack-dir', None, 'Where to unpack the source' ),
        ( 'source-dir', None, 'Where the package will be after unpacking'),
        ( 'tarball-source-dir', None, 'The top-level directory of the tarball'),
        ( 'post-fetch', None, 'Callable to run after unpacking' ),
        ( 'member-filter', None, 'Function to filter tarball members' )
    ]

    def initialize_options(self):
        #
        # attributes fetched from build command
        #
        self.build_lib = None
        #
        # command attributes
        #
        self.package_name = None
        self.github_owner = None
        self.full_name = None
        self.version = None
        self.url = None
        self.unpack_dir = None
        self.source_dir = None
        self.tarball_source_dir = None
        self.post_fetch = None
        self.member_filter = None

    def finalize_options(self):
        """Derive defaults for all unset options and expand URL/path templates."""
        self.set_undefined_options(
            'build', ('build_lib', 'build_lib'))
        if self.package_name is None:
            # "fetch_foo" has a default package name of "foo"
            for key, value in self.distribution.command_obj.iteritems():
                if value == self:
                    self.package_name = key.rpartition("_")[-1]
                    break
            else:
                raise DistutilsSetupError(
                    "package-name must be defined")
        if self.github_owner is None:
            self.github_owner = self.package_name
        if self.version is None and self.full_name is None:
            raise DistutilsSetupError(
                "Either one of or both the version and full_name must be defined")
        elif self.full_name is None:
            self.full_name = "{package_name}-{version}".format(**self.__dict__)
        else:
            self.full_name = self.full_name.format(**self.__dict__)
        if self.url is None and self.version is None:
            raise DistutilsSetupError(
                "Setup script must define this command's url")
        elif self.url is None:
            # Default: a tagged Github archive of the eponymous repo.
            self.url = "https://github.com/{github_owner}/{package_name}/archive/{version}.tar.gz"
        self.url = self.url.format(**self.__dict__)
        if self.unpack_dir is None:
            self.unpack_dir = os.path.join(
                self.build_lib, self.package_name)
        else:
            self.unpack_dir = self.unpack_dir.format(**self.__dict__)
        if self.source_dir is None:
            self.source_dir = os.path.join(
                self.unpack_dir, self.full_name)
        else:
            self.source_dir = self.source_dir.format(**self.__dict__)
        if self.tarball_source_dir is None:
            self.tarball_source_dir = self.source_dir

    def run(self):
        """Download the archive, unpack it and run the post_fetch hook."""
        import requests
        self.announce("Fetching " + self.url)
        up = urlparse.urlparse(self.url)
        target = os.path.join(os.path.dirname(self.source_dir),
                              up.path.rpartition('/')[-1])
        if not os.path.exists(self.source_dir):
            os.makedirs(self.source_dir)
        if up.scheme == 'ftp':
            # requests does not speak FTP; stream the download via urllib2.
            fdsrc = urllib2.urlopen(self.url)
            with open(target, "wb") as fd:
                while True:
                    data = fdsrc.read()
                    if len(data) == 0:
                        break
                    fd.write(data)
        else:
            request = requests.get(self.url, stream=True)
            with open(target, "wb") as fd:
                for chunk in request.iter_content(chunk_size = 65536):
                    fd.write(chunk)
        members = None
        if target.lower().endswith(".zip"):
            tarball = zipfile.ZipFile(target)
            if self.member_filter is not None:
                # BUGFIX: namelist is a method — it was previously passed
                # unbound ("tarball.namelist"), which made filter() fail.
                members = filter(self.member_filter, tarball.namelist())
        else:
            tarball = tarfile.open(target)
            if self.member_filter is not None:
                def filter_fn(member, name_filter = self.member_filter):
                    return name_filter(member.name)
                members = filter(filter_fn, tarball.getmembers())
        # NOTE(review): extractall() trusts archive member paths; a malicious
        # archive could escape unpack_dir. Acceptable only while the sources
        # come from pinned, trusted URLs — confirm before widening usage.
        tarball.extractall(self.unpack_dir, members = members)
        tarball.close()
        tarball_source_dir = os.path.join(
            self.unpack_dir, self.tarball_source_dir)
        if self.source_dir != self.tarball_source_dir:
            # The archive's top-level directory differs from the expected
            # source_dir: move it into place, replacing any stale tree.
            if os.path.isdir(self.source_dir):
                shutil.rmtree(self.source_dir)
            shutil.move(tarball_source_dir, self.source_dir)
        if self.post_fetch is not None:
            self.post_fetch(self)
class BuildLibhdf5(BuildWithCMake):
    """CMake build of libhdf5, wired against the staged zlib and szip builds."""

    def initialize_options(self):
        BuildWithCMake.initialize_options(self)
        self.zlib_install_dir = None
        self.szip_install_dir = None
        self.zlib_source_dir = None
        self.szip_source_dir = None
        self.zlib_make_dir = None
        self.szip_make_dir = None

    def finalize_options(self):
        BuildWithCMake.finalize_options(self)
        # Pull the dependency locations from the zlib/szip build commands.
        self.set_undefined_options(
            'build_zlib',
            ('install_dir', 'zlib_install_dir'),
            ('target_dir', 'zlib_make_dir'),
            ('source_dir', 'zlib_source_dir'))
        self.set_undefined_options(
            'build_szip',
            ('install_dir', 'szip_install_dir'),
            ('target_dir', 'szip_make_dir'),
            ('source_dir', 'szip_source_dir'))
        # Library file names follow MSVC vs. Unix conventions.
        if is_win:
            szip_lib = 'szip.' + lib_ext
            zlib_lib = 'zlib.' + lib_ext
        else:
            szip_lib = 'libszip.' + lib_ext
            zlib_lib = 'libz.' + lib_ext
        cache_entries = (
            ("SZIP_LIBRARY_RELEASE", "FILEPATH",
             self.szip_install_dir, os.path.join("lib", szip_lib)),
            ("SZIP_DIR", "PATH", self.szip_make_dir, None),
            ("SZIP_INCLUDE_DIR", "PATH", self.szip_install_dir, "include"),
            ("ZLIB_DIR", "PATH", self.zlib_make_dir, None),
            ("ZLIB_INCLUDE_DIR", "PATH", self.zlib_install_dir, "include"),
            ("ZLIB_LIBRARY_RELEASE", "FILEPATH",
             self.zlib_install_dir, os.path.join("lib", zlib_lib)))
        # Turn each entry into a -D<name>:<type>=<path> CMake cache define.
        for name, cmake_type, base_dir, subpath in cache_entries:
            if subpath is not None:
                location = os.path.abspath(os.path.join(base_dir, subpath))
            else:
                location = os.path.abspath(base_dir)
            self.extra_cmake_options.append(
                '"-D%s:%s=%s"' % (name, cmake_type, location))
class BuildH5Py(setuptools.Command):
user_options = [("hdf5", None, "Location of libhdf5 install")]
command_name = "build_h5py"
def initialize_options(self):
self.hdf5 = None
self.source_dir = None
self.temp_dir = None
self.szip_install_dir = None
self.zlib_install_dir = None
    def finalize_options(self):
        # Default every unset option from the corresponding build/fetch
        # command's already-finalised options.
        if self.hdf5 is None:
            self.set_undefined_options(
                'build_libhdf5', ('install_dir', 'hdf5'))
        if self.szip_install_dir is None:
            self.set_undefined_options(
                'build_szip', ('install_dir', 'szip_install_dir'))
        if self.zlib_install_dir is None:
            self.set_undefined_options(
                'build_zlib', ('install_dir', 'zlib_install_dir'))
        if self.source_dir is None:
            self.set_undefined_options(
                'fetch_h5py', ('source_dir', 'source_dir'))
        if self.temp_dir is None:
            # scratch directory next to the h5py source tree
            self.temp_dir = os.path.join(os.path.dirname(self.source_dir), "tmp")
def run(self):
hdf5 = os.path.abspath(self.hdf5)
for directory, ext in (('bin', 'dll'), ('lib', 'lib')):
hdf5_dll = os.path.join(self.hdf5, directory, "hdf5."+ext)
hdf5_hl_dll = os.path.join(self.hdf5, directory, "hdf5_hl."+ext)
szip_dll = os.path.join(
self.szip_install_dir, directory, "szip."+ext)
zlib_dll = os.path.join(
self.zlib_install_dir, directory, "zlib."+ext)
for src, destfile in ((hdf5_dll, "h5py_hdf5."+ext),
(hdf5_hl_dll, "h5py_hdf5_hl."+ext),
(szip_dll, "szip.dll"+ext),
(zlib_dll, "zlib.dll"+ext)):
dest = os.path.join(self.hdf5, directory, destfile)
self.copy_file(src, dest)
old_curdir = os.path.abspath(os.curdir)
os.chdir(os.path.abspath(self.source_dir))
try:
self.spawn([
| |
while performing the operations are not handled.
see: config.backed_up
filename: Name of the file to create
content: list of strings to store into 'filename'
which: Kind of backup: 'wxg' or 'codegen'"""
if which == 'wxg':
content = [line.encode('utf-8') for line in content] # encode from unicode to utf-8
do_backup = config.preferences.wxg_backup
elif which == 'codegen':
do_backup = config.preferences.codegen_backup
else:
raise NotImplementedError( 'Unknown value "%s" for parameter "which"!' % which )
if os.path.isfile(filename):
# read existing file to check content
chksum_oldcontent = _smart_checksum( _read_file(filename) )
# nothing changed?
chksum_content = _smart_checksum(content)
if chksum_oldcontent == chksum_content:
return
# create the backup file only with the first save
need_backup = do_backup and filename not in config.backed_up and os.path.isfile(filename)
outfile = None
try:
if which=="codegen":
if os.path.isfile(filename):
with open(filename, 'rb') as infile:
win_line_ending = infile.readline().endswith(b"\r\n")
else:
win_line_ending = sys.platform.startswith("win")
if need_backup:
backup_name = filename + config.preferences.backup_suffix
if os.path.isfile(backup_name):
os.remove(backup_name)
os.rename(filename, backup_name)
config.backed_up[filename] = True
# create necessary subdirectories on demand
directory = os.path.dirname(filename)
if directory and not os.path.isdir(directory):
os.makedirs(directory)
outfile = open(filename, 'wb')
for line in content:
if which=="codegen" and win_line_ending:
line = line.replace(b"\n", b"\r\n")
outfile.write(line)
outfile.close()
finally:
if outfile:
outfile.close()
########################################################################################################################
# files and paths
def get_name_for_autosave(filename=None):
    """Return the auto-save companion name for ``filename``.

    Falls back to the currently loaded design (``root.filename``) and, when
    no file is loaded at all, to an unnamed entry in the home directory.
    """
    if not filename:
        filename = root.filename
    if filename:
        directory, basename = os.path.split(filename)
    else:
        directory, basename = config.home_path, ""
    return os.path.join(directory, "#~wxg.autosave~%s#" % basename)
class _Writer(object):
# file with a list compatible interface: append and extend
def __init__(self, filename):
self.outfile = codecs.open(filename, 'w', 'utf-8')
def append(self, line):
self.outfile.write(line)
def extend(self, lines):
for line in lines: self.outfile.write(line)
def close(self):
self.outfile.close()
def autosave_current():
    """Write an automatic backup of the current, un-saved design.

    Returns 0 on error, 1 when there is nothing to save and 2 when the
    backup was written.
    """
    if root.saved:
        # design has no unsaved changes
        return 1
    autosave_name = get_name_for_autosave()
    try:
        writer = _Writer(autosave_name)
        root.write(writer)
        writer.close()
    except EnvironmentError as details:
        logging.warning( _('Saving the autosave file "%s" failed: %s'), autosave_name, details )
        return 0
    return 2
def remove_autosaved(filename=None):
    """Delete the automatic backup for ``filename`` (or the current file),
    if one exists; see get_name_for_autosave()."""
    autosave_name = get_name_for_autosave(filename)
    if not os.path.exists(autosave_name):
        return
    try:
        os.unlink(autosave_name)
    except EnvironmentError:
        logging.exception(_('Internal Error'))
def check_autosaved(filename):
    "Return True if a complete, up-to-date automatic backup exists for filename"
    if filename is not None and filename == root.filename:
        # this happens when reloading, no auto-save-restoring in this case...
        return False
    autosave_name = get_name_for_autosave(filename)
    try:
        if filename:
            # ignore the backup if the original file is newer
            orig = os.stat(filename)
            auto = os.stat(autosave_name)
            if orig.st_mtime > auto.st_mtime: return False
        else:
            if not os.path.exists(autosave_name): return False
            auto = os.stat(autosave_name)
        # check contents for empty or incomplete file
        if auto.st_size < 50: return False
        # a complete .wxg file ends with the closing </application> tag;
        # read only the final 16 bytes to check for it
        f = open(autosave_name, "rb")
        f.seek(-16,2) # from the end
        file_end = f.read(16)
        f.close()
        return b"</application>" in file_end
    except EnvironmentError as inst:
        # File doesn't exists
        if inst.errno == errno.ENOENT:
            pass
        # Security frameworks like SELinux may deny the write access even if
        # the check for write permissions was successful.
        elif inst.errno in [errno.EPERM, errno.EACCES]:
            logging.info(
                _('Ignore autosave permission error: %s'), str(inst))
        else:
            logging.exception(_('Internal Error'))
    return False
def restore_from_autosaved(filename):
    """Copy the content of an auto-saved file to the current file.
    The auto-saved file will still remain as a kind of backup.
    Returns True on success."""
    autosave_name = get_name_for_autosave(filename)
    if not os.access(autosave_name, os.R_OK):
        return False
    try:
        # use a context manager so the file handle is closed instead of
        # being leaked (the original never closed it)
        with codecs.open(autosave_name, encoding='UTF-8') as infile:
            content = infile.read()
        save_file(filename, content, 'wxg')
    except EnvironmentError:
        logging.exception(_('Internal Error'))
        return False
    return True
def init_paths(options):
    "Set all wxGlade related paths; the paths will be stored in config."
    # use directory of the exe in case of frozen packages e.g. PyInstaller or py2exe
    if hasattr(sys, 'frozen'):
        wxglade_path = os.path.dirname(sys.argv[0])
    else:
        wxglade_path = __file__
        # resolve a symlinked script to its real location first
        if os.path.islink(wxglade_path):
            wxglade_path = os.path.realpath(wxglade_path)
        wxglade_path = os.path.dirname(os.path.abspath(wxglade_path))
    # set the program's paths
    config.wxglade_path = wxglade_path
    share_dir = _get_share_path()
    # The directory layout differs between an extract-and-run install and a
    # filesystem installation below /usr or /usr/local.
    if _get_install_method() == 'single_directory':
        config.docs_path = os.path.join(share_dir, 'docs')
        config.icons_path = os.path.join(share_dir, 'icons')
        config.templates_path = os.path.join(share_dir, 'templates')
    else:
        config.docs_path = os.path.join(share_dir, 'doc', 'wxglade')
        config.icons_path = os.path.join(share_dir, 'wxglade', 'icons')
        config.templates_path = os.path.join(share_dir, 'wxglade', 'templates')
    # derive the remaining paths and make sure the appdata directory exists
    _set_home_path()
    _set_appdata_path()
    _set_file_paths(options)
    _normalise_paths()
    _create_appdata_path()
def _create_appdata_path():
    """Create the application data directory when it is missing.

    Without it, log initialisation would fail with an IOError "No such file
    or directory"; the file logger is initialised after this returns."""
    if os.path.isdir(config.appdata_path):
        return
    try:
        os.makedirs(config.appdata_path, 0o700)
    except EnvironmentError as e:
        logging.error(_('Failed to create config directory: "%s"'), e)
def _set_appdata_path():
    "Set the path of the application data directory"
    # an explicit environment override always wins
    if 'WXGLADE_CONFIG_PATH' in os.environ:
        config.appdata_path = os.path.expandvars( os.environ['WXGLADE_CONFIG_PATH'] )
        return
    if os.name == 'nt' and 'APPDATA' in os.environ:
        path = os.path.expandvars(os.environ['APPDATA'])
        old_name = '%s/.wxglade' % path
        new_name = '%s/wxglade' % path
        if os.path.isdir(new_name):
            path = new_name
        elif os.path.isdir(old_name):
            # migrate a pre-existing dotted directory to the new name
            logging.info( _('Rename appdata path from "%s" to "%s"'), old_name, new_name)
            try:
                os.rename(old_name, new_name)
                path = new_name
            except EnvironmentError as e:
                # ignore rename errors and just write an info message
                logging.info(_('Renaming failed: "%s"'), e)
                logging.info(_('Using the old path "%s" instead'), old_name)
                path = old_name
        else:
            path = new_name
        config.appdata_path = path
        return
    # fallback: (hidden) directory below the home directory
    if os.name == 'nt':
        path = os.path.join(config.home_path, 'wxglade')
    else:
        path = os.path.join(config.home_path, '.wxglade')
    config.appdata_path = path
def _set_home_path():
    "Set the path of the home directory; falls back to the temp directory"
    home_path = os.path.expanduser('~')
    # to prevent unexpanded "%HOMEDRIVE%%HOMEPATH%" as reported in SF Bug #185
    home_path = os.path.expandvars(home_path)
    if home_path == '~':
        home_path = tempfile.gettempdir()
        logging.info( _('Expansion of the home directory shortcut "~" failed. Use temp directory "%s" instead'),
                      home_path )
    if not os.path.isdir(home_path):
        tmp_dir = tempfile.gettempdir()
        logging.info( _('The home path "%s" is not a directory. Use the temp directory "%s" instead.'),
                      home_path, tmp_dir )
        home_path = tmp_dir
    if not os.access(home_path, os.W_OK):
        # the redundant duplicate of this log message was removed; one
        # message carrying both paths is sufficient
        tmp_dir = tempfile.gettempdir()
        logging.info(_('The home path "%s" is not writable. Use the temp directory "%s" instead.'), home_path, tmp_dir)
        home_path = tmp_dir
    config.home_path = home_path
def _get_install_method():
    """Classify how the software was installed.

    Returns 'single_directory' for an extract-and-run layout (always the
    case on Windows) and 'filesystem_installation' for an install below
    /usr resp. C:/Program Files."""
    single_dir = os.name == 'nt' or os.path.isdir(os.path.join(config.wxglade_path, 'icons'))
    return 'single_directory' if single_dir else 'filesystem_installation'
def _get_share_path():
    """Return the path of the "share" directory (architecture independent data files).
    That's something like "/usr/share" or "/usr/local/share" on Unix or the installation directory on Windows.
    see: _get_install_method()"""
    # all in a single directory (extract and run)
    if _get_install_method() == 'single_directory':
        share_dir = config.wxglade_path
    # alternative installation path
    else:
        assert config.wxglade_path.endswith('wxglade')
        # split path into single components to check the last four elements
        dir_list = split_path(os.path.normpath(config.wxglade_path))
        # .../lib*/python*/{site,dist}-packages/wxglade -> .../share
        if len(dir_list) > 4 and dir_list[-1] == 'wxglade' and dir_list[-2] in ['site-packages', 'dist-packages'] and \
           dir_list[-3].startswith('python') and dir_list[-4].startswith('lib'):
            share_dir = os.path.join(*dir_list[:-4])
            share_dir = os.path.join(share_dir, 'share')
        elif len(dir_list) > 4 and dir_list[-1] == 'wxglade' and dir_list[-2].endswith('.egg'):
            # egg installation: share/ lives inside the .egg directory
            share_dir = os.path.join(*dir_list[:-1])
            share_dir = os.path.join(share_dir, 'share')
        else:
            logging.error(_('Unknown path structure %s'), config.wxglade_path)
            share_dir = ''
    # report problems, but still return the (possibly empty) result
    if not share_dir:
        logging.error(_('Share directory not found'))
    elif not os.path.exists(share_dir):
        logging.error(_('Share directory "%s" does not exists'), share_dir)
    elif not os.path.isdir(share_dir):
        logging.error(_('Share directory "%s" is not a directory'), share_dir)
    return share_dir
def split_path(path):
    "Split the path into single components; e.g. split_path('/usr/local/share') -> ['/', 'usr', 'local', 'share']"
    drive, remainder = os.path.splitdrive(path)
    parts = []
    while True:
        remainder, leaf = os.path.split(remainder)
        if leaf:
            parts.append(leaf)
            continue
        # no leaf left: keep a remaining root ("/" or "\\") and stop
        if remainder:
            parts.append(remainder)
        break
    parts.reverse()
    if drive:
        parts.insert(0, drive)
    return parts
def _normalise_paths():
    "Normalise all paths stored in config module"
    path_attrs = ['appdata_path', 'credits_file', 'docs_path', 'history_file', 'home_path', 'icons_path',
                  'license_file', 'manual_file', 'rc_file', 'templates_path', 'tutorial_file', 'widgets_path',
                  'wxglade_path']
    for attr in path_attrs:
        assert hasattr(config, attr)
        setattr(config, attr, os.path.normpath(getattr(config, attr)))
def _set_file_paths(options):
"Set the full path for all files (config.*_file except default_output_file)"
install_method = _get_install_method()
if install_method == 'single_directory':
config.credits_file = os.path.join(config.wxglade_path, 'CREDITS.txt')
config.license_file = os.path.join(config.wxglade_path, 'LICENSE.txt')
config.manual_file = os.path.join(config.docs_path, 'html', 'index.html')
config.bmp_manual_file = os.path.join(config.docs_path, 'html', 'bitmaps.html')
#config.tutorial_file = os.path.join(config.docs_path, 'Tutorial.html')
else:
config.credits_file = os.path.join(config.docs_path, 'CREDITS.txt')
config.license_file = os.path.join(config.docs_path, 'LICENSE.txt')
config.manual_file = os.path.join(config.docs_path, 'html', 'index.html')
config.bmp_manual_file = os.path.join(config.docs_path, 'html', 'bitmaps.html')
#config.tutorial_file = os.path.join(config.docs_path, 'html', 'tutorial.html')
if not os.path.exists(config.credits_file):
logging.error(_('Credits file "CREDITS.txt" not found!'))
config.credits_file = ''
if not os.path.exists(config.license_file):
logging.error(_('License file "LICENSE.txt" not found!'))
config.license_file = ''
config.widgets_path = os.path.join(config.wxglade_path, 'widgets')
# complete path to rc file
if options | |
into an include-optional
# file, then reloaded into FAUCET.
@staticmethod
def acls_override():
"""Return override ACLs option"""
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5001,
'actions': {
'allow': 0,
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5002,
'actions': {
'allow': 1,
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
}
# DP-to-acl_in port mapping.
def link_acls(self):
"""Host/link port map to acls in"""
return {
0: [1] # Host 0 'acls_in': [1]
}
def include_optional(self):
if self.acls_config is None:
self.acls_config = os.path.join(self.tmpdir, 'acls.yaml')
if self.missing_config is None:
self.missing_config = os.path.join(self.tmpdir, 'missing_config.yaml')
return [self.acls_config, self.missing_config]
    def setUp(self): # pylint: disable=invalid-name
        """Setup network & create configuration file"""
        # The include files are allocated lazily via include_optional().
        self.acls_config = None
        self.missing_config = None
        super(FaucetStringOfDPACLOverrideTest, self).set_up(
            n_dps=self.NUM_DPS,
            n_untagged=self.NUM_HOSTS)
    def test_port5001_blocked(self):
        """Test that TCP port 5001 is blocked."""
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # baseline: port 5001 is reachable before the override is applied
        self.verify_tp_dst_notblocked(5001, first_host, second_host)
        # write the override ACLs into the include-optional file
        with open(self.acls_config, 'w') as config_file:
            self.configuration_options['acl_options'] = self.acls_override()
            config_file.write(self.topo.get_config(n_vlans=1, **self.configuration_options))
        # a warm restart should pick up the new ACLs
        self.verify_faucet_reconf(cold_start=False, change_expected=True)
        self.verify_tp_dst_blocked(5001, first_host, second_host)
    def test_port5002_notblocked(self):
        """Test that TCP port 5002 is not blocked."""
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # baseline: port 5002 is blocked before the override is applied
        self.verify_tp_dst_blocked(5002, first_host, second_host)
        # write the override ACLs into the include-optional file
        with open(self.acls_config, 'w') as config_file:
            self.configuration_options['acl_options'] = self.acls_override()
            config_file.write(self.topo.get_config(n_vlans=1, **self.configuration_options))
        # a warm restart should pick up the new ACLs
        self.verify_faucet_reconf(cold_start=False, change_expected=True)
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetTunnelSameDpTest(FaucetMultiDPTestBase):
    """Test the tunnel ACL option with output to the same DP"""

    NUM_DPS = 2
    NUM_HOSTS = 2
    SOFTWARE_ONLY = True
    SWITCH_TO_SWITCH_LINKS = 2

    def acls(self):
        """ACL table: tunnel ICMP from host 0 (switch 0) to host 1 (switch 0)."""
        tunnel_output = {
            'tunnel': {
                'type': 'vlan',
                'tunnel_id': 200,
                'dp': self.topo.switches_by_id[0],  # Switch 0
                'port': self.host_port_maps[1][0][0]}  # Switch 0 host 1
        }
        icmp_rule = {'rule': {
            'dl_type': IPV4_ETH,
            'ip_proto': 1,
            'actions': {
                'allow': 0,
                'output': tunnel_output,
            }
        }}
        return {1: [icmp_rule]}

    def link_acls(self):
        """Apply ACL 1 on host 0's ingress port."""
        return {0: [1]}

    def test_tunnel_established(self):
        """Test a tunnel path can be created."""
        self.set_up(stack=True, n_dps=self.NUM_DPS, n_untagged=self.NUM_HOSTS,
                    switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS)
        self.verify_stack_up()
        tunnel_src = self.net.get(self.topo.hosts_by_id[0])
        tunnel_dst = self.net.get(self.topo.hosts_by_id[1])
        bystander = self.net.get(self.topo.hosts_by_id[2])
        self.verify_tunnel_established(tunnel_src, tunnel_dst, bystander)
class FaucetSingleTunnelTest(FaucetMultiDPTestBase):
    """Test the Faucet tunnel ACL option both locally and remotely with link failure"""

    NUM_DPS = 2
    NUM_HOSTS = 2
    SOFTWARE_ONLY = True
    SWITCH_TO_SWITCH_LINKS = 2

    def acls(self):
        """Return config ACL options"""
        # Tunnel from host 0 (switch 0) to host 2 (switch 1).
        # Two rules share tunnel_id 200: one matches IPv4 ICMP, the other
        # matches IPv6 frames with ip_proto 56.
        return {
            1: [
                {'rule': {
                    'dl_type': IPV4_ETH,
                    'ip_proto': 1,  # ICMP (IP protocol number 1)
                    'actions': {
                        'allow': 0,
                        'output': {
                            'tunnel': {
                                'type': 'vlan',
                                'tunnel_id': 200,
                                'dp': self.topo.switches_by_id[1],
                                'port': self.host_port_maps[2][1][0]}
                        }
                    }
                }},
                {'rule': {
                    'dl_type': IPV6_ETH,
                    'ip_proto': 56,
                    'actions': {
                        'allow': 0,
                        'output': {
                            'tunnel': {
                                'type': 'vlan',
                                'tunnel_id': 200,
                                'dp': self.topo.switches_by_id[1],
                                'port': self.host_port_maps[2][1][0]}
                        }
                    }
                }},
            ]
        }

    def link_acls(self):
        """DP-to-acl port mapping"""
        return {
            0: [1],  # Host 0 'acls_in': [1]
            3: [1],  # Host 3 'acls_in': [1]
        }

    def output_only(self):
        # Host 2 only ever receives tunnelled traffic; it never sends.
        return {2}  # Host 2 (first port, second switch).

    def setUp(self):  # pylint: disable=invalid-name
        """Start the network"""
        super(FaucetSingleTunnelTest, self).set_up(
            stack=True,
            n_dps=self.NUM_DPS,
            n_untagged=self.NUM_HOSTS,
            switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS)

    def verify_tunnels(self):
        """Test tunnel connectivity from local and remote switches."""
        other_host = self.net.get(self.topo.hosts_by_id[1])
        dst_host = self.net.get(self.topo.hosts_by_id[2])
        # Hosts 0 and 3 both have the tunnel ACL applied (see link_acls).
        for src_host_id in (0, 3):
            src_host = self.net.get(self.topo.hosts_by_id[src_host_id])
            self.verify_tunnel_established(src_host, dst_host, other_host, packets=10)

    def test_tunnel_path_rerouted(self):
        """Test remote tunnel path is rerouted when a link is down."""
        self.verify_stack_up()
        self.verify_tunnels()
        # Take down the first stack link between DP 0 and DP 1 and verify
        # the tunnel still works over the remaining link.
        first_stack_port = self.link_port_maps[(0, 1)][0]
        self.one_stack_port_down(self.dpids[0], self.topo.switches_by_id[0], first_stack_port)
        self.verify_tunnels()
class FaucetTunnelLoopTest(FaucetSingleTunnelTest):
    """Test tunnel on a loop topology"""

    NUM_DPS = 3
    SWITCH_TO_SWITCH_LINKS = 1

    def setUp(self):  # pylint: disable=invalid-name
        """Start a loop topology network"""
        # super(FaucetSingleTunnelTest, self) starts the MRO lookup *after*
        # FaucetSingleTunnelTest; since set_up is defined on the base class
        # this resolves to the same base set_up, here called with
        # stack_ring=True to build the ring topology.
        super(FaucetSingleTunnelTest, self).set_up(
            stack=True,
            n_dps=self.NUM_DPS,
            n_untagged=self.NUM_HOSTS,
            switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS,
            stack_ring=True)
class FaucetTunnelAllowTest(FaucetTopoTestBase):
    """Test Tunnels with ACLs containing allow=True"""

    NUM_DPS = 2
    NUM_HOSTS = 4
    NUM_VLANS = 2
    SOFTWARE_ONLY = True

    def acls(self):
        # Tunnel from host 0 (switch 0) to host 2 (switch 1)
        """Return config ACL options: tunnel ICMP with allow=1 so matching
        packets are tunnelled AND continue through the normal pipeline;
        a catch-all allow rule follows for everything else."""
        return {
            1: [
                {'rule': {
                    'dl_type': IPV4_ETH,
                    'ip_proto': 1,  # ICMP (IP protocol number 1)
                    'actions': {
                        'allow': 1,
                        'output': {
                            'tunnel': {
                                'type': 'vlan',
                                'tunnel_id': 300,
                                'dp': self.topo.switches_by_id[1],
                                'port': self.host_port_maps[2][1][0]}
                        }
                    }
                }},
                {'rule': {
                    'actions': {
                        'allow': 1,
                    }
                }},
            ]
        }

    def setUp(self):  # pylint: disable=invalid-name
        """Start the network"""
        super().setUp()
        # Linear (path) topology of NUM_DPS switches; DP 0 is the stack root.
        network_graph = networkx.path_graph(self.NUM_DPS)
        dp_options = {}
        for dp_i in network_graph.nodes():
            dp_options.setdefault(dp_i, {
                'group_table': self.GROUP_TABLE,
                'ofchannel_log': self.debug_log_path + str(dp_i) if self.debug_log_path else None,
                'hardware': self.hardware if dp_i == 0 and self.hw_dpid else 'Open vSwitch'
            })
            if dp_i == 0:
                dp_options[0]['stack'] = {'priority': 1}
        switch_links = list(network_graph.edges())
        link_vlans = {edge: None for edge in switch_links}
        # Hosts 0,1 on switch 0; hosts 2,3 on switch 1.
        host_links = {0: [0], 1: [0], 2: [1], 3: [1]}
        # Host 2 is on the second VLAN; all other hosts on the first.
        host_vlans = {0: 0, 1: 0, 2: 1, 3: 0}
        # Only host 0 has the tunnel ACL applied on ingress.
        host_options = {0: {'acls_in': [1]}}
        self.build_net(
            host_links=host_links,
            host_vlans=host_vlans,
            switch_links=switch_links,
            link_vlans=link_vlans,
            n_vlans=self.NUM_VLANS,
            dp_options=dp_options,
            host_options=host_options,
        )
        self.start_net()

    def test_tunnel_continue_through_pipeline_interaction(self):
        """Test packets that enter a tunnel with allow, also continue through the pipeline"""
        # Should be able to ping from h_{0,100} -> h_{1,100} & h_{3,100}
        # and also have the packets arrive at h_{2,200} (the other end of the tunnel)
        self.verify_stack_up()
        # Ensure connection to the host on the other end of the tunnel can exist
        src_host = self.net.get(self.topo.hosts_by_id[0])  # h_{0,100}
        other_host = self.net.get(self.topo.hosts_by_id[1])  # h_{1,100}
        dst_host = self.net.get(self.topo.hosts_by_id[2])  # h_{2,200}
        self.verify_tunnel_established(src_host, dst_host, other_host)
        # Ensure a connection to a host not in the tunnel can exist
        # this implies that the packet is also sent through the pipeline
        self.check_host_connectivity_by_id(0, 1)
        self.check_host_connectivity_by_id(0, 3)
class FaucetTunnelSameDpOrderedTest(FaucetMultiDPTestBase):
    """Test the tunnel ACL option with output to the same DP"""

    NUM_DPS = 2
    NUM_HOSTS = 2
    SOFTWARE_ONLY = True
    SWITCH_TO_SWITCH_LINKS = 2

    def acls(self):
        """Return ACL config (ordered-output variant: 'output' is a list)."""
        # Tunnel from host 0 (switch 0) to host 1 (switch 0)
        return {
            1: [
                {'rule': {
                    'dl_type': IPV4_ETH,
                    'ip_proto': 1,  # ICMP (IP protocol number 1)
                    'actions': {
                        'allow': 0,
                        'output': [
                            {'tunnel': {
                                'type': 'vlan',
                                'tunnel_id': 200,
                                'dp': self.topo.switches_by_id[0],
                                'port': self.host_port_maps[1][0][0]}}
                        ]
                    }
                }}
            ]
        }

    def link_acls(self):
        """DP to acl port mapping"""
        return {
            0: [1]  # Host 0 'acls_in': [1]
        }

    def test_tunnel_established(self):
        """Test a tunnel path can be created."""
        self.set_up(stack=True, n_dps=self.NUM_DPS, n_untagged=self.NUM_HOSTS,
                    switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS)
        self.verify_stack_up()
        src_host = self.net.get(self.topo.hosts_by_id[0])
        dst_host = self.net.get(self.topo.hosts_by_id[1])
        other_host = self.net.get(self.topo.hosts_by_id[2])
        self.verify_tunnel_established(src_host, dst_host, other_host)
class FaucetSingleTunnelOrderedTest(FaucetMultiDPTestBase):
    """Test the Faucet tunnel ACL option"""

    NUM_DPS = 2
    NUM_HOSTS = 2
    SOFTWARE_ONLY = True
    SWITCH_TO_SWITCH_LINKS = 2

    def acls(self):
        """Return config ACL options (ordered-output variant: 'output' is a list)."""
        return {
            1: [
                {'rule': {
                    'dl_type': IPV4_ETH,
                    'ip_proto': 1,  # ICMP (IP protocol number 1)
                    'actions': {
                        'allow': 0,
                        'output': [
                            {'tunnel': {
                                'type': 'vlan',
                                'tunnel_id': 200,
                                'dp': self.topo.switches_by_id[1],
                                'port': self.host_port_maps[2][1][0]}}
                        ]
                    }
                }}
            ]
        }

    def link_acls(self):
        """DP link to list of acls to apply"""
        return {
            0: [1]  # Host 0 'acls_in': [1]
        }

    def setUp(self):  # pylint: disable=invalid-name
        """Start the network"""
        super(FaucetSingleTunnelOrderedTest, self).set_up(
            stack=True,
            n_dps=self.NUM_DPS,
            n_untagged=self.NUM_HOSTS,
            switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS)

    def test_tunnel_established(self):
        """Test a tunnel path can be created."""
        self.verify_stack_up()
        src_host = self.net.get(self.topo.hosts_by_id[0])
        dst_host = self.net.get(self.topo.hosts_by_id[2])
        other_host = self.net.get(self.topo.hosts_by_id[1])
        self.verify_tunnel_established(src_host, dst_host, other_host)

    def test_tunnel_path_rerouted(self):
        """Test a tunnel path is rerouted when a link is down."""
        self.verify_stack_up()
        src_host = self.net.get(self.topo.hosts_by_id[0])
        dst_host = self.net.get(self.topo.hosts_by_id[2])
        other_host = self.net.get(self.topo.hosts_by_id[1])
        self.verify_tunnel_established(src_host, dst_host, other_host, packets=10)
        # Fail one of the two stack links; the tunnel must survive on the other.
        first_stack_port = self.link_port_maps[(0, 1)][0]
        self.one_stack_port_down(self.dpids[0], self.topo.switches_by_id[0], first_stack_port)
        self.verify_tunnel_established(src_host, dst_host, other_host, packets=10)
class FaucetTunnelLoopOrderedTest(FaucetSingleTunnelOrderedTest):
    """Test tunnel on a loop topology"""

    NUM_DPS = 3
    SWITCH_TO_SWITCH_LINKS = 1

    def setUp(self):  # pylint: disable=invalid-name
        """Start a loop topology network"""
        # super(FaucetSingleTunnelOrderedTest, self) starts the MRO lookup
        # *after* the parent class; set_up is defined on the base, so this is
        # the base set_up, here with stack_ring=True to build the ring.
        super(FaucetSingleTunnelOrderedTest, self).set_up(
            stack=True,
            n_dps=self.NUM_DPS,
            n_untagged=self.NUM_HOSTS,
            switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS,
            stack_ring=True)
class FaucetTunnelAllowOrderedTest(FaucetTopoTestBase):
"""Test Tunnels with ACLs containing allow=True"""
NUM_DPS = 2
NUM_HOSTS = 4
NUM_VLANS = 2
SOFTWARE_ONLY = True
    def acls(self):
        """Return config ACL options: tunnel ICMP (allow=1, ordered output
        list) from host 0 to host 2, plus a catch-all allow rule."""
        return {
            1: [
                {'rule': {
                    'dl_type': IPV4_ETH,
                    'ip_proto': 1,  # ICMP (IP protocol number 1)
                    'actions': {
                        'allow': 1,
                        'output': [
                            {'tunnel': {
                                'type': 'vlan',
                                'tunnel_id': 300,
                                'dp': self.topo.switches_by_id[1],
                                'port': self.host_port_maps[2][1][0]}}
                        ]
                    }
                }},
                {'rule': {
                    'actions': {
                        'allow': 1,
                    }
                }},
            ]
        }
    def setUp(self):  # pylint: disable=invalid-name
        """Start the network"""
        super().setUp()
        # Linear (path) topology of NUM_DPS switches; DP 0 is the stack root.
        network_graph = networkx.path_graph(self.NUM_DPS)
        dp_options = {}
        for dp_i in network_graph.nodes():
            dp_options.setdefault(dp_i, {
                'group_table': self.GROUP_TABLE,
                'ofchannel_log': self.debug_log_path + str(dp_i) if self.debug_log_path else None,
                'hardware': self.hardware if dp_i == 0 and self.hw_dpid else 'Open vSwitch'
            })
            if dp_i == 0:
                dp_options[0]['stack'] = {'priority': 1}
        switch_links = list(network_graph.edges())
        link_vlans = {edge: None for edge in switch_links}
        # Hosts 0,1 on switch 0; hosts 2,3 on switch 1.
        host_links = {0: [0], 1: [0], 2: [1], 3: [1]}
        # Host 2 is on the second VLAN; all other hosts on the first.
        host_vlans = {0: 0, 1: 0, 2: 1, 3: 0}
        # Only host 0 has the tunnel ACL applied on ingress.
        host_options = {0: {'acls_in': [1]}}
        self.build_net(
            host_links=host_links,
            host_vlans=host_vlans,
            switch_links=switch_links,
            link_vlans=link_vlans,
            n_vlans=self.NUM_VLANS,
            dp_options=dp_options,
            host_options=host_options,
        )
        self.start_net()
def test_tunnel_continue_through_pipeline_interaction(self):
"""Test packets that enter a tunnel | |
nærkamp_bonus = 15
nærkamp_value += nærkamp_bonus
self.charactersheet[author_id]["Character Sheet"]["Nærkamp"] = nærkamp_value
styrke_value = int(self.charactersheet[author_id]["Character Sheet"]["Styrke"])
styrke_bonus = 5
styrke_value += styrke_bonus
self.charactersheet[author_id]["Character Sheet"]["Styrke"] = styrke_value
fh.save_file(self.charactersheet, 'charactersheet')
# CHARLATAN
if self.charactersheet[author_id]["Character Sheet"]["Chosen Character"] == "Charlatan":
charlatan_embed = discord.Embed(title=f"__**Charlatan**__", description=f"*En tunge af sølv og et flot smil kan gøre en tigger til baron i den rette situation.*\n" +
"*Charlataner og andre fupmagere er ikke unormale syn i de mere befærdede og manfolkdige byområder på Gaia.*\n" +
"*Med deres rigdom gør de ofte brug af mere “direkte” arbejdskraft hvis en confrontation skulle opstå.*\n \n" +
"__**Evner:**__\n" +
"**Den klarer i… jeg skal væk**\n" +
"*Under kamp med alle “Menneskelige” fjender vil Charlatanen altid have lavest Initiativ i gruppen, men højeste imod “Ikke-Menneskelige” fjender (Specificeres)*\n \n" +
"__**Pros:**__\n" +
"+15 **Løgn**\n" +
"+15 **Smir**\n" +
"+15 **Handel**\n" +
"+15 **Snig**\n \n" +
"__**Cons:**__\n" +
"Charlatanen kan IKKE bruge to-hånds våben eller rustning.", color=0xFAFBF9)
charlatan_embed.set_image(url="https://media.discordapp.net/attachments/698522831083929734/701071214403125358/83977493_200402444417514_4410246142369988608_n.png?width=483&height=627")
await channel.send(embed=charlatan_embed)
await asyncio.sleep(1)
# BONUS
løgn_value = int(self.charactersheet[author_id]["Character Sheet"]["Løgn"])
løgn_bonus = 15
løgn_value += løgn_bonus
self.charactersheet[author_id]["Character Sheet"]["Løgn"] = løgn_value
smigre_value = int(self.charactersheet[author_id]["Character Sheet"]["Smigre"])
smigre_bonus = 15
smigre_value += smigre_bonus
self.charactersheet[author_id]["Character Sheet"]["Smigre"] = smigre_value
handel_value = int(self.charactersheet[author_id]["Character Sheet"]["Handel"])
handel_bonus = 15
handel_value += handel_bonus
self.charactersheet[author_id]["Character Sheet"]["Handel"] = handel_value
snig_value = int(self.charactersheet[author_id]["Character Sheet"]["Snig"])
snig_bonus = 15
snig_value += snig_bonus
self.charactersheet[author_id]["Character Sheet"]["Snig"] = snig_value
fh.save_file(self.charactersheet, 'charactersheet')
# INDFØDT
if self.charactersheet[author_id]["Character Sheet"]["Chosen Character"] == "Indfødt":
indfødt_embed = discord.Embed(title=f"__**Indfødt**__", description=f"*Efter Katastrofen er mange mennesker regresseret tilbage til vores primale fortid.*\n" +
"*Disse stammefolk er udholdende og krigeriske, men til tider også overtroiske og mindre intelligente end de mere “civiliserede” sorter.*\n \n" +
"__**Evner:**__\n" +
"**Tro til Moder Gaia**\n" +
"*Hvis den Indfødte kun er iklædt eller gør brug udstyr fundet i naturen eller de selv har skabt, får de +1 på alle **relevante** terningekast (Specificeres)*\n" +
"**Hærdet legeme**\n" +
"*Indfødte er immun overfor de fleste gifte (Specificeres)*\n \n" +
"__**Pros:**__\n" +
"+5 **Nærkamp**\n" +
"+5 **Udholdenhed**\n" +
"+5 **Overlevelse**\n" +
"+5 **Alkymi**\n" +
"+5 **Styrke **\n \n" +
"__**Cons:**__\n" +
"Den Indfødte kan ALDRIG bruge Videnskab evnen.\n" +
"-10 **Sociale Evner**", color=0x030303)
indfødt_embed.set_image(url="https://media.discordapp.net/attachments/698522831083929734/701080791597318154/83949023_2633611816925481_519829439547179008_n.png?width=444&height=627")
await channel.send(embed=indfødt_embed)
await asyncio.sleep(1)
# BONUS
nærkamp_value = int(self.charactersheet[author_id]["Character Sheet"]["Nærkamp"])
nærkamp_bonus = 5
nærkamp_value += nærkamp_bonus
self.charactersheet[author_id]["Character Sheet"]["Nærkamp"] = nærkamp_value
udholdenhed_value = int(self.charactersheet[author_id]["Character Sheet"]["Udholdenhed"])
udholdenhed_bonus = 5
udholdenhed_value += udholdenhed_bonus
self.charactersheet[author_id]["Character Sheet"]["Udholdenhed"] = udholdenhed_value
alkymi_value = int(self.charactersheet[author_id]["Character Sheet"]["Alkymi"])
alkymi_bonus = 5
alkymi_value += alkymi_bonus
self.charactersheet[author_id]["Character Sheet"]["Alkymi"] = alkymi_value
alkymi_value = int(self.charactersheet[author_id]["Character Sheet"]["Alkymi"])
alkymi_bonus = 5
alkymi_value += alkymi_bonus
self.charactersheet[author_id]["Character Sheet"]["Alkymi"] = alkymi_value
styrke_value = int(self.charactersheet[author_id]["Character Sheet"]["Styrke"])
styrke_bonus = 5
styrke_value += styrke_bonus
self.charactersheet[author_id]["Character Sheet"]["Styrke"] = styrke_value
# NEGATE
smigre_value = int(self.charactersheet[author_id]["Character Sheet"]["Smigre"])
smigre_negate = 10
smigre_value -= smigre_negate
if smigre_value < 0:
smigre_value = 0
self.charactersheet[author_id]["Character Sheet"]["Smigre"] = smigre_value
løgn_value = int(self.charactersheet[author_id]["Character Sheet"]["Løgn"])
løgn_negate = 10
løgn_value -= løgn_negate
if løgn_value < 0:
løgn_value = 0
self.charactersheet[author_id]["Character Sheet"]["Løgn"] = løgn_value
intimiderer_value = int(self.charactersheet[author_id]["Character Sheet"]["Intimiderer"])
intimiderer_negate = 10
intimiderer_value -= intimiderer_negate
if intimiderer_value < 0:
intimiderer_value = 0
self.charactersheet[author_id]["Character Sheet"]["Intimiderer"] = intimiderer_value
handel_value = int(self.charactersheet[author_id]["Character Sheet"]["Handel"])
handel_negate = 10
handel_value -= handel_negate
if handel_value < 0:
handel_value = 0
self.charactersheet[author_id]["Character Sheet"]["Handel"] = handel_value
fh.save_file(self.charactersheet, 'charactersheet')
# JÆGER
if self.charactersheet[author_id]["Character Sheet"]["Chosen Character"] == "Jæger":
jæger_embed = discord.Embed(title=f"__**Jæger**__", description=f"*Siden tidernes morgen har mennesket jaget, dette ændrede sig ikke da vi drog ud mellem stjernerne.*\n" +
"*Jægeren har brugt sit liv i vildmarken, både som jæger og som bytte.*\n \n" +
"__**Evner:**__\n" +
"**Sikke et eksemplar**\n" +
"*Jægeren kan altid identificere en Dyrisk fjendes (også Bossers) svagheder. Ydermere kan de oftest skaffe sjældne ressourcer hvis de dressere dyr.*\n" +
"**Ekspert**\n" +
"*I kamp mod ‘Dyriske’ fjender har Jægeren altid +1 Initiativ og hvis Jægeren er den første i gruppen til at angribe en ‘Dyrisk’ fjende, kan de ikke misse deres første angreb.*\n \n" +
"__**Pros:**__\n" +
"+5 **Kaste/Strenge Våben**\n" +
"+5 **Fælder**\n" +
"+5 **Overlevelse**\n \n" +
"__**Cons:**__\n" +
"-5 **Videnskab**\n" +
"-5 **Skydevåben**", color=0xF50404)
jæger_embed.set_image(url="https://media.discordapp.net/attachments/698522831083929734/701086619729657876/83272722_212026363273452_6715150482186174464_n.png?width=454&height=627")
await channel.send(embed=jæger_embed)
await asyncio.sleep(1)
# BONUS
kaste_strenge_våben_value = int(self.charactersheet[author_id]["Character Sheet"]["Kaste_Strenge_våben"])
kaste_strenge_våben_bonus = 5
kaste_strenge_våben_value += kaste_strenge_våben_bonus
self.charactersheet[author_id]["Character Sheet"]["Kaste_Strenge_våben"] = kaste_strenge_våben_value
fælder_value = int(self.charactersheet[author_id]["Character Sheet"]["Fælder"])
fælder_bonus = 5
fælder_value += fælder_bonus
self.charactersheet[author_id]["Character Sheet"]["Fælder"] = fælder_value
overlevelse_value = int(self.charactersheet[author_id]["Character Sheet"]["Overlevelse"])
overlevelse_bonus = 5
overlevelse_value += overlevelse_bonus
self.charactersheet[author_id]["Character Sheet"]["Overlevelse"] = overlevelse_value
# NEGATE
videnskab_value = int(self.charactersheet[author_id]["Character Sheet"]["Videnskab"])
videnskab_negate = 5
videnskab_value -= videnskab_negate
if videnskab_value < 0:
videnskab_value = 0
self.charactersheet[author_id]["Character Sheet"]["Videnskab"] = videnskab_value
skydevåben_value = int(self.charactersheet[author_id]["Character Sheet"]["Skydevåben"])
skydevåben_negate = 5
skydevåben_value -= skydevåben_negate
if skydevåben_value < 0:
skydevåben_value = 0
self.charactersheet[author_id]["Character Sheet"]["Skydevåben"] = skydevåben_value
fh.save_file(self.charactersheet, 'charactersheet')
# LANDEVEJSRØVER
if self.charactersheet[author_id]["Character Sheet"]["Chosen Character"] == "Landevejsrøver":
landevejsrøver_embed = discord.Embed(title=f"__**Landevejsrøver**__", description=f"*Sønderlandet og hele Gaia er plaget af gemene banditter der ligger på lur langs befærdede veje.*\n" +
"*De berøver folk for alt hvad de er værd og nogen gange ender det med at offeret også må bøde med livet.*\n" +
"*Nogle af disse lovløse lever således af nødvendighed, mens andre ser det som naturens love.*\n \n" +
"__**Evner:**__\n" +
"**Nu du skulle nævne det…**\n" +
"*Under snakke eller handel med “Kriminelle” NPC’er har Landevejsrøveren altid +5 i Sociale Evner, men -5 med “Lovlydige” karakterer (Specificeres)*\n" +
"**Jeg er realist!**\n" +
"*Hvis Landevejsrøveren kommer under 50% Liv får de +1 Bevægelses handling, så længe det er VÆK fra fjenden.*\n \n" +
"__**Pros:**__\n" +
"+5 **Løgn**\n" +
"+5 **Snig**\n \n" +
"__**Cons:**__\n" +
"-10 **Smigre**", color=0x20B5FF)
landevejsrøver_embed.set_image(url="https://media.discordapp.net/attachments/698522831083929734/701091406424440832/84333690_697893080745801_5555861783052288000_n.png?width=437&height=627")
await channel.send(embed=landevejsrøver_embed)
await asyncio.sleep(1)
# BONUS
løgn_value = int(self.charactersheet[author_id]["Character Sheet"]["Løgn"])
løgn_bonus = 5
løgn_value += løgn_bonus
self.charactersheet[author_id]["Character Sheet"]["Løgn"] = løgn_value
snig_value = int(self.charactersheet[author_id]["Character Sheet"]["Snig"])
snig_bonus = 5
snig_value += snig_bonus
self.charactersheet[author_id]["Character Sheet"]["Snig"] = snig_value
# NEGATE
smigre_value = int(self.charactersheet[author_id]["Character Sheet"]["Smigre"])
smigre_negate = 10
smigre_value -= smigre_negate
if smigre_value < 0:
smigre_value = 0
self.charactersheet[author_id]["Character Sheet"]["Smigre"] = smigre_value
fh.save_file(self.charactersheet, 'charactersheet')
# LEJESOLDAT
if self.charactersheet[author_id]["Character Sheet"]["Chosen Character"] == "Lejesoldat":
lejesoldat_embed = discord.Embed(title=f"__**Lejesoldat**__", description=f"*Lejesoldaten er en kold og kynisk kriger.*\n" +
"*De bruger deres viden inden for krigsførelse til at skabe en tilværelse domineret af død og konflikt.*\n" +
"*Dog er deres evner til at interagere med andre mennesker forværret, da krig sjældent tillader lange og meningsfulde samtaler.*\n \n" +
"__**Evner:**__\n" +
"**Amatører...**\n" +
"*I kamp mod ‘Bandit’ og ‘Soldat’ fjender har Lejesoldaten altid +1 Initiativ og +5 på sit første angreb under kampen (medmindre specificeret)*\n" +
"**Kampplan**\n" +
"*Ved mulighed kan Lejesoldaten give fif og råd til en medspiller. Dette kan være under en rejse, eller mens i lejr. Kampplan giver den anden spiller plus +10 i deres højeste Kamp Evne under HELE den næste kamp (Kan kun bruges på én spiller pr. kamp og hvis nogle Kamp Evner er lige, vælger spilleren selv)*\n \n" +
"__**Pros:**__\n" +
"+10 **Nærkamp**\n" +
"+10 **Strenge/Kaste Våben**\n" +
"+10 **Skydevåben**\n" +
"+10 **Snig**\n \n" +
"__**Cons:**__\n" +
"-10 **Smigre**\n" +
"-10 **Løgn**\n" +
"-10 **Intimiderer**\n" +
"-10 **Handel**", color=0xF8AB68)
lejesoldat_embed.set_image(url="https://media.discordapp.net/attachments/698522831083929734/701150185580920852/84106079_474196929921976_1350826510910488576_n.png?width=337&height=626")
await channel.send(embed=lejesoldat_embed)
await asyncio.sleep(1)
# BONUS
nærkamp_value = int(self.charactersheet[author_id]["Character Sheet"]["Nærkamp"])
nærkamp_bonus = 10
nærkamp_value += nærkamp_bonus
self.charactersheet[author_id]["Character Sheet"]["Nærkamp"] = nærkamp_value
kaste_strenge_våben_value = int(self.charactersheet[author_id]["Character Sheet"]["Kaste_Strenge_våben"])
kaste_strenge_våben_bonus = 10
kaste_strenge_våben_value += kaste_strenge_våben_bonus
self.charactersheet[author_id]["Character Sheet"]["Kaste_Strenge_våben"] = kaste_strenge_våben_value
skydevåben_value = int(self.charactersheet[author_id]["Character Sheet"]["Skydevåben"])
skydevåben_bonus = 10
skydevåben_value += skydevåben_bonus
self.charactersheet[author_id]["Character Sheet"]["Skydevåben"] = skydevåben_value
snig_value = int(self.charactersheet[author_id]["Character Sheet"]["Snig"])
snig_bonus = 10
snig_value += snig_bonus
self.charactersheet[author_id]["Character Sheet"]["Snig"] = snig_value
# NEGATE
smigre_value = int(self.charactersheet[author_id]["Character Sheet"]["Smigre"])
smigre_negate = 10
smigre_value -= smigre_negate
if smigre_value < 0:
smigre_value = 0
self.charactersheet[author_id]["Character Sheet"]["Smigre"] = smigre_value
løgn_value = int(self.charactersheet[author_id]["Character Sheet"]["Løgn"])
løgn_negate = 10
løgn_value -= løgn_negate
if løgn_value < 0:
løgn_value = 0
self.charactersheet[author_id]["Character Sheet"]["Løgn"] = løgn_value
intimiderer_value = int(self.charactersheet[author_id]["Character Sheet"]["Intimiderer"])
intimiderer_negate = 10
intimiderer_value -= intimiderer_negate
if intimiderer_value < 0:
intimiderer_value = 0
self.charactersheet[author_id]["Character Sheet"]["Intimiderer"] = intimiderer_value
handel_value = int(self.charactersheet[author_id]["Character Sheet"]["Handel"])
handel_negate = 10
handel_value -= handel_negate
if handel_value < 0:
handel_value = 0
self.charactersheet[author_id]["Character Sheet"]["Handel"] = handel_value
fh.save_file(self.charactersheet, 'charactersheet')
# MUTANT
if self.charactersheet[author_id]["Character Sheet"]["Chosen Character"] == "Mutant":
mutant_embed = discord.Embed(title=f"__**Mutant**__", description=f"*Mutanter er | |
<reponame>woodrow/pyoac<gh_stars>1-10
# ______________________________________________________________________
import sys, operator, types
from pypy.interpreter.baseobjspace import ObjSpace, Wrappable
from pypy.interpreter.pycode import PyCode, cpython_code_signature
from pypy.interpreter.module import Module
from pypy.interpreter.error import OperationError
from pypy.objspace.flow.model import *
from pypy.objspace.flow import flowcontext
from pypy.objspace.flow.operation import FunctionByName
from pypy.rlib.unroll import unrolling_iterable, _unroller
# Module-level debug flag (not referenced in this part of the module —
# presumably checked elsewhere; verify before removing).
debug = 0

class UnwrapException(Exception):
    "Attempted to unwrap a Variable."

class WrapException(Exception):
    """Attempted wrapping of a type that cannot sanely appear in flow graph or during its construction"""

# method-wrappers have not enough introspection in CPython
if hasattr(complex.real.__get__, 'im_self'):
    type_with_bad_introspection = None     # on top of PyPy
else:
    type_with_bad_introspection = type(complex.real.__get__)
# ______________________________________________________________________
class FlowObjSpace(ObjSpace):
"""NOT_RPYTHON.
The flow objspace space is used to produce a flow graph by recording
the space operations that the interpreter generates when it interprets
(the bytecode of) some function.
"""
full_exceptions = False
do_imports_immediately = True
    def initialize(self):
        # Build the wrapped constants and pseudo-modules the flow interpreter
        # needs.  While populating them we are in "concrete mode" (value 1):
        # only Constants may be created, no operations recorded.
        import __builtin__
        self.concrete_mode = 1
        self.w_None = Constant(None)
        self.builtin = Module(self, Constant('__builtin__'), Constant(__builtin__.__dict__))
        def pick_builtin(w_globals):
            # Always use the single shared builtin module, whatever globals.
            return self.builtin
        self.builtin.pick_builtin = pick_builtin
        self.sys = Module(self, Constant('sys'), Constant(sys.__dict__))
        self.sys.recursionlimit = 100
        self.w_False = Constant(False)
        self.w_True = Constant(True)
        self.w_type = Constant(type)
        self.w_tuple = Constant(tuple)
        self.concrete_mode = 0
        # Exceptions that may legitimately show up while flowing get a
        # w_<Name> Constant attribute.
        for exc in [KeyError, ValueError, IndexError, StopIteration,
                    AssertionError, TypeError, AttributeError, ImportError]:
            clsname = exc.__name__
            setattr(self, 'w_'+clsname, Constant(exc))
        # the following exceptions are the ones that should not show up
        # during flow graph construction; they are triggered by
        # non-R-Pythonic constructs or real bugs like typos.
        for exc in [NameError, UnboundLocalError]:
            clsname = exc.__name__
            setattr(self, 'w_'+clsname, None)
        self.specialcases = {}
        #self.make_builtins()
        #self.make_sys()
        # objects which should keep their SomeObjectness
        self.not_really_const = NOT_REALLY_CONST
    def enter_cache_building_mode(self):
        # when populating the caches, the flow space switches to
        # "concrete mode". In this mode, only Constants are allowed
        # and no SpaceOperation is recorded.
        # Returns the replaced recorder; pass it back to
        # leave_cache_building_mode() to restore it.
        previous_recorder = self.executioncontext.recorder
        self.executioncontext.recorder = flowcontext.ConcreteNoOp()
        self.concrete_mode += 1
        return previous_recorder
    def leave_cache_building_mode(self, previous_recorder):
        # Restore the recorder saved by enter_cache_building_mode() and
        # drop one level of concrete mode.
        self.executioncontext.recorder = previous_recorder
        self.concrete_mode -= 1
def newdict(self):
if self.concrete_mode:
return Constant({})
return self.do_operation('newdict')
def newtuple(self, args_w):
try:
content = [self.unwrap(w_arg) for w_arg in args_w]
except UnwrapException:
return self.do_operation('newtuple', *args_w)
else:
return Constant(tuple(content))
def newlist(self, args_w):
if self.concrete_mode:
content = [self.unwrap(w_arg) for w_arg in args_w]
return Constant(content)
return self.do_operation('newlist', *args_w)
def newslice(self, w_start, w_stop, w_step):
if self.concrete_mode:
return Constant(slice(self.unwrap(w_start),
self.unwrap(w_stop),
self.unwrap(w_step)))
return self.do_operation('newslice', w_start, w_stop, w_step)
    def wrap(self, obj):
        # Wrap a plain Python object as a flow-graph Constant.  Refuses
        # objects that are already flow-graph nodes.
        if isinstance(obj, (Variable, Constant)):
            raise TypeError("already wrapped: " + repr(obj))
        # method-wrapper have ill-defined comparison and introspection
        # to appear in a flow graph
        if type(obj) is type_with_bad_introspection:
            raise WrapException
        return Constant(obj)
def int_w(self, w_obj):
if isinstance(w_obj, Constant):
val = w_obj.value
if type(val) not in (int,long):
raise TypeError("expected integer: " + repr(w_obj))
return val
return self.unwrap(w_obj)
def uint_w(self, w_obj):
if isinstance(w_obj, Constant):
from pypy.rlib.rarithmetic import r_uint
val = w_obj.value
if type(val) is not r_uint:
raise TypeError("expected unsigned: " + repr(w_obj))
return val
return self.unwrap(w_obj)
def str_w(self, w_obj):
if isinstance(w_obj, Constant):
val = w_obj.value
if type(val) is not str:
raise TypeError("expected string: " + repr(w_obj))
return val
return self.unwrap(w_obj)
def float_w(self, w_obj):
if isinstance(w_obj, Constant):
val = w_obj.value
if type(val) is not float:
raise TypeError("expected float: " + repr(w_obj))
return val
return self.unwrap(w_obj)
def unwrap(self, w_obj):
if isinstance(w_obj, Variable):
raise UnwrapException
elif isinstance(w_obj, Constant):
return w_obj.value
else:
raise TypeError("not wrapped: " + repr(w_obj))
    def unwrap_for_computation(self, w_obj):
        # Like unwrap(), but additionally refuses (via UnwrapException)
        # mutable instances whose value cannot safely be folded into the
        # flow graph at construction time.
        obj = self.unwrap(w_obj)
        to_check = obj
        if hasattr(to_check, 'im_self'):
            to_check = to_check.im_self  # inspect the bound method's receiver
        if (not isinstance(to_check, (type, types.ClassType, types.ModuleType)) and
            # classes/types/modules are assumed immutable
            hasattr(to_check, '__class__') and to_check.__class__.__module__ != '__builtin__'):
            # Instances may opt in to constant-folding via a _freeze_() hook.
            frozen = hasattr(to_check, '_freeze_') and to_check._freeze_()
            if not frozen:
                if self.concrete_mode:
                    # xxx do we want some warning? notice that some stuff is harmless
                    # like setitem(dict, 'n', mutable)
                    pass
                else: # cannot count on it not mutating at runtime!
                    raise UnwrapException
        return obj
def interpclass_w(self, w_obj):
obj = self.unwrap(w_obj)
if isinstance(obj, Wrappable):
return obj
return None
def getexecutioncontext(self):
return getattr(self, 'executioncontext', None)
    def createcompiler(self):
        # no parser/compiler needed - don't build one, it takes too much time
        # because it is done each time a FlowExecutionContext is built
        return None
    def setup_executioncontext(self, ec):
        # Install 'ec' as the current execution context, then let the
        # specialcase module register its operation overrides on this space.
        # (local import — presumably avoids a circular import; verify)
        self.executioncontext = ec
        from pypy.objspace.flow import specialcase
        specialcase.setup(self)
    def exception_match(self, w_exc_type, w_check_class):
        # Decide whether w_exc_type matches an 'except' guard.  The guard
        # must be a compile-time Constant — a Variable guard cannot be flowed.
        try:
            check_class = self.unwrap(w_check_class)
        except UnwrapException:
            raise Exception, "non-constant except guard"
        if not isinstance(check_class, tuple):
            # the simple case
            return ObjSpace.exception_match(self, w_exc_type, w_check_class)
        # checking a tuple of classes
        for w_klass in self.viewiterable(w_check_class):
            if ObjSpace.exception_match(self, w_exc_type, w_klass):
                return True
        return False
def getconstclass(space, w_cls):
try:
ecls = space.unwrap(w_cls)
except UnwrapException:
pass
else:
if isinstance(ecls, (type, types.ClassType)):
return ecls
return None
    def build_flow(self, func, constargs={}):
        """Build and return the flow graph of the plain Python function
        'func' by abstractly interpreting its bytecode.

        constargs: arguments to fix as constants (passed through to
        FlowExecutionContext).  Raises if 'func' is tagged NOT_RPYTHON
        or is a generator.
        """
        # NOTE(review): mutable default constargs={} — appears to be
        # read-only here; confirm FlowExecutionContext never mutates it.
        if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'):
            raise Exception, "%r is tagged as NOT_RPYTHON" % (func,)
        code = func.func_code
        if code.co_flags & 32:
            # generator
            raise TypeError("%r is a generator" % (func,))
        code = PyCode._from_code(self, code)
        if func.func_closure is None:
            closure = None
        else:
            closure = [extract_cell_content(c, name, func)
                       for c, name in zip(func.func_closure,
                                          func.func_code.co_freevars)]
        # CallableFactory.pycall may add class_ to functions that are methods
        name = func.func_name
        class_ = getattr(func, 'class_', None)
        if class_ is not None:
            name = '%s.%s' % (class_.__name__, name)
        # Sanitize characters that would upset graph naming downstream.
        for c in "<>&!":
            name = name.replace(c, '_')
        ec = flowcontext.FlowExecutionContext(self, code, func.func_globals,
                                              constargs, closure, name)
        graph = ec.graph
        graph.func = func
        # attach a signature and defaults to the graph
        # so that it becomes even more interchangeable with the function
        # itself
        graph.signature = cpython_code_signature(code)
        graph.defaults = func.func_defaults or ()
        self.setup_executioncontext(ec)
        from pypy.tool.error import FlowingError, format_global_error
        try:
            ec.build_flow()
        except FlowingError, a:
            # attach additional source info to AnnotatorError
            _, _, tb = sys.exc_info()
            e = FlowingError(format_global_error(ec.graph, ec.crnt_offset, str(a)))
            raise FlowingError, e, tb
        checkgraph(graph)
        return graph
    def viewiterable(self, w_tuple, expected_length=None):
        # Unwrap a constant tuple and re-wrap each element as a Constant.
        # Raises UnwrapException (via unwrap) if w_tuple is a Variable.
        unwrapped = self.unwrap(w_tuple)
        result = tuple([Constant(x) for x in unwrapped])
        if expected_length is not None and len(result) != expected_length:
            raise ValueError, "got a tuple of length %d instead of %d" % (
                len(result), expected_length)
        return result
def unpackiterable(self, w_iterable, expected_length=None):
if not isinstance(w_iterable, Variable):
l = list(self.unwrap(w_iterable))
if expected_length is not None and len(l) != expected_length:
raise ValueError
return [self.wrap(x) for x in l]
if isinstance(w_iterable, Variable) and expected_length is None:
raise UnwrapException, ("cannot unpack a Variable iterable"
"without knowing its length")
elif expected_length is not None:
w_len = self.len(w_iterable)
w_correct = self.eq(w_len, self.wrap(expected_length))
if not self.is_true(w_correct):
e = OperationError(self.w_ValueError, self.w_None)
e.normalize_exception(self)
raise e
return [self.do_operation('getitem', w_iterable, self.wrap(i))
for i in range(expected_length)]
return ObjSpace.unpackiterable(self, w_iterable, expected_length)
# ____________________________________________________________
def do_operation(self, name, *args_w):
    """Record a SpaceOperation `name` applied to `args_w` and return
    the fresh Variable that stands for its result."""
    op = SpaceOperation(name, args_w, Variable())
    if hasattr(self, 'executioncontext'):   # not here during bootstrapping
        ctx = self.executioncontext
        op.offset = ctx.crnt_offset
        ctx.recorder.append(op)
    return op.result
def do_operation_with_implicit_exceptions(self, name, *args_w):
    """Record operation `name` and then register the implicit exceptions
    (if any) that this operation is expected to be able to raise."""
    w_result = self.do_operation(name, *args_w)
    self.handle_implicit_exceptions(implicit_exceptions.get(name))
    return w_result
def is_true(self, w_obj):
    """Return the truth value of ``w_obj``.

    A constant is evaluated immediately with ``bool``; otherwise an
    'is_true' operation is recorded and the execution context is asked
    to guess (and explore) both outcomes.
    """
    try:
        concrete = self.unwrap_for_computation(w_obj)
    except UnwrapException:
        # not a compile-time constant
        w_truthvalue = self.do_operation('is_true', w_obj)
        return self.getexecutioncontext().guessbool(w_truthvalue)
    return bool(concrete)
def iter(self, w_iterable):
    """Emit an 'iter' operation for ``w_iterable``.

    Constants of type unrolling_iterable are special-cased: their
    unroller is returned wrapped, so the loop is unrolled at flow time.
    """
    try:
        concrete = self.unwrap(w_iterable)
    except UnwrapException:
        pass
    else:
        if isinstance(concrete, unrolling_iterable):
            # constant marked for unrolling: hand back its unroller
            return self.wrap(concrete.get_unroller())
    return self.do_operation("iter", w_iterable)
def next(self, w_iter):
    """Fetch the next item from ``w_iter``.

    An unroller constant is stepped concretely: its next value is
    returned wrapped and the advanced unroller replaces the old one on
    the frame stack; exhaustion raises StopIteration.  Otherwise a
    'next' operation is recorded and the execution context guesses
    among normal result, StopIteration and RuntimeError outcomes.
    """
    context = self.getexecutioncontext()
    try:
        it = self.unwrap(w_iter)
    except UnwrapException:
        pass
    else:
        if isinstance(it, _unroller):
            try:
                v, next_unroller = it.step()
            except IndexError:
                # unroller exhausted
                raise OperationError(self.w_StopIteration, self.w_None)
            else:
                # advance the unroller in-place on the value stack
                context.replace_in_stack(it, next_unroller)
                return self.wrap(v)
    w_item = self.do_operation("next", w_iter)
    outcome, w_exc_cls, w_exc_value = context.guessexception(StopIteration,
                                                            RuntimeError)
    if outcome is StopIteration:
        raise OperationError(self.w_StopIteration, w_exc_value)
    elif outcome is RuntimeError:
        raise flowcontext.ImplicitOperationError(Constant(RuntimeError),
                                                w_exc_value)
    else:
        return w_item
def setitem(self, w_obj, w_key, w_val):
    """Record (or, in concrete mode, directly perform) ``obj[key] = val``."""
    if self.concrete_mode:
        # Try to carry out the assignment eagerly on the unwrapped
        # constants; fall through if any operand is not constant.
        try:
            operator.setitem(self.unwrap_for_computation(w_obj),
                             self.unwrap_for_computation(w_key),
                             self.unwrap_for_computation(w_val))
            return self.w_None
        except UnwrapException:
            pass
    return self.do_operation_with_implicit_exceptions('setitem', w_obj,
                                                      w_key, w_val)
def call_args(self, w_callable, args):
try:
fn = self.unwrap(w_callable)
sc = self.specialcases[fn] # TypeError if 'fn' not hashable
except (UnwrapException, KeyError, TypeError):
pass
else:
return sc(self, fn, args)
try:
args_w, kwds_w = args.unpack()
except UnwrapException:
args_w, kwds_w = '?', '?'
# NOTE: annrpython needs to know about the following two operations!
if not kwds_w:
# simple case
w_res = self.do_operation('simple_call', w_callable, *args_w)
else:
# general case
shape, args_w = args.flatten()
w_res = self.do_operation('call_args', w_callable, Constant(shape),
*args_w)
# maybe the call has generated an exception (any one)
# but, let's say, not if we are calling a built-in class or function
# because this gets in the way of the special-casing of
#
# raise SomeError(x)
#
# as shown by | |
chart=None, name=None,
latex_name=None):
r"""
Construct a scalar field.
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: f = M.scalar_field({X: x+y}, name='f') ; f
Scalar field f on the 2-dimensional topological manifold M
sage: from sage.manifolds.scalarfield import ScalarField
sage: isinstance(f, ScalarField)
True
sage: f.parent()
Algebra of scalar fields on the 2-dimensional topological
manifold M
sage: TestSuite(f).run()
"""
CommutativeAlgebraElement.__init__(self, parent)
domain = parent._domain
self._domain = domain
self._manifold = domain.manifold()
self._is_zero = False # a priori, may be changed below or via
# method __bool__()
self._name = name
if latex_name is None:
self._latex_name = self._name
else:
self._latex_name = latex_name
self._express = {} # dict of coordinate expressions (ChartFunction
# instances) with charts as keys
if coord_expression is not None:
if isinstance(coord_expression, dict):
for chart, expression in coord_expression.items():
if isinstance(expression, ChartFunction):
self._express[chart] = expression
else:
self._express[chart] = chart.function(expression)
elif isinstance(coord_expression, ChartFunction):
self._express[coord_expression.chart()] = coord_expression
else:
if chart is None:
chart = self._domain.default_chart()
if chart == 'all':
# coord_expression is the same in all charts (constant
# scalar field)
for ch in self._domain.atlas():
self._express[ch] = ch.function(coord_expression)
else:
self._express[chart] = chart.function(coord_expression)
self._init_derived() # initialization of derived quantities
####### Required methods for an algebra element (beside arithmetic) #######
def __bool__(self):
    r"""
    Return ``True`` if ``self`` is nonzero and ``False`` otherwise.

    This method is called by :meth:`~sage.structure.element.Element.is_zero()`.

    EXAMPLES:

    Tests on a 2-dimensional manifold::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: c_xy.<x,y> = M.chart()
        sage: f = M.scalar_field(x*y)
        sage: f.is_zero()
        False
        sage: f.set_expr(0)
        sage: f.is_zero()
        True
        sage: g = M.scalar_field(0)
        sage: g.is_zero()
        True
        sage: M.zero_scalar_field().is_zero()
        True
    """
    if self._is_zero:
        # already known to be zero
        return False
    if not self._express:
        # undefined scalar field
        return True
    # Scan the coordinate expressions, stopping at the first nonzero
    # one, and cache the outcome in self._is_zero.
    nonzero = any(not funct.is_zero()
                  for funct in itervalues(self._express))
    self._is_zero = not nonzero
    return nonzero

__nonzero__ = __bool__  # For Python2 compatibility
def is_trivial_zero(self):
    r"""
    Check if ``self`` is trivially equal to zero without any
    simplification.

    This method is supposed to be fast as compared with
    ``self.is_zero()`` or ``self == 0`` and is intended to be
    used in library code where trying to obtain a mathematically
    correct result by applying potentially expensive rewrite rules
    is not desirable.

    EXAMPLES::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: X.<x,y> = M.chart()
        sage: f = M.scalar_field({X: 0})
        sage: f.is_trivial_zero()
        True
        sage: f = M.scalar_field(0)
        sage: f.is_trivial_zero()
        True
        sage: M.zero_scalar_field().is_trivial_zero()
        True
        sage: f = M.scalar_field({X: x+y})
        sage: f.is_trivial_zero()
        False

    Scalar field defined by means of two charts::

        sage: U1 = M.open_subset('U1'); X1.<x1,y1> = U1.chart()
        sage: U2 = M.open_subset('U2'); X2.<x2,y2> = U2.chart()
        sage: f = M.scalar_field({X1: 0, X2: 0})
        sage: f.is_trivial_zero()
        True
        sage: f = M.scalar_field({X1: 0, X2: 1})
        sage: f.is_trivial_zero()
        False

    No simplification is attempted, so that ``False`` is returned for
    non-trivial cases::

        sage: f = M.scalar_field({X: cos(x)^2 + sin(x)^2 - 1})
        sage: f.is_trivial_zero()
        False

    On the contrary, the method
    :meth:`~sage.structure.element.Element.is_zero` and the direct
    comparison to zero involve some simplification algorithms and
    return ``True``::

        sage: f.is_zero()
        True
        sage: f == 0
        True
    """
    if self._is_zero:
        # already known to be zero; skip inspecting the expressions
        return True
    # Every stored coordinate expression must be syntactically zero.
    for expression in self._express.values():
        if not expression.is_trivial_zero():
            return False
    return True
# TODO: Remove this method as soon as ticket #28629 is solved?
def is_unit(self):
    r"""
    Return ``True`` iff ``self`` is not trivially zero in at least one of
    the given expressions since most scalar fields are invertible and a
    complete computation would take too much time.

    EXAMPLES::

        sage: M = Manifold(2, 'M', structure='top')
        sage: one = M.scalar_field_algebra().one()
        sage: one.is_unit()
        True
        sage: zero = M.scalar_field_algebra().zero()
        sage: zero.is_unit()
        False
    """
    if self._is_zero:
        # a field known to be zero is never invertible
        return False
    # Heuristic check only: declare the field a unit unless some
    # coordinate expression is syntactically zero.
    for expression in self._express.values():
        if expression.is_trivial_zero():
            return False
    return True
def __eq__(self, other):
    r"""
    Comparison (equality) operator.

    INPUT:

    - ``other`` -- a scalar field (or something else)

    OUTPUT:

    - ``True`` if ``self`` is equal to ``other``, ``False`` otherwise

    TESTS::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: X.<x,y> = M.chart()
        sage: f = M.scalar_field({X: x+y})
        sage: f == 1
        False
        sage: f == M.zero_scalar_field()
        False
        sage: g = M.scalar_field({X: x+y})
        sage: f == g
        True
        sage: h = M.scalar_field({X: 1})
        sage: h == M.one_scalar_field()
        True
        sage: h == 1
        True
    """
    if other is self:
        return True
    if not isinstance(other, ScalarField):
        # We try a conversion of other to a scalar field, except if
        # other is None (since this would generate an undefined scalar
        # field)
        if other is None:
            return False
        try:
            other = self.parent()(other)  # conversion to a scalar field
        except Exception:
            return False
    if other._domain != self._domain:
        # fields on different domains are never equal
        return False
    if other.is_zero():
        return self.is_zero()
    # Compare the coordinate expressions chart by chart on the charts
    # known to both fields (common_charts presumably guarantees that
    # both _express dicts contain each returned chart -- verify there)
    com_charts = self.common_charts(other)
    if com_charts is None:
        raise ValueError("no common chart for the comparison")
    for chart in com_charts:
        if not (self._express[chart] == other._express[chart]):
            return False
    return True
def __ne__(self, other):
    r"""
    Non-equality operator.

    INPUT:

    - ``other`` -- a scalar field

    OUTPUT:

    - ``True`` if ``self`` differs from ``other``, ``False`` otherwise

    TESTS::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: X.<x,y> = M.chart()
        sage: f = M.scalar_field({X: x+y})
        sage: f != 1
        True
        sage: f != M.zero_scalar_field()
        True
        sage: g = M.scalar_field({X: x+y})
        sage: f != g
        False
    """
    # simply negate the result of the equality test
    equal = (self == other)
    return not equal
####### End of required methods for an algebra element (beside arithmetic) #######
def _init_derived(self):
    r"""
    Initialize the derived quantities.

    TESTS::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: X.<x,y> = M.chart()
        sage: f = M.scalar_field({X: x+y})
        sage: f._init_derived()
    """
    # dict. of restrictions of self on subsets of self._domain,
    # with the subsets as keys
    self._restrictions = {}
def _del_derived(self):
    r"""
    Delete the derived quantities.

    TESTS::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: X.<x,y> = M.chart()
        sage: f = M.scalar_field({X: x+y})
        sage: U = M.open_subset('U', coord_def={X: x>0})
        sage: f.restrict(U)
        Scalar field on the Open subset U of the 2-dimensional topological
         manifold M
        sage: f._restrictions
        {Open subset U of the 2-dimensional topological manifold M:
         Scalar field on the Open subset U of the 2-dimensional topological
         manifold M}
        sage: f._del_derived()
        sage: f._restrictions  # restrictions are derived quantities
        {}
    """
    # clear in place so that any existing references to the dict
    # observe the deletion as well
    self._restrictions.clear()
def _repr_(self):
    r"""
    String representation of the object.

    TESTS::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: X.<x,y> = M.chart()
        sage: f = M.scalar_field({X: x+y})
        sage: f._repr_()
        'Scalar field on the 2-dimensional topological manifold M'
        sage: f = M.scalar_field({X: x+y}, name='f')
        sage: f._repr_()
        'Scalar field f on the 2-dimensional topological manifold M'
        sage: f
        Scalar field f on the 2-dimensional topological manifold M
    """
    # insert the name (if any) between the type and the domain
    if self._name is None:
        prefix = "Scalar field"
    else:
        prefix = "Scalar field " + self._name
    return prefix + " on the {}".format(self._domain)
def _latex_(self):
    r"""
    LaTeX representation of the object.

    TESTS::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: X.<x,y> = M.chart()
        sage: f = M.scalar_field({X: x+y})
        sage: f._latex_()
        '\\mbox{Scalar field on the 2-dimensional topological manifold M}'
        sage: f = M.scalar_field({X: x+y}, name='f')
        sage: f._latex_()
        'f'
        sage: f = M.scalar_field({X: x+y}, name='f', latex_name=r'\Phi')
        sage: f._latex_()
        '\\Phi'
        sage: latex(f)
        \Phi
    """
    # fall back on the plain-text description when no LaTeX symbol is set
    if self._latex_name is not None:
        return self._latex_name
    return r'\mbox{' + str(self) + r'}'
def set_name(self, name=None, latex_name=None):
    r"""
    Set (or change) the text name and LaTeX name of the scalar field.

    INPUT:

    - ``name`` -- (string; default: ``None``) name given to the scalar
      field
    - ``latex_name`` -- (string; default: ``None``) LaTeX symbol to denote
      the scalar field; if ``None`` while ``name`` is provided, the LaTeX
      symbol is set to ``name``

    EXAMPLES::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: X.<x,y> = M.chart()
        sage: f = M.scalar_field({X: x+y})
        sage: f = M.scalar_field({X: x+y}); f
        Scalar field on the 2-dimensional topological manifold M
        sage: f.set_name('f'); f
        Scalar field f on the 2-dimensional topological manifold M
        sage: latex(f)
        f
        sage: f.set_name('f', latex_name=r'\Phi'); f
        Scalar field f on the 2-dimensional topological manifold M
        sage: latex(f)
        \Phi
    """
    if name is not None:
        self._name = name
    if latex_name is not None:
        self._latex_name = latex_name
    elif name is not None:
        # default the LaTeX symbol to the plain-text name
        self._latex_name = name
    # propagate the new names to all known restrictions
    for restriction in self._restrictions.values():
        restriction.set_name(name=name, latex_name=latex_name)
def domain(self):
r"""
Return the open subset on which the scalar field is defined.
OUTPUT:
- instance of class
:class:`~sage.manifolds.manifold.TopologicalManifold`
representing the manifold's open subset on which the
scalar field is defined
EXAMPLES::
sage: M = Manifold(2, | |
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from quantrocket.cli.utils.parse import dict_str
def add_subparser(subparsers):
_parser = subparsers.add_parser("zipline", description="QuantRocket CLI for Zipline", help="Backtest and trade Zipline strategies")
_subparsers = _parser.add_subparsers(title="subcommands", dest="subcommand")
_subparsers.required = True
examples = """
Create a Zipline bundle for US stocks.
This command defines the bundle parameters but does not ingest the actual
data. To ingest the data, see `quantrocket zipline ingest`.
Examples:
Create a minute data bundle for all US stocks:
quantrocket zipline create-usstock-bundle usstock-1min
Create a bundle for daily data only:
quantrocket zipline create-usstock-bundle usstock-1d --data-frequency daily
Create a minute data bundle based on a universe:
quantrocket zipline create-usstock-bundle usstock-tech-1min --universes us-tech
Create a minute data bundle of free sample data:
quantrocket zipline create-usstock-bundle usstock-free-1min --free
"""
parser = _subparsers.add_parser(
"create-usstock-bundle",
help="create a Zipline bundle for US stocks",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the code to assign to the bundle (lowercase alphanumerics and hyphens only)")
parser.add_argument(
"-i", "--sids",
metavar="SID",
help="limit to these sids (only supported for minute data bundles)")
parser.add_argument(
"-u", "--universes",
metavar="UNIVERSE",
help="limit to these universes (only supported for minute data bundles)")
parser.add_argument(
"--free",
action="store_true",
help="limit to free sample data")
parser.add_argument(
"-d", "--data-frequency",
choices=["daily", "d", "minute", "m"],
help="whether to collect minute data (which also includes daily data) or "
"only daily data. Default is minute data. Possible choices: %(choices)s")
parser.set_defaults(func="quantrocket.zipline._cli_create_usstock_bundle")
examples = """
Create a Zipline bundle from a history database or real-time aggregate
database.
You can ingest 1-minute or 1-day databases.
This command defines the bundle parameters but does not ingest the actual
data. To ingest the data, see `quantrocket zipline ingest`.
Examples:
Create a bundle from a history database called "es-fut-1min" and name
it like the history database:
quantrocket zipline create-bundle-from-db es-fut-1min --from-db es-fut-1min --calendar us_futures --start-date 2015-01-01
Create a bundle named "usa-stk-1min-2017" for ingesting a single year of US
1-minute stock data from a history database called "usa-stk-1min":
quantrocket zipline create-bundle-from-db usa-stk-1min-2017 --from-db usa-stk-1min -s 2017-01-01 -e 2017-12-31 --calendar XNYS
Create a bundle from a real-time aggregate database and specify how to map
Zipline fields to the database fields:
quantrocket zipline create-bundle-from-db free-stk-1min --from-db free-stk-tick-1min --calendar XNYS --start-date 2020-06-01 --fields close:LastPriceClose open:LastPriceOpen high:LastPriceHigh low:LastPriceLow volume:VolumeClose
"""
parser = _subparsers.add_parser(
"create-bundle-from-db",
help="create a Zipline bundle from a history database or real-time aggregate database",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the code to assign to the bundle (lowercase alphanumerics and hyphens only)")
parser.add_argument(
"-d", "--from-db",
metavar="CODE",
help="the code of a history database or real-time aggregate database to ingest")
parser.add_argument(
"-c", "--calendar",
metavar="NAME",
help="the name of the calendar to use with this bundle "
"(provide '?' or any invalid calendar name to see available choices)")
parser.add_argument(
"-f", "--fields",
nargs="*",
type=dict_str,
metavar="ZIPLINE_FIELD:DB_FIELD",
help="mapping of Zipline fields (open, high, low, close, volume) to "
"db fields. Pass as 'zipline_field:db_field'. Defaults to mapping Zipline "
"'open' to db 'Open', etc.")
filters = parser.add_argument_group("filtering options for db ingestion")
filters.add_argument(
"-s", "--start-date",
metavar="YYYY-MM-DD",
required=True,
help="limit to historical data on or after this date. This parameter is required "
"and also determines the default start date for backtests and queries.")
filters.add_argument(
"-e", "--end-date",
metavar="YYYY-MM-DD",
help="limit to historical data on or before this date")
filters.add_argument(
"-u", "--universes",
nargs="*",
metavar="UNIVERSE",
help="limit to these universes")
filters.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to these sids")
filters.add_argument(
"--exclude-universes",
nargs="*",
metavar="UNIVERSE",
help="exclude these universes")
filters.add_argument(
"--exclude-sids",
nargs="*",
metavar="SID",
help="exclude these sids")
parser.set_defaults(func="quantrocket.zipline._cli_create_bundle_from_db")
examples = """
Ingest data into a previously defined bundle.
Examples:
Ingest data into a bundle called usstock-1min:
quantrocket zipline ingest usstock-1min
"""
parser = _subparsers.add_parser(
"ingest",
help="ingest data into a previously defined bundle",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the bundle code")
parser.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to these sids, overriding stored config")
parser.add_argument(
"-u", "--universes",
nargs="*",
metavar="UNIVERSE",
help="limit to these universes, overriding stored config")
parser.set_defaults(func="quantrocket.zipline._cli_ingest_bundle")
examples = """
List available data bundles and whether data has been ingested into them.
Examples:
quantrocket zipline list-bundles
"""
parser = _subparsers.add_parser(
"list-bundles",
help="list available data bundles and whether data has been ingested into them",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.set_defaults(func="quantrocket.zipline._cli_list_bundles")
examples = """
Return the configuration of a bundle.
Examples:
Return the configuration of a bundle called 'usstock-1min':
quantrocket zipline config usstock-1min
"""
parser = _subparsers.add_parser(
"config",
help="return the configuration of a bundle",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the bundle code")
parser.set_defaults(func="quantrocket.zipline._cli_get_bundle_config")
examples = """
Delete a bundle.
Examples:
Delete a bundle called 'es-fut-1min':
quantrocket zipline drop-bundle es-fut-1min --confirm-by-typing-bundle-code-again es-fut-1min
"""
parser = _subparsers.add_parser(
"drop-bundle",
help="delete a bundle",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the bundle code")
parser.add_argument(
"--confirm-by-typing-bundle-code-again",
metavar="CODE",
required=True,
help="enter the bundle code again to confirm you want to drop the bundle, its config, "
"and all its data")
parser.set_defaults(func="quantrocket.zipline._cli_drop_bundle")
examples = """
Set or show the default bundle to use for backtesting and trading.
Setting a default bundle is a convenience and is optional. It can be
overridden by manually specifying a bundle when backtesting or
trading.
Examples:
Set a bundle named usstock-1min as the default:
quantrocket zipline default-bundle usstock-1min
Show current default bundle:
quantrocket zipline default-bundle
"""
parser = _subparsers.add_parser(
"default-bundle",
help="set or show the default bundle to use for backtesting and trading",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"bundle",
nargs="?",
help="the bundle code")
parser.set_defaults(func="quantrocket.zipline._cli_get_or_set_default_bundle")
examples = """
Query minute or daily data from a Zipline bundle and download to a CSV file.
Examples:
Download a CSV of minute prices since 2015 for a single security from a bundle called
"usstock-1min":
quantrocket zipline get usstock-1min --start-date 2015-01-01 -i FIBBG12345 -o minute_prices.csv
"""
parser = _subparsers.add_parser(
"get",
help="query minute or daily data from a Zipline bundle and download to a CSV file",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the bundle code")
filters = parser.add_argument_group("filtering options")
filters.add_argument(
"-s", "--start-date",
metavar="YYYY-MM-DD",
help="limit to history on or after this date")
filters.add_argument(
"-e", "--end-date",
metavar="YYYY-MM-DD",
help="limit to history on or before this date")
filters.add_argument(
"-d", "--data-frequency",
choices=["daily", "d", "minute", "m"],
help="whether to query minute or daily data. If omitted, defaults to "
"minute data for minute bundles and to daily data for daily bundles. "
"This parameter only needs to be set to request daily data from a minute "
"bundle. Possible choices: %(choices)s")
filters.add_argument(
"-u", "--universes",
nargs="*",
metavar="UNIVERSE",
help="limit to these universes")
filters.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to these sids")
filters.add_argument(
"--exclude-universes",
nargs="*",
metavar="UNIVERSE",
help="exclude these universes")
filters.add_argument(
"--exclude-sids",
nargs="*",
metavar="SID",
help="exclude these sids")
filters.add_argument(
"-t", "--times",
nargs="*",
metavar="HH:MM:SS",
help="limit to these times")
outputs = parser.add_argument_group("output options")
outputs.add_argument(
"-o", "--outfile",
metavar="OUTFILE",
dest="filepath_or_buffer",
help="filename to write the data to (default is stdout)")
outputs.add_argument(
"-f", "--fields",
metavar="FIELD",
nargs="*",
help="only return these fields (pass '?' or any invalid fieldname to see "
"available fields)")
parser.set_defaults(func="quantrocket.zipline._cli_download_bundle_file")
examples = """
Backtest a Zipline strategy and write the test results to a CSV file.
The CSV result file contains several DataFrames stacked into one: the Zipline performance
results, plus the extracted returns, transactions, positions, and benchmark returns from those
results.
Examples:
Run a backtest from a strategy file called etf-arb.py and save a CSV file of results,
logging backtest progress at annual intervals:
quantrocket zipline backtest etf-arb --bundle arca-etf-eod -s 2010-04-01 -e 2016-02-01 -o results.csv --progress A
"""
parser = _subparsers.add_parser(
"backtest",
help="backtest a Zipline strategy and write the test results to a CSV file",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"strategy",
metavar="CODE",
help="the strategy to run (strategy filename without extension)")
parser.add_argument(
"-f", "--data-frequency",
choices=["daily", "d", "minute", "m"],
help="the data frequency to use. Possible choices: %(choices)s "
"(default is minute)")
parser.add_argument(
"--capital-base",
type=float,
metavar="FLOAT",
help="the starting capital for the simulation (default is 1e6 (1 million))")
parser.add_argument(
"-b", "--bundle",
metavar="CODE",
help="the data bundle to use for the simulation. If omitted, the default "
"bundle (if set) is used.")
parser.add_argument(
"-s", "--start-date",
metavar="YYYY-MM-DD",
help="the start date of the simulation (defaults to the bundle start date)")
parser.add_argument(
"-e", | |
[]
map_types = ["unambig"]
if readlen_ambig == True:
map_types.append("ambig")
for transcript in traninfo_dict:
outfile = open("{}/static/tmp/{}_{}".format(config.SCRIPT_LOC,transcript,curr_time),"w")
filepaths.append("{}_{}".format(transcript,curr_time))
count_dict = {}
for filetype in file_paths_dict:
for file_id in file_paths_dict[filetype]:
filepath = file_paths_dict[filetype][file_id]
cursor.execute("SELECT file_name, file_description FROM files WHERE file_id = {}".format(file_id))
result = cursor.fetchone()
file_name = result[0]
file_desc = result[1]
identifier = "{}_({})".format(file_name, file_desc)
identifiers.append(identifier)
count_dict[identifier] = {}
for i in range(0,traninfo_dict[transcript]["length"]):
count_dict[identifier][i] = 0
if os.path.isfile(filepath):
sqlite_db = SqliteDict(filepath, autocommit=False)
offsets = sqlite_db["offsets"]["fiveprime"]["offsets"]
else:
return "File not found: {}, please report this to <EMAIL> or via the contact page. ".format(filepath)
if transcript in sqlite_db:
for map_type in map_types:
counts = sqlite_db[transcript][map_type]
for readlen in counts:
if readlen > minreadlen and readlen < maxreadlen:
if readlen in offsets:
offset = offsets[readlen]
else:
offset = 15
for pos in counts[readlen]:
if apply_offset == True:
try:
count_dict[identifier][pos+offset] += counts[readlen][pos]
except:
pass
else:
count_dict[identifier][pos] += counts[readlen][pos]
if traninfo_dict[transcript]["cds_start"] != None:
outfile.write("#{}_{}_{}_{}\n".format(transcript, traninfo_dict[transcript]["gene"],traninfo_dict[transcript]["cds_start"],traninfo_dict[transcript]["cds_stop"]))
else:
outfile.write("#{}_{}_noncoding\n".format(transcript, traninfo_dict[transcript]["gene"]))
outfile.write("Pos,")
for identifier in identifiers:
outfile.write("{},".format(identifier))
outfile.write("Total\n")
for i in range(0,traninfo_dict[transcript]["length"]):
outfile.write(str(i)+",")
pos_total = 0
for identifier in identifiers:
outfile.write("{},".format(count_dict[identifier][i]))
pos_total += count_dict[identifier][i]
outfile.write("{}\n".format(pos_total))
#print "tar -czvf {1}/static/tmp/bulk_dl_{0}.tar.gz -C {2}".format(curr_time,config.SCRIPT_LOC, str(filepaths).strip("[]").replace(",","").replace("'",""))
subprocess.call("tar -C {1}/static/tmp/ -czvf {1}/static/tmp/bulk_dl_{0}.tar.gz {2}".format(curr_time,config.SCRIPT_LOC, str(filepaths).strip("[]").replace(",","").replace("'","")),shell=True)
return "<div style='padding-left: 55px;padding-top: 22px;'><a href='https://trips.ucc.ie/static/tmp/bulk_dl_{1}.tar.gz' target='_blank' ><button class='button centerbutton' type='submit'><b>Download result</b></button></a> </div>".format(config.SCRIPT_LOC, curr_time)
if plottype == "readlen_dist":
#print"readlength dist called, custom_search_region is ", custom_search_region
#print"metagene_tranlist is", metagene_tranlist
master_dict = {}
if metagene_tranlist != "":
metagene_tranlist = metagene_tranlist.split(",")
for filetype in file_paths_dict:
for file_id in file_paths_dict[filetype]:
filepath = file_paths_dict[filetype][file_id]
if os.path.isfile(filepath):
sqlite_db = SqliteDict(filepath, autocommit=False)
else:
return "File not found: {}, please report this to <EMAIL> or via the contact page. ".format(filepath)
# If no transcripts given and no region specified, get the precomputed read lengths (all transcripts, entire gene)
if metagene_tranlist == "" and custom_search_region == "whole_gene":
if readlen_ambig == True:
if "read_lengths" not in sqlite_db:
return "No readlength distribution data for this file, please report this to <EMAIL> or via the contact page."
else:
read_lengths = sqlite_db["read_lengths"]
sqlite_db.close()
for i in read_lengths:
if i in master_dict:
master_dict[i] += read_lengths[i]
else:
master_dict[i] = read_lengths[i]
elif readlen_ambig == False:
if "unambig_read_lengths" not in sqlite_db:
return "No unambiguous readlength distribution data for this file, please report this to <EMAIL> or via the contact page."
else:
read_lengths = sqlite_db["unambig_read_lengths"]
sqlite_db.close()
for i in read_lengths:
if i in master_dict:
master_dict[i] += read_lengths[i]
else:
master_dict[i] = read_lengths[i]
else:
traninfo_connection = sqlite3.connect("/home/DATA/www/tripsviz/tripsviz/trips_annotations/{0}/{0}.{2}.sqlite".format(organism,transcriptome))
traninfo_cursor = traninfo_connection.cursor()
if metagene_tranlist == "":
#print"metagene tranlist is", metagene_tranlist
traninfo_cursor.execute("SELECT transcript,sequence,cds_start,cds_stop FROM transcripts WHERE principal = 1;")
result = traninfo_cursor.fetchall()
else:
#print"metagene tranlist is", metagene_tranlist
#print "SELECT transcript,sequence,cds_start,cds_stop FROM transcripts WHERE transcript IN ({})".format(str(metagene_tranlist).strip("[]"))
traninfo_cursor.execute("SELECT transcript,sequence,cds_start,cds_stop FROM transcripts WHERE transcript IN ({})".format(str(metagene_tranlist).strip("[]").replace('"','')))
result = traninfo_cursor.fetchall()
#print "result", result
for row in result:
tran = row[0]
seq = row[1]
cds_start = row[2]
cds_stop = row[3]
if tran in sqlite_db:
counts = sqlite_db[tran]["unambig"]
for readlen in counts:
if readlen not in master_dict:
master_dict[readlen] = 0
for pos in counts[readlen]:
cnt = counts[readlen][pos]
if custom_search_region == "whole_gene":
master_dict[readlen] += cnt
elif custom_search_region == "five_leader":
if pos < cds_start:
master_dict[readlen] += cnt
elif custom_search_region == "cds":
if pos > cds_start and pos < cds_stop:
master_dict[readlen] += cnt
elif custom_search_region == "three_trailer":
if pos > cds_stop:
master_dict[readlen] += cnt
title = "Readlength distribution"
connection.close()
return metainfo_plots.readlen_dist(master_dict,title,short_code, background_col,readlength_col,title_size, axis_label_size, subheading_size,marker_size)
if plottype == "mismatch_pos":
master_dict = {}
for filetype in file_paths_dict:
for file_id in file_paths_dict[filetype]:
filepath = file_paths_dict[filetype][file_id]
if os.path.isfile(filepath):
sqlite_db = SqliteDict(filepath, autocommit=False)
else:
return "File not found: {}, please report this to <EMAIL> or via the contact page. ".format(filepath)
if "global_mismatches" not in sqlite_db:
return "No mismatch data for this file, please report this to <EMAIL> or via the contact page."
else:
mismatches = sqlite_db["global_mismatches"]
sqlite_db.close()
for readlen in mismatches:
if readlen < mismatch_minreadlen or readlen > mismatch_maxreadlen:
continue
for pos in mismatches[readlen]:
if pos in master_dict:
master_dict[pos] += mismatches[readlen][pos]
else:
master_dict[pos] = mismatches[readlen][pos]
title = "Mismatch positions"
return metainfo_plots.mismatch_pos(master_dict,title,short_code, background_col,readlength_col,title_size, axis_label_size, subheading_size,marker_size)
elif plottype == "single_tran_de":
range1 = single_tran_de_range1.split("_")
range1_kbp = (float(int(range1[1]) - int(range1[0])))/1000
master_list = []
master_dict = {}
for filetype in file_paths_dict:
for file_id in file_paths_dict[filetype]:
range1_count = 1.001
filepath = file_paths_dict[filetype][file_id]
filename = filepath.split("/")[-1]
cursor.execute("SELECT file_description from files WHERE file_id = {};".format(file_id))
result = (cursor.fetchone())
file_desc = result[0]
study = filepath.split("/")[-2]
if study not in master_dict:
master_dict[study] = {"range1_count":0}
if os.path.isfile(filepath):
#Add the counts to the profile
sqlite_db = SqliteDict(filepath, autocommit=False)
mapped_reads = 0
try:
mapped_reads += float(sqlite_db["noncoding_counts"])
except:
pass
try:
mapped_reads += float(sqlite_db["coding_counts"])
except:
pass
if mapped_reads < 100000:
return "ERROR: File {} has less than 100,000 mapped reads".format(filename)
if "offsets" in sqlite_db:
offsets = sqlite_db["offsets"]["fiveprime"]["offsets"]
else:
offsets = {}
profile = {}
if single_tran_de_transcript in sqlite_db:
sqlite_db_tran = sqlite_db[single_tran_de_transcript]
for readlen in sqlite_db_tran["unambig"]:
if readlen in offsets:
offset = offsets[readlen]
else:
offset = 15
for pos in sqlite_db_tran["unambig"][readlen]:
count = sqlite_db_tran["unambig"][readlen][pos]
offset_pos = offset+pos
if offset_pos not in profile:
profile[offset_pos] = 0
profile[offset_pos] += count
for x in range(int(range1[0]), int(range1[1])):
if x in profile:
range1_count += profile[x]
#print"range1_count, range1_len, mapped reads",range1_count, range1_kbp, mapped_reads
range1_tpm = (range1_count/range1_kbp)
range1_tpm = range1_tpm/(mapped_reads/1000000)
#print"range1_tpm", range1_tpm
master_dict[study]["range1_count"] += range1_count
master_list.append((file_id, filename, range1_tpm,mapped_reads,file_desc))
study_master_list = []
for study in master_dict:
range1_count = master_dict[study]["range1_count"]
study_master_list.append((0, study, range1_count))
sorted_master_list = sorted(master_list, key=lambda x:x[1])
return metainfo_plots.single_tran_de(single_tran_de_transcript, sorted_master_list,study_master_list,organism, transcriptome)
elif plottype == "codon_usage":
traninfo_connection = sqlite3.connect("/home/DATA/www/tripsviz/tripsviz/trips_annotations/{0}/{0}.{2}.sqlite".format(organism,transcriptome))
traninfo_cursor = traninfo_connection.cursor()
codon_dict = {}
principal_transcripts = {}
traninfo_cursor.execute("SELECT transcript,sequence,cds_start,cds_stop FROM transcripts WHERE principal = 1;")
result = traninfo_cursor.fetchall()
for row in result:
if row[2] != "None" and row[2] != "" and row[2] != None:
principal_transcripts[str(row[0])] = {"seq":str(row[1]),"cds_start":int(row[2]),"cds_stop":int(row[3])}
if file_paths_dict["riboseq"] == {} and file_paths_dict["rnaseq"] == {}:
flash("Error no files selected")
return "Error no files selected"
all_values = []
offset_dict = {}
for file_id in file_paths_dict["riboseq"]:
sqlite_db = SqliteDict(file_paths_dict["riboseq"][file_id])
try:
offsets = sqlite_db["offsets"]["fiveprime"]["offsets"]
offset_dict[file_id] = offsets
except:
offset_dict[file_id] = {}
sqlite_db.close()
tran_count = 0
for file_id in file_paths_dict["riboseq"]:
sqlite_db = SqliteDict(file_paths_dict["riboseq"][file_id])
if "codon_usage_dict2" in sqlite_db:
codon_usage_dict = sqlite_db["codon_usage_dict"]
for codon in codon_usage_dict:
if codon not in codon_dict:
codon_dict[codon] = {"ribo_count":0,"codon_count":0.0}
codon_dict[codon]["ribo_count"] += codon_usage_dict[codon]["ribo_count"]
codon_dict[codon]["codon_count"] = codon_usage_dict[codon]["codon_count"]
else:
#codon_dict is the main dict that holds counts from all files, codon_usage_dict is file specific and will be saved for quick access later.
codon_usage_dict = {}
offsets = offset_dict[file_id]
#print"old offsets", offsets
poffsets = {}
for offset in offsets:
value = offsets[offset]
new_value = value - 3
poffsets[offset] = new_value
#print"new offsets", poffsets
for transcript in principal_transcripts:
tran_count += 1
if tran_count%1000 == 0:
print"tran_count", tran_count
profile = {}
if transcript not in sqlite_db:
continue
subprofile = build_profile(sqlite_db[transcript], poffsets,"unambig")
for pos in subprofile:
if pos not in profile:
profile[pos] = 0
profile[pos] += subprofile[pos]
seq = principal_transcripts[transcript]["seq"]
for i in range(principal_transcripts[transcript]["cds_start"]-1,principal_transcripts[transcript]["cds_start"]+30):
codon = seq[i:i+3]
if len(codon) != 3:
continue
count = 0
if i in profile:
count += profile[i]
#if i+1 in profile:
# count += profile[i+1]
#if i+2 in profile:
# count += profile[i+2]
if codon not in codon_dict:
codon_dict[codon] = {"ribo_count":0,"codon_count":0.0}
if codon not in codon_usage_dict:
codon_usage_dict[codon] = {"ribo_count":0,"codon_count":0.0}
codon_usage_dict[codon]["ribo_count"] += count
codon_usage_dict[codon]["codon_count"] += 1
for codon in codon_usage_dict:
codon_dict[codon]["ribo_count"] += codon_usage_dict[codon]["ribo_count"]
codon_dict[codon]["codon_count"] = codon_usage_dict[codon]["codon_count"]
sqlite_db["codon_usage_dict"] = codon_usage_dict
sqlite_db.commit()
sqlite_db.close()
return metainfo_plots.codon_usage(codon_dict,short_code,str(title_size)+"pt", str(axis_label_size)+"pt", str(marker_size)+"pt")
elif plottype == "diff_codon_usage":
traninfo_connection = sqlite3.connect("/home/DATA/www/tripsviz/tripsviz/trips_annotations/{0}/{0}.{2}.sqlite".format(organism,transcriptome))
traninfo_cursor = traninfo_connection.cursor()
codon_dict_cond = {"condition1":{},"condition2":{}}
diff_codon_dict = {}
principal_transcripts = {}
condition_totals = {"condition1":0,"condition2":0}
traninfo_cursor.execute("SELECT transcript,sequence,cds_start,cds_stop FROM transcripts WHERE principal = 1;")
result = traninfo_cursor.fetchall()
for row in result:
if row[2] != "None" and row[2] != "" and row[2] != None:
principal_transcripts[str(row[0])] = {"seq":str(row[1]),"cds_start":int(row[2]),"cds_stop":int(row[3])}
if file_paths_dict["riboseq"] == {} and file_paths_dict["rnaseq"] == {}:
flash("Error no files selected")
return "Error no files selected"
condition_dict = {"condition1":[file_paths_dict["riboseq"].keys()[0]], "condition2":[file_paths_dict["riboseq"].keys()[1]]}
all_values = []
offset_dict = {}
for condition in condition_dict:
for file_id in condition_dict[condition]:
sqlite_db = SqliteDict(file_paths_dict["riboseq"][file_id])
try:
offsets = sqlite_db["offsets"]["fiveprime"]["offsets"]
offset_dict[file_id] = offsets
except:
offset_dict[file_id] = {}
sqlite_db.close()
tran_count = 0
for file_id in condition_dict[condition]:
sqlite_db = SqliteDict(file_paths_dict["riboseq"][file_id])
if "codon_usage_dict" in sqlite_db:
codon_usage_dict = sqlite_db["codon_usage_dict"]
for codon in codon_usage_dict:
if codon not in codon_dict_cond[condition]:
codon_dict_cond[condition][codon] = {"ribo_count":0,"codon_count":0.0}
codon_dict_cond[condition][codon]["ribo_count"] += codon_usage_dict[codon]["ribo_count"]
condition_totals[condition] += codon_usage_dict[codon]["ribo_count"]
codon_dict_cond[condition][codon]["codon_count"] = codon_usage_dict[codon]["codon_count"]
else:
#codon_dict_cond is the main dict that holds counts from all files, codon_usage_dict is file specific and will be saved for quick access later.
codon_usage_dict = {}
for transcript in principal_transcripts:
tran_count += 1
if tran_count%1000 == 0:
print "tran_count", tran_count
profile = {}
if transcript not in sqlite_db:
continue
offsets = offset_dict[file_id]
subprofile = build_profile(sqlite_db[transcript], offsets,"unambig")
for pos in subprofile:
if pos not in profile:
profile[pos] = 0
profile[pos] += subprofile[pos]
seq = principal_transcripts[transcript]["seq"]
for i in range(principal_transcripts[transcript]["cds_start"], principal_transcripts[transcript]["cds_stop"],3):
codon = seq[i:i+3]
if len(codon) != 3:
continue
count = 0
if i in profile:
count += profile[i]
if i+1 in profile:
count += profile[i+1]
if i+2 in profile:
count += profile[i+2]
if codon not in codon_dict_cond[condition]:
codon_dict_cond[condition][codon] = {"ribo_count":0,"codon_count":0.0}
#codon_dict_cond[codon]["ribo_count"] += count
#codon_dict_cond[codon]["codon_count"] += 1
if codon not in codon_usage_dict:
codon_usage_dict[codon] = {"ribo_count":0,"codon_count":0.0}
codon_usage_dict[codon]["ribo_count"] += count
codon_usage_dict[codon]["codon_count"] += 1
for codon in codon_usage_dict:
codon_dict_cond[condition][codon]["ribo_count"] += codon_usage_dict[codon]["ribo_count"]
condition_totals[condition] += codon_usage_dict[codon]["ribo_count"]
codon_dict_cond[condition][codon]["codon_count"] = codon_usage_dict[codon]["codon_count"]
sqlite_db["codon_usage_dict"] = codon_usage_dict
sqlite_db.commit()
sqlite_db.close()
factor_diff = float(condition_totals["condition1"])/float(condition_totals["condition2"])
for codon in codon_dict_cond["condition1"]:
count1 = codon_dict_cond["condition1"][codon]["ribo_count"]
count2 = codon_dict_cond["condition2"][codon]["ribo_count"]*factor_diff
diff = count1-count2
diff_codon_dict[codon] = {"ribo_count":diff, "codon_count":codon_dict_cond["condition1"][codon]["codon_count"]}
return metainfo_plots.codon_usage(diff_codon_dict,short_code,str(title_size)+"pt", str(axis_label_size)+"pt", str(marker_size)+"pt")
elif plottype == "tran_corr":
master_list = []
master_dict = {}
for filetype in file_paths_dict:
for file_id in file_paths_dict[filetype]:
tran1_count = 1.001
tran2_count = 1.001
filepath = file_paths_dict[filetype][file_id]
filename = filepath.split("/")[-1]
study = filepath.split("/")[-2]
if filename not in master_dict:
master_dict[filename] = {"tran1_count":0,
"tran2_count":0}
if os.path.isfile(filepath):
#Add the counts to the profile
sqlite_db = SqliteDict(filepath, autocommit=False)
if "offsets" in sqlite_db:
offsets = sqlite_db["offsets"]["fiveprime"]["offsets"]
else:
offsets = {}
profile = {}
#TRAN1
if tran_corr_transcript1 in sqlite_db:
sqlite_db_tran = sqlite_db[tran_corr_transcript1]
for readlen in sqlite_db_tran["unambig"]:
if readlen in offsets:
offset = offsets[readlen]
else:
offset = 15
for pos in sqlite_db_tran["unambig"][readlen]:
count = sqlite_db_tran["unambig"][readlen][pos]
offset_pos = offset+pos
if offset_pos not in profile:
profile[offset_pos] = 0
profile[offset_pos] += count
for pos in profile:
tran1_count += profile[pos]
#TRAN2
profile = {}
if tran_corr_transcript2 in sqlite_db:
sqlite_db_tran = sqlite_db[tran_corr_transcript2]
for readlen in sqlite_db_tran["unambig"]:
if readlen in offsets:
offset = offsets[readlen]
else:
offset = 15
for pos in sqlite_db_tran["unambig"][readlen]:
count = sqlite_db_tran["unambig"][readlen][pos]
offset_pos = offset+pos
if offset_pos not in profile:
profile[offset_pos] = 0
profile[offset_pos] += count
for pos in profile:
tran2_count += profile[pos]
master_dict[filename]["tran1_count"] += tran1_count
master_dict[filename]["tran2_count"] += tran2_count
master_list.append((file_id, filename, log(tran1_count,2), log(tran2_count,2), study))
sorted_master_list = sorted(master_list, key=lambda x:x[2])
return metainfo_plots.tran_corr(tran_corr_transcript1, tran_corr_transcript2,sorted_master_list,organism, transcriptome)
elif plottype == "mismatches":
positive_hits = 0
result_list = []
file_string = ""
total_trans = 0
for filetype in file_paths_dict:
for file_id in file_paths_dict[filetype]:
file_string += "{},".format(file_id)
cursor.execute("SELECT owner FROM organisms WHERE organism_name = '{}' and transcriptome_list = '{}';".format(organism, transcriptome))
owner = (cursor.fetchone())[0]
if owner == 1:
traninfo_dict = SqliteDict("{0}{1}/{1}.sqlite".format(config.ANNOTATION_DIR,organism), autocommit=False)
else:
traninfo_dict = SqliteDict("{0}transcriptomes/{1}/{2}/{3}/{2}_{3}.sqlite".format(config.UPLOADS_DIR,owner,organism,transcriptome), autocommit=False)
if organism == "homo_sapiens" or organism == "homo_sapiens_polio":
longest_tran_db = SqliteDict("{0}homo_sapiens/principal_isoforms_5ldr3tlr_rnaseq.sqlite".format(config.ANNOTATION_DIR), autocommit=False)
longest_tran_list = longest_tran_db["transcripts"]
longest_tran_db.close()
else:
longest_tran_list = traninfo_dict.keys()
if mismatch_agg == True:
for transcript in longest_tran_list:
total_trans += 1
if total_trans%100 == 0:
print | |
<reponame>ajuvercr/idasen-controller
#!python3
import os
import sys
import shutil
import struct
import argparse
import yaml
import asyncio
from bleak import BleakClient, BleakError, BleakScanner
import pickle
import json
import functools
from appdirs import user_config_dir
# Platform flags: bleak's behaviour differs per OS, and some GATT writes are
# only issued on Linux (see the stop/unsubscribe helpers below).
IS_LINUX = sys.platform == "linux" or sys.platform == "linux2"
IS_WINDOWS = sys.platform == "win32"
IS_MAC = sys.platform == "darwin"
# HELPER FUNCTIONS
def mmToRaw(mm):
    """Convert a height in mm above the floor to the desk's raw unit
    (tenths of a millimetre above BASE_HEIGHT)."""
    return 10 * (mm - BASE_HEIGHT)
def rawToMM(raw):
    """Convert the desk's raw height unit (tenths of a millimetre above
    BASE_HEIGHT) back to mm above the floor."""
    return BASE_HEIGHT + raw / 10
def rawToSpeed(raw):
    """Convert the desk's raw speed unit (hundredths of mm/s) to mm/s."""
    return raw / 100
# GATT CHARACTERISTIC AND COMMAND DEFINITIONS
UUID_HEIGHT = '99fa0021-338a-1024-8a49-009c0215f78a'
UUID_COMMAND = '99fa0002-338a-1024-8a49-009c0215f78a'
UUID_REFERENCE_INPUT = '99fa0031-338a-1024-8a49-009c0215f78a'
# Each command is a little-endian unsigned 16-bit integer.
COMMAND_UP = bytearray(struct.pack("<H", 71))
COMMAND_DOWN = bytearray(struct.pack("<H", 70))
COMMAND_STOP = bytearray(struct.pack("<H", 255))
COMMAND_REFERENCE_INPUT_STOP = bytearray(struct.pack("<H", 32769))
COMMAND_REFERENCE_INPUT_UP = bytearray(struct.pack("<H", 32768))
COMMAND_REFERENCE_INPUT_DOWN = bytearray(struct.pack("<H", 32767))
# OTHER DEFINITIONS
DEFAULT_CONFIG_DIR = user_config_dir('idasen-controller')
DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_CONFIG_DIR, 'config.yaml')
# Cache of the last Bleak device object, used to skip rescanning (Linux only).
PICKLE_FILE = os.path.join(DEFAULT_CONFIG_DIR, 'desk.pickle')
# CONFIGURATION SETUP
# Height of the desk at it's lowest (in mm)
# I assume this is the same for all Idasen desks
BASE_HEIGHT = 620
MAX_HEIGHT = 1270 # 6500
# Default config; overridden first by config.yaml and then by CLI arguments.
if not os.path.isfile(DEFAULT_CONFIG_PATH):
    os.makedirs(os.path.dirname(DEFAULT_CONFIG_PATH), exist_ok=True)
    shutil.copyfile(os.path.join(os.path.dirname(__file__), 'example', 'config.yaml'), DEFAULT_CONFIG_PATH)
config = {
    "mac_address": None,
    "stand_height": BASE_HEIGHT + 420,
    "sit_height": BASE_HEIGHT + 63,
    "height_tolerance": 2.0,
    "adapter_name": 'hci0',
    "scan_timeout": 5,
    "connection_timeout": 10,
    "movement_timeout": 30,
    "sit": False,
    "stand": False,
    "monitor": False,
    "move_to": None,
    "server_address": "127.0.0.1",
    "server_port": 9123
}
# Command-line interface. All options default to None so that only flags the
# user actually passed end up overriding the config dict (see the args
# filtering at the end of this section).
parser = argparse.ArgumentParser(description='')
parser.add_argument('--mac-address', dest='mac_address',
                    type=str, help="Mac address of the Idasen desk")
parser.add_argument('--stand-height', dest='stand_height', type=int,
                    help="The height the desk should be at when standing (mm)")
parser.add_argument('--sit-height', dest='sit_height', type=int,
                    help="The height the desk should be at when sitting (mm)")
parser.add_argument('--height-tolerance', dest='height_tolerance', type=float,
                    help="Distance between reported height and target height before ceasing move commands (mm)")
parser.add_argument('--adapter', dest='adapter_name', type=str,
                    help="The bluetooth adapter device name")
parser.add_argument('--scan-timeout', dest='scan_timeout', type=int,
                    help="The timeout for bluetooth scan (seconds)")
parser.add_argument('--connection-timeout', dest='connection_timeout', type=int,
                    help="The timeout for bluetooth connection (seconds)")
parser.add_argument('--movement-timeout', dest='movement_timeout', type=int,
                    help="The timeout for waiting for the desk to reach the specified height (seconds)")
parser.add_argument('--forward', dest='forward', action='store_true',
                    help="Forward any commands to a server")
parser.add_argument('--server-address', dest='server_address', type=str,
                    help="The address the server should run at")
# NOTE(review): '--server_port' uses an underscore unlike every other flag;
# changing it would break existing invocations, so it is left as-is.
parser.add_argument('--server_port', dest='server_port', type=int,
                    help="The port the server should run on")
parser.add_argument('--config', dest='config', type=str,
                    help="File path to the config file (Default: {})".format(DEFAULT_CONFIG_PATH), default=DEFAULT_CONFIG_PATH)
# The actual actions are mutually exclusive: exactly one of these at a time.
cmd = parser.add_mutually_exclusive_group()
cmd.add_argument('--sit', dest='sit', action='store_true',
                 help="Move the desk to sitting height")
cmd.add_argument('--stand', dest='stand', action='store_true',
                 help="Move the desk to standing height")
cmd.add_argument('--monitor', dest='monitor', action='store_true',
                 help="Monitor desk height and speed")
cmd.add_argument('--move-to', dest='move_to', type=int,
                 help="Move desk to specified height (mm)")
cmd.add_argument('--scan', dest='scan_adapter', action='store_true',
                 help="Scan for devices using the configured adapter")
cmd.add_argument('--server', dest='server', action='store_true',
                 help="Run as a server to accept forwarded commands")
# Keep only the options the user explicitly supplied.
args = {k: v for k, v in vars(parser.parse_args()).items() if v is not None}
# Overwrite config from config.yaml
config_file = {}
config_file_path = os.path.join(args['config'])
if (config_file_path and os.path.isfile(config_file_path)):
    with open(config_file_path, 'r') as stream:
        try:
            # An empty YAML file parses to None; fall back to {} so the
            # config.update() below doesn't raise TypeError.
            config_file = yaml.safe_load(stream) or {}
        except yaml.YAMLError:
            print("Reading config.yaml failed")
            exit(1)
else:
    print('No config file found')
config.update(config_file)
# Overwrite config from command line args (CLI takes precedence over the file)
config.update(args)
# Validate settings before talking to the desk.
if not config['mac_address']:
    parser.error("Mac address must be provided")
if config['sit_height'] >= config['stand_height']:
    parser.error("Sit height must be less than stand height")
if config['sit_height'] < BASE_HEIGHT:
    parser.error("Sit height must be greater than {}".format(BASE_HEIGHT))
if config['stand_height'] > MAX_HEIGHT:
    parser.error("Stand height must be less than {}".format(MAX_HEIGHT))
config['mac_address'] = config['mac_address'].upper()
# NOTE(review): the *_raw height values below are still in mm; mmToRaw() is
# applied at the point of use in run_command(). Only the tolerance is
# pre-converted to raw units here.
config['stand_height_raw'] = config['stand_height']
config['sit_height_raw'] = config['sit_height']
config['height_tolerance_raw'] = 10 * config['height_tolerance']
if config['move_to']:
    config['move_to_raw'] = config['move_to']
if IS_WINDOWS:
    # Windows doesn't use this parameter so rename it so it looks nice for the logs
    config['adapter_name'] = 'default adapter'
# MAIN PROGRAM
def print_height_data(sender, data):
    """Notification callback: decode a raw height/speed packet and print it."""
    raw_height, raw_speed = struct.unpack("<Hh", data)
    line = "Height: {:4.0f}mm Speed: {:2.0f}mm/s".format(rawToMM(raw_height), rawToSpeed(raw_speed))
    print(line)
def has_reached_target(height, target):
    """Return True when height is within the configured tolerance of target.

    The notified height values seem a bit behind, so we stop before exactly
    reaching the target value to prevent overshooting.
    """
    tolerance = config['height_tolerance_raw']
    return abs(height - target) <= tolerance
async def move_up(client):
    """Send a single 'move up' command to the desk's command characteristic."""
    await client.write_gatt_char(UUID_COMMAND, COMMAND_UP)
async def move_down(client):
    """Send a single 'move down' command to the desk's command characteristic."""
    await client.write_gatt_char(UUID_COMMAND, COMMAND_DOWN)
async def stop(client):
    """Stop any desk movement immediately."""
    # This emulates the behaviour of the app. Stop commands are sent to both
    # Reference Input and Command characteristics.
    await client.write_gatt_char(UUID_COMMAND, COMMAND_STOP)
    if IS_LINUX:
        # It doesn't like this on windows
        await client.write_gatt_char(UUID_REFERENCE_INPUT, COMMAND_REFERENCE_INPUT_STOP)
async def subscribe(client, uuid, callback):
    """Listen for notifications on a characteristic.

    callback receives (sender, data) for each notification until
    unsubscribe() is called for the same uuid.
    """
    await client.start_notify(uuid, callback)
async def unsubscribe(client, uuid):
    """Stop listening for notifications on a characteristic (best effort)."""
    try:
        await client.stop_notify(uuid)
    except KeyError:
        # This happens on windows, I don't know why
        pass
async def move_to(client, target, do_print):
    """Move the desk to a specified height.

    target is in raw units (see mmToRaw); do_print is the output callback so
    the same routine works locally and when forwarding to a server. Returns
    once the desk reaches the target, movement is interrupted, or
    config['movement_timeout'] elapses.
    """
    initial_height, speed = struct.unpack("<Hh", await client.read_gatt_char(UUID_HEIGHT))
    # Initialise by setting the movement direction
    direction = "UP" if target > initial_height else "DOWN"
    # Set up callback to run when the desk height changes. It will resend
    # movement commands until the desk has reached the target height.
    loop = asyncio.get_event_loop()
    move_done = loop.create_future()
    # NOTE(review): `count` is a module-level global, so concurrent move_to
    # calls would interfere with each other — confirm single-flight usage.
    global count
    count = 0
    def _move_to(sender, data):
        global count
        height, speed = struct.unpack("<Hh", data)
        count = count + 1
        do_print("Height: {:4.0f}mm Target: {:4.0f}mm Speed: {:2.0f}mm/s".format(rawToMM(height), rawToMM(target), rawToSpeed(speed)))
        # Stop if we have reached the target OR
        # If you touch desk control while the script is running then movement
        # callbacks stop. The final call will have speed 0 so detect that
        # and stop.
        if speed == 0 or has_reached_target(height, target):
            asyncio.create_task(stop(client))
            asyncio.create_task(unsubscribe(client, UUID_HEIGHT))
            try:
                move_done.set_result(True)
            except asyncio.exceptions.InvalidStateError:
                # This happens on windows, I dont know why
                pass
        # Or resend the movement command if we have not yet reached the
        # target.
        # Each movement command seems to run the desk motors for about 1
        # second if uninterrupted and the height value is updated about 16
        # times.
        # Resending the command on the 6th update seems a good balance
        # between helping to avoid overshoots and preventing stutterinhg
        # (the motor seems to slow if no new move command has been sent)
        elif direction == "UP" and count == 6:
            asyncio.create_task(move_up(client))
            count = 0
        elif direction == "DOWN" and count == 6:
            asyncio.create_task(move_down(client))
            count = 0
    # Listen for changes to desk height and send first move command (if we are
    # not already at the target height).
    if not has_reached_target(initial_height, target):
        await subscribe(client, UUID_HEIGHT, _move_to)
        if direction == "UP":
            asyncio.create_task(move_up(client))
        elif direction == "DOWN":
            asyncio.create_task(move_down(client))
        try:
            await asyncio.wait_for(move_done, timeout=config['movement_timeout'])
        except asyncio.TimeoutError as e:
            do_print('Timed out while waiting for desk')
            await unsubscribe(client, UUID_HEIGHT)
def unpickle_desk():
    """Return the cached Bleak device if it exists and matches the configured
    MAC address, otherwise None. The cache is only used on Linux."""
    if not IS_LINUX:
        return None
    try:
        with open(PICKLE_FILE, 'rb') as handle:
            cached = pickle.load(handle)
        if cached.address == config['mac_address']:
            return cached
    except Exception:
        # Missing/corrupt cache file is expected on first run; fall through.
        pass
    return None
def pickle_desk(desk):
    """Attempt to pickle the desk"""
    # Cached only on Linux — unpickle_desk() in this file only reads the
    # cache on Linux as well.
    if IS_LINUX:
        with open(PICKLE_FILE, 'wb') as f:
            pickle.dump(desk, f)
async def scan(mac_address = None):
    """Scan for a bluetooth device with the configured address and return it or return all devices if no address specified"""
    print('Scanning\r', end ="")
    scanner = BleakScanner()
    devices = await scanner.discover(device=config['adapter_name'], timeout=config['scan_timeout'])
    # No target given: list everything that was discovered (--scan mode).
    if not mac_address:
        print('Found {} devices using {}'.format(len(devices), config['adapter_name']))
        for device in devices:
            print(device)
        return devices
    # Otherwise look for the one desk we care about.
    for device in devices:
        if (device.address == mac_address):
            print('Scanning - Desk Found')
            return device
    print('Scanning - Desk {} Not Found'.format(mac_address))
    return None
async def connect(client = None, attempt = 0):
    """Attempt to connect to the desk.

    Tries the pickled (cached) device first; falls back to a fresh scan. On a
    first-attempt connection failure with a cached device, the cache is
    discarded and the connection retried once. Exits the process on failure.
    """
    # Attempt to load and connect to the pickled desk.
    desk = unpickle_desk()
    # BUGFIX: previously `pickled` was only assigned when the cache loaded,
    # so a BleakError on the rescan path raised UnboundLocalError in the
    # except handler below. Initialise it unconditionally.
    pickled = desk is not None
    if not desk:
        # If that fails then rescan for the desk
        desk = await scan(config['mac_address'])
        if not desk:
            print('Could not find desk {}'.format(config['mac_address']))
            os._exit(1)
        # Cache the Bleak device config to connect more quickly in future
        pickle_desk(desk)
    try:
        print('Connecting\r', end ="")
        if not client:
            client = BleakClient(desk, device=config['adapter_name'])
        await client.connect(timeout=config['connection_timeout'])
        print("Connected {}".format(config['mac_address']))
        return client
    except BleakError as e:
        if attempt == 0 and pickled:
            # Could be a bad pickle so remove it and try again
            try:
                os.remove(PICKLE_FILE)
                print('Connecting failed - Retrying without cached connection')
            except OSError:
                pass
            return await connect(attempt = attempt + 1)
        else:
            print('Connecting failed')
            print(e)
            os._exit(1)
async def disconnect(client):
    """Attempt to disconnect cleanly"""
    # Only disconnect when a connection is actually established.
    if client.is_connected:
        await client.disconnect()
async def run_command(client, config, do_print=print):
"""Begin the action specified by command line arguments and config"""
# Always print current height
initial_height, speed = struct.unpack("<Hh", await client.read_gatt_char(UUID_HEIGHT))
do_print("Height: {:4.0f}mm".format(rawToMM(initial_height)))
target = None
if config['monitor']:
# Print changes to height data
await subscribe(client, UUID_HEIGHT, print_height_data)
loop = asyncio.get_event_loop()
wait = loop.create_future()
await wait
elif config['sit']:
# Move to configured sit height
target = config['sit_height_raw']
await move_to(client, mmToRaw(target), do_print)
elif config['stand']:
# Move to configured stand height
target = config['stand_height_raw']
await move_to(client, mmToRaw(target), do_print)
elif config['move_to']:
# Move to custom height
target = config['move_to_raw']
await move_to(client, mmToRaw(target), do_print)
if target:
# If we were moving to a target height, wait, then print the actual final height
await | |
<filename>cleanex.py
#!/usr/bin/env python
import os.path
import pandas as pd
import numpy as np
import sys
import argparse
import utility
import math
import itertools
import random
from datetime import datetime
from pareto import *
from radarPlot import *
import os.path
# Module-level state shared by the rule-generation routines below.
endNodes = set()             # leaf nodes discovered by generateTreeSucc
nodesInPathToFinalNode = {}  # node -> list of final nodes reachable through it
finalNodeToRules = {}        # final node -> ids of rules along its path
currentIdRule = 0            # next rule id handed out by addNewRule
rules = {}                   # rule id -> rendered rule string
nodesPairs = set()           # (parent, child) edges of the tree
maxTreeDepth = 0             # deepest recursion level seen by generateTreeSucc
deltaRuleToDeltaValue = {}   # rule id -> |delta| for increase/decrease rules
mlRules = set()              # ids of most/least rules (used by the polarity metric)
ruleToNbFeatures = {}        # rule id -> number of features the rule covers
predicateToNodes = {}        # predicate symbol -> set of rule ids (diversity metric)
outputFile = ""
runId = ""
def addPredicateForDiversity(predicate, ruleId):
    """Index ruleId under its predicate symbol (used by the diversity metric).

    setdefault collapses the original if/else, which duplicated the .add()
    call on both branches.
    """
    predicateToNodes.setdefault(predicate, set()).add(ruleId)
def uniq(lst):
    """Yield items of lst, skipping any item equal to its immediate predecessor."""
    previous = object()  # sentinel that compares unequal to everything
    for current in lst:
        if current == previous:
            continue
        yield current
        previous = current
def sort_and_deduplicate(l):
    """Return the values of l sorted descending with adjacent duplicates removed
    (i.e. the distinct values in descending order)."""
    result = []
    for value in sorted(l, reverse=True):
        if not result or value != result[-1]:
            result.append(value)
    return result
def addNewRule(predicate, node, prefix, rule, nbFeatures=0, delta=None, isML=False):
    """Register a rule string and index it in the module-level bookkeeping tables.

    Args:
        predicate: predicate symbol of the rule (indexed for the diversity metric).
        node: tree node the rule belongs to; the rule is linked to every final
            node whose path passes through this node.
        prefix: one-letter category prefix for the rendered rule id
            ("P" for path/succ rules, "S" for stable/equiv, "C" for changes
            and comparisons, as used by the callers in this file).
        rule: the rule body as a string.
        nbFeatures: how many features the rule talks about.
        delta: magnitude of change for increase/decrease rules, if any.
        isML: True for most/least rules (counted by the polarity metric).
    """
    global endNodes
    global currentIdRule
    global rules
    global finalNodeToRules
    global ruleToNbFeatures
    # Link the new rule id to every final node reachable through `node`.
    for finalNode in nodesInPathToFinalNode[node]:
        if finalNode in finalNodeToRules:
            finalNodeToRules[finalNode].append(currentIdRule)
        else:
            finalNodeToRules[finalNode] = [currentIdRule]
    rules[currentIdRule] = prefix + str(currentIdRule) + ": " + rule
    ruleToNbFeatures[currentIdRule] = nbFeatures
    if delta is not None:
        deltaRuleToDeltaValue[currentIdRule] = delta
    if isML:
        mlRules.add(currentIdRule)
    addPredicateForDiversity(predicate, currentIdRule)
    currentIdRule += 1
def addToNodeToFinal(node, correspondingFinalNode):
    """Record that correspondingFinalNode is reachable through node.

    setdefault replaces the original if/else branch duplication.
    """
    nodesInPathToFinalNode.setdefault(node, []).append(correspondingFinalNode)
def generateTreeSucc(baseNode, dfTreeStruct, currentProf):
    """Recursively walk the tree structure below baseNode.

    dfTreeStruct rows are (parent, child) edges. Returns (pathOut, succOut):
    pathOut is a list of node paths from baseNode's children down to each
    leaf, succOut pairs each reachable leaf with the conjunction of succ/2
    facts describing the path to it. Also updates the module-level
    nodesPairs, endNodes, nodesInPathToFinalNode and maxTreeDepth.
    """
    global maxTreeDepth
    pathOut = []
    succOut = []
    if currentProf > maxTreeDepth:
        maxTreeDepth = currentProf
    for index, row in dfTreeStruct.iterrows():
        if row[0] == baseNode:
            (pathList, succList) = generateTreeSucc(
                row[1], dfTreeStruct, currentProf + 1)
            if len(pathList) == 0:  # base case: row[1] is a leaf
                pathOut.append([row[1]])
                succOut.append(
                    [row[1], "succ(" + row[0] + "," + str(row[1]) + ")"])
                nodesPairs.add((row[0], row[1]))
                endNodes.add(row[1])
                nodesInPathToFinalNode[row[1]] = [row[1]]
            else:  # recursive case: prepend row[1] to every sub-path
                for path in pathList:
                    pathOut.append([row[1]] + path)
                for succ in succList:
                    addToNodeToFinal(row[1], succ[0])
                    succOut.append(
                        [succ[0], "succ(" + row[0] + "," + row[1] + ") /\ " + succ[1]])
                    nodesPairs.add((row[0], row[1]))
    return (pathOut, succOut)
def generateTreePaths(baseNode, dfTreeStruct):
    """Build root-to-leaf paths and emit the succ/2 structure rules for the tree."""
    (pathList, succList) = generateTreeSucc(baseNode, dfTreeStruct, 0)
    print("depth: " + str(maxTreeDepth))
    # One "P" rule per leaf, describing the chain of succ/2 facts leading to it.
    for succ in succList:
        addNewRule('succ', succ[0], "P", succ[1])
    # Prepend the root node to every path produced by the recursion.
    pathOut = [[baseNode] + path for path in pathList]
    # The root lies on the path to every final node.
    for finalNode in list(endNodes):
        addToNodeToFinal(baseNode, finalNode)
    return (pathOut, succList)
def generateFeatureChange(parentNode, childNode, dfTreeFeatures):
    """Emit change rules for one parent->child edge.

    Compares every feature column of dfTreeFeatures between the two nodes
    (differences rounded to 3 decimals). If every feature is unchanged a
    single equiv/2 rule is emitted; otherwise one stable/increase/decrease
    rule per feature, with increase/decrease carrying a delta term.
    """
    rowParentNode = dfTreeFeatures[dfTreeFeatures.node == parentNode]
    rowChildNode = dfTreeFeatures[dfTreeFeatures.node == childNode]
    nbFeatures = 0
    allStable = True
    nbStableTmp = 0  # NOTE(review): counted but never read afterwards
    explList = []
    for feature in dfTreeFeatures.columns:
        if(feature != 'node'):
            nbFeatures += 1
            valParent = rowParentNode.iloc[0][feature]
            valChild = rowChildNode.iloc[0][feature]
            diff = np.round(valChild-valParent, decimals=3)
            if(diff == 0.0):
                nbStableTmp += 1
                explList.append(
                    ("S", "stable(" + feature + "," + parentNode + "," + childNode + ")", None, 'stable'))
            else:
                allStable = False
                if diff > 0.0:
                    explList.append(
                        ("C", "increase(" + feature
                         + "," + parentNode
                         + "," + childNode
                         + ") /\ delta(" + feature
                         + "," + parentNode
                         + "," + childNode
                         + "," + str(abs(diff)) + ")", abs(diff), 'increase'))
                else:
                    explList.append(
                        ("C", "decrease(" + feature
                         + "," + parentNode
                         + "," + childNode + ") /\ delta(" + feature
                         + "," + parentNode
                         + "," + childNode
                         + "," + str(abs(diff)) + ")", abs(diff), 'decrease'))
    if allStable:
        # Nothing changed across this edge: collapse to a single equiv rule.
        equivStr = "equiv(" + parentNode + "," + childNode + ")"
        addNewRule('equiv', childNode, "S", equivStr, nbFeatures=nbFeatures)
    else:
        # Emit one rule per feature (tuple layout: prefix, body, delta, predicate).
        for rule in explList:
            addNewRule(rule[3], childNode, rule[0], rule[1],
                       delta=rule[2], nbFeatures=1)
def generateTreeChanges(dfTreeStruct, dfTreeFeatures):
    """Emit per-feature change rules for every parent-child edge of the tree."""
    for parent, child in nodesPairs:
        generateFeatureChange(parent, child, dfTreeFeatures)
def generateFeaturesRule(predicate, featuresSet, node):
    """Emit a grouped comparison rule predicate([f1,...,fn], node).

    most/least rules are flagged as ML rules (they feed the polarity metric).
    Uses str.join instead of the original manual "+=" loop, which also
    produced a malformed "]" string for an empty featuresSet.
    """
    featuresString = "[" + ",".join(str(feature) for feature in featuresSet) + "]"
    addNewRule(predicate, node, "C",
               predicate + "(" + featuresString + "," + str(node) + ")",
               nbFeatures=len(featuresSet),
               isML=(predicate == "most" or predicate == "least"))
def generateFeaturesRuleFromDictionnary(featuresDictionnary, node):
    """Emit rules predicate([features], node, ratio) from a
    {(predicate, ratio): [features]} mapping.

    Uses str.join instead of the original manual "+=" loop.
    """
    for (predicateName, ratio), featuresList in featuresDictionnary.items():
        featuresString = "[" + ",".join(str(feature) for feature in featuresList) + "]"
        addNewRule(str(predicateName), node, "C",
                   str(predicateName) + "(" + featuresString +
                   "," + str(node) + "," + str(ratio) + ")",
                   nbFeatures=len(featuresList))
def generateFeatureComparison(compNodes, dfTreeFeatures):
    """For each node, compare every feature value against all other nodes in
    compNodes and emit as/most/least rules (unanimous comparisons) or
    ratio-qualified grouped rules (mixed comparisons).
    """
    for nodeRef in compNodes:
        rowRef = dfTreeFeatures[dfTreeFeatures.node == nodeRef]
        # Store each result to allow grouping features in the generated rules.
        asSet = set()
        mostSet = set()
        leastSet = set()
        otherCompSet = {}
        for feature in dfTreeFeatures.columns:
            valRef = rowRef.iloc[0][feature]
            if(feature != 'node'):
                # Tally how nodeRef compares to every other node on this feature.
                resComp = {'more': 0, 'less': 0, 'as': 0}
                for nodeComp in compNodes:
                    if nodeComp != nodeRef:
                        rowComp = dfTreeFeatures[dfTreeFeatures.node == nodeComp]
                        valComp = rowComp.iloc[0][feature]
                        if valRef > valComp:
                            resComp['more'] += 1
                        elif valRef < valComp:
                            resComp['less'] += 1
                        else:
                            resComp['as'] += 1
                # Sort the tallies ascending by count; index 2 is the winner.
                resComp = {key: val for key, val in sorted(
                    resComp.items(), key=lambda item: item[1])}
                bestPredicate = list(resComp)[2]
                bestValue = resComp[bestPredicate]
                secondPredicate = list(resComp)[1]
                secondValue = resComp[secondPredicate]
                thirdPredicate = list(resComp)[0]
                tot = bestValue + secondValue + resComp[thirdPredicate]
                if secondValue == 0:
                    # Unanimous outcome: plain as/most/least rule.
                    if bestPredicate == 'as':
                        asSet.add(feature)
                    elif bestPredicate == 'more':
                        mostSet.add(feature)
                    else:
                        leastSet.add(feature)
                else:
                    # Mixed outcome: record the top two predicates with the
                    # fraction of comparisons supporting each.
                    bestRuleValue = np.round(bestValue/tot, decimals=3)
                    secondRuleValue = np.round(secondValue/tot, decimals=3)
                    dictKeyBest = (bestPredicate, bestRuleValue)
                    dictKeySecond = (secondPredicate, secondRuleValue)
                    if dictKeyBest in otherCompSet:
                        otherCompSet[dictKeyBest].append(feature)
                    else:
                        otherCompSet[dictKeyBest] = [feature]
                    if dictKeySecond in otherCompSet:
                        otherCompSet[dictKeySecond].append(feature)
                    else:
                        otherCompSet[dictKeySecond] = [feature]
        # Generate the rules for this reference node.
        if asSet:
            generateFeaturesRule("as", asSet, nodeRef)
        if mostSet:
            generateFeaturesRule("most", mostSet, nodeRef)
        if leastSet:
            generateFeaturesRule("least", leastSet, nodeRef)
        generateFeaturesRuleFromDictionnary(otherCompSet, nodeRef)
def divSubRoutine(n, N):
    """Return the (n/N)*log2(n/N) term of a Shannon-entropy style sum,
    with the conventional value 0 when n == 0."""
    if n == 0:
        return 0
    ratio = n / N
    return ratio * math.log2(ratio)
def generateRules(dfTreeStruct, dfTreeFeatures, rootNode, finalNode):
    """Generate all explanation rules for the tree: structure (succ),
    per-edge feature changes, and cross-node feature comparisons."""
    (pathOut, succOut) = generateTreePaths(rootNode, dfTreeStruct)
    generateTreeChanges(dfTreeStruct, dfTreeFeatures)
    # Every node appearing on any root-to-leaf path takes part in the comparison.
    nodesToEval = set(itertools.chain.from_iterable(pathOut))
    generateFeatureComparison(nodesToEval, dfTreeFeatures)
def computeQualityMetrics(rulesIdsSet):
    """Score the rule set `rulesIdsSet` on four quality metrics.

    All four are ratios in the sense of computeDivision (0 on empty totals):
      - polarity:   features of `mlRules` rules over all selected features
      - distancing: delta of selected delta rules over the total delta
      - surprise:   delta of non-selected rules over their feature count
      - diversity:  |sum over predicate symbols of (n/N)*log2(n/N)| / #symbols

    Relies on module-level state assumed to be populated by the earlier
    rule-generation phase (TODO confirm): `rules`, `ruleToNbFeatures`,
    `mlRules`, `deltaRuleToDeltaValue`, `predicateToNodes`.

    :param rulesIdsSet: collection of rule ids forming the evaluated set
    :return: dict with keys 'polarity', 'distancing', 'surprise', 'diversity'
    """
    outputMetrics = {}
    # accumulators for the four metrics
    deltaInput = 0
    deltaTot = 0
    surpriseDelta = 0
    surpriseTot = 0
    nbMLInput = 0
    nbFeatInput = 0
    for ruleId in rules.keys():
        if ruleId in rulesIdsSet: # rule in evaluated set
            # polarity: count all features of selected rules ...
            nbFeatInput += ruleToNbFeatures[ruleId]
            if ruleId in mlRules: # most/least rule in evaluated set
                # ... and separately those of most/least rules
                nbMLInput += ruleToNbFeatures[ruleId]
        else:
            # surprise is accumulated over rules NOT in the evaluated set
            surpriseTot += ruleToNbFeatures[ruleId]
            if ruleId in deltaRuleToDeltaValue:
                surpriseDelta += deltaRuleToDeltaValue[ruleId]
        # distancing
        if ruleId in deltaRuleToDeltaValue: # delta rule
            delta = deltaRuleToDeltaValue[ruleId]
            deltaTot += delta
            if ruleId in rulesIdsSet: # delta rule is in evaluated set
                deltaInput += delta
    # diversity: entropy-like sum over predicate symbols
    diversitySum = 0
    for predicateSymbol in predicateToNodes.keys():
        nbFeaturesPredicateInput = 0
        for ruleId in predicateToNodes[predicateSymbol]:
            if ruleId in rulesIdsSet:
                nbFeaturesPredicateInput += ruleToNbFeatures[ruleId]
        diversitySum += divSubRoutine(nbFeaturesPredicateInput, nbFeatInput)
    nbPredicateSymbols = len(predicateToNodes.keys())
    outputMetrics['polarity'] = computeDivision(nbMLInput, nbFeatInput)
    outputMetrics['distancing'] = computeDivision(deltaInput, deltaTot)
    outputMetrics['surprise'] = computeDivision(surpriseDelta, surpriseTot)
    outputMetrics['diversity'] = abs(computeDivision(diversitySum,
                                                     nbPredicateSymbols))
    return outputMetrics
def computeDivision(numerator, denominator):
    """Safe division: numerator/denominator, or 0 when the denominator is 0
    (metrics default to 0 for empty totals)."""
    return numerator / denominator if denominator != 0 else 0
def plotQualityIndicators(nodeMainBranch, mainBranchQI, otherQI):
    """Assemble the radar-chart data dict and hand it to plotRadar.

    :param nodeMainBranch: node of the main branch, or None to omit it
    :param mainBranchQI: quality vector of the main branch
    :param otherQI: list of [endNode, candidate, candidateQI] triples
    """
    dataForPlot = {'group': ['Polarity', 'Diversity', 'Distancing', 'Surprise']}
    if nodeMainBranch is not None:
        dataForPlot[nodeMainBranch] = mainBranchQI
    # One radar series per alternative end node (candidate itself unused).
    dataForPlot.update((endNode, qi) for endNode, _cand, qi in otherQI)
    plotRadar(dataForPlot)
def findBestRulesSetForBranch(finalNode, initMinRuleNumber, initMaxRuleNumber, criteria, isExpe=False, isMain=False, refNode=None):
    """Enumerate rule-set combinations for `finalNode` and return the best one.

    Evaluates every combination of the rules attached to `finalNode` whose
    size lies in [initMinRuleNumber, initMaxRuleNumber] (clamped to the
    number of available rules), scores each with computeQualityMetrics, and
    ranks them against the multi-objective `criteria` vector.

    Relies on module-level state: `finalNodeToRules` and, on the isExpe
    branch, `outputFile`, `runId`, `nodesInPathToFinalNode`, `maxTreeDepth`
    — TODO confirm these are initialised before calling.

    :return: the top-ranked (candidate, quality) entry — presumably a pair,
        per the unpacking in extractBestRulesSets; verify against
        getBestRankedCandidate.
    """
    assert(initMinRuleNumber <= initMaxRuleNumber)
    if refNode is None:
        refNode = finalNode
    relatedRulesIds = finalNodeToRules[finalNode]
    # Clamp the combination-size range to the number of available rules.
    maxRuleNumber = len(relatedRulesIds)
    if maxRuleNumber > initMaxRuleNumber:
        maxRuleNumber = initMaxRuleNumber
    minRuleNumber = initMinRuleNumber
    if minRuleNumber > maxRuleNumber:
        minRuleNumber = maxRuleNumber
    candidateRulesSet = []
    rulesSetsQuality = []
    for i in range(minRuleNumber, maxRuleNumber+1):
        combinationsList = list(itertools.combinations(relatedRulesIds, i))
        for candidate in combinationsList:
            qualityMetrics = computeQualityMetrics(candidate)
            # Quality vector order: polarity, diversity, distancing, surprise.
            paretoQI = [qualityMetrics['polarity'],
                        qualityMetrics['diversity'],
                        qualityMetrics['distancing'],
                        qualityMetrics['surprise']]
            candidateRulesSet.append(candidate)
            rulesSetsQuality.append(paretoQI)
    if isExpe:
        # Experiment mode: also logs run metadata to outputFile.
        bestCandidates = getBestRankedCandidateExpe(isMain,
                                                    outputFile,
                                                    runId,
                                                    finalNode,
                                                    1,
                                                    candidateRulesSet,
                                                    rulesSetsQuality,
                                                    criteria,
                                                    len(nodesInPathToFinalNode.keys())-1,
                                                    maxTreeDepth,
                                                    refNode=refNode)
    else:
        bestCandidates = getBestRankedCandidate(1,
                                                candidateRulesSet,
                                                rulesSetsQuality,
                                                criteria)
    return bestCandidates[0]
def addQuality(dictQualityToCandidate, paretoQI, candidate):
    """Register `candidate` under the quality key `paretoQI`.

    Appends to the list already stored for that key, creating the list on
    first use. The dict is mutated in place; nothing is returned.

    :param dictQualityToCandidate: dict mapping quality key -> list of candidates
    :param paretoQI: hashable quality key
    :param candidate: candidate rule set to record
    """
    # setdefault replaces the explicit membership test + two branches.
    dictQualityToCandidate.setdefault(paretoQI, []).append(candidate)
def prepareToPareto(rulesSetsCandidates):
    """Materialise the candidate quality vectors as a float32 numpy array
    suitable for Pareto-front computation."""
    materialized = list(rulesSetsCandidates)
    return np.array(materialized, dtype=np.float32)
def log(logString):
    """Print `logString` prefixed with the current wall-clock time (HH:MM:SS)."""
    stamp = datetime.now().strftime("%H:%M:%S")
    print(f"{stamp}: {logString}")
def extractBestRulesSets(nbToExtract, bestRulesSets, criteria):
    """Re-rank the per-branch best candidates and keep the top `nbToExtract`.

    :param nbToExtract: number of rule sets to keep overall
    :param bestRulesSets: list of [endNode, candidate, candidateQI] triples
    :param criteria: MOO criteria vector forwarded to getBestRankedCandidate
    :return: list of [endNode, bestCandidate, bestCandidateQI] triples
    """
    out = []
    endNodesList = []
    candidateRulesSet = []
    rulesSetsQuality = []
    # Flatten the triples into parallel lists for the ranking helper.
    for [endNode, candidate, candidateQI] in bestRulesSets:
        endNodesList.append(endNode)
        candidateRulesSet.append(candidate)
        rulesSetsQuality.append(candidateQI)
    bestCandidates = getBestRankedCandidate(nbToExtract,
                                            candidateRulesSet,
                                            rulesSetsQuality,
                                            criteria)
    for (bestCandidate, bestCandidateQI) in bestCandidates:
        print(str(bestCandidate) + ": " + str(bestCandidateQI))
        # NOTE(review): .index() returns the FIRST matching candidate, so if
        # the same candidate appears for two end nodes it maps to the first.
        node = endNodesList[candidateRulesSet.index(bestCandidate)]
        out.append([node, bestCandidate, bestCandidateQI])
    return out
def prepareCriteria(criteriaString):
    """Parse a comma-separated multi-objective criteria string.

    Accepted tokens per position: '+' or '1' (maximise), '-' or '-1'
    (minimise), '0' (ignore). They are normalised to '+', '-' and '0'.

    :param criteriaString: e.g. "+,-,0,1"
    :return: list of normalised criteria symbols
    :raises ValueError: on any unrecognised token
    """
    out = []
    for criteria in criteriaString.split(","):
        if criteria in ('+', '1'):
            out.append('+')
        elif criteria in ('-', '-1'):
            out.append('-')
        elif criteria == '0':
            out.append('0')
        else:
            # Fixed typo in the original message ("cannnot").
            raise ValueError('Value ' + criteria +
                             ' in the MOO vector cannot be recognized.')
    return out
def generateCLIparser():
parser = argparse.ArgumentParser(prog='ExpGen',
description='Generate explanation for a cleaning tree')
parser.add_argument('struct',
help='a path for the struct file')
parser.add_argument('feat',
| |
from multiprocessing import cpu_count
from os import path
from socket import gethostname
import os
# Short machine name = first label of the host's FQDN; recorded in
# params['MACHINE'] below so multiple runs can be told apart.
hostname = gethostname().split('.')
machine_name = hostname[0]
# Trading-hypothesis defaults, kept commented out for reference; the live
# dict below is intentionally empty.
#hypothesis_params = {
#    'START DATE':'2004-01-01',
#    'END DATE': '2018-01-15',
#    'PORTFOLIO': 'US_TOP_500_LIQUID',
#    'NEUTRALIZATION': 'DOLLAR',
#    'LONG LEVERAGE' : 0.5,
#    'SHORT LEVERAGE' : 0.5,
#    'STARTING VALUE' : 20000000,
#    'COST THRESHOLD BPS' : 5,
#    'ADV THRESHOLD PERCENTAGE':10,
#    'COMMISSION BPS': 0.1
#    }
hypothesis_params = dict()
"""Algorithm parameters"""
params = {
        # Set default step and search loop functions
        'SEARCH_LOOP': 'search_loop',
        'STEP': 'step',
        # Evolutionary Parameters
        'POPULATION_SIZE': 500,
        'GENERATIONS': 50,
        'HILL_CLIMBING_HISTORY': 1000,
        'SCHC_COUNT_METHOD': "count_all",
        # Set optional experiment name
        'EXPERIMENT_NAME': None,
        # Set default number of runs to be done.
        # ONLY USED WITH EXPERIMENT MANAGER.
        'RUNS': 1,
        # Class of problem
        'FITNESS_FUNCTION': "trading_fitness.regression",
        # Select problem dataset
        'DATASET_TRAIN': None,
        'DATASET_TEST': None,
        'DATASET_DELIMITER': None,
        # Set grammar file
        # NOTE(review): backslash path is Windows-specific; the params1 dict
        # below uses a forward slash — confirm which separator is intended.
        'GRAMMAR_FILE': "grammar\\current_grammar.bnf",
        # Set the number of depths permutations are calculated for
        # (starting from the minimum path of the grammar).
        # Mainly for use with the grammar analyser script.
        'PERMUTATION_RAMPS': 5,
        # Select error metric
        'ERROR_METRIC': None,
        # Optimise constants in the supervised_learning fitness function.
        'OPTIMIZE_CONSTANTS': False,
        # Specify target for target problems
        'TARGET': "ponyge_rocks",
        # Set max sizes of individuals
        'MAX_TREE_DEPTH': 90,  # SET TO 90 DUE TO PYTHON EVAL() STACK LIMIT.
        # INCREASE AT YOUR OWN RISK.
        'MAX_TREE_NODES': None,
        'CODON_SIZE': 100000,
        'MAX_GENOME_LENGTH': None,
        'MAX_WRAPS': 0,
        # INITIALISATION
        # Set initialisation operator.
        'INITIALISATION': "operators.initialisation.PI_grow",
        # Set the maximum genome length for initialisation.
        'INIT_GENOME_LENGTH': 200,
        # Set the maximum tree depth for initialisation.
        'MAX_INIT_TREE_DEPTH': 10,
        # Set the minimum tree depth for initialisation.
        'MIN_INIT_TREE_DEPTH': None,
        # SELECTION
        # Set selection operator.
        'SELECTION': "operators.selection.tournament",
        # For tournament selection
        'TOURNAMENT_SIZE': 2,
        # For truncation selection
        'SELECTION_PROPORTION': 0.5,
        # Allow for selection of invalid individuals during selection process.
        'INVALID_SELECTION': False,
        # OPERATOR OPTIONS
        # Boolean flag for selecting whether or not mutation is confined to
        # within the used portion of the genome. Default set to True.
        'WITHIN_USED': True,
        # CROSSOVER
        # Set crossover operator.
        'CROSSOVER': "operators.crossover.variable_onepoint",
        # Set crossover probability.
        'CROSSOVER_PROBABILITY': 0.75,
        # Prevents crossover from generating invalids.
        'NO_CROSSOVER_INVALIDS': False,
        # MUTATION
        # Set mutation operator.
        'MUTATION': "operators.mutation.int_flip_per_codon",
        # Set mutation probability (None defaults to 1 over the length of
        # the genome for each codon)
        'MUTATION_PROBABILITY': None,
        # Set number of mutation events
        'MUTATION_EVENTS': 1,
        # Prevents mutation from generating invalids.
        # NOTE(review): True here but False in params1 below — confirm which
        # default is intended.
        'NO_MUTATION_INVALIDS': True,
        # REPLACEMENT
        # Set replacement operator.
        'REPLACEMENT': "operators.replacement.generational",
        # Set elite size.
        'ELITE_SIZE': None,
        # DEBUGGING
        # Use this to turn on debugging mode. This mode doesn't write any files
        # and should be used when you want to test new methods.
        'DEBUG': False,
        # PRINTING
        # Use this to print out basic statistics for each generation to the
        # command line.
        'VERBOSE': False,
        # Use this to prevent anything being printed to the command line.
        'SILENT': False,
        'SAVE_STEP': True,
        # SAVING
        # Save the phenotype of the best individual from each generation. Can
        # generate a lot of files. DEBUG must be False.
        'SAVE_ALL': True,
        # Save a plot of the evolution of the best fitness result for each
        # generation.
        'SAVE_PLOTS': True,
        # MULTIPROCESSING
        # Multi-core parallel processing of phenotype evaluations.
        'MULTICORE': False,
        # Set the number of cpus to be used for multiprocessing
        'CORES': cpu_count(),
        # STATE SAVING/LOADING
        # Save the state of the evolutionary run every generation. You can
        # specify how often you want to save the state with SAVE_STATE_STEP.
        'SAVE_STATE': False,
        # Specify how often the state of the current evolutionary run is
        # saved (i.e. every n-th generation). Requires int value.
        'SAVE_STATE_STEP': 1,
        # Load an evolutionary run from a saved state. You must specify the
        # full file path to the desired state file. Note that state files have
        # no file type.
        'LOAD_STATE': None,
        # SEEDING
        # Specify a list of PonyGE2 individuals with which to seed the initial
        # population.
        'SEED_INDIVIDUALS': [],
        # Specify a target seed folder in the 'seeds' directory that contains a
        # population of individuals with which to seed a run.
        'TARGET_SEED_FOLDER': None,
        # Set a target phenotype string for reverse mapping into a GE
        # individual
        'REVERSE_MAPPING_TARGET': None,
        # Set Random Seed for all Random Number Generators to be used by
        # PonyGE2, including the standard Python RNG and the NumPy RNG.
        'RANDOM_SEED': None,
        # CACHING
        # The cache tracks unique individuals across evolution by saving a
        # string of each phenotype in a big list of all phenotypes. Saves all
        # fitness information on each individual. Gives you an idea of how much
        # repetition is in standard GE/GP.
        'CACHE': False,
        # Uses the cache to look up the fitness of duplicate individuals. CACHE
        # must be set to True if you want to use this.
        'LOOKUP_FITNESS': False,
        # Uses the cache to give a bad fitness to duplicate individuals. CACHE
        # must be True if you want to use this (obviously)
        'LOOKUP_BAD_FITNESS': False,
        # Removes duplicate individuals from the population by replacing them
        # with mutated versions of the original individual. Hopefully this will
        # encourage diversity in the population.
        'MUTATE_DUPLICATES': False,
        # MULTIAGENT Parameters
        # True or False for Multiagent
        'MULTIAGENT': False,
        # Agent Size. Number of agents having their own copy of genetic material
        'AGENT_SIZE': 100,
        # Interaction probability: how frequently agents can interact with
        # each other.
        'INTERACTION_PROBABILITY': 0.5,
        # OTHER
        # Set machine name (useful for doing multiple runs)
        'MACHINE': machine_name
        }
#
#params.update(hypothesis_params)
params1 = {
# Set default step and search loop functions
'SEARCH_LOOP': 'search_loop',
'STEP': 'step',
# Evolutionary Parameters
'POPULATION_SIZE': 500,
'GENERATIONS': 50,
'HILL_CLIMBING_HISTORY': 1000,
'SCHC_COUNT_METHOD': "count_all",
# Set optional experiment name
'EXPERIMENT_NAME': None,
# Set default number of runs to be done.
# ONLY USED WITH EXPERIMENT MANAGER.
'RUNS': 1,
# Class of problem
'FITNESS_FUNCTION': "trading_fitness.regression",
# Select problem dataset
'DATASET_TRAIN': None,
'DATASET_TEST': None,
'DATASET_DELIMITER': None,
# Set grammar file
'GRAMMAR_FILE': "trading_grammar/Vladislavleva4.bnf",
# Set the number of depths permutations are calculated for
# (starting from the minimum path of the grammar).
# Mainly for use with the grammar analyser script.
'PERMUTATION_RAMPS': 5,
# Select error metric
'ERROR_METRIC': None,
# Optimise constants in the supervised_learning fitness function.
'OPTIMIZE_CONSTANTS': False,
# Specify target for target problems
'TARGET': "ponyge_rocks",
# Set max sizes of individuals
'MAX_TREE_DEPTH': 90, # SET TO 90 DUE TO PYTHON EVAL() STACK LIMIT.
# INCREASE AT YOUR OWN RISK.
'MAX_TREE_NODES': None,
'CODON_SIZE': 100000,
'MAX_GENOME_LENGTH': None,
'MAX_WRAPS': 0,
# INITIALISATION
# Set initialisation operator.
'INITIALISATION': "operators.initialisation.PI_grow",
# Set the maximum geneome length for initialisation.
'INIT_GENOME_LENGTH': 200,
# Set the maximum tree depth for initialisation.
'MAX_INIT_TREE_DEPTH': 10,
# Set the minimum tree depth for initialisation.
'MIN_INIT_TREE_DEPTH': None,
# SELECTION
# Set selection operator.
'SELECTION': "operators.selection.tournament",
# For tournament selection
'TOURNAMENT_SIZE': 2,
# For truncation selection
'SELECTION_PROPORTION': 0.5,
# Allow for selection of invalid individuals during selection process.
'INVALID_SELECTION': False,
# OPERATOR OPTIONS
# Boolean flag for selecting whether or not mutation is confined to
# within the used portion of the genome. Default set to True.
'WITHIN_USED': True,
# CROSSOVER
# Set crossover operator.
'CROSSOVER': "operators.crossover.variable_onepoint",
# Set crossover probability.
'CROSSOVER_PROBABILITY': 0.75,
# Prevents crossover from generating invalids.
'NO_CROSSOVER_INVALIDS': False,
# MUTATION
# Set mutation operator.
'MUTATION': "operators.mutation.int_flip_per_codon",
# Set mutation probability (None defaults to 1 over the length of
# the genome for each codon)
'MUTATION_PROBABILITY': None,
# Set number of mutation events
'MUTATION_EVENTS': 1,
# Prevents mutation from generating invalids.
'NO_MUTATION_INVALIDS': False,
# REPLACEMENT
# Set replacement operator.
'REPLACEMENT': "operators.replacement.generational",
# Set elite size.
'ELITE_SIZE': None,
# DEBUGGING
# Use this to turn on debugging mode. This mode doesn't write any files
# and should be used when you want to test new methods.
'DEBUG': False,
# PRINTING
# Use this to print out basic statistics for each generation to the
# command line.
'VERBOSE': False,
# Use this to prevent anything being printed to the command line.
'SILENT': False,
| |
self.pop2()
    def drop_double(self):
        """Discard a double (two stack slots) via `pop2`."""
        return self.pop2()
# </editor-fold>
# <editor-fold desc="Loads" defaultstate="collapsed" defaultstate="collapsed">
def load_reference(self, index: int) -> 'Instructions':
if 0 >= index >= 3:
return self.append(f"aload_{index}")
else:
return self.append("aload", index)
def load_double(self, index: int) -> 'Instructions':
if 0 >= index >= 3:
return self.append(f"dload_{index}")
else:
return self.append("dload", index)
def load_float(self, index: int) -> 'Instructions':
if 0 >= index >= 3:
return self.append(f"fload_{index}")
else:
return self.append("fload", index)
def load_integer(self, index: int) -> 'Instructions':
if 0 >= index >= 3:
return self.append(f"iload_{index}")
else:
return self.append("iload", index)
def load_long(self, index: int) -> 'Instructions':
if 0 >= index >= 3:
return self.append(f"lload_{index}")
else:
return self.append("lload", index)
# </editor-fold>
# <editor-fold desc="Stores" defaultstate="collapsed">
def store_reference(self, index: int) -> 'Instructions':
if 0 >= index >= 3:
return self.append(f"astore_{index}")
else:
return self.append("astore", index)
def store_double(self, index: int) -> 'Instructions':
if 0 >= index >= 3:
return self.append(f"dstore_{index}")
else:
return self.append("dstore", index)
def store_float(self, index: int) -> 'Instructions':
if 0 >= index >= 3:
return self.append(f"fstore_{index}")
else:
return self.append("fstore", index)
def store_integer(self, index: int) -> 'Instructions':
if 0 >= index >= 3:
return self.append(f"istore_{index}")
else:
return self.append("istore", index)
def store_long(self, index: int) -> 'Instructions':
if 0 >= index >= 3:
return self.append(f"lstore_{index}")
else:
return self.append("lstore", index)
# </editor-fold>
# <editor-fold desc="Increments/Decrements" defaultstate="collapsed">
    def increment_integer(self, index: int, value: int = 1) -> 'Instructions':
        """Emit `iinc`: add `value` to the int held in local variable `index`."""
        return self.append("iinc", index, value)
    # </editor-fold>
    # <editor-fold desc="Array operations" defaultstate="collapsed">
    def array_length(self) -> 'Instructions':
        """Emit `arraylength`."""
        return self.append("arraylength")
    def new_array(self, primitive_type: int) -> 'Instructions':
        """Emit `newarray` for a primitive array of the given type code."""
        return self.append("newarray", primitive_type)
    def new_reference_array(self, reference_type: ConstantClass) -> 'Instructions':
        """Emit `anewarray` for a one-dimensional reference array."""
        return self.append("anewarray", reference_type)
    def new_multi_dimension_reference_array(self, reference_type: ConstantClass, dimensions: int) -> 'Instructions':
        """Emit `multianewarray` with the given number of dimensions."""
        return self.append("multianewarray", reference_type, dimensions)
    # <editor-fold desc="Load from array" defaultstate="collapsed">
    def load_array_reference(self) -> 'Instructions':
        """Emit `aaload`: load a reference from an array."""
        return self.append("aaload")
    def load_array_double(self) -> 'Instructions':
        """Emit `daload`: load a double from an array."""
        return self.append("daload")
    def load_array_float(self) -> 'Instructions':
        """Emit `faload`: load a float from an array."""
        return self.append("faload")
    def load_array_integer(self) -> 'Instructions':
        """Emit `iaload`: load an int from an array."""
        return self.append("iaload")
    def load_array_long(self) -> 'Instructions':
        """Emit `laload`: load a long from an array."""
        return self.append("laload")
    def load_array_short(self) -> 'Instructions':
        """Emit `saload`: load a short from an array."""
        return self.append("saload")
    def load_array_byte(self) -> 'Instructions':
        """Emit `baload`: load a byte from an array."""
        return self.append("baload")
    def load_array_char(self) -> 'Instructions':
        """Emit `caload`: load a char from an array."""
        return self.append("caload")
    def load_array_boolean(self) -> 'Instructions':
        """Booleans are accessed as bytes; delegate to `baload`."""
        return self.load_array_byte()
    # </editor-fold>
    # <editor-fold desc="Store to array" defaultstate="collapsed">
    def store_array_reference(self) -> 'Instructions':
        """Emit `aastore`: store a reference into an array."""
        return self.append("aastore")
    def store_array_double(self) -> 'Instructions':
        """Emit `dastore`: store a double into an array."""
        return self.append("dastore")
    def store_array_float(self) -> 'Instructions':
        """Emit `fastore`: store a float into an array."""
        return self.append("fastore")
    def store_array_integer(self) -> 'Instructions':
        """Emit `iastore`: store an int into an array."""
        return self.append("iastore")
    def store_array_long(self) -> 'Instructions':
        """Emit `lastore`: store a long into an array."""
        return self.append("lastore")
    def store_array_short(self) -> 'Instructions':
        """Emit `sastore`: store a short into an array."""
        return self.append("sastore")
    def store_array_byte(self) -> 'Instructions':
        """Emit `bastore`: store a byte into an array."""
        return self.append("bastore")
    def store_array_char(self) -> 'Instructions':
        """Emit `castore`: store a char into an array."""
        return self.append("castore")
    def store_array_boolean(self) -> 'Instructions':
        """Booleans are stored as bytes; delegate to `bastore`."""
        return self.store_array_byte()
    # </editor-fold>
    # </editor-fold>
    # <editor-fold desc="Conversions" defaultstate="collapsed">
    def convert_double_to_float(self) -> 'Instructions':
        """Emit `d2f` (double -> float)."""
        return self.append("d2f")
    def convert_double_to_integer(self) -> 'Instructions':
        """Emit `d2i` (double -> int)."""
        return self.append("d2i")
    def convert_double_to_long(self) -> 'Instructions':
        """Emit `d2l` (double -> long)."""
        return self.append("d2l")
    def convert_float_to_double(self) -> 'Instructions':
        """Emit `f2d` (float -> double)."""
        return self.append("f2d")
    def convert_float_to_integer(self) -> 'Instructions':
        """Emit `f2i` (float -> int)."""
        return self.append("f2i")
    def convert_float_to_long(self) -> 'Instructions':
        """Emit `f2l` (float -> long)."""
        return self.append("f2l")
    def convert_integer_to_byte(self) -> 'Instructions':
        """Emit `i2b` (int -> byte)."""
        return self.append("i2b")
    def convert_integer_to_char(self) -> 'Instructions':
        """Emit `i2c` (int -> char)."""
        return self.append("i2c")
    def convert_integer_to_double(self) -> 'Instructions':
        """Emit `i2d` (int -> double)."""
        return self.append("i2d")
    def convert_integer_to_float(self) -> 'Instructions':
        """Emit `i2f` (int -> float)."""
        return self.append("i2f")
    def convert_integer_to_long(self) -> 'Instructions':
        """Emit `i2l` (int -> long)."""
        return self.append("i2l")
    def convert_integer_to_short(self) -> 'Instructions':
        """Emit `i2s` (int -> short)."""
        return self.append("i2s")
    def convert_long_to_double(self) -> 'Instructions':
        """Emit `l2d` (long -> double)."""
        return self.append("l2d")
    def convert_long_to_float(self) -> 'Instructions':
        """Emit `l2f` (long -> float)."""
        return self.append("l2f")
    def convert_long_to_integer(self) -> 'Instructions':
        """Emit `l2i` (long -> int)."""
        return self.append("l2i")
    # </editor-fold>
    # <editor-fold desc="Duplications" defaultstate="collapsed">
    def duplicate_top_of_stack(self) -> 'Instructions':
        """Emit `dup`: duplicate the top stack slot."""
        return self.append("dup")
    def duplicate_top_2_of_stack(self) -> 'Instructions':
        """Emit `dup2`: duplicate the top two stack slots."""
        return self.append("dup2")
    def duplicate_long(self) -> 'Instructions':
        """Longs occupy two slots; alias for `dup2`."""
        return self.duplicate_top_2_of_stack()
    def duplicate_double(self) -> 'Instructions':
        """Doubles occupy two slots; alias for `dup2`."""
        return self.duplicate_top_2_of_stack()
    def duplicate_behind_top_of_stack(self) -> 'Instructions':
        """Emit `dup_x1`."""
        return self.append("dup_x1")
    def duplicate_behind_top_2_of_stack(self) -> 'Instructions':
        """Emit `dup_x2`."""
        return self.append("dup_x2")
    def duplicate_short_behind_long(self) -> 'Instructions':
        """Alias for `dup_x2` (skip over a two-slot long)."""
        return self.duplicate_behind_top_2_of_stack()
    def duplicate_short_behind_double(self) -> 'Instructions':
        """Alias for `dup_x2` (skip over a two-slot double)."""
        return self.duplicate_behind_top_2_of_stack()
    def duplicate_top_2_behind_top_3_of_stack(self) -> 'Instructions':
        """Emit `dup2_x1`."""
        return self.append("dup2_x1")
    def duplicate_long_behind_short(self) -> 'Instructions':
        """Alias for `dup2_x1`."""
        return self.duplicate_top_2_behind_top_3_of_stack()
    def duplicate_double_behind_short(self) -> 'Instructions':
        """Alias for `dup2_x1`."""
        return self.duplicate_top_2_behind_top_3_of_stack()
    def duplicate_top_2_behind_top_4_of_stack(self) -> 'Instructions':
        """Emit `dup2_x2`."""
        return self.append("dup2_x2")
    def duplicate_long_behind_top_3_of_stack(self) -> 'Instructions':
        """Alias for `dup2_x2`."""
        return self.duplicate_top_2_behind_top_4_of_stack()
    def duplicate_double_behind_top_3_of_stack(self) -> 'Instructions':
        """Alias for `dup2_x2`."""
        return self.duplicate_top_2_behind_top_4_of_stack()
    def duplicate_top_2_behind_long(self) -> 'Instructions':
        """Alias for `dup2_x2`."""
        return self.duplicate_top_2_behind_top_4_of_stack()
    def duplicate_top_2_behind_double(self) -> 'Instructions':
        """Alias for `dup2_x2`."""
        return self.duplicate_top_2_behind_top_4_of_stack()
    def duplicate_long_behind_long(self) -> 'Instructions':
        """Alias for `dup2_x2`."""
        return self.duplicate_top_2_behind_top_4_of_stack()
    def duplicate_double_behind_double(self) -> 'Instructions':
        """Alias for `dup2_x2`."""
        return self.duplicate_top_2_behind_top_4_of_stack()
    # </editor-fold>
    # <editor-fold desc="Swaps" defaultstate="collapsed">
    def swap(self) -> 'Instructions':
        """Emit `swap`: exchange the top two single-slot values."""
        return self.append("swap")
    def swap_longs(self) -> 'Instructions':
        """Swap two longs: duplicate below (`dup2_x2`), then drop the original."""
        return self.duplicate_long_behind_long().drop_long()
    def swap_doubles(self) -> 'Instructions':
        """Swap two doubles: duplicate below (`dup2_x2`), then drop the original."""
        return self.duplicate_double_behind_double().drop_double()
    def move_short_behind_long(self) -> 'Instructions':
        """Move a one-slot value below a long: duplicate below, then pop."""
        return self.duplicate_short_behind_long().pop()
    def move_short_behind_top_2_of_stack(self) -> 'Instructions':
        """Move a one-slot value below the next two slots: duplicate below, then pop."""
        return self.duplicate_behind_top_2_of_stack().pop()
    def move_long_behind_short(self) -> 'Instructions':
        """Move a long below a one-slot value: duplicate below, then drop."""
        return self.duplicate_long_behind_short().drop_long()
    def move_top_2_behind_long(self) -> 'Instructions':
        """Move the top two slots below a long: duplicate below, then drop."""
        return self.duplicate_top_2_behind_long().drop_long()
    # </editor-fold>
    # <editor-fold desc="Arithmetic" defaultstate="collapsed">
    # <editor-fold desc="Addition" defaultstate="collapsed">
    def add_integer(self) -> 'Instructions':
        """Emit `iadd`."""
        return self.append("iadd")
    def add_long(self) -> 'Instructions':
        """Emit `ladd`."""
        return self.append("ladd")
    def add_double(self) -> 'Instructions':
        """Emit `dadd`."""
        return self.append("dadd")
    def add_float(self) -> 'Instructions':
        """Emit `fadd`."""
        return self.append("fadd")
    # </editor-fold>
    # <editor-fold desc="Subtraction" defaultstate="collapsed">
    def subtract_integer(self) -> 'Instructions':
        """Emit `isub`."""
        return self.append("isub")
    def subtract_long(self) -> 'Instructions':
        """Emit `lsub`."""
        return self.append("lsub")
    def subtract_double(self) -> 'Instructions':
        """Emit `dsub`."""
        return self.append("dsub")
    def subtract_float(self) -> 'Instructions':
        """Emit `fsub`."""
        return self.append("fsub")
    # </editor-fold>
    # <editor-fold desc="Multiplication" defaultstate="collapsed">
    def multiply_integer(self) -> 'Instructions':
        """Emit `imul`."""
        return self.append("imul")
    def multiply_long(self) -> 'Instructions':
        """Emit `lmul`."""
        return self.append("lmul")
    def multiply_double(self) -> 'Instructions':
        """Emit `dmul`."""
        return self.append("dmul")
    def multiply_float(self) -> 'Instructions':
        """Emit `fmul`."""
        return self.append("fmul")
    # </editor-fold>
    # <editor-fold desc="Division" defaultstate="collapsed">
    def divide_integer(self) -> 'Instructions':
        """Emit `idiv`."""
        return self.append("idiv")
    def divide_long(self) -> 'Instructions':
        """Emit `ldiv`."""
        return self.append("ldiv")
    def divide_double(self) -> 'Instructions':
        """Emit `ddiv`."""
        return self.append("ddiv")
    def divide_float(self) -> 'Instructions':
        """Emit `fdiv`."""
        return self.append("fdiv")
    # </editor-fold>
    # <editor-fold desc="Remainder/Modulo" defaultstate="collapsed">
    def remainder_integer(self) -> 'Instructions':
        """Emit `irem`."""
        return self.append("irem")
    def remainder_long(self) -> 'Instructions':
        """Emit `lrem`."""
        return self.append("lrem")
    def remainder_double(self) -> 'Instructions':
        """Emit `drem`."""
        return self.append("drem")
    def remainder_float(self) -> 'Instructions':
        """Emit `frem`."""
        return self.append("frem")
    # </editor-fold>
    # </editor-fold>
    # <editor-fold desc="Bitwise" defaultstate="collapsed">
    # <editor-fold desc="Negation" defaultstate="collapsed">
    # NOTE(review): arithmetic negation grouped under "Bitwise" — fold label
    # is slightly misleading but kept for editor compatibility.
    def negate_integer(self) -> 'Instructions':
        """Emit `ineg`."""
        return self.append("ineg")
    def negate_long(self) -> 'Instructions':
        """Emit `lneg`."""
        return self.append("lneg")
    def negate_double(self) -> 'Instructions':
        """Emit `dneg`."""
        return self.append("dneg")
    def negate_float(self) -> 'Instructions':
        """Emit `fneg`."""
        return self.append("fneg")
    # </editor-fold>
    # <editor-fold desc="Shifts" defaultstate="collapsed">
    # <editor-fold desc="Shift left" defaultstate="collapsed">
    def shift_left_integer(self) -> 'Instructions':
        """Emit `ishl`."""
        return self.append("ishl")
    def shift_left_long(self) -> 'Instructions':
        """Emit `lshl`."""
        return self.append("lshl")
    # </editor-fold>
    # <editor-fold desc="Shift right" defaultstate="collapsed">
    def shift_right_integer(self) -> 'Instructions':
        """Emit `ishr` (arithmetic shift)."""
        return self.append("ishr")
    def shift_right_long(self) -> 'Instructions':
        """Emit `lshr` (arithmetic shift)."""
        return self.append("lshr")
    # </editor-fold>
    # <editor-fold desc="Shift right unsigned" defaultstate="collapsed">
    def unsigned_shift_right_integer(self) -> 'Instructions':
        """Emit `iushr` (logical shift)."""
        return self.append("iushr")
    def unsigned_shift_right_long(self) -> 'Instructions':
        """Emit `lushr` (logical shift)."""
        return self.append("lushr")
    # </editor-fold>
    # </editor-fold>
    # <editor-fold desc="Bitwise or" defaultstate="collapsed">
    def or_integer(self) -> 'Instructions':
        """Emit `ior`."""
        return self.append("ior")
    def or_long(self) -> 'Instructions':
        """Emit `lor`."""
        return self.append("lor")
    # </editor-fold>
    # <editor-fold desc="Bitwise and" defaultstate="collapsed">
    def and_integer(self) -> 'Instructions':
        """Emit `iand`."""
        return self.append("iand")
    def and_long(self) -> 'Instructions':
        """Emit `land`."""
        return self.append("land")
    # </editor-fold>
    # <editor-fold desc="Bitwise xor" defaultstate="collapsed">
    def xor_integer(self) -> 'Instructions':
        """Emit `ixor`."""
        return self.append("ixor")
    def xor_long(self) -> 'Instructions':
        """Emit `lxor`."""
        return self.append("lxor")
    # </editor-fold>
    # </editor-fold>
    # <editor-fold desc="Comparison" defaultstate="collapsed">
    def compare_long(self) -> 'Instructions':
        """Emit `lcmp`: push -1/0/1 comparing two longs."""
        return self.append("lcmp")
    def compare_double(self) -> 'Instructions':
        """Emit `dcmpl`.

        NOTE(review): the `l` variant pushes -1 when either operand is NaN;
        confirm dcmpl (not dcmpg) is intended for all callers.
        """
        return self.append("dcmpl")
    def compare_float(self) -> 'Instructions':
        """Emit `fcmpl` (NaN yields -1; see note on compare_double)."""
        return self.append("fcmpl")
    # </editor-fold>
    # <editor-fold desc="Branching" defaultstate="collapsed">
    def _branch(self, opcode: str, label: LabelType) -> 'Instructions':
        """Append branch `opcode` targeting `label` (normalised via _map_label)."""
        label = Instructions._map_label(label)
        return self.append(opcode, label)
    # <editor-fold desc="Boolean comparisons" defaultstate="collapsed">
    def branch_if_true(self, label: LabelType) -> 'Instructions':
        """Emit `ifne`: branch when the int on top is non-zero (true)."""
        return self._branch("ifne", label)
    def branch_if_false(self, label: LabelType) -> 'Instructions':
        """Emit `ifeq`: branch when the int on top is zero (false)."""
        return self._branch("ifeq", label)
    # </editor-fold>
    # <editor-fold desc="Comparison (against 0, from lcmp, dcmpg or fcmpg or directly to an int or boolean)"
    # defaultstate="collapsed">
    def branch_if_less(self, label: LabelType) -> 'Instructions':
        """Emit `iflt` (top int < 0)."""
        return self._branch("iflt", label)
    def branch_if_greater(self, label: LabelType) -> 'Instructions':
        """Emit `ifgt` (top int > 0)."""
        return self._branch("ifgt", label)
    def branch_if_less_or_equal(self, label: LabelType) -> 'Instructions':
        """Emit `ifle` (top int <= 0)."""
        return self._branch("ifle", label)
    def branch_if_greater_or_equal(self, label: LabelType) -> 'Instructions':
        """Emit `ifge` (top int >= 0)."""
        return self._branch("ifge", label)
    def branch_if_equal(self, label: LabelType) -> 'Instructions':
        """Emit `ifeq` (top int == 0)."""
        return self._branch("ifeq", label)
    def branch_if_not_equal(self, label: LabelType) -> 'Instructions':
        """Emit `ifne` (top int != 0)."""
        return self._branch("ifne", label)
    # </editor-fold>
    # <editor-fold desc="Comparison between two integers" defaultstate="collapsed">
    def branch_if_integer_less(self, label: LabelType) -> 'Instructions':
        """Emit `if_icmplt`."""
        return self._branch("if_icmplt", label)
    def branch_if_integer_greater(self, label: LabelType) -> 'Instructions':
        """Emit `if_icmpgt`."""
        return self._branch("if_icmpgt", label)
    def branch_if_integer_less_or_equal(self, label: LabelType) -> 'Instructions':
        """Emit `if_icmple`."""
        return self._branch("if_icmple", label)
    def branch_if_integer_greater_or_equal(self, label: LabelType) -> 'Instructions':
        """Emit `if_icmpge`."""
        return self._branch("if_icmpge", label)
    def branch_if_integer_equal(self, label: LabelType) -> 'Instructions':
        """Emit `if_icmpeq`."""
        return self._branch("if_icmpeq", label)
    def branch_if_integer_not_equal(self, label: LabelType) -> 'Instructions':
        """Emit `if_icmpne`."""
        return self._branch("if_icmpne", label)
    # </editor-fold>
    # <editor-fold desc="Comparison for reference" defaultstate="collapsed">
    def branch_if_reference_is_null(self, label: LabelType) -> 'Instructions':
        """Emit `ifnull`."""
        return self._branch("ifnull", label)
    def branch_if_reference_is_not_null(self, label: LabelType) -> 'Instructions':
        """Emit `ifnonnull`."""
        return self._branch("ifnonnull", label)
    # </editor-fold>
    # <editor-fold desc="Comparison between two references" defaultstate="collapsed">
    def branch_if_reference_equal(self, label: LabelType) -> 'Instructions':
        """Emit `if_acmpeq` (reference identity)."""
        return self._branch("if_acmpeq", label)
    def branch_if_reference_not_equal(self, label: LabelType) -> 'Instructions':
        """Emit `if_acmpne` (reference identity)."""
        return self._branch("if_acmpne", label)
    # </editor-fold>
    # <editor-fold desc="Control flow" defaultstate="collapsed">
    def branch(self, label: LabelType) -> 'Instructions':
        """Emit an unconditional `goto`."""
        return self._branch("goto", label)
    def lookup_switch(self, default: LabelType, branch_pairs: Dict[int, LabelType]) -> 'Instructions':
        """Emit `lookupswitch` mapping int match values to labels, with a default.

        Keys are coerced to int and all labels normalised via _map_label.
        """
        default = Instructions._map_label(default)
        branch_pairs = {int(k): Instructions._map_label(v) for k, v in branch_pairs.items()}
        return self.append("lookupswitch", branch_pairs, default)
def table_switch(self, default: LabelType, low: int, high: int, labels: Iterable[LabelType]) -> 'Instructions':
default = Instructions._map_label(default)
labels = [Instructions._map_label(label) for label in labels]
return self.append("tableswitch", default, | |
= Column(String, default='', server_default=sql.text("''"), nullable=False)
link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
entity0 = relationship('Label', foreign_keys=[entity0_id], innerjoin=True)
entity1 = relationship('Recording', foreign_keys=[entity1_id], innerjoin=True)
    @hybrid_property
    def label(self):
        """Readable alias for entity0 (the Label side of this link)."""
        return self.entity0
    @hybrid_property
    def label_id(self):
        """Readable alias for entity0_id."""
        return self.entity0_id
    @hybrid_property
    def recording(self):
        """Readable alias for entity1 (the Recording side of this link)."""
        return self.entity1
    @hybrid_property
    def recording_id(self):
        """Readable alias for entity1_id."""
        return self.entity1_id
class LinkLabelRelease(Base):
    """Link (relationship) table between Label and Release.

    MusicBrainz schema mapping: entity0 is the label side, entity1 the
    release side; hybrid properties below give them readable aliases.
    """
    __tablename__ = 'l_label_release'
    __table_args__ = (
        Index('l_label_release_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
        Index('l_label_release_idx_entity1', 'entity1'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_label_release_fk_link'), nullable=False)
    entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('label.id', 'musicbrainz'), name='l_label_release_fk_entity0'), nullable=False)
    entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('release.id', 'musicbrainz'), name='l_label_release_fk_entity1'), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
    entity0 = relationship('Label', foreign_keys=[entity0_id], innerjoin=True)
    entity1 = relationship('Release', foreign_keys=[entity1_id], innerjoin=True)
    @hybrid_property
    def label(self):
        """Readable alias for entity0 (the Label side)."""
        return self.entity0
    @hybrid_property
    def label_id(self):
        """Readable alias for entity0_id."""
        return self.entity0_id
    @hybrid_property
    def release(self):
        """Readable alias for entity1 (the Release side)."""
        return self.entity1
    @hybrid_property
    def release_id(self):
        """Readable alias for entity1_id."""
        return self.entity1_id
class LinkLabelReleaseGroup(Base):
    """Relationship row linking a Label (entity0) to a ReleaseGroup (entity1); the relationship type lives in the referenced Link row."""
    __tablename__ = 'l_label_release_group'
    __table_args__ = (
        Index('l_label_release_group_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
        Index('l_label_release_group_idx_entity1', 'entity1'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_label_release_group_fk_link'), nullable=False)
    entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('label.id', 'musicbrainz'), name='l_label_release_group_fk_entity0'), nullable=False)
    entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('release_group.id', 'musicbrainz'), name='l_label_release_group_fk_entity1'), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
    entity0 = relationship('Label', foreign_keys=[entity0_id], innerjoin=True)
    entity1 = relationship('ReleaseGroup', foreign_keys=[entity1_id], innerjoin=True)
    # Readable aliases for the generic entity0/entity1 columns.
    @hybrid_property
    def label(self):
        return self.entity0
    @hybrid_property
    def label_id(self):
        return self.entity0_id
    @hybrid_property
    def release_group(self):
        return self.entity1
    @hybrid_property
    def release_group_id(self):
        return self.entity1_id
class LinkLabelSeries(Base):
    """Relationship row linking a Label (entity0) to a Series (entity1); the relationship type lives in the referenced Link row."""
    __tablename__ = 'l_label_series'
    __table_args__ = (
        Index('l_label_series_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
        Index('l_label_series_idx_entity1', 'entity1'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_label_series_fk_link'), nullable=False)
    entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('label.id', 'musicbrainz'), name='l_label_series_fk_entity0'), nullable=False)
    entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('series.id', 'musicbrainz'), name='l_label_series_fk_entity1'), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
    entity0 = relationship('Label', foreign_keys=[entity0_id], innerjoin=True)
    entity1 = relationship('Series', foreign_keys=[entity1_id], innerjoin=True)
    # Readable aliases for the generic entity0/entity1 columns.
    @hybrid_property
    def label(self):
        return self.entity0
    @hybrid_property
    def label_id(self):
        return self.entity0_id
    @hybrid_property
    def series(self):
        return self.entity1
    @hybrid_property
    def series_id(self):
        return self.entity1_id
class LinkLabelURL(Base):
    """Relationship row linking a Label (entity0) to a URL (entity1); the relationship type lives in the referenced Link row."""
    __tablename__ = 'l_label_url'
    __table_args__ = (
        Index('l_label_url_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
        Index('l_label_url_idx_entity1', 'entity1'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_label_url_fk_link'), nullable=False)
    entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('label.id', 'musicbrainz'), name='l_label_url_fk_entity0'), nullable=False)
    entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('url.id', 'musicbrainz'), name='l_label_url_fk_entity1'), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
    entity0 = relationship('Label', foreign_keys=[entity0_id], innerjoin=True)
    entity1 = relationship('URL', foreign_keys=[entity1_id], innerjoin=True)
    # Readable aliases for the generic entity0/entity1 columns.
    @hybrid_property
    def label(self):
        return self.entity0
    @hybrid_property
    def label_id(self):
        return self.entity0_id
    @hybrid_property
    def url(self):
        return self.entity1
    @hybrid_property
    def url_id(self):
        return self.entity1_id
class LinkLabelWork(Base):
    """Relationship row linking a Label (entity0) to a Work (entity1); the relationship type lives in the referenced Link row."""
    __tablename__ = 'l_label_work'
    __table_args__ = (
        Index('l_label_work_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
        Index('l_label_work_idx_entity1', 'entity1'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_label_work_fk_link'), nullable=False)
    entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('label.id', 'musicbrainz'), name='l_label_work_fk_entity0'), nullable=False)
    entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='l_label_work_fk_entity1'), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
    entity0 = relationship('Label', foreign_keys=[entity0_id], innerjoin=True)
    entity1 = relationship('Work', foreign_keys=[entity1_id], innerjoin=True)
    # Readable aliases for the generic entity0/entity1 columns.
    @hybrid_property
    def label(self):
        return self.entity0
    @hybrid_property
    def label_id(self):
        return self.entity0_id
    @hybrid_property
    def work(self):
        return self.entity1
    @hybrid_property
    def work_id(self):
        return self.entity1_id
class LinkPlacePlace(Base):
    """Relationship row linking two Places (entity0 and entity1); the relationship type lives in the referenced Link row."""
    __tablename__ = 'l_place_place'
    __table_args__ = (
        Index('l_place_place_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
        Index('l_place_place_idx_entity1', 'entity1'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_place_place_fk_link'), nullable=False)
    entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('place.id', 'musicbrainz'), name='l_place_place_fk_entity0'), nullable=False)
    entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('place.id', 'musicbrainz'), name='l_place_place_fk_entity1'), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
    entity0 = relationship('Place', foreign_keys=[entity0_id], innerjoin=True)
    entity1 = relationship('Place', foreign_keys=[entity1_id], innerjoin=True)
    # Readable aliases; both sides are Places, so they are numbered 0/1.
    @hybrid_property
    def place0(self):
        return self.entity0
    @hybrid_property
    def place0_id(self):
        return self.entity0_id
    @hybrid_property
    def place1(self):
        return self.entity1
    @hybrid_property
    def place1_id(self):
        return self.entity1_id
class LinkPlaceRecording(Base):
    """Relationship row linking a Place (entity0) to a Recording (entity1); the relationship type lives in the referenced Link row."""
    __tablename__ = 'l_place_recording'
    __table_args__ = (
        Index('l_place_recording_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
        Index('l_place_recording_idx_entity1', 'entity1'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_place_recording_fk_link'), nullable=False)
    entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('place.id', 'musicbrainz'), name='l_place_recording_fk_entity0'), nullable=False)
    entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('recording.id', 'musicbrainz'), name='l_place_recording_fk_entity1'), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
    entity0 = relationship('Place', foreign_keys=[entity0_id], innerjoin=True)
    entity1 = relationship('Recording', foreign_keys=[entity1_id], innerjoin=True)
    # Readable aliases for the generic entity0/entity1 columns.
    @hybrid_property
    def place(self):
        return self.entity0
    @hybrid_property
    def place_id(self):
        return self.entity0_id
    @hybrid_property
    def recording(self):
        return self.entity1
    @hybrid_property
    def recording_id(self):
        return self.entity1_id
class LinkPlaceRelease(Base):
    """Relationship row linking a Place (entity0) to a Release (entity1); the relationship type lives in the referenced Link row."""
    __tablename__ = 'l_place_release'
    __table_args__ = (
        Index('l_place_release_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
        Index('l_place_release_idx_entity1', 'entity1'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_place_release_fk_link'), nullable=False)
    entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('place.id', 'musicbrainz'), name='l_place_release_fk_entity0'), nullable=False)
    entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('release.id', 'musicbrainz'), name='l_place_release_fk_entity1'), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
    entity0 = relationship('Place', foreign_keys=[entity0_id], innerjoin=True)
    entity1 = relationship('Release', foreign_keys=[entity1_id], innerjoin=True)
    # Readable aliases for the generic entity0/entity1 columns.
    @hybrid_property
    def place(self):
        return self.entity0
    @hybrid_property
    def place_id(self):
        return self.entity0_id
    @hybrid_property
    def release(self):
        return self.entity1
    @hybrid_property
    def release_id(self):
        return self.entity1_id
class LinkPlaceReleaseGroup(Base):
    """Relationship row linking a Place (entity0) to a ReleaseGroup (entity1); the relationship type lives in the referenced Link row."""
    __tablename__ = 'l_place_release_group'
    __table_args__ = (
        Index('l_place_release_group_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
        Index('l_place_release_group_idx_entity1', 'entity1'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_place_release_group_fk_link'), nullable=False)
    entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('place.id', 'musicbrainz'), name='l_place_release_group_fk_entity0'), nullable=False)
    entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('release_group.id', 'musicbrainz'), name='l_place_release_group_fk_entity1'), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
    entity0 = relationship('Place', foreign_keys=[entity0_id], innerjoin=True)
    entity1 = relationship('ReleaseGroup', foreign_keys=[entity1_id], innerjoin=True)
    # Readable aliases for the generic entity0/entity1 columns.
    @hybrid_property
    def place(self):
        return self.entity0
    @hybrid_property
    def place_id(self):
        return self.entity0_id
    @hybrid_property
    def release_group(self):
        return self.entity1
    @hybrid_property
    def release_group_id(self):
        return self.entity1_id
class LinkPlaceSeries(Base):
    """Relationship row linking a Place (entity0) to a Series (entity1); the relationship type lives in the referenced Link row."""
    __tablename__ = 'l_place_series'
    __table_args__ = (
        Index('l_place_series_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
        Index('l_place_series_idx_entity1', 'entity1'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_place_series_fk_link'), nullable=False)
    entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('place.id', 'musicbrainz'), name='l_place_series_fk_entity0'), nullable=False)
    entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('series.id', 'musicbrainz'), name='l_place_series_fk_entity1'), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
    entity0 = relationship('Place', foreign_keys=[entity0_id], innerjoin=True)
    entity1 = relationship('Series', foreign_keys=[entity1_id], innerjoin=True)
    # Readable aliases for the generic entity0/entity1 columns.
    @hybrid_property
    def place(self):
        return self.entity0
    @hybrid_property
    def place_id(self):
        return self.entity0_id
    @hybrid_property
    def series(self):
        return self.entity1
    @hybrid_property
    def series_id(self):
        return self.entity1_id
class LinkPlaceURL(Base):
    """Relationship row linking a Place (entity0) to a URL (entity1); the relationship type lives in the referenced Link row."""
    __tablename__ = 'l_place_url'
    __table_args__ = (
        Index('l_place_url_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
        Index('l_place_url_idx_entity1', 'entity1'),
        {'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
    )
    id = Column(Integer, primary_key=True)
    link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_place_url_fk_link'), nullable=False)
    entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('place.id', 'musicbrainz'), name='l_place_url_fk_entity0'), nullable=False)
    entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('url.id', 'musicbrainz'), name='l_place_url_fk_entity1'), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
    link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
    link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
    entity0 = relationship('Place', foreign_keys=[entity0_id], innerjoin=True)
    entity1 = relationship('URL', foreign_keys=[entity1_id], innerjoin=True)
    # Readable aliases for the generic entity0/entity1 columns.
    @hybrid_property
    def place(self):
        return self.entity0
    @hybrid_property
    def place_id(self):
        return self.entity0_id
    @hybrid_property
    def url(self):
        return self.entity1
    @hybrid_property
    def url_id(self):
        return self.entity1_id
class LinkPlaceWork(Base):
__tablename__ = 'l_place_work'
__table_args__ = (
Index('l_place_work_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
Index('l_place_work_idx_entity1', 'entity1'),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
id = Column(Integer, primary_key=True)
link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_place_work_fk_link'), | |
import time
import wandb
import os
import gym
import numpy as np
import imageio
from collections import defaultdict, deque
from itertools import chain
import matplotlib
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.nn import functional as F
from onpolicy.utils.util import update_linear_schedule
from onpolicy.runner.shared.base_runner import Runner
from onpolicy.envs.habitat.model.model import Neural_SLAM_Module, Local_IL_Policy
from onpolicy.envs.habitat.utils.memory import FIFOMemory
from onpolicy.algorithms.utils.util import init, check
from icecream import ic
def _t2n(x):
return x.detach().cpu().numpy()
def get_folders(dir, folders):
    """Append every sub-directory under *dir* to *folders*, pre-order (each
    directory before its own children), recursing depth-first."""
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if not os.path.isdir(path):
            continue
        folders.append(path)
        get_folders(path, folders)
class HabitatRunner(Runner):
def __init__(self, config):
super(HabitatRunner, self).__init__(config)
# init parameters
self.init_hyper_parameters()
# init variables
self.init_map_variables()
# global policy
self.init_global_policy()
# local policy
self.init_local_policy()
# slam module
self.init_slam_module()
def warmup(self):
# reset env
self.obs, infos = self.envs.reset()
self.trans = [infos[e]['trans'] for e in range(self.n_rollout_threads)]
self.rotation = [infos[e]['rotation'] for e in range(self.n_rollout_threads)]
self.scene_id = [infos[e]['scene_id'] for e in range(self.n_rollout_threads)]
self.agent_trans = [infos[e]['agent_trans'] for e in range(self.n_rollout_threads)]
self.agent_rotation = [infos[e]['agent_rotation'] for e in range(self.n_rollout_threads)]
self.explorable_map = [infos[e]['explorable_map'] for e in range(self.n_rollout_threads)]
# Predict map from frame 1:
self.run_slam_module(self.obs, self.obs, infos)
# Compute Global policy input
self.first_compute_global_input()
self.share_global_input = self.global_input if self.use_centralized_V else self.global_input #! wrong
# replay buffer
for key in self.global_input.keys():
self.buffer.obs[key][0] = self.global_input[key].copy()
for key in self.share_global_input.keys():
self.buffer.share_obs[key][0] = self.share_global_input[key].copy()
values, actions, action_log_probs, rnn_states, rnn_states_critic = self.compute_global_goal(step=0)
# compute local input
self.compute_local_input(self.global_input['global_obs'])
# Output stores local goals as well as the the ground-truth action
self.local_output = self.envs.get_short_term_goal(self.local_input)
self.local_output = np.array(self.local_output, dtype = np.long)
self.last_obs = self.obs.copy()
return values, actions, action_log_probs, rnn_states, rnn_states_critic
def run(self):
# map and pose
self.init_map_and_pose()
values, actions, action_log_probs, rnn_states, rnn_states_critic = self.warmup()
start = time.time()
episodes = int(self.num_env_steps) // self.max_episode_length // self.n_rollout_threads
for episode in range(episodes):
self.init_env_info()
if self.use_linear_lr_decay:
self.trainer.policy.lr_decay(episode, episodes)
for step in range(self.max_episode_length):
local_step = step % self.num_local_steps
global_step = (step // self.num_local_steps) % self.episode_length
eval_global_step = step // self.num_local_steps + 1
del self.last_obs
self.last_obs = self.obs.copy()
# Sample actions
actions_env = self.compute_local_action()
# Obser reward and next obs
self.obs, rewards, dones, infos = self.envs.step(actions_env)
for e in range(self.n_rollout_threads):
for key in ['explored_ratio', 'explored_reward', 'merge_explored_ratio', 'merge_explored_reward']:
if key in infos[e].keys():
self.env_info['sum_{}'.format(key)][e] += np.array(infos[e][key])
if 'merge_explored_ratio_step' in infos[e].keys():
self.env_info['merge_explored_ratio_step'][e] = infos[e]['merge_explored_ratio_step']
for agent_id in range(self.num_agents):
agent_k = "agent{}_explored_ratio_step".format(agent_id)
if agent_k in infos[e].keys():
self.env_info['explored_ratio_step'][e][agent_id] = infos[e][agent_k]
self.local_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)
self.local_masks[dones == True] = np.zeros(((dones == True).sum(), 1), dtype=np.float32)
self.global_masks *= self.local_masks
# Reinitialize variables when episode ends
if step == self.max_episode_length - 1:
self.init_map_and_pose()
del self.last_obs
self.last_obs = self.obs.copy()
self.trans = [infos[e]['trans'] for e in range(self.n_rollout_threads)]
self.rotation = [infos[e]['rotation'] for e in range(self.n_rollout_threads)]
self.agent_trans = [infos[e]['agent_trans'] for e in range(self.n_rollout_threads)]
self.agent_rotation = [infos[e]['agent_rotation'] for e in range(self.n_rollout_threads)]
self.explorable_map = [infos[e]['explorable_map'] for e in range(self.n_rollout_threads)]
self.scene_id = [infos[e]['scene_id'] for e in range(self.n_rollout_threads)]
# Neural SLAM Module
if self.train_slam:
self.insert_slam_module(infos)
self.run_slam_module(self.last_obs, self.obs, infos, True)
self.update_local_map()
# Global Policy
if local_step == self.num_local_steps - 1:
# For every global step, update the full and local maps
self.update_map_and_pose()
self.compute_global_input()
data = rewards, dones, infos, values, actions, action_log_probs, rnn_states, rnn_states_critic
# insert data into buffer
self.insert_global_policy(data)
values, actions, action_log_probs, rnn_states, rnn_states_critic = self.compute_global_goal(step = global_step + 1)
# Local Policy
self.compute_local_input(self.local_map)
# Output stores local goals as well as the the ground-truth action
self.local_output = self.envs.get_short_term_goal(self.local_input)
self.local_output = np.array(self.local_output, dtype = np.long)
# Start Training
torch.set_grad_enabled(True)
# Train Neural SLAM Module
if self.train_slam and len(self.slam_memory) > self.slam_batch_size:
self.train_slam_module()
# Train Local Policy
if self.train_local and (local_step + 1) % self.local_policy_update_freq == 0:
self.train_local_policy()
# Train Global Policy
if global_step % self.episode_length == self.episode_length - 1 \
and local_step == self.num_local_steps - 1:
self.train_global_policy()
# Finish Training
torch.set_grad_enabled(False)
# post process
total_num_steps = (episode + 1) * self.max_episode_length * self.n_rollout_threads
self.convert_info()
print("average episode merge explored reward is {}".format(np.mean(self.env_infos["sum_merge_explored_reward"])))
print("average episode merge explored ratio is {}".format(np.mean(self.env_infos['sum_merge_explored_ratio'])))
# log information
if episode % self.log_interval == 0:
end = time.time()
print("\n Scenario {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\n"
.format(self.all_args.scenario_name,
self.algorithm_name,
self.experiment_name,
episode,
episodes,
total_num_steps,
self.num_env_steps,
int(total_num_steps / (end - start))))
self.log_env(self.train_slam_infos, total_num_steps)
self.log_env(self.train_local_infos, total_num_steps)
self.log_env(self.train_global_infos, total_num_steps)
self.log_env(self.env_infos, total_num_steps)
self.log_agent(self.env_infos, total_num_steps)
# save model
if (episode % self.save_interval == 0 or episode == episodes - 1):
self.save_slam_model(total_num_steps)
self.save_global_model(total_num_steps)
self.save_local_model(total_num_steps)
def get_local_map_boundaries(self, agent_loc, local_sizes, full_sizes):
loc_r, loc_c = agent_loc
local_w, local_h = local_sizes
full_w, full_h = full_sizes
if self.global_downscaling > 1:
gx1, gy1 = loc_r - local_w // 2, loc_c - local_h // 2
gx2, gy2 = gx1 + local_w, gy1 + local_h
if gx1 < 0:
gx1, gx2 = 0, local_w
if gx2 > full_w:
gx1, gx2 = full_w - local_w, full_w
if gy1 < 0:
gy1, gy2 = 0, local_h
if gy2 > full_h:
gy1, gy2 = full_h - local_h, full_h
else:
gx1.gx2, gy1, gy2 = 0, full_w, 0, full_h
return [gx1, gx2, gy1, gy2]
def init_hyper_parameters(self):
self.map_size_cm = self.all_args.map_size_cm
self.map_resolution = self.all_args.map_resolution
self.global_downscaling = self.all_args.global_downscaling
self.frame_width = self.all_args.frame_width
self.load_local = self.all_args.load_local
self.load_slam = self.all_args.load_slam
self.train_local = self.all_args.train_local
self.train_slam = self.all_args.train_slam
self.slam_memory_size = self.all_args.slam_memory_size
self.slam_batch_size = self.all_args.slam_batch_size
self.slam_iterations = self.all_args.slam_iterations
self.slam_lr = self.all_args.slam_lr
self.slam_opti_eps = self.all_args.slam_opti_eps
self.use_local_recurrent_policy = self.all_args.use_local_recurrent_policy
self.local_hidden_size = self.all_args.local_hidden_size
self.local_lr = self.all_args.local_lr
self.local_opti_eps = self.all_args.local_opti_eps
self.proj_loss_coeff = self.all_args.proj_loss_coeff
self.exp_loss_coeff = self.all_args.exp_loss_coeff
self.pose_loss_coeff = self.all_args.pose_loss_coeff
self.local_policy_update_freq = self.all_args.local_policy_update_freq
self.num_local_steps = self.all_args.num_local_steps
self.max_episode_length = self.all_args.max_episode_length
self.render_merge = self.all_args.render_merge
self.visualize_input = self.all_args.visualize_input
self.use_intrinsic_reward = self.all_args.use_intrinsic_reward
def init_map_variables(self):
### Full map consists of 4 channels containing the following:
### 1. Obstacle Map
### 2. Exploread Area
### 3. Current Agent Location
### 4. Past Agent Locations
# Calculating full and local map sizes
map_size = self.map_size_cm // self.map_resolution
self.full_w, self.full_h = map_size, map_size
self.local_w, self.local_h = int(self.full_w / self.global_downscaling), \
int(self.full_h / self.global_downscaling)
# Initializing full, merge and local map
self.full_map = np.zeros((self.n_rollout_threads, self.num_agents, 4, self.full_w, self.full_h), dtype=np.float32)
self.local_map = np.zeros((self.n_rollout_threads, self.num_agents, 4, self.local_w, self.local_h), dtype=np.float32)
# Initial full and local pose
self.full_pose = np.zeros((self.n_rollout_threads, self.num_agents, 3), dtype=np.float32)
self.local_pose = np.zeros((self.n_rollout_threads, self.num_agents, 3), dtype=np.float32)
# Origin of local map
self.origins = np.zeros((self.n_rollout_threads, self.num_agents, 3), dtype=np.float32)
# Local Map Boundaries
self.lmb = np.zeros((self.n_rollout_threads, self.num_agents, 4)).astype(int)
### Planner pose inputs has 7 dimensions
### 1-3 store continuous global agent location
### 4-7 store local map boundaries
self.planner_pose_inputs = np.zeros((self.n_rollout_threads, self.num_agents, 7), dtype=np.float32)
    def init_map_and_pose(self):
        """Zero the full map, place every agent at the map centre, and derive
        the local maps, local poses, origins and planner inputs from that.

        Requires init_map_variables() to have allocated planner_pose_inputs,
        lmb, origins, local_map and local_pose first.
        """
        self.full_map = np.zeros((self.n_rollout_threads, self.num_agents, 4, self.full_w, self.full_h), dtype=np.float32)
        self.full_pose = np.zeros((self.n_rollout_threads, self.num_agents, 3), dtype=np.float32)
        self.merge_goal_trace = np.zeros((self.n_rollout_threads, self.full_w, self.full_h), dtype=np.float32)
        # start every agent at the centre of the map (metres)
        self.full_pose[:, :, :2] = self.map_size_cm / 100.0 / 2.0
        locs = self.full_pose
        self.planner_pose_inputs[:, :, :3] = locs
        for e in range(self.n_rollout_threads):
            for a in range(self.num_agents):
                # pose is (x, y, theta); row comes from y, col from x
                r, c = locs[e, a, 1], locs[e, a, 0]
                # metres -> map cells
                loc_r, loc_c = [int(r * 100.0 / self.map_resolution),
                                int(c * 100.0 / self.map_resolution)]
                # mark a 3x3 patch around the agent in channels 2 and 3
                # (current and past agent locations) with the agent id + 1
                self.full_map[e, a, 2:, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = a + 1
                self.lmb[e, a] = self.get_local_map_boundaries((loc_r, loc_c),
                                                               (self.local_w, self.local_h),
                                                               (self.full_w, self.full_h))
                self.planner_pose_inputs[e, a, 3:] = self.lmb[e, a]
                # local-map origin in metres (note gy offset first, then gx)
                self.origins[e, a] = [self.lmb[e, a, 2] * self.map_resolution / 100.0,
                                      self.lmb[e, a, 0] * self.map_resolution / 100.0, 0.]
        for e in range(self.n_rollout_threads):
            for a in range(self.num_agents):
                # crop the local map out of the full map and shift the pose
                self.local_map[e, a] = self.full_map[e, a, :, self.lmb[e, a, 0]:self.lmb[e, a, 1], self.lmb[e, a, 2]:self.lmb[e, a, 3]]
                self.local_pose[e, a] = self.full_pose[e, a] - self.origins[e, a]
def init_global_policy(self):
self.best_gobal_reward = -np.inf
# ppo network log info
self.train_global_infos = {}
self.train_global_infos['value_loss']= deque(maxlen=10)
self.train_global_infos['policy_loss']= deque(maxlen=10)
self.train_global_infos['dist_entropy'] = deque(maxlen=10)
self.train_global_infos['actor_grad_norm'] = deque(maxlen=10)
self.train_global_infos['critic_grad_norm'] = deque(maxlen=10)
self.train_global_infos['ratio'] = deque(maxlen=10)
# env info
self.env_infos = {}
length = self.all_args.eval_episodes
self.env_infos['sum_explored_ratio'] = deque(maxlen=length)
self.env_infos['sum_explored_reward'] = deque(maxlen=length)
self.env_infos['sum_merge_explored_ratio'] = deque(maxlen=length)
self.env_infos['sum_merge_explored_reward'] = deque(maxlen=length)
self.env_infos['merge_explored_ratio_step'] = deque(maxlen=length)
self.env_infos['invalid_merge_explored_ratio_step_num'] = deque(maxlen=length)
self.env_infos['invalid_merge_map_num'] = deque(maxlen=length)
self.env_infos['max_sum_merge_explored_ratio'] = deque(maxlen=length)
self.env_infos['min_sum_merge_explored_ratio'] = deque(maxlen=length)
self.global_input = {}
self.global_input['global_obs'] = np.zeros((self.n_rollout_threads, self.num_agents, 8, self.local_w, self.local_h), dtype=np.float32)
self.global_input['global_merge_obs'] = np.zeros((self.n_rollout_threads, self.num_agents, 4, self.local_w, self.local_h), dtype=np.float32)
# self.global_input['global_merge_goal'] = np.zeros((self.n_rollout_threads, self.num_agents, 2, self.local_w, self.local_h), dtype=np.float32)
# self.global_input['gt_map'] = np.zeros((self.n_rollout_threads, self.num_agents, 1, self.local_w, self.local_h), dtype=np.float32)
self.global_input['global_orientation'] = np.zeros((self.n_rollout_threads, self.num_agents, 1), dtype=np.long)
self.global_input['vector'] = np.zeros((self.n_rollout_threads, self.num_agents, self.num_agents), dtype=np.float32)
self.global_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), | |
if _instances :
for _x in _instances :
_instances = _x
if obj_type == "pod" and hasattr(_instances, "obj") and "nodeName" in _instances.obj["spec"] :
obj_attr_list["node"] = _instances.obj["spec"]["nodeName"]
_status = 0
except CldOpsException as obj :
_status = obj.status
_xfmsg = str(obj.msg)
except Exception as e :
_status = 23
_xfmsg = str(e)
finally :
if _status :
_fmsg = "(While getting instance(s) through API call \"" + _call + "\") " + _xfmsg
if identifier not in self.api_error_counter :
self.api_error_counter[identifier] = 0
self.api_error_counter[identifier] += 1
if self.api_error_counter[identifier] > self.max_api_errors :
raise CldOpsException(_fmsg, _status)
else :
cbwarn(_fmsg)
return []
else :
return _instances
    @trace
    def get_images(self, obj_attr_list) :
        '''
        Check that the image named in obj_attr_list["imageid1"] can be
        resolved on this cloud.  On success sets
        obj_attr_list["boot_volume_imageid1"] (placeholder "TBD") and returns
        True; on failure raises CldOpsException with the collected message.
        '''
        try :
            # _status stays non-zero (failure) until a candidate image is found
            _status = 100
            _hyper = ''
            _fmsg = "An error has occurred, but no error message was captured"
            # only the single configured image name is considered
            _image_list = [ obj_attr_list["imageid1"] ]
            _fmsg = "Please check if the defined image name is present on this "
            _fmsg += self.get_description()
            _candidate_images = []
            for _image in _image_list :
                if self.is_cloud_image_uuid(obj_attr_list["imageid1"]) :
                    # if _image["Id"].split(':')[1] == obj_attr_list["imageid1"] :
                    _candidate_images.append(obj_attr_list["imageid1"])
                else :
                    # substring match against the configured image name
                    if _image.count(obj_attr_list["imageid1"]) :
                        _candidate_images.append(obj_attr_list["imageid1"])
            if len(_candidate_images) :
                obj_attr_list["boot_volume_imageid1"] = "TBD"
                _status = 0
        except Exception as e :
            _status = 23
            _fmsg = str(e)
        finally :
            # non-zero _status means failure: log and raise; zero means success
            if _status :
                _msg = "Image Name (" + obj_attr_list["imageid1"] + ") not found: " + _fmsg
                cberr(_msg)
                raise CldOpsException(_msg, _status)
            else :
                return True
@trace
def get_networks(self, obj_attr_list) :
    '''
    Network validation placeholder: this adapter performs no real lookup and
    currently always succeeds. The error path is kept for future use.
    '''
    try :
        _status = 100
        _status = 0
    except Exception as e :
        _status = 23
        _fmsg = str(e)
    finally :
        # success path first: _status is zero whenever no exception occurred
        if not _status :
            return True
        _msg = "Network (" + obj_attr_list["prov_netname"] + " ) not found: " + _fmsg
        cberr(_msg, True)
        raise CldOpsException(_msg, _status)
@trace
def create_ssh_key(self, vmc_name, key_name, key_type, key_contents, key_fingerprint, vm_defaults, connection) :
    '''
    SSH-key registration stub: keys are injected through the pod environment
    rather than a cloud key-pair API, so this is a no-op that always succeeds.
    '''
    return True
@trace
def is_cloud_image_uuid(self, imageid) :
    '''
    Report whether "imageid" looks like a cloud-native image identifier.

    For this adapter every image identifier is accepted as-is, so the method
    unconditionally returns True. The original strict check (a 64-character
    numeric digest) was unreachable dead code after the early return and has
    been removed; it is preserved here in case it ever needs restoring:

        if len(imageid) == 64 and is_number(imageid, True) :
            return True
        return False
    '''
    return True
@trace
def is_vm_running(self, obj_attr_list):
    '''
    Return True when the pod backing this "VM" is in the "running" state,
    False otherwise. As a side effect, on success the pod object, its exact
    names, the host IP/name and (for replicaset/deployment abstractions) the
    controller names and UUIDs are cached into obj_attr_list.
    Raises CldOpsException on unexpected errors.
    '''
    try :
        _instance = self.get_instances(obj_attr_list, "pod", obj_attr_list["cloud_vm_name"])
        _instance_status = False
        if _instance :
            # drill down defensively: status -> containerStatuses[0] -> state;
            # the state dict has a single key ("running"/"waiting"/"terminated")
            if "status" in _instance.obj :
                if "containerStatuses" in _instance.obj["status"] :
                    if "state" in _instance.obj["status"]["containerStatuses"][0] :
                        _instance_status = list(_instance.obj["status"]["containerStatuses"][0]["state"].keys())[0]
        if str(_instance_status) == "running" :
            # cache the raw pod object and canonical names for later use
            obj_attr_list["k8s_instance"] = _instance.obj
            obj_attr_list["cloud_vm_exact_match_name"] = _instance.name
            obj_attr_list["cloud_vm_name"] = _instance.name
            obj_attr_list["cloud_hostname"] = _instance.name
            if "hostIP" in _instance.obj["status"] :
                _host_ip = _instance.obj["status"]["hostIP"]
                obj_attr_list["host_name"], obj_attr_list["host_cloud_ip"] = self.try_dns(_host_ip)
            # resolve the owning replicaset once and cache its name/uuid
            if obj_attr_list["abstraction"] == "replicaset" or obj_attr_list["abstraction"] == "deployment" :
                if "cloud_rs_exact_match_name" not in obj_attr_list :
                    _x_instance = self.get_instances(obj_attr_list, "replicaset", obj_attr_list["cloud_rs_name"])
                    if _x_instance :
                        obj_attr_list["cloud_rs_exact_match_name"] = _x_instance.name
                        obj_attr_list["cloud_rs_name"] = _x_instance.name
                        if "metadata" in _x_instance.obj :
                            if "uid" in _x_instance.obj["metadata"] :
                                obj_attr_list["cloud_rs_uuid"] = _x_instance.obj["metadata"]["uid"]
            # resolve the owning deployment once and cache its name/uuid
            if obj_attr_list["abstraction"] == "deployment" :
                if "cloud_d_exact_match_name" not in obj_attr_list :
                    _x_instance = self.get_instances(obj_attr_list, "deployment", obj_attr_list["cloud_d_name"])
                    if _x_instance :
                        obj_attr_list["cloud_d_exact_match_name"] = _x_instance.name
                        obj_attr_list["cloud_d_name"] = _x_instance.name
                        if "metadata" in _x_instance.obj :
                            if "uid" in _x_instance.obj["metadata"] :
                                obj_attr_list["cloud_d_uuid"] = _x_instance.obj["metadata"]["uid"]
            return True
        else :
            return False
    except Exception as e :
        # log the full traceback before converting to a CldOpsException
        for line in traceback.format_exc().splitlines() :
            cbwarn(line, True)
        _status = 23
        _fmsg = str(e)
        raise CldOpsException(_fmsg, _status)
@trace
def is_vm_ready(self, obj_attr_list) :
    '''
    A pod counts as "ready" once it is running AND an IP address has been
    obtained. Records the outcome in obj_attr_list["last_known_state"].
    '''
    # guard-clause form: bail out at the first unmet condition
    if not self.is_vm_running(obj_attr_list) :
        obj_attr_list["last_known_state"] = "not running"
        return False
    if not self.get_ip_address(obj_attr_list) :
        obj_attr_list["last_known_state"] = "running with ip unassigned"
        return False
    obj_attr_list["last_known_state"] = "running with ip assigned"
    return True
@trace
def vm_placement(self, obj_attr_list) :
    '''
    Placement stub: scheduling is delegated to the cluster itself, so there
    is nothing to decide here. Always succeeds; error path kept for future use.
    '''
    _status = 100
    _fmsg = "An error has occurred, but no error message was captured"
    try :
        _status = 0
    except Exception as e :
        _status = 23
        _fmsg = str(e)
    finally :
        if not _status :
            return True
        _msg = "VM placement failed: " + _fmsg
        cberr(_msg, True)
        raise CldOpsException(_msg, _status)
@trace
def vvcreate(self, obj_attr_list) :
    '''
    Volume-create stub: separate volumes are not supported by this adapter,
    so the request is only marked "NOT SUPPORTED" and the usual lifecycle
    messages are emitted. Returns the (status, message) pair produced by
    common_messages.
    '''
    _status = 100
    _fmsg = "An error has occurred, but no error message was captured"
    try :
        # default the volume type exactly once
        obj_attr_list.setdefault("cloud_vv_type", "NOT SUPPORTED")
        if "cloud_vv" in obj_attr_list :
            obj_attr_list["last_known_state"] = "about to send volume create request"
            obj_attr_list["cloud_vv_uuid"] = "NOT SUPPORTED"
            self.common_messages("VV", obj_attr_list, "creating", _status, _fmsg)
        _status = 0
    except CldOpsException as obj :
        _status, _fmsg = obj.status, str(obj.msg)
    except Exception as e :
        _status, _fmsg = 23, str(e)
    finally :
        _status, _msg = self.common_messages("VV", obj_attr_list, "created", _status, _fmsg)
        return _status, _msg
@trace
def vvdestroy(self, obj_attr_list) :
    '''
    Volume-destroy stub: only emits the lifecycle messages when a real volume
    uuid is present. Returns the (status, message) pair from common_messages.
    '''
    _status = 100
    _fmsg = "An error has occurred, but no error message was captured"
    try :
        _uuid = str(obj_attr_list["cloud_vv_uuid"]).lower()
        # skip the "destroying" message for placeholder uuids
        if _uuid not in ("not supported", "none") :
            self.common_messages("VV", obj_attr_list, "destroying", 0, '')
        _status = 0
    except CldOpsException as obj :
        _status, _fmsg = obj.status, str(obj.msg)
    except Exception as e :
        _status, _fmsg = 23, str(e)
    finally :
        _status, _msg = self.common_messages("VV", obj_attr_list, "destroyed", _status, _fmsg)
        return _status, _msg
@trace
def vmcreate(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_vmc_attr_list = self.osci.get_object(obj_attr_list["cloud_name"], \
"VMC", False, obj_attr_list["vmc"], \
False)
if "kubeconfig" in _vmc_attr_list :
obj_attr_list["kubeconfig"] = _vmc_attr_list["kubeconfig"]
self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], obj_attr_list)
_context = False
_taint = False
_node_name_or_label = False
_vmc_attr_list = self.osci.get_object(obj_attr_list["cloud_name"], "VMC", False, obj_attr_list["vmc"], False)
cbdebug("Pool is: " + _vmc_attr_list["pool"])
if _vmc_attr_list["pool"].count(",") :
_taint, _node_name_or_label = _vmc_attr_list["pool"].split(",")
else :
_taint = _vmc_attr_list["pool"]
self.determine_instance_name(obj_attr_list)
obj_attr_list["cloud_vm_name"] = obj_attr_list["cloud_vm_name"].lower()
obj_attr_list["cloud_vv_name"] = obj_attr_list["cloud_vv_name"].lower()
obj_attr_list["cloud_rs_name"] = obj_attr_list["cloud_vm_name"]
obj_attr_list["cloud_d_name"] = obj_attr_list["cloud_vm_name"]
obj_attr_list["cloud_rs_uuid"] = "NA"
obj_attr_list["cloud_d_uuid"] = "NA"
self.determine_key_name(obj_attr_list)
self.take_action_if_requested("VM", obj_attr_list, "provision_originated")
if str(obj_attr_list["image_pull_secrets"]).lower() != "false" :
_image_pull_secrets = [ { "name" : "regcred" } ]
else :
_image_pull_secrets = [ ]
_mark_a = time()
self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], obj_attr_list)
self.annotate_time_breakdown(obj_attr_list, "authenticate_time", _mark_a)
_mark_a = time()
if self.is_vm_running(obj_attr_list) :
_msg = "An instance named \"" + obj_attr_list["cloud_vm_name"]
_msg += " is already running. It needs to be destroyed first."
_status = 187
cberr(_msg)
raise CldOpsException(_msg, _status)
self.annotate_time_breakdown(obj_attr_list, "check_existing_instance_time", _mark_a)
_env = [
{ "name": "CB_SSH_PUB_KEY", \
"value" : obj_attr_list["pubkey_contents"]
},\
{ "name": "CB_LOGIN", \
"value" : obj_attr_list["login"]
}
]
if str(obj_attr_list["ports_base"]).lower() != "false" and obj_attr_list["netname"] != "none" :
obj_attr_list["prov_cloud_port"] = int(obj_attr_list["ports_base"]) + int(obj_attr_list["name"].replace("vm_",''))
if obj_attr_list["check_boot_complete"].lower() == "tcp_on_22":
obj_attr_list["check_boot_complete"] = "tcp_on_" + str(obj_attr_list["prov_cloud_port"])
_annotations = { "creator" : "cbtool" }
if len(obj_attr_list["annotations"]) > 2 :
_annotations = str2dic(obj_attr_list["annotations"])
if "override_imageid1" in _annotations :
obj_attr_list["imageid1"] = _annotations["override_imageid1"]
if obj_attr_list["abstraction"] == "pod" :
_obj = { "apiVersion": "v1", \
"kind": "Pod", \
"id": obj_attr_list["cloud_vm_name"], \
"metadata": { "name": obj_attr_list["cloud_vm_name"], \
"namespace": obj_attr_list["namespace"] , \
"labels" : { "creator" : "cbtool", \
"app" : obj_attr_list["cloud_vm_name"], \
"ai" : obj_attr_list["ai"]
}, \
"annotations" : _annotations
}, \
"spec": { "containers" :
[
{ "env": _env, \
"name": obj_attr_list["cloud_vm_name"], \
"image": obj_attr_list["imageid1"], \
"imagePullPolicy" : obj_attr_list["image_pull_policy"], \
"securityContext" : {"privileged" : True, "capabilities" : {"add" : ["IPC_LOCK", "SYS_ADMIN"] }},
}
],
"imagePullSecrets" : _image_pull_secrets,
}
}
# We want to refine this over time, but we have two scheduling options:
# 1. [default] Anti Affinity (don't place the VMs from the same AI in the same place)
# [USER-DEFINED]
# KUB_INITIAL_VMCS = default # use the default namespace, no taints or node names
# 2. Forced placement (_taint and _node_name_or_label are set in the INITAL_VMCS, like this:
# [VMC_DEFAULTS]
# K8S_PLACEMENT_OPTION = nodeName
# [USER-DEFINED]
# KUB_INITIAL_VMCS = cluster:taint;nodeName
# 3. Labeled placement (_taint and _node_name_or_label is a label instead of a nodeName)
# [VMC_DEFAULTS]
# K8S_PLACEMENT_OPTION = nodeSelector
# [USER-DEFINED]
# KUB_INITIAL_VMCS = cluster:taint;nodeLabelKey=nodeLabelValue
#
# The 2nd options requires that you go "taint" the nodes that you want isolated
# so that containers from other tenants (or yourself) don't land on in unwanted places.
# The 3rd option requires that you go "label" the nodes that you want isolated.
if _vmc_attr_list["k8s_placement_option"].lower() == "nodeselector" or (not _taint and not _node_name_or_label) | |
##! python3
##==============================================================================
## Copyright (c) 2021 COMPAL Electronic Inc. All rights reserved.
## This program contains proprietary and confidential information.
## All rights reserved except as may be permitted by prior written consent.
##
## Compal STiD NPSD Test Program Release Notification.
##
## ModuleName:
## LTE.py (Log to Excel)
##
## Abstract:
##    Parsing log info to an Excel file with 4 sheets.
## 1. Read log file: parse -> store (a list of dict)
## 2. Read the INI threshold data: store as dict
## 3. New excel workbook: by openpyxl
## 4. Set worksheet according to Step 1: by dict and DataFrame
##    5. Set conditional formatting for each sheet
## according to Step 2: by dict
## 6. Save the workbook to xlsx file
##
## Author:
## 25-Oct-2021 <NAME>
##
## Revision History:
## Rev 1.0.0.1 25-Oct-2021 Willy
## First create.
##==============================================================================
import re
import os
import sys
import pandas as pd
import codecs
import time
import configparser
import openpyxl
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.styles import Font, Fill, colors
from openpyxl.formatting.rule import CellIsRule
# [Main]
g_strVersion = "3.0.0.1"  # program version string
#[ParseLogPath]
g_strLogDir = "./Log/Pass"  # log root: one sub-directory per serial number (SN)
class cLogParser:
listKey = ["Power_dBm_CH15", "Power_dBm_CH21", "Power_dBm_CH24", "Current_mA_CH15", "Current_mA_CH21", "Current_mA_CH24", "dBm_LNA_ON", "dBm_LNA_Off",
"Current_mA_3G_CH9750", "Current_mA_3G_CH2787", "Current_mA_2G_CH124", "dBm_CH9750", "dBm_CH2787", "dBm_2G_CH124", "dBm_CH124"]
listInfo, listLTE, listZigbee = [], [], []
def __init__(self):
    """Collect the per-SN log folders, parse their logs and merge the results."""
    # every first-level directory under the log root is one device (SN)
    sn_folders = os.listdir(g_strLogDir)
    # fill self.listLTE / self.listZigbee from the per-SN log files
    self.parseLog(sn_folders)
    # combine both lists into self.listInfo, one record per SN
    self.mergeLogs()
def parseLog(self, listSN):
    """For each SN folder, parse its LTE and Zigbee logs into per-SN dicts.

    Appends one dict to self.listLTE and one to self.listZigbee per SN even
    when a log file is missing (values then stay None), so both lists remain
    index-aligned for mergeLogs().
    """
    printLog("[I][parseLog] ------- Start Parsing Log -------")
    strLTEName, strZigbeeName = "GFI20_RF_LTE.log", "GFI20_RF_Zigbee.log"
    try:
        for strSN in listSN:
            # per-SN result templates; None marks "value not found in log"
            dictLTE = {
                "SN" : strSN,
                "dBm_CH9750" : None,
                "dBm_CH2787" : None,
                "dBm_2G_CH124" : None,
                "Current_mA_3G_CH9750" : None,
                "Current_mA_3G_CH2787" : None,
                "Current_mA_2G_CH124" : None,
                "dBm_CH124" : None }
            dictZigbee = {
                "SN" : strSN,
                "Power_dBm_CH15" : None,
                "Power_dBm_CH21" : None,
                "Power_dBm_CH24" : None,
                "dBm_LNA_ON" : None,
                "dBm_LNA_Off" : None,
                "Current_mA_CH15" : None,
                "Current_mA_CH21" : None,
                "Current_mA_CH24" : None }
            b_hasLTE, b_hasZigbee = False, False # flag for checking if the target log exists
            strSNLog = os.path.join(g_strLogDir, strSN) # set abspath for SN logs
            for strLogName in os.listdir(strSNLog):
                strLogPath = os.path.join(strSNLog, strLogName)
                # check GFI20_RF_LTE.log exists. If not, flag = False and parse only SN.
                reMatch = re.fullmatch("^.*RF_LTE\.log", strLogName)
                if(reMatch != None):
                    self.parseLTE(dictLTE, strLogPath, strSN)
                    b_hasLTE = True
                # parse GFI20_RF_Zigbee.log files
                reMatch = re.fullmatch("^.*RF_Zigbee\.log", strLogName)
                if(reMatch != None):
                    self.parseZigbee(dictZigbee, strLogPath, strSN)
                    b_hasZigbee = True
            # if log not exists, append initial dict (keeps both lists aligned)
            self.listLTE.append(dictLTE)
            self.listZigbee.append(dictZigbee)
            # if there is no target log file in the folder, parse only SN
            if not b_hasLTE:
                #listLTE.append({"SN": strSN})
                printLog("[W][ParseLog] Cannot find log: %s" % os.path.join(strSN, strLTEName))
            if not b_hasZigbee:
                #listZigbee.append({"SN" : strSN})
                printLog("[W][ParseLog] Cannot find log: %s" % os.path.join(strSN, strZigbeeName))
        printLog("[I][parseLog] ------- Finish Parsing Log -------")
    except Exception as e:
        printLog("[E][parseLog] Unexpected Error: " + str(e))
def parseLTE(self, dictLTE, strLTEPath, strSN):
    """Extract LTE power / current / Rx-RSSI figures from one GFI20_RF_LTE.log.

    For each frequency-section banner found in the log, the following lines
    are scanned for the first matching measurement, which is stored into
    dictLTE under the corresponding self.listKey name (currents are converted
    from A to mA via the nUnit=1000 factor).
    """
    printLog("[I][parseLTE] Parse LTE log: %s" % strLTEPath)
    try:
        # postfixes stripped from the matched value: [0] power, [1] current, [2] RSSI
        listPostfix = [" \n", " A\n", " dBm\n"]
        with open(strLTEPath, encoding='big5') as log: # big5 for windows
            content = log.readlines()
            for line in content:
                re_power = "Power: [+-]?[0-9]+\.?[0-9]*"
                re_current = "Current: [+-]?[0-9]+\.?[0-9]* A"
                re_RX_RSSI = "Rx RSSI: [+-]?[0-9]+\.?[0-9]* dBm"
                # each banner marks a section; scan from the banner onwards
                if re.search("-+ LTE_3G Freq 897.4 -+", line) != None:
                    idx = content.index(line)
                    tmp_content = content[idx:]
                    # listKey[11] = "dBm_CH9750", listKey[8] = "Current_mA_3G_CH9750"
                    self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[11], listPostfix[0], 1, False)
                    self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[8], listPostfix[1], 1000, False)
                if re.search("-+ LTE_3G Freq 1950 -+", line) != None:
                    idx = content.index(line)
                    tmp_content = content[idx:]
                    # listKey[12] = "dBm_CH2787", listKey[9] = "Current_mA_3G_CH2787"
                    self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[12], listPostfix[0], 1, False)
                    self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[9], listPostfix[1], 1000, False)
                if re.search("-+ LTE_2G Freq 914.8 -+", line) != None:
                    idx = content.index(line)
                    tmp_content = content[idx:]
                    # listKey[13] = "dBm_2G_CH124", listKey[10] = "Current_mA_2G_CH124"
                    self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[13], listPostfix[0], 1, False)
                    self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[10], listPostfix[1], 1000, False)
                if re.search("-+ LTE_2G Freq 959.8 -+", line) != None:
                    idx = content.index(line)
                    tmp_content = content[idx:]
                    # listKey[14] = "dBm_CH124" (Rx RSSI measurement)
                    self.get_log_value(tmp_content, dictLTE, re_RX_RSSI, self.listKey[14], listPostfix[2], 1, True)
    except Exception as e:
        printLog("[E][parseLTE] Unexpected Error: " + str(e))
def parseZigbee(self, dictZigbee, strZigBeePath, strSN):
    """Extract Zigbee power / current / Rx-RSSI figures from one GFI20_RF_Zigbee.log.

    Mirrors parseLTE: for each section banner, scan the following lines for
    the first matching measurement and store it into dictZigbee under the
    corresponding self.listKey name (currents converted from A to mA).
    """
    printLog("[I][parseZigbee] Parse Zigbee log: %s" % strZigBeePath)
    try:
        # postfixes stripped from the matched value: [0] power, [1] current, [2] RSSI
        listPostfix = ["dBm\n", " A\n", " dBm\n"]
        with open(strZigBeePath, encoding="big5") as Zigbee: # big5 for windows
            content = Zigbee.readlines()
            for line in content:
                re_power = "Power: [+-]?[0-9]+\.?[0-9]* dBm"
                re_current = "Current: [+-]?[0-9]+\.?[0-9]* A"
                re_RX_RSSI = "Rx RSSI: [+-]?[0-9]+\.?[0-9]* dBm"
                # each banner marks a section; scan from the banner onwards
                if re.search("-+ ZIGBEE_2450 Freq 2425 -+", line) != None:
                    idx = content.index(line)
                    tmp_content = content[idx:]
                    # listKey[0] = "Power_dBm_CH15", listKey[3] = "Current_mA_CH15"
                    self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[0], listPostfix[0], 1, False)
                    self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[3], listPostfix[1], 1000, False)
                if re.search("-+ ZIGBEE_2450 Freq 2455 -+", line) != None:
                    idx = content.index(line)
                    tmp_content = content[idx:]
                    # listKey[1] = "Power_dBm_CH21", listKey[4] = "Current_mA_CH21"
                    self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[1], listPostfix[0], 1, False)
                    self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[4], listPostfix[1], 1000, False)
                if re.search("-+ ZIGBEE_2450 Freq 2470 -+", line) != None:
                    idx = content.index(line)
                    tmp_content = content[idx:]
                    # listKey[2] = "Power_dBm_CH24", listKey[5] = "Current_mA_CH24"
                    self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[2], listPostfix[0], 1, False)
                    self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[5], listPostfix[1], 1000, False)
                if re.search("-+ LNA ON -+", line) != None:
                    idx = content.index(line)
                    tmp_content = content[idx:]
                    # listKey[6] = "dBm_LNA_ON"
                    self.get_log_value(tmp_content, dictZigbee, re_RX_RSSI, self.listKey[6], listPostfix[2], 1, False)
                if re.search("-+ LNA OFF -+", line) != None:
                    idx = content.index(line)
                    tmp_content = content[idx:]
                    # listKey[7] = "dBm_LNA_Off"
                    self.get_log_value(tmp_content, dictZigbee, re_RX_RSSI, self.listKey[7], listPostfix[2], 1, False)
    except Exception as e:
        printLog("[E][parseZigbee] Unexpected Error: " + str(e))
def get_log_value(self, cut_content, dictInfo, re_target, strKey, strPostfix, nUnit, b_getMulti):
    """Scan *cut_content* for lines matching *re_target* and store the value.

    The number after ": " is parsed, scaled by *nUnit* and written to
    dictInfo[strKey]. With b_getMulti False only the first match is taken;
    otherwise every match overwrites the previous one (last match wins).
    """
    for line in cut_content:
        # search pattern like "Power: (int/float) dBm"
        if re.search(re_target, line) is not None:
            # take the figure from a line like "Power: 8.817 dBm\n".
            # float() replaces the original eval(): the regexes only admit
            # plain signed numbers, so float() accepts the same inputs while
            # removing the code-execution risk of eval on log-file content
            # (integral values now come back as float, e.g. 8.0 instead of 8 —
            # numerically identical downstream).
            fValue = float(line.split(": ")[1].strip(strPostfix))
            dictInfo[strKey] = fValue * nUnit
            if not b_getMulti:
                break
# merge two list of dict to single list of dict
def mergeLogs(self):
    """Merge each Zigbee record into its LTE counterpart; result in self.listInfo."""
    try:
        printLog("[I][mergeLogs] ------- Merging two Log data -------")
        # listLTE and listZigbee are index-aligned: one entry per SN folder
        self.listInfo = [None] * len(self.listLTE)
        for idx, record in enumerate(self.listLTE):
            record.update(self.listZigbee[idx])   # fold the Zigbee dict in place
            self.listInfo[idx] = record
        printLog("[I][mergeLogs] ------- Merged two Log data -------")
    except Exception as e:
        printLog("[E][mergeLogs] Unexpected Error: " + str(e))
#/====================================================================\#
#| Functions of parsing log to excel |#
#\====================================================================/#
def log_to_excel(self):
    """Write the merged records to LTEV2.xlsx.

    Creates four sheets (Zigbee power/current, Zigbee LNA, LTE current,
    LTE dBm), each carrying the SN column plus its slice of self.listKey,
    then applies INI-driven conditional formatting and saves the workbook.
    """
    printLog("[I][log_to_excel] ------- Parsing Log to Excel -------")
    dictThreshold = {} # store INI threshold data for setting conditional formatting
    try:
        # ========== get the threshold data from INI ==========
        printLog("[I][log_to_excel] ----- INI reading -----")
        for key in self.listKey:
            dictThreshold[key] = self.readINI(key)
        printLog("[I][log_to_excel] ----- INI read -----")
        # ========== New Excel workbook and sheets ==========
        df_logInfo = pd.DataFrame(self.listInfo) # listInfo -> list of dict
        listSheetName = ["Zigbee_Power_Current", "Zigbee_LAN", "LTE_Current", "LTE_dBm"]
        listCol = [self.listKey[:6], self.listKey[6:8], self.listKey[8:11], self.listKey[11:15]] # columns for each sheet above
        wb = openpyxl.Workbook() # create a new Excel workbook
        wb.remove(wb['Sheet']) # remove the default sheet when start a workbook
        printLog("[I][log_to_excel] ----- Excel Sheet Creating -----")
        for i in range(0, len(listSheetName)):
            # each sheet always carries the SN column first
            self.newSheet(wb, listSheetName[i], df_logInfo[["SN"] + listCol[i]])
        printLog("[I][log_to_excel] ----- Excel Sheet Created -----")
        # modify cell font-color according to threshold that parsed from INI
        self.set_threshold_to_excel(wb, dictThreshold)
        wb.save('LTEV2.xlsx') # save the worksheet as excel file
        printLog("[I][log_to_excel] ------- Parsed Log to Excel -------")
    except Exception as e:
        printLog("[E][log_to_excel] Unexpected Error: " + str(e))
# read INI values one by one by giving keys, then store to var dictThreshold
def readINI(self, strKey):
    """Read one threshold entry ("low,high" pair) for *strKey* from the INI.

    Returns the raw "number,number" string; terminates the process via
    sys.exit() when the key is missing or malformed.
    """
    try:
        config = configparser.ConfigParser()
        # NOTE(review): g_strINIPath and g_nMethodIndex are not defined in this
        # chunk — presumably module-level globals set elsewhere; confirm.
        config.read(g_strINIPath)
        strMethod = 'Method%s' % g_nMethodIndex
        strValue = config.get(strMethod, strKey)
        # accept only "number,number" (each optionally signed / decimal)
        if re.fullmatch("[+-]?[0-9]+\.?[0-9]*,[+-]?[0-9]+\.?[0-9]*", strValue):
            printLog("[I][readINI] %s = %s" % (strKey, strValue))
            return strValue
        else:
            printLog("[W][readINI] Read %s Fail !!" % strKey)
            sys.exit("Read %s Fail !!" % strKey)
    except Exception as e:
        printLog("[E][readINI] Error: %s" % str(e))
        sys.exit("Error: %s" % str(e))
# new worksheets by DataFrame
def newSheet(self, workbook, strSheetName, df_SheetCol):
    """Create worksheet *strSheetName* and append *df_SheetCol* (header + rows)."""
    try:
        ws = workbook.create_sheet(strSheetName)
        # dataframe_to_rows yields the header row first, then the data rows
        for row in dataframe_to_rows(df_SheetCol, index=False, header=True):
            ws.append(row)
        printLog("[I][newSheet] Sheet: %s Created" % strSheetName)
    except Exception as e:
        printLog("[E][newSheet] Unexpected Error: " + str(e))
# set conditional formating for sheets by dictionay containg thershold data
def set_threshold_to_excel(self, workbook, dictThreshold):
try:
printLog("[I][set_threshold_to_excel] ----- threshold setting -----")
# iterate through every worksheet to set conditional formatting
for ws in workbook.worksheets:
printLog("[I][set_threshold_to_excel] setting worksheet: %s" % ws.title)
# iterate from Col 2 since Col 1 is the Serial Number(SN)
for col in ws.iter_cols(min_row=1, max_row=ws.max_row, min_col=2, max_col=ws.max_column):
strStart, strEnd = None, None # set the test range for cell e.g. A1:A10
istInterval = [] # set the threshold range for the formula below
# check the column is not empty, col[0] is column name
if len(col) | |
self.enable : ROOT.ROOT.EnableImplicitMT ( self.__nthreads )
else : ROOT.ROOT.DisableImplicitMT ()
return self
## Context manager: EXIT
def __exit__ ( self , *_ ) :
    # restore the implicit-MT setting recorded in self.__initial
    # (presumably captured on __enter__ — not visible in this chunk; confirm)
    _current = ROOT.ROOT.IsImplicitMTEnabled()
    if _current == self.__initial : pass                 ## state unchanged: nothing to do
    elif _current : ROOT.ROOT.DisableImplicitMT ()       ## was off on entry: switch off again
    else : ROOT.ROOT.EnableImplicitMT ()                 ## was on on entry: switch back on
# =============================================================================
## create 'counted' function to know number of function calls
# @code
# fun = ...
# func = counted ( fun ) ## use as function
#
# # alternatively use it as decorator:
# @counted
# def fun2 ( ... ) : return ...
# @endcode
def counted ( f ):
    """create 'counted' function to know number of function calls

    The call count is exposed as the ``calls`` attribute of the returned
    callable. ``functools.wraps`` is applied so the wrapper keeps the
    original ``__name__``/``__doc__`` (the previous version lost them).

    Example
    -------
    >>> fun  = ...
    >>> func = counted ( fun ) ## use as function

    >>> @counted
    >>> def fun2 ( ... ) : return ...
    """
    from functools import wraps   ## local import keeps this block self-contained
    @wraps ( f )
    def wrapped ( *args, **kwargs ):
        wrapped.calls += 1
        return f( *args , **kwargs )
    wrapped.calls = 0             ## initialise the counter after wraps copied f.__dict__
    return wrapped
# =============================================================================
## Context manager to enable/disable implicit MT in ROOT
# @see ROOT::EnableImplicitMT
# @see ROOT::DisableImplicitMT
# @see ROOT::IsImplicitMTEnabled
# @code
# with implicitMT( True ) :
# ...
# @endcode
def implicitMT ( enable = True ) :
    """Create a context manager that switches ROOT implicit multi-threading on/off
    >>> with implicitMT ( True ) :
    ...
    - see ROOT::EnableImplicitMT
    - see ROOT::DisableImplicitMT
    - see ROOT::IsImplicitMTEnabled
    """
    manager = ImplicitMT ( enable )
    return manager
# =============================================================================
## Return the path to an executable which would be run if the given <code>cmd</code> was called.
# If no <code>cmd</code> would be called, return <code>None</code>.
# - <code>mode</code> is a permission mask passed to <code>os.access()</code>,
# by default determining if the file exists and executable.
# - When no <code>path</code> is specified, the results of <code>os.environ()</code> are used,
# returning either the <code>“PATH”</code> value or a fallback of <code>os.defpath</code>.
# - copied from <code>shutil</code> module
def local_which ( cmd, mode=os.F_OK | os.X_OK, path=None):
    """Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such
    file.
    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.
    """
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `file` is not a directory, as on Windows
    # directories pass the os.access check.
    def _access_check(fn, mode):
        return (os.path.exists(fn) and os.access(fn, mode)
                and not os.path.isdir(fn))
    # If we're given a path with a directory part, look it up directly rather
    # than referring to PATH directories. This includes checking relative to the
    # current directory, e.g. ./script
    if os.path.dirname(cmd):
        if _access_check(cmd, mode):
            return cmd
        return None
    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    path = path.split(os.pathsep)
    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if not os.curdir in path:
            path.insert(0, os.curdir)
        # PATHEXT is necessary to check on Windows.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        # See if the given file matches any of the expected path extensions.
        # This will allow us to short circuit when given "python.exe".
        # If it does match, only test that one, otherwise we have to try
        # others.
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            files = [cmd]
        else:
            files = [cmd + ext for ext in pathext]
    else:
        # On other platforms you don't have things like PATHEXT to tell you
        # what file suffixes are executable, so just pass on cmd as-is.
        files = [cmd]
    # scan each PATH entry only once (case-normalised, so case-insensitive
    # duplicates on Windows are skipped too)
    seen = set()
    for dir in path:
        normdir = os.path.normcase(dir)
        if not normdir in seen:
            seen.add(normdir)
            for thefile in files:
                name = os.path.join(dir, thefile)
                if _access_check(name, mode):
                    return name
    return None
# =============================================================================
try :
    # prefer the standard-library implementation when available
    from shutil import which
except ImportError :
    # very old Python: fall back to the local back-port defined above
    which = local_which
# =============================================================================
## get the command
# @code
# >>> if cmd_exists ( 'epstopdf' ) : ...
# @endcode
# @see https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def cmd_exists ( command ) :
    """True when *command* resolves to an executable on the current PATH
    >>> if cmd_exists ( 'epstopdf' ) : ...
    """
    found = which ( command )
    return found is not None
# =============================================================================
## @class VRange
# Helper looper over the values between vmin and vmax :
# @code
# for v in VRange ( vmin = 0 , vmax = 5 , n = 100 ) :
# ... print ( v )
# @endcode
class VRange(object) :
    """Looper over n+1 equidistant values from vmin to vmax (both ends exact)
    >>> for v in VRange ( vmin = 0 , vmax = 5 , n = 100 ) :
    >>> ... print ( v )
    """
    def __init__ ( self , vmin , vmax , n = 100 ) :
        assert isinstance ( n , integer_types ) and 0 < n,\
               'VRange: invalid N=%s/%s' % ( n , type ( n ) )
        self.__vmin = vmin
        self.__vmax = vmax
        self.__n    = n
    @property
    def vmin ( self ) :
        """``vmin'' : minimal value"""
        return self.__vmin
    @property
    def vmax ( self ) :
        """``vmax'' : maximal value"""
        return self.__vmax
    @property
    def n ( self ) :
        """``n'' : number of steps"""
        return self.__n
    def __len__ ( self ) :
        return self.__n + 1
    def __iter__ ( self ) :
        steps = self.__n
        width = 1.0 / float ( steps )
        for k in range ( steps + 1 ) :
            if   0     == k : yield self.__vmin      ## exact left endpoint
            elif steps == k : yield self.__vmax      ## exact right endpoint
            else :
                w2 = k * width
                w1 = 1 - w2
                yield self.__vmin * w1 + w2 * self.__vmax
# =============================================================================
## loop over values between xmin and xmax
# @code
# for x in vrange ( xmin , xmax , 200 ) :
# print (x)
# @endcode
def vrange ( vmin , vmax , n = 100 ) :
    """ Loop over n+1 equidistant values between vmin and vmax
    >>> for v in vrange ( vmin , vmax , 200 ) :
    ... print (v)
    """
    looper = VRange ( vmin , vmax , n )
    return looper
# =============================================================================
## @class LRange
# Helper looper over the values between vmin and vmax using log-steps
# @code
# for v in LRange ( vmin = 1 , vmax = 5 , n = 100 ) :
# ... print ( v )
# @endcode
class LRange(VRange) :
    """Looper over n+1 log-spaced values between vmin and vmax (both > 0)
    >>> for v in LRange ( vmin = 1 , vmax = 5 , n = 100 ) :
    >>> ... print ( v )
    """
    def __init__ ( self , vmin , vmax , n = 100 ) :
        assert 0 < vmin and 0 < vmax,\
               'LRange: invalid non-positive vmin/ymax values: %s/%s' % ( vmin , vmax )
        super ( LRange , self ).__init__ ( vmin , vmax , n )
        ## cache the log10 endpoints once
        self.__lmin = math.log10 ( self.vmin )
        self.__lmax = math.log10 ( self.vmax )
    @property
    def lmin ( self ) :
        """``lmin'' : log10(minimal value)"""
        return self.__lmin
    @property
    def lmax ( self ) :
        """``lmax'' : log10(maximal value)"""
        return self.__lmax
    def __iter__ ( self ) :
        steps = self.n
        width = 1.0 / float ( steps )
        for k in range ( steps + 1 ) :
            if   0     == k : yield self.vmin        ## exact left endpoint
            elif steps == k : yield self.vmax        ## exact right endpoint
            else :
                w2 = k * width
                w1 = 1 - w2
                yield 10.0 ** ( self.__lmin * w1 + w2 * self.__lmax )
# =============================================================================
## loop over values between xmin and xmax in log-scale
# @code
# for x in log_range ( xmin , xmax , 200 ) :
# print (x)
# @endcode
def log_range ( vmin , vmax , n = 100 ) :
"""Loop over values between xmin and xmax in log-scale
>>> for x in log_range ( xmin , xmax , 200 ) :
>>> print (x)
| |
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.patches import Circle
import ephem
import sqlite3 as lite
from progressbar import ProgressBar
# Altitude and Azimuth of a single field at t (JD) in rad
def Fields_local_coordinate(Field_ra, Field_dec, t, Site):
    """Return (altitude, azimuth) in radians of a field at time t as seen from Site.

    Field_ra / Field_dec are given in degrees and converted to radians for ephem.
    """
    Site.date = t
    target = ephem.FixedBody()
    target._ra = Field_ra * np.pi / 180      # degrees -> radians
    target._dec = Field_dec * np.pi / 180
    target.compute(Site)
    return target.alt, target.az
def update_moon(t, Site) :
    """Return the Moon's plot position (X, Y), plotted radius (rad) and altitude at t."""
    Site.date = t
    moon = ephem.Moon()
    moon.compute(Site)
    X, Y = AltAz2XY(moon.alt, moon.az)
    # Moon.size is the apparent diameter in arcsec; converted to radians and
    # doubled — presumably to exaggerate the plotted disc; confirm intent.
    radius = moon.size / 3600 * np.pi / 180 * 2
    return X, Y, radius, moon.alt
def AltAz2XY(Alt, Az) :
    """Project horizontal coordinates (Alt, Az in radians) onto the 2-D sky plot.

    Returns the (x, y) pair used by the plotting code; cos(Alt) acts as the
    radial distance from the zenith in this projection.
    """
    r = np.cos(Alt)
    return r * np.sin(Az), r * np.cos(Az)
def visualize(Date, PlotID = 1,FPS = 15,Steps = 20,MP4_quality = 300, Name = "LSST Scheduler Simulator.mp4", showClouds = False):
    """Render an MP4 animation of one night of the LSST schedule.

    Parameters
    ----------
    Date : float or ephem.Date
        Start of the night; "tonight" spans [Date, Date + 1] (ephem dates).
    PlotID : int
        1 = sky view only; 2 = sky view plus a field-coverage histogram.
    FPS : int
        Frames per second of the output movie.
    Steps : int
        Number of frames rendered between the night's start and end.
    MP4_quality : int
        DPI handed to the movie writer.
    Name : str
        Output file name.
    showClouds : bool
        If True, overlay the night's cloud-cover data.

    Side effects: reads field/cloud files from NightDataInLIS/, reads the
    schedule from FBDE.db, and writes the movie to ``Name``.
    """
    # Import data: unpacked rows are (field id, RA, dec) arrays
    All_Fields = np.loadtxt("NightDataInLIS/Constants/UnlabelledFields.lis", unpack = True)
    N_Fields = len(All_Fields[0])

    # LSST site; lon/lat are radians, pressure 0 disables refraction
    Site = ephem.Observer()
    Site.lon = -1.2320792
    Site.lat = -0.517781017
    Site.elevation = 2650
    Site.pressure = 0.
    Site.horizon = 0.

    if showClouds:
        Time_slots = np.loadtxt("NightDataInLIS/TimeSlots{}.lis".format(int(ephem.julian_date(Date))), unpack = True)
        All_Cloud_cover = np.loadtxt("NightDataInLIS/Clouds{}.lis".format(int(ephem.julian_date(Date))), unpack = True)

    # Initialize date and time: last night and tonight windows
    lastN_start = float(Date) -1; lastN_end = float(Date)
    toN_start = float(Date); toN_end = float(Date) + 1

    # Connect to the History data base
    con = lite.connect('FBDE.db')
    cur = con.cursor()

    # Prepare to save in MP4 format
    FFMpegWriter = animation.writers['ffmpeg']
    metadata = dict(title='LSST Simulation', artist='Elahe', comment='Test')
    writer = FFMpegWriter(fps=FPS, metadata=metadata)

    # Progress bar initialization
    pbar = ProgressBar()

    # Initialize plot
    Fig = plt.figure()
    if PlotID == 1:
        ax = plt.subplot(111, axisbg = 'black')
    if PlotID == 2:
        ax = plt.subplot(211, axisbg = 'black')

    # One ax.plot call creates all 16 artists; the markers/linestyles below
    # line up positionally with the unpacked names
    unobserved, Observed_lastN, Obseved_toN,\
    ToN_History_line,\
    uu,gg,rr,ii,zz,yy,\
    last_10_History_line,\
    Horizon, airmass_horizon, S_Pole,\
    LSST,\
    Clouds\
    = ax.plot([], [], '*',[], [], '*',[], [], '*',
              [], [], '*',
              [], [], '*',[], [], '*',[], [], '*',
              [], [], '*',[], [], '*',[], [], '*',
              [], [], '-',
              [], [], '-',[], [], '-',[], [], 'D',
              [], [], 'o',
              [], [], 'o')

    ax.set_xlim(-1.5, 1.5)
    ax.set_ylim(-1.5, 1.5)
    ax.set_aspect('equal', adjustable = 'box')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # Coloring
    Horizon.set_color('white'); airmass_horizon.set_color('red')
    S_Pole.set_markersize(3); S_Pole.set_markerfacecolor('red')
    star_size = 4
    unobserved.set_color('dimgray'); unobserved.set_markersize(star_size)
    Observed_lastN.set_color('blue'); Observed_lastN.set_markersize(star_size)
    Obseved_toN.set_color('chartreuse'); Obseved_toN.set_markersize(0)
    uu.set_color('purple'); gg.set_color('green'); rr.set_color('red')
    ii.set_color('orange'); zz.set_color('pink'); yy.set_color('deeppink')
    Clouds.set_color('white'); Clouds.set_markersize(10)
    # NOTE(review): set_markeredgecolor(None) means "default", not "no edge";
    # 'none' (the string) was probably intended — confirm
    Clouds.set_alpha(0.2); Clouds.set_markeredgecolor(None)
    ToN_History_line.set_color('orange'); ToN_History_line.set_lw(.5)
    last_10_History_line.set_color('gray'); last_10_History_line.set_lw(.5)
    LSST.set_color('red'); LSST.set_markersize(8)

    if PlotID == 2:
        freqAX = plt.subplot(212)
        cur.execute('SELECT N_visit, Last_visit, Second_last_visit, Third_last_visit, Fourth_last_visit From FieldsStatistics')
        row = cur.fetchall()
        N_visit = [x[0] for x in row]
        Last_visit = [x[1] for x in row]
        Second_last_visit = [x[2] for x in row]
        Third_last_visit = [x[3] for x in row]
        Fourth_last_visit = [x[4] for x in row]
        initHistoricalcoverage = N_visit
        # Discount visits recorded at/after tonight's start so the histogram
        # shows the state at the beginning of the night.
        # BUG FIX: the original looped over enumerate(All_Fields) — the 3
        # unpacked rows (id/RA/dec), not the N_Fields fields — and compared
        # the whole Third_last_visit list with toN_start instead of one entry.
        for index in range(N_Fields):
            if Last_visit[index] > toN_start:
                initHistoricalcoverage[index] -= 1
            if Second_last_visit[index] > toN_start:
                initHistoricalcoverage[index] -= 1
            if Third_last_visit[index] > toN_start:
                initHistoricalcoverage[index] -= 1
            # NOTE(review): Fourth_last_visit is fetched but never checked
            # here; confirm whether it should be discounted as well.
        covering,current_cover = freqAX.plot(All_Fields[0],initHistoricalcoverage,'-',[],[],'o')
        freqAX.set_xlim(0,N_Fields)
        freqAX.set_ylim(0,np.max(initHistoricalcoverage)+5)
        covering.set_color('chartreuse'); covering.set_markersize(2)
        current_cover.set_color('red'); current_cover.set_markersize(6)

    # Tonight's summary; defines the animated time span
    cur.execute('SELECT Night_count, T_start, T_end FROM NightSummary WHERE T_start BETWEEN (?) AND (?)',(toN_start, toN_end))
    row = cur.fetchone()
    # NOTE(review): row is None when no NightSummary entry matches Date
    t_start = row[1]
    t_end = row[2]
    t = t_start

    # Figure labels and fixed elements
    Phi = np.arange(0, 2* np.pi, 0.05)
    Horizon.set_data(1.01*np.cos(Phi), 1.01*np.sin(Phi))
    ax.text(-1.3, 0, 'West', color = 'white', fontsize = 7)
    ax.text(1.15, 0 ,'East', color = 'white', fontsize = 7)
    ax.text( 0, 1.1, 'North', color = 'white', fontsize = 7)
    # airmass limit circle at zenith angle 45 degrees
    airmass_horizon.set_data(np.cos(np.pi/4) * np.cos(Phi), np.cos(np.pi/4) * np.sin(Phi))
    ax.text(-.3, 0.6, 'Acceptable airmass horizon', color = 'white', fontsize = 5, fontweight = 'bold')
    Alt, Az = Fields_local_coordinate(180, -90, t, Site)
    x, y = AltAz2XY(Alt,Az)
    S_Pole.set_data(x, y)
    ax.text(x+ .05, y, 'S-Pole', color = 'white', fontsize = 7)

    # Observed last night fields
    cur.execute('SELECT Field_id FROM Schedule WHERE ephemDate BETWEEN (?) AND (?)',(lastN_start, lastN_end))
    row = cur.fetchall()
    # BUG FIX: fetchall() returns [] (never None) when there are no rows
    if row:
        F1 = [x[0] for x in row]
    else:
        F1 = []

    # Tonight observation path
    cur.execute('SELECT Field_id, ephemDate, filter FROM Schedule WHERE ephemDate BETWEEN (?) AND (?)',(toN_start, toN_end))
    row = cur.fetchall()
    # BUG FIX: the original indexed row[0][0], which raised IndexError on an
    # empty schedule instead of taking the empty-list branch
    if row:
        F2 = [x[0] for x in row]
        F2_timing = [x[1] for x in row]
        F2_filtering = [x[2] for x in row]
    else:
        F2 = []; F2_timing = []; F2_filtering = []

    # Sky elements
    Moon = Circle((0, 0), 0, color = 'silver', zorder = 3)
    ax.add_patch(Moon)
    # BUG FIX: text positions must be scalars, not lists; the position is
    # overwritten every frame before the text is drawn
    Moon_text = ax.text(0, 0, 'Moon', color = 'white', fontsize = 7)

    with writer.saving(Fig, Name, MP4_quality):
        for t in pbar(np.linspace(t_start, t_end, num = Steps)):
            # Find the index of the current time; guard so a frame after the
            # last scheduled visit cannot index past the end of the list
            time_index = 0
            while time_index < len(F2_timing) and t > F2_timing[time_index]:
                time_index += 1
            if showClouds:
                Slot_n = 0
                while Slot_n < len(Time_slots) and t > Time_slots[Slot_n]:
                    Slot_n += 1
            visit_index = 0
            visited_field = 0
            visit_index_u = 0; visit_index_g = 0; visit_index_r = 0; visit_index_i = 0; visit_index_z = 0; visit_index_y = 0
            visit_filter = 'r'

            # Object fields: F1)Observed last night F2)Observed tonight F3)Unobserved F4)Covered by clouds
            F1_X = []; F1_Y = []; F2_X = []; F2_Y = []; F3_X = []; F3_Y = []; F4_X = []; F4_Y = []
            # Filter coloring for tonight observation
            U_X = []; U_Y = []; G_X = []; G_Y = []; R_X = []; R_Y = []; I_X = []; I_Y = []; Z_X = []; Z_Y = []; Y_X = []; Y_Y = []

            # F1 coordinate:
            for i in F1:
                Alt, Az = Fields_local_coordinate(All_Fields[1,i-1], All_Fields[2,i-1], t, Site)
                if Alt > 0:
                    X, Y = AltAz2XY(Alt,Az)
                    F1_X.append(X); F1_Y.append(Y)

            # F2 coordinate: ('filt' renamed from 'filter', which shadows the builtin)
            for i,tau,filt in zip(F2, F2_timing, F2_filtering):
                Alt, Az = Fields_local_coordinate(All_Fields[1,i-1], All_Fields[2,i-1], t, Site)
                if Alt > 0:
                    X, Y = AltAz2XY(Alt,Az)
                    F2_X.append(X); F2_Y.append(Y)
                    if filt == 'u':
                        U_X.append(X); U_Y.append(Y)
                        if t >= tau:
                            visit_index_u = len(U_X) -1
                    elif filt == 'g':
                        G_X.append(X); G_Y.append(Y)
                        if t >= tau:
                            visit_index_g = len(G_Y) -1
                    elif filt == 'r':
                        R_X.append(X); R_Y.append(Y)
                        if t >= tau:
                            visit_index_r = len(R_Y) -1
                    elif filt == 'i':
                        I_X.append(X); I_Y.append(Y)
                        if t >= tau:
                            visit_index_i = len(I_Y) -1
                    elif filt == 'z':
                        Z_X.append(X); Z_Y.append(Y)
                        if t >= tau:
                            visit_index_z = len(Z_Y) -1
                    elif filt == 'y':
                        Y_X.append(X); Y_Y.append(Y)
                        if t >= tau:
                            visit_index_y = len(Y_Y) -1
                    if t >= tau:
                        visit_index = len(F2_X) -1
                        visited_field = i
                        visit_filter = filt

            # F3 coordinate: all fields currently above the horizon
            # (dead "if True:" wrapper removed)
            for i in range(0,N_Fields):
                Alt, Az = Fields_local_coordinate(All_Fields[1,i], All_Fields[2,i], t, Site)
                if Alt > 0:
                    X, Y = AltAz2XY(Alt,Az)
                    F3_X.append(X); F3_Y.append(Y)

            # F4 coordinates
            if showClouds:
                for i in range(0,N_Fields):
                    if All_Cloud_cover[Slot_n,i] == 2 or All_Cloud_cover[Slot_n,i] == 1 or All_Cloud_cover[Slot_n,i] == -1:
                        Alt, Az = Fields_local_coordinate(All_Fields[1,i], All_Fields[2,i], t, Site)
                        if Alt > 0:
                            X, Y = AltAz2XY(Alt,Az)
                            F4_X.append(X); F4_Y.append(Y)

            # Update plot
            unobserved.set_data([F3_X,F3_Y])
            Observed_lastN.set_data([F1_X,F1_Y])
            Obseved_toN.set_data([F2_X[0:visit_index],F2_Y[0:visit_index]])
            uu.set_data([U_X[0:visit_index_u],U_Y[0:visit_index_u]]); gg.set_data([G_X[0:visit_index_g],G_Y[0:visit_index_g]])
            rr.set_data([R_X[0:visit_index_r],R_Y[0:visit_index_r]]); ii.set_data([I_X[0:visit_index_i],I_Y[0:visit_index_i]])
            zz.set_data([Z_X[0:visit_index_z],Z_Y[0:visit_index_z]]); yy.set_data([Y_X[0:visit_index_y],Y_Y[0:visit_index_y]])
            ToN_History_line.set_data([F2_X[0:visit_index], F2_Y[0:visit_index]])
            last_10_History_line.set_data([F2_X[visit_index - 10: visit_index], F2_Y[visit_index - 10: visit_index]])

            # telescope position and color
            # BUG FIX: guard against an empty/fully-below-horizon schedule,
            # which previously raised IndexError at F2_X[visit_index]
            if F2_X:
                LSST.set_data([F2_X[visit_index],F2_Y[visit_index]])
            if visit_filter == 'u':
                LSST.set_color('purple')
            if visit_filter == 'g':
                LSST.set_color('green')
            if visit_filter == 'r':
                LSST.set_color('red')
            if visit_filter == 'i':
                LSST.set_color('orange')
            if visit_filter == 'z':
                LSST.set_color('pink')
            if visit_filter == 'y':
                LSST.set_color('deeppink')
            Clouds.set_data([F4_X,F4_Y])

            # Update Moon
            X, Y, r, alt = update_moon(t, Site)
            Moon.center = X, Y
            Moon.radius = r
            if alt > 0:
                # NOTE(review): Moon.set_visible(True) was commented out in
                # the original, so once the Moon sets it never reappears in
                # the animation — confirm whether that is intended
                #Moon.set_visible(True)
                Moon_text.set_visible(True)
                Moon_text.set_x(X+.002); Moon_text.set_y(Y+.002)
            else:
                Moon.set_visible(False)
                Moon_text.set_visible(False)

            # Update coverage
            if PlotID == 2:
                Historicalcoverage = np.zeros(N_Fields)
                # F2_timing is sorted, so stop at the first future visit
                for i,tau in zip(F2, F2_timing):
                    if tau <= t:
                        Historicalcoverage[i -1] += 1
                    else:
                        break
                tot = Historicalcoverage + initHistoricalcoverage
                current_cover.set_data(visited_field -1,tot[visited_field -1])
                covering.set_data(All_Fields[0], tot)

            # Observation statistics
            # NOTE(review): the second legend entry is the integer
            # time_index; a descriptive label was probably intended
            leg = plt.legend([Observed_lastN, Obseved_toN],
                             ['Visited last night', time_index])
            for l in leg.get_texts():
                l.set_fontsize(6)
            date = ephem.date(t)
            Fig.suptitle('Top view of the LSST site on {}, GMT'.format(date))

            # Save current frame
            writer.grab_frame()

    # release the database connection once the movie is written
    con.close()
'''
Site = ephem.Observer()
Site.lon = -1.2320792
Site.lat = -0.517781017
Site.elevation = 2650
Site.pressure = 0.
Site.horizon = 0.
n_nights = 3 # number of the nights to be scheduled starting from 1st Jan. 2021
Date_start = ephem.Date('2015/6/28 12:00:00.00') # times are in UT
for i in range(n_nights):
Date = ephem.Date(Date_start + i) # times are in UT
# create animation
FPS = 10 # Frame per second
Steps | |
"""Module containing library functions for time manipulation.
Standard for time representation in this project is fractional days.
Dates are represented as modified Julian dates (mjd).
An mjd gives the number of days since midnight on November 17, 1858.
"""
import string
from math import ceil, floor
# Constant for converting Julian dates to modified Julian dates
MJD_BASELINE = 2400000.5
def is_leap_year(year):
    """Return True when *year* is a Gregorian leap year.

    Leap years are divisible by 4, excluding century years that are not
    divisible by 400.

    Parameters
    ----------
    year: int

    Returns
    -------
    bool
    """
    if year % 400 == 0:
        return True
    return year % 4 == 0 and year % 100 != 0
def days_in_year(year):
    """Return the number of days in *year* (365, or 366 for leap years).

    Parameters
    ----------
    year: int

    Returns
    -------
    int
    """
    # leap-year rule inlined: divisible by 4, except centuries
    # that are not divisible by 400
    leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    return 366 if leap else 365
def leap_years(year1, year2):
    """Count the leap years in the interval from *year1* to *year2*.

    year1 and year2 must be integers with year2 > year1. The count covers
    years >= year1 and < year2 (year1 itself is included when it is a
    leap year; year2 is not).

    Parameters
    ----------
    year1: int
    year2: int

    Returns
    -------
    int
    """
    # first multiples of 4, 100, and 400 at or after year1
    first_div4 = int(4 * ceil(year1/4.0))
    first_div100 = int(100 * ceil(year1/100.0))
    first_div400 = int(400 * ceil(year1/400.0))
    # how many multiples of each divisor lie in [first, year2)
    count_div4 = int(ceil((year2 - first_div4)/4.0))
    count_div100 = int(ceil((year2 - first_div100)/100.0))
    count_div400 = int(ceil((year2 - first_div400)/400.0))
    # leap years = multiples of 4, minus centuries that are not
    # also multiples of 400
    return count_div4 - (count_div100 - count_div400)
def integer_days(time):
    """Return the whole-day component of a time in fractional days.

    Uses floor, so a negative time maps to the next more-negative integer.

    Parameters
    ----------
    time: float

    Returns
    -------
    int
    """
    return int(floor(time))
def seconds_into_day(time):
    """Return the seconds elapsed since the start of the current day.

    Parameters
    ----------
    time: float
        Time in fractional days.

    Returns
    -------
    int
        Seconds into the day, rounded to the nearest second.
    """
    day_fraction = time % 1
    return int(round(86400.0 * day_fraction))
def days_to_seconds(days):
    """Convert fractional days to integer seconds (rounded).

    Parameters
    ----------
    days: float

    Returns
    -------
    int
    """
    return int(round(days * 86400))
def seconds_to_days(seconds):
    """Convert integer seconds to fractional days.

    Parameters
    ----------
    seconds: int

    Returns
    -------
    float
    """
    # 86400.0 forces true division on Python 2 as well
    return seconds / 86400.0
def round_to_second(time):
    """Round a time in fractional days to the nearest whole second.

    Parameters
    ----------
    time: float
        Time in fractional days.

    Returns
    -------
    float
        The same time, snapped to a whole number of seconds.
    """
    whole_seconds = round(time * 86400)
    return whole_seconds / 86400.0
def display_time(time, force_hours=False):
    """Format a time in fractional days as ``[-][ddd:][hh:][mm:]ss``.

    A field is printed only when it, or a higher-order field, is nonzero
    (zero-filled); force_hours always prints the hour and minute fields,
    which helps tools like Excel parse short time strings.

    Parameters
    ----------
    time: float
        Time in fractional days.
    force_hours: bool
        Always emit the hour and minute fields.

    Returns
    -------
    str
    """
    # snap to the nearest whole second before splitting into fields
    # (inline round_to_second)
    time = round(time * 86400) / 86400.0
    # emit a leading minus and format the absolute value
    sign = ''
    if time < 0:
        sign = '-'
        time = abs(time)
    days = int(floor(time))                       # inline integer_days
    total_secs = int(round(86400.0 * (time % 1))) # inline seconds_into_day
    hours = int(total_secs / 3600)
    leftover = total_secs % 3600
    minutes = int(leftover / 60)
    seconds = leftover % 60
    pieces = []
    if days != 0:
        pieces.append('%s:' % str(days).zfill(3))
    if days != 0 or hours > 0 or force_hours:
        pieces.append('%s:' % str(hours).zfill(2))
    if days != 0 or total_secs >= 60 or force_hours:
        pieces.append('%s:' % str(minutes).zfill(2))
    if days == 0 and hours == 0 and minutes == 0:
        # avoid zero fill when there are only seconds
        pieces.append('%s' % str(seconds))
    else:
        pieces.append('%s' % str(seconds).zfill(2))
    return sign + ''.join(pieces)
def time_from_string(time_string):
    """Convert a ``ddd:hh:mm:ss`` string to fractional days.

    All subfields above seconds are optional and may be omitted if the
    subfield and all higher-order ones are zero (e.g. '90' or '5:00').

    Parameters
    ----------
    time_string: str
        The time as a string.

    Returns
    -------
    float
        The fractional days.
    """
    # BUG FIX: string.split(s, ':') is the removed Python-2 function form;
    # the str method works on both Python 2 and 3
    fields = time_string.split(':')
    seconds = int(fields[-1])
    num_fields = len(fields)
    # default to zero if not provided
    minutes = hours = days = 0
    if (num_fields > 1):
        minutes = int(fields[-2])
    if (num_fields > 2):
        hours = int(fields[-3])
    if (num_fields > 3):
        days = int(fields[-4])
    total_seconds = seconds + 60 * minutes + 3600 * hours + 86400 * days
    return total_seconds / 86400.0  # == seconds_to_days(total_seconds)
def display_date(mjd):
    """Return ``yyyy:ddd:hh:mm:ss`` for a modified Julian date.

    Parameters
    ----------
    mjd: float
        The modified Julian day (days since midnight, Nov 17, 1858).

    Returns
    -------
    str
        The date as year, day-of-year, and time-of-day.
    """
    # adjust to number of days since Dec. 31, 1857
    # (Nov 17, 1858 is day 321 of 1858)
    int_days = int(floor(321.0 + mjd))
    fractional_day = mjd % 1
    # First compute year and day assuming 365-day years, then correct.
    # BUG FIX: use floor division so the year stays an int on Python 3
    # (identical to the old '/' behavior on Python 2 ints).
    year = 1858 + int_days // 365
    day_of_year = int_days % 365 - leap_years(1858, year)
    # handle case where leap year adjustment has made day negative:
    # borrow days from the previous year until the day is valid (1-based)
    while (day_of_year < 1):
        year -= 1
        day_of_year = day_of_year + days_in_year(year)
    year_string = '%s:' % (year)
    return(year_string + display_time(day_of_year + fractional_day))
def compute_mjd(year, day_of_year, hour, minute, second):
    """Build a modified Julian date from calendar fields.

    All arguments should be integers; day_of_year is 1-based.

    Parameters
    ----------
    year: int
    day_of_year: int
    hour: int
    minute: int
    second: int

    Returns
    -------
    float
        The modified Julian day.
    """
    day_fraction = (hour * 3600 + minute * 60 + second) / 86400.0
    years_since_1858 = year - 1859
    leaps = leap_years(1858, year)  # leap days since 1858
    # the 45 accounts for the days from Nov 17 to the end of 1858
    return 365 * years_since_1858 + leaps + 45 + (day_of_year - 1) + day_fraction
def mjd_from_string(time_string):
    """Parse a fixed-width ``yyyy.ddd:hh:mm:ss`` string into an mjd.

    Parameters
    ----------
    time_string: str
        The date/time as a string; all fields are at fixed offsets.

    Returns
    -------
    float
        The modified Julian day.
    """
    # field layout: yyyy . ddd : hh : mm : ss
    year = int(time_string[0:4])
    day = int(time_string[5:8])
    hour = int(time_string[9:11])
    minute = int(time_string[12:14])
    second = int(time_string[15:17])
    return compute_mjd(year, day, hour, minute, second)
def mjd_to_jd(mjd):
    """Convert a modified Julian date to a true Julian date.

    Parameters
    ----------
    mjd: float

    Returns
    -------
    float
    """
    return mjd + MJD_BASELINE
def jd_to_mjd(jd):
    """Convert a true Julian date to a modified Julian date.

    Parameters
    ----------
    jd: float

    Returns
    -------
    float
    """
    return jd - MJD_BASELINE
class Interval(object):
"""Class to represent a simple temporal interval.
"""
def __init__(self, start, end):
    """Constructor for an interval.

    Parameters
    ----------
    start: float
        The start time (fractional days; presumably an mjd, as
        __str__ formats it with display_date — TODO confirm).
    end: float
        The end time (same units as start).
    """
    # endpoints are stored as-is; no validation that end >= start
    self.start = start
    self.end = end
def __str__(self):
    """Returns a string representation of the interval."""
    # format both endpoints as calendar dates via display_date()
    return('Interval: start: %s, end: %s' % (display_date(self.start),
                                             display_date(self.end)))
def start_time(self):
    """Returns the start of the interval (fractional days)."""
    return(self.start)
def end_time(self):
    """Returns the end of the interval (fractional days)."""
    return(self.end)
def duration(self):
    """Returns the duration of an interval in fractional days.

    Negative when end precedes start (no ordering is enforced).
    """
    return(self.end_time() - self.start_time())
def temporal_relationship(self, time):
"""Returns the temporal relationship between an interval and an
absolute time.
Returns 'before' if the interval ends at or before the time,
'after' if the interval begins at or after the time,
'includes' if the time occurs during the interval.
Parameters
----------
time: float
The time.
Returns
-------
rel : str
The temporal relationship.
"""
if (self.end_time() <= time):
rel = 'before'
elif (self.start_time() >= time):
rel = | |
<filename>sdk/python/pulumi_gcp/compute/forwarding_rule.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ForwardingRuleArgs', 'ForwardingRule']
@pulumi.input_type
class ForwardingRuleArgs:
def __init__(__self__, *,
all_ports: Optional[pulumi.Input[bool]] = None,
allow_global_access: Optional[pulumi.Input[bool]] = None,
backend_service: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
ip_protocol: Optional[pulumi.Input[str]] = None,
is_mirroring_collector: Optional[pulumi.Input[bool]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
load_balancing_scheme: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[str]] = None,
network_tier: Optional[pulumi.Input[str]] = None,
port_range: Optional[pulumi.Input[str]] = None,
ports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
service_label: Optional[pulumi.Input[str]] = None,
subnetwork: Optional[pulumi.Input[str]] = None,
target: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ForwardingRule resource.
:param pulumi.Input[bool] all_ports: This field can be used with internal load balancer or network load balancer
when the forwarding rule references a backend service, or with the target
field when it references a TargetInstance. Set this to true to
allow packets addressed to any ports to be forwarded to the backends configured
with this forwarding rule. This can be used when the protocol is TCP/UDP, and it
must be set to true when the protocol is set to L3_DEFAULT.
Cannot be set if port or portRange are set.
:param pulumi.Input[bool] allow_global_access: If true, clients can access ILB from all regions.
Otherwise only allows from the local region the ILB is located at.
:param pulumi.Input[str] backend_service: A BackendService to receive the matched traffic. This is used only
for INTERNAL load balancing.
:param pulumi.Input[str] description: An optional description of this resource. Provide this property when
you create the resource.
:param pulumi.Input[str] ip_address: The IP address that this forwarding rule serves. When a client sends
traffic to this IP address, the forwarding rule directs the traffic to
the target that you specify in the forwarding rule. The
loadBalancingScheme and the forwarding rule's target determine the
type of IP address that you can use. For detailed information, refer
to [IP address specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).
An address can be specified either by a literal IP address or a
reference to an existing Address resource. If you don't specify a
reserved IP address, an ephemeral IP address is assigned.
The value must be set to 0.0.0.0 when the target is a targetGrpcProxy
that has validateForProxyless field set to true.
For Private Service Connect forwarding rules that forward traffic to
Google APIs, IP address must be provided.
:param pulumi.Input[str] ip_protocol: The IP protocol to which this rule applies.
When the load balancing scheme is INTERNAL, only TCP and UDP are
valid.
Possible values are `TCP`, `UDP`, `ESP`, `AH`, `SCTP`, `ICMP`, and `L3_DEFAULT`.
:param pulumi.Input[bool] is_mirroring_collector: Indicates whether or not this load balancer can be used
as a collector for packet mirroring. To prevent mirroring loops,
instances behind this load balancer will not have their traffic
mirrored even if a PacketMirroring rule applies to them. This
can only be set to true for load balancers that have their
loadBalancingScheme set to INTERNAL.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this forwarding rule. A list of key->value pairs.
:param pulumi.Input[str] load_balancing_scheme: This signifies what the ForwardingRule will be used for and can be
EXTERNAL, EXTERNAL_MANAGED, INTERNAL, or INTERNAL_MANAGED. EXTERNAL is used for Classic
Cloud VPN gateways, protocol forwarding to VMs from an external IP address,
and HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP load balancers.
INTERNAL is used for protocol forwarding to VMs from an internal IP address,
and internal TCP/UDP load balancers.
EXTERNAL_MANAGED is used for regional external HTTP(S) load balancers.
INTERNAL_MANAGED is used for internal HTTP(S) load balancers.
Default value is `EXTERNAL`.
Possible values are `EXTERNAL`, `EXTERNAL_MANAGED`, `INTERNAL`, and `INTERNAL_MANAGED`.
:param pulumi.Input[str] name: Name of the resource; provided by the client when the resource is
created. The name must be 1-63 characters long, and comply with
RFC1035. Specifically, the name must be 1-63 characters long and match
the regular expression `a-z?` which means the
first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.
:param pulumi.Input[str] network: For internal load balancing, this field identifies the network that
the load balanced IP should belong to for this Forwarding Rule. If
this field is not specified, the default network will be used.
This field is only used for INTERNAL load balancing.
:param pulumi.Input[str] network_tier: The networking tier used for configuring this address. If this field is not
specified, it is assumed to be PREMIUM.
Possible values are `PREMIUM` and `STANDARD`.
:param pulumi.Input[str] port_range: This field is used along with the target field for TargetHttpProxy,
TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway,
TargetPool, TargetInstance.
Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets
addressed to ports in the specified range will be forwarded to target.
Forwarding rules with the same [IPAddress, IPProtocol] pair must have
disjoint port ranges.
Some types of forwarding target have constraints on the acceptable
ports:
* TargetHttpProxy: 80, 8080
* TargetHttpsProxy: 443
* TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995,
1883, 5222
* TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995,
1883, 5222
* TargetVpnGateway: 500, 4500
:param pulumi.Input[Sequence[pulumi.Input[str]]] ports: This field is used along with internal load balancing and network
load balancer when the forwarding rule references a backend service
and when protocol is not L3_DEFAULT.
A single port or a comma separated list of ports can be configured.
Only packets addressed to these ports will be forwarded to the backends
configured with this forwarding rule.
You can only use one of ports and portRange, or allPorts.
The three are mutually exclusive.
You may specify a maximum of up to 5 ports, which can be non-contiguous.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: A reference to the region where the regional forwarding rule resides.
This field is not applicable to global forwarding rules.
:param pulumi.Input[str] service_label: An optional prefix to the service name for this Forwarding Rule.
If specified, will be the first label of the fully qualified service
name.
The label must be 1-63 characters long, and comply with RFC1035.
Specifically, the label must be 1-63 characters long and match the
regular expression `a-z?` which means the first
character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.
This field is only used for INTERNAL load balancing.
:param pulumi.Input[str] subnetwork: The subnetwork that the load balanced IP should belong to for this
Forwarding Rule. This field is only used for INTERNAL load balancing.
If the network specified is in auto subnet mode, this field is
optional. However, if the network is in custom subnet mode, a
subnetwork must be specified.
:param pulumi.Input[str] target: The URL of the target resource to receive the matched traffic.
The target must live in the same region as the forwarding rule.
The forwarded traffic must be of a type appropriate to the target
object.
"""
if all_ports is not None:
pulumi.set(__self__, "all_ports", all_ports)
if allow_global_access is not None:
pulumi.set(__self__, "allow_global_access", allow_global_access)
if backend_service is not None:
pulumi.set(__self__, "backend_service", backend_service)
if description is not None:
pulumi.set(__self__, "description", description)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if ip_protocol is not None:
pulumi.set(__self__, "ip_protocol", ip_protocol)
if is_mirroring_collector is not None:
pulumi.set(__self__, "is_mirroring_collector", is_mirroring_collector)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if load_balancing_scheme is not None:
pulumi.set(__self__, "load_balancing_scheme", load_balancing_scheme)
if name is not None:
pulumi.set(__self__, "name", name)
if network | |
'code.py')
sm = SchemaModule(code)
module = sm.import_module_for_migration()
self.check_imported_module(sm, 'test_package.pkg_dir.code', module)
self.check_related_attributes(sm)
# test deletion of imported schemas from sys.modules after importlib.import_module()
# ensure that the schema and its submodels get deleted from sys.modules
modules_that_sys_dot_modules_shouldnt_have = [
'test_package',
'test_package.pkg_dir',
'test_package.pkg_dir.code',
'test_package.module_for_testing',
]
for module in modules_that_sys_dot_modules_shouldnt_have:
self.assertTrue(module not in sys.modules)
def test_munging(self):
    """Check that model-name munging round-trips and that the global
    munge/unmunge helpers touch every registered model."""
    class A(obj_tables.Model):
        id = SlugAttribute()

        class Meta(obj_tables.Model.Meta):
            attribute_order = ('id',)

    original_name = A.__name__
    munged_name = SchemaModule._munged_model_name(A)
    # the munged name is the original plus the munging suffix
    self.assertTrue(munged_name.startswith(original_name))
    self.assertTrue(munged_name.endswith(SchemaModule.MUNGED_MODEL_NAME_SUFFIX))

    # munging is detected, idempotent, and reversible
    A.__name__ = munged_name
    self.assertTrue(SchemaModule._model_name_is_munged(A))
    self.assertEqual(SchemaModule._munged_model_name(A), munged_name)
    self.assertEqual(SchemaModule._unmunged_model_name(A), original_name)
    A.__name__ = SchemaModule._unmunged_model_name(A)
    self.assertFalse(SchemaModule._model_name_is_munged(A))

    # munge, then unmunge, the names of all registered models
    SchemaModule._munge_all_model_names()
    for model in get_models():
        self.assertTrue(SchemaModule._model_name_is_munged(model))
    SchemaModule._unmunge_all_munged_model_names()
    for model in get_models():
        self.assertFalse(SchemaModule._model_name_is_munged(model))
def check_related_attributes(self, schema_module):
    """Assert that every RelatedAttribute in *schema_module*'s imported
    module references a model defined in that same module, both by name
    and by class identity.

    Parameters
    ----------
    schema_module : SchemaModule
        The schema whose module is imported and checked.
    """
    # ensure that all RelatedAttributes point to Models contained within a module
    module = schema_module.import_module_for_migration()
    model_defs = schema_module._get_model_defs(module)
    model_names = set(model_defs)
    # NOTE(review): 'models' is unused below — kept for parity with model_names
    models = set(model_defs.values())
    for model_name, model in model_defs.items():
        for attr_name, local_attr in model.Meta.local_attributes.items():
            if isinstance(local_attr.attr, RelatedAttribute):
                related_class = local_attr.related_class
                # the related class must be defined in the module by name ...
                self.assertIn(related_class.__name__, model_names,
                    "{}.{} references a {}, but it's not a model name in module {}".format(
                    model_name, attr_name, related_class.__name__, module.__name__))
                # ... and must be the identical class object, not a stale copy
                self.assertEqual(related_class, model_defs[related_class.__name__],
                    "{}.{} references a {}, but it's not the model in module {}: {} != {}".format(
                    model_name, attr_name, related_class.__name__, module.__name__,
                    id(related_class), id(model_defs[related_class.__name__])))
def test_import_module_for_migration(self):
    """Exercise SchemaModule.import_module_for_migration() on single-file
    schemas, packages, broken modules, and wc_lang schemas."""
    # import copy of schema in single file from a new dir
    copy_of_small_existing = copy_file_to_tmp(self, 'small_existing.py')
    sm = SchemaModule(copy_of_small_existing)
    module = sm.import_module_for_migration()
    self.check_imported_module(sm, 'small_existing', module)
    self.check_related_attributes(sm)

    # importing self.existing_defs_path again returns same module from cache
    self.assertEqual(module, sm.import_module_for_migration())

    # test import from a package
    self.multiple_import_tests_of_test_package(self.test_package)

    # put the package in new dir that's not on sys.path
    test_package_copy = temp_pathname(self, 'test_package')
    shutil.copytree(self.test_package, test_package_copy)
    self.multiple_import_tests_of_test_package(test_package_copy)

    # import a module with a syntax bug; the import must fail cleanly
    bad_module = os.path.join(self.tmp_dir, 'bad_module.py')
    f = open(bad_module, "w")
    f.write('bad python')
    f.close()
    sm = SchemaModule(bad_module)
    with self.assertRaisesRegex(MigratorError, "cannot be imported and exec'ed"):
        sm.import_module_for_migration()

    # import existing wc_lang
    sm = SchemaModule(self.wc_lang_schema_existing)
    self.check_related_attributes(sm)

    # import modified wc_lang
    sm = SchemaModule(self.wc_lang_schema_modified)
    self.check_related_attributes(sm)

    # test a copy of wc_lang, in both existing and modified forms
    wc_lang_copy = temp_pathname(self, 'wc_lang')
    shutil.copytree(self.wc_lang_fixtures_path, wc_lang_copy)
    for wc_lang_schema in ['core.py', 'core_modified.py']:
        path = os.path.join(wc_lang_copy, wc_lang_schema)
        sm = SchemaModule(path)
        self.check_related_attributes(sm)

    # test _check_imported_models errors exception
    sm = SchemaModule(self.small_bad_related_path)
    with self.assertRaisesRegex(MigratorError,
        r"\w+\.\w+ references a \w+, but it's not the model in module \w+"):
        sm.import_module_for_migration()

    # import a module that's new and missing an attribute
    module_missing_attr = os.path.join(self.tmp_dir, 'module_missing_attr.py')
    f = open(module_missing_attr, "w")
    f.write('# no code')
    f.close()
    with self.assertRaisesRegex(MigratorError,
        "module in '.+' missing required attribute 'no_such_attribute'"):
        SchemaModule(module_missing_attr).import_module_for_migration(required_attrs=['no_such_attribute'])
# test exception for bad mod_patterns type
copy_of_small_existing = copy_file_to_tmp(self, 'small_existing.py')
sm = SchemaModule(copy_of_small_existing)
with capturer.CaptureOutput(relay=False) as capture_output:
with self.assertRaisesRegex(MigratorError,
"mod_patterns must be an iterator that's not a string"):
sm.import_module_for_migration(debug=True, mod_patterns=3)
with self.assertRaisesRegex(MigratorError,
"mod_patterns must be an iterator that's not a string"):
sm.import_module_for_migration(debug=True, mod_patterns='hi mom')
# test debug of import_module_for_migration
wc_lang_copy_2 = temp_pathname(self, 'wc_lang')
shutil.copytree(self.wc_lang_fixtures_path, wc_lang_copy_2)
path = os.path.join(wc_lang_copy_2, 'core.py')
sm = SchemaModule(path)
with capturer.CaptureOutput(relay=False) as capture_output:
sm.import_module_for_migration(debug=True, print_code=True, mod_patterns=['obj_tables'])
expected_texts = [
'import_module_for_migration',
'SchemaModule.MODULES',
'importing wc_lang.core',
'Exceeded max',
'sys.modules entries matching RE patterns',
'obj_tables',
'sys.path:',
'new modules:',
'wc_lang.wc_lang']
for expected_text in expected_texts:
self.assertIn(expected_text, capture_output.get_text())
# ensure that modules which are not sub-modules of a package remain in sys.modules
# use module_not_in_test_package, which will be imported by test_package/pkg_dir/code.py
# 0: ensure that module_not_in_test_package is not in sys.modules
module_not_in_test_package = os.path.join(self.fixtures_path, 'module_not_in_test_package.py')
if 'module_not_in_test_package' in sys.modules:
del sys.modules['module_not_in_test_package']
self.assertFalse('module_not_in_test_package' in sys.modules)
# 1: prepare
# copy module_not_in_test_package.py to a new tmp dir T
tmp_path_to_module_not_in_test_package = copy_file_to_tmp(self, 'module_not_in_test_package.py')
# put T on sys.path
sys.path.append(os.path.dirname(tmp_path_to_module_not_in_test_package))
# 2: setup test_package to import module_not_in_test_package
# copy test_package to a new tmp dir that's not on sys.path
test_package_copy = temp_pathname(self, 'test_package')
shutil.copytree(self.test_package, test_package_copy)
# modify core.py in test_package to import module_not_in_test_package
core_path = os.path.join(test_package_copy, 'pkg_dir', 'code.py')
with open(core_path, 'a') as f:
f.write('\nimport module_not_in_test_package')
# 3: use import_module_for_migration to import test_package.pkg_dir.code, which will
# import module_not_in_test_package
SchemaModule(core_path).import_module_for_migration()
# 4: confirm that import_module_for_migration() left module_not_in_test_package in sys.modules
self.assertTrue('module_not_in_test_package' in sys.modules)
# 5: cleanup: remove module_not_in_test_package from sys.modules, & remove T from sys.path
del sys.modules['module_not_in_test_package']
del sys.path[sys.path.index(os.path.dirname(tmp_path_to_module_not_in_test_package))]
def test_check_imported_models(self):
for good_schema_path in [self.existing_defs_path, self.migrated_defs_path, self.wc_lang_schema_existing,
self.wc_lang_schema_modified]:
sm = SchemaModule(good_schema_path)
self.assertEqual(sm._check_imported_models(), [])
def test_get_model_defs(self):
sm = SchemaModule(self.existing_defs_path)
module = sm.import_module_for_migration()
models = sm._get_model_defs(module)
self.assertEqual(set(models), {'Test', 'DeletedModel', 'Property', 'Subtest', 'Reference'})
self.assertEqual(models['Test'].__name__, 'Test')
# test detection of a module with no Models
empty_module = os.path.join(self.tmp_dir, 'empty_module.py')
f = open(empty_module, "w")
f.write('# a module with no Models')
f.close()
sm = SchemaModule(empty_module)
with self.assertRaisesRegex(MigratorError, r"No subclasses of obj_tables\.Model found in '\S+'"):
sm.import_module_for_migration()
def test_str(self):
sm = SchemaModule(self.existing_defs_path)
for attr in ['module_path', 'abs_module_path', 'module_name']:
self.assertIn(attr, str(sm))
self.assertIn(self.existing_defs_path, str(sm))
def test_run(self):
sm = SchemaModule(self.existing_defs_path)
models = sm.run()
self.assertEqual(set(models), {'Test', 'DeletedModel', 'Property', 'Subtest', 'Reference'})
class TestMigrator(MigrationFixtures):
def setUp(self):
super().setUp()
# make a MigrationWrapper transformations with prepare_existing_models and modify_migrated_models that invert each other
class InvertingPropertyWrapper(MigrationWrapper):
def prepare_existing_models(self, migrator, existing_models):
# increment the value of Property models
for existing_model in existing_models:
if isinstance(existing_model, migrator.existing_defs['Property']):
existing_model.value += +1
def modify_migrated_models(self, migrator, migrated_models):
# decrement the value of Property models
for migrated_model in migrated_models:
if isinstance(migrated_model, migrator.existing_defs['Property']):
migrated_model.value += -1
inverting_property_wrapper = InvertingPropertyWrapper()
self.inverting_transforms_migrator = Migrator(self.existing_defs_path, self.existing_defs_path,
transformations=inverting_property_wrapper)
self.inverting_transforms_migrator.prepare()
    def tearDown(self):
        # no fixtures of our own to release; delegate cleanup to the base class
        super().tearDown()
    def test_validate_renamed_models(self):
        """_validate_renamed_models(): valid renamings, then each error case."""
        migrator_for_error_tests = self.migrator_for_error_tests
        # valid renamings produce no errors and populate models_map
        self.assertEqual(migrator_for_error_tests._validate_renamed_models(), [])
        self.assertEqual(migrator_for_error_tests.models_map,
                         {'TestExisting': 'TestMigrated', 'RelatedObj': 'NewRelatedObj', 'TestExisting2': 'TestMigrated2'})
        # test errors
        # source of a renaming is not an existing model; also clears models_map
        migrator_for_error_tests.renamed_models = [('NotExisting', 'TestMigrated')]
        self.assertIn('in renamed models not an existing model',
                      migrator_for_error_tests._validate_renamed_models()[0])
        self.assertEqual(migrator_for_error_tests.models_map, {})
        # target of a renaming is not a migrated model
        migrator_for_error_tests.renamed_models = [('TestExisting', 'NotMigrated')]
        self.assertIn('in renamed models not a migrated model',
                      migrator_for_error_tests._validate_renamed_models()[0])
        # duplicate entries are reported on both the existing and migrated side
        migrator_for_error_tests.renamed_models = [
            ('TestExisting', 'TestMigrated'),
            ('TestExisting', 'TestMigrated')]
        errors = migrator_for_error_tests._validate_renamed_models()
        self.assertIn('duplicated existing models in renamed models:', errors[0])
        self.assertIn('duplicated migrated models in renamed models:', errors[1])
    def test_validate_renamed_attrs(self):
        """_validate_renamed_attrs(): valid renamings, then each error case."""
        migrator_for_error_tests = self.migrator_for_error_tests
        # valid renamings produce no errors and populate renamed_attributes_map
        self.assertEqual(migrator_for_error_tests._validate_renamed_attrs(), [])
        self.assertEqual(migrator_for_error_tests.renamed_attributes_map,
                         dict(migrator_for_error_tests.renamed_attributes))
        # test errors
        # source side names an unknown model or an unknown attribute
        for renamed_attributes in [
            [(('NotExisting', 'attr_a'), ('TestMigrated', 'attr_b'))],
            [(('TestExisting', 'no_such_attr'), ('TestMigrated', 'attr_b'))]]:
            migrator_for_error_tests.renamed_attributes = renamed_attributes
            self.assertIn('in renamed attributes not an existing model.attribute',
                          migrator_for_error_tests._validate_renamed_attrs()[0])
        # errors also clear the renamed_attributes_map
        self.assertEqual(migrator_for_error_tests.renamed_attributes_map, {})
        # target side names an unknown model or an unknown attribute
        for renamed_attributes in [
            [(('TestExisting', 'attr_a'), ('NotMigrated', 'attr_b'))],
            [(('TestExisting', 'attr_a'), ('TestMigrated', 'no_such_attr'))]]:
            migrator_for_error_tests.renamed_attributes = renamed_attributes
            self.assertIn('in renamed attributes not a migrated model.attribute',
                          migrator_for_error_tests._validate_renamed_attrs()[0])
        # attribute renamings must agree with the model renamings
        for renamed_attributes in [
            [(('NotExisting', 'attr_a'), ('TestMigrated', 'attr_b'))],
            [(('TestExisting', 'attr_a'), ('NotMigrated', 'attr_b'))]]:
            migrator_for_error_tests.renamed_attributes = renamed_attributes
            self.assertRegex(migrator_for_error_tests._validate_renamed_attrs()[1],
                             "renamed attribute '.*' not consistent with renamed models")
        # duplicates are reported on both the existing and migrated side
        migrator_for_error_tests.renamed_attributes = [
            (('TestExisting', 'attr_a'), ('TestMigrated', 'attr_b')),
            (('TestExisting', 'attr_a'), ('TestMigrated', 'attr_b'))]
        self.assertIn('duplicated existing attributes in renamed attributes:',
                      migrator_for_error_tests._validate_renamed_attrs()[0])
        self.assertIn('duplicated migrated attributes in renamed attributes:',
                      migrator_for_error_tests._validate_renamed_attrs()[1])
    def test_get_mapped_attribute(self):
        """_get_mapped_attribute() maps (model, attr) pairs through the
        renaming maps; unknown models or attributes map to (None, None)."""
        migrator_for_error_tests = self.migrator_for_error_tests
        # renamed model and renamed attribute, passed as strings
        self.assertEqual(migrator_for_error_tests._get_mapped_attribute('TestExisting', 'attr_a'),
                         ('TestMigrated', 'attr_b'))
        # also accepts a model class and an attribute instance
        self.assertEqual(migrator_for_error_tests._get_mapped_attribute(
            self.TestExisting, self.TestExisting.Meta.attributes['id']), ('TestMigrated', 'id'))
        # unknown attribute or unknown model yields (None, None)
        self.assertEqual(migrator_for_error_tests._get_mapped_attribute('TestExisting', 'no_attr'),
                         (None, None))
        self.assertEqual(migrator_for_error_tests._get_mapped_attribute('NotExisting', 'id'),
                         (None, None))
        # renamed model with an unrenamed attribute
        self.assertEqual(migrator_for_error_tests._get_mapped_attribute('RelatedObj', 'id'),
                         ('NewRelatedObj', 'id'))
        self.assertEqual(migrator_for_error_tests._get_mapped_attribute('RelatedObj', 'no_attr'),
                         (None, None))
def test_load_defs_from_files(self):
migrator = Migrator(self.existing_defs_path, self.migrated_defs_path)
migrator._load_defs_from_files()
self.assertEqual(set(migrator.existing_defs), {'Test', 'DeletedModel', 'Property', 'Subtest', 'Reference'})
self.assertEqual(set(migrator.migrated_defs), {'Test', 'NewModel', 'Property', 'Subtest', 'Reference'})
def test_get_migrated_copy_attr_name(self):
self.assertTrue(self.migrator._get_migrated_copy_attr_name().startswith(
Migrator.MIGRATED_COPY_ATTR_PREFIX))
    def test_get_inconsistencies(self):
        """_get_inconsistencies(): exercise each category of reported
        inconsistency between an existing model and its migrated model."""
        migrator_for_error_tests = self.migrator_for_error_tests
        # unknown model names on either side
        inconsistencies = migrator_for_error_tests._get_inconsistencies('NotExistingModel',
                                                                        'NotMigratedModel')
        self.assertRegex(inconsistencies[0], "existing model .* not found in")
        self.assertRegex(inconsistencies[1],
                         "migrated model .* corresponding to existing model .* not found in")
        # a model whose class type differs from its migrated counterpart
        class A(object):
            pass
        migrator_for_error_tests.existing_defs['A'] = A
        migrator_for_error_tests.models_map['A'] = 'X'
        inconsistencies = migrator_for_error_tests._get_inconsistencies('A', 'NewRelatedObj')
        self.assertRegex(inconsistencies[0],
                         "type of existing model '.*' doesn't equal type of migrated model '.*'")
        self.assertRegex(inconsistencies[1],
                         "models map says '.*' migrates to '.*', but _get_inconsistencies parameters say '.*' migrates to '.*'")
        # class names that disagree with their names in the models map
        A.__name__ = 'foo'
        self.NewRelatedObj.__name__ = 'foo'
        inconsistencies = migrator_for_error_tests._get_inconsistencies('A', 'NewRelatedObj')
        self.assertRegex(inconsistencies[1],
                         "name of existing model class '.+' not equal to its name in the models map '.+'")
        self.assertRegex(inconsistencies[2],
                         "name of migrated model class '.+' not equal to its name in the models map '.+'")
        # clean up
        del migrator_for_error_tests.existing_defs['A']
        del migrator_for_error_tests.models_map['A']
        A.__name__ = 'A'
        self.NewRelatedObj.__name__ = 'NewRelatedObj'
        # attribute type mismatch between existing and migrated attributes
        inconsistencies = migrator_for_error_tests._get_inconsistencies('TestExisting', 'TestMigrated')
        self.assertRegex(inconsistencies[0],
                         r"existing attribute .+\..+ type .+ differs from its migrated attribute .+\..+ type .+")
        # mismatched attribute properties and related-class renamings
        inconsistencies = migrator_for_error_tests._get_inconsistencies('TestExisting2', 'TestMigrated2')
        self.assertRegex(inconsistencies[0],
                         r".+\..+\..+ is '.+', which differs from the migrated value of .+\..+\..+, which is '.+'")
        self.assertRegex(inconsistencies[1],
                         r".+\..+\..+ is '.+', which migrates to '.+', but it differs from .+\..+\..+, which is '.+'")
        # a referenced existing model that is not migrated at all
        inconsistencies = self.migrator_for_error_tests_2._get_inconsistencies('TestExisting2',
                                                                               'TestMigrated2')
        self.assertRegex(inconsistencies[1],
                         r"existing model '.+' is not migrated, but is referenced by migrated attribute .+\..+")
def test_get_model_order(self):
migrator = self.migrator
migrator.prepare()
existing_model_order = migrator._get_existing_model_order(self.example_existing_model_copy)
migrated_model_order = migrator._migrate_model_order(existing_model_order)
expected_model_order = [migrator.migrated_defs[model]
for model in ['Test', 'Property', 'Subtest', 'Reference', 'NewModel']]
self.assertEqual(migrated_model_order, expected_model_order)
class NoSuchModel(obj_tables.Model):
pass
with self.assertRaisesRegex(MigratorError, "model 'NoSuchModel' not found in the model map"):
migrator._migrate_model_order([NoSuchModel])
# test ambiguous_sheet_names
class FirstUnambiguousModel(obj_tables.Model):
pass
class SecondUnambiguousModel(obj_tables.Model):
pass
# models with ambiguous sheet names
class TestModel(obj_tables.Model):
pass
class TestModels3(obj_tables.Model):
pass
class RenamedModel(obj_tables.Model):
pass
class NewModel(obj_tables.Model):
| |
# Source repository: PRECISE/SMEDL
# Copyright (c) 2021 The Trustees of the University of Pennsylvania
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Types and operations for SMEDL expressions
"""
from enum import Enum
from smedl.parser.exceptions import TypeMismatch
class SmedlType(Enum):
    """The data types SMEDL supports for state variables and parameters.

    Each member's value is its SMEDL type string, so a member can be looked
    up directly from that string:

        SmedlType('int')
        SmedlType('pointer')

    The matching C type string is available as the ``c_type`` attribute:

        SmedlType('float').c_type  # 'double'
    """
    def __new__(cls, smedl_type, c_type):
        """Use only the SMEDL type string as the member value; stash the C
        type string on the ``c_type`` attribute."""
        member = object.__new__(cls)
        member._value_ = smedl_type
        member.c_type = c_type
        return member
    def __str__(self):
        """The SMEDL type string."""
        return self.value
    INT = ('int', 'int')
    FLOAT = ('float', 'double')
    CHAR = ('char', 'char')
    STRING = ('string', 'char *')
    POINTER = ('pointer', 'void *')
    OPAQUE = ('opaque', 'SMEDLOpaque')
    def convertible_to(self, other):
        """Whether a value of this type may be converted to ``other``
        (e.g. in assignment or parameter passing)."""
        numeric = (SmedlType.INT, SmedlType.FLOAT, SmedlType.CHAR)
        if self in numeric and other in numeric:
            # Numeric types freely convert among themselves.
            return True
        # Every other type is compatible only with itself.
        return self is other
    @classmethod
    def inference(cls, t1, t2):
        """The type inferred when combining ``t1`` and ``t2``, or None when
        the pair has no common type."""
        if t1 is t2:
            return t1
        pair = {t1, t2}
        # float absorbs the other numeric types
        if cls.FLOAT in pair and pair <= {cls.FLOAT, cls.INT, cls.CHAR}:
            return cls.FLOAT
        # int absorbs char
        if pair == {cls.INT, cls.CHAR}:
            return cls.INT
        # incompatible pair: fall through, returning None (as before)
    # Useful in Jinja templates
    def is_a(self, name):
        """Whether this member's name equals the provided string."""
        return self.name == name
# Notes on type checking:
# Types in expressions may be any of the SmedlTypes above, "null" if the value
# is a null pointer (compatible with POINTER and OPAQUE), and None if the value
# is a helper call (unknown, no type checking is done)
# Notes on parentheses:
# The code that initializes instances of these classes is responsible for
# determining whether parentheses are necessary around a particular expression
# and calling the parenthesize() method.
class Expression(object):
"""A SMEDL expression"""
    def __init__(self, type_, expr_type):
        """Store the expression's SMEDL type and its node kind.

        type_ is a SmedlType, "null" (null pointer), or None (helper call of
        unknown type); expr_type identifies the kind of expression node.
        """
        # Represents the SMEDL type of the expression
        self._type = type_
        # whether this expression should be rendered inside parentheses
        self._parens = False
        # Needed by Jinja - Represents the type of *this object* (i.e. literal,
        # event param, binary op, etc.)
        self._expr_type = expr_type
    @property
    def type(self):
        """The expression's SMEDL type ("null"/None for null/helper calls)."""
        return self._type
    @property
    def parens(self):
        """Whether this expression must be parenthesized when rendered."""
        return self._parens
    @property
    def expr_type(self):
        """The kind of expression node (literal, binary op, etc.)."""
        return self._expr_type
    def parenthesize(self):
        """In child classes that are not atoms, parenthesize the expression. But
        by default, ignore."""
        pass
def unary_type_check(self, op):
"""Check that the expression type is compatible with the given unary
operation ("+", "-", "!", or "~"). Return the resulting type if so,
raise TypeMismatch if not"""
if op in ["+", "-"] and self._type in [
SmedlType.INT, SmedlType.FLOAT, SmedlType.CHAR, None]:
return self.type
elif op == "~" and self._type in [SmedlType.INT, SmedlType.CHAR, None]:
return self.type
elif op == "!" and self._type in [
SmedlType.INT, SmedlType.CHAR, SmedlType.POINTER, None]:
return self._type
else:
raise TypeMismatch("Cannot use {} on expression of type {}"
.format(op, self._type))
    def _arithmetic_type_check(self, other):
        """Check that the type of this expression is compatible with the other
        for a binary arithmetic operation (+, -, *, %, /). This expression is
        the left operand and the "other" is the right operand. Return the
        resulting type, or raise TypeMismatch if not compatible."""
        # NOTE: the branch order implements a precedence chain
        # (float > None > int > char); reordering would change results.
        # If one or both operands are float and all are numbers, return float
        if (self._type == SmedlType.FLOAT and other._type in [
                SmedlType.INT, SmedlType.FLOAT, SmedlType.CHAR]) or (
                other._type == SmedlType.FLOAT and self._type in [
                SmedlType.INT, SmedlType.CHAR]):
            return SmedlType.FLOAT
        # If one or both operands are None and rest are numbers, return None
        # (None = helper call of unknown type, so the result is unknown too)
        elif (self._type is None and other._type in [
                SmedlType.INT, SmedlType.FLOAT, SmedlType.CHAR, None]) or (
                other._type is None and self._type in [
                SmedlType.INT, SmedlType.FLOAT, SmedlType.CHAR]):
            return None
        # If one or both operands are int and all are int or char, return int
        elif (self._type == SmedlType.INT and other._type in [
                SmedlType.INT, SmedlType.CHAR]) or (
                other._type == SmedlType.INT and self._type == SmedlType.CHAR):
            return SmedlType.INT
        # If both operands are char, return char
        elif self._type == SmedlType.CHAR and other._type == SmedlType.CHAR:
            return SmedlType.CHAR
        # Otherwise, type mismatch
        else:
            raise TypeMismatch()
    def _bitwise_type_check(self, other):
        """Check that the type of this expression is compatible with the other
        for a binary bitwise operation (<<, >>, &, ^, |). This expression is
        the left operand and the "other" is the right operand. Return the
        resulting type, or raise TypeMismatch if not compatible."""
        # Like _arithmetic_type_check but floats are excluded: bitwise ops
        # only apply to integer-like operands (int, char) or unknown (None).
        # If one/both operands are None and the rest are int/char, return None
        if (self._type is None and other._type in [
                SmedlType.INT, SmedlType.CHAR, None]) or (
                other._type is None and self._type in [
                SmedlType.INT, SmedlType.CHAR]):
            return None
        # If one or both operands are int and all are int or char, return int
        elif (self._type == SmedlType.INT and other._type in [
                SmedlType.INT, SmedlType.CHAR]) or (
                other._type == SmedlType.INT and self._type == SmedlType.CHAR):
            return SmedlType.INT
        # If both operands are char, return char
        elif self._type == SmedlType.CHAR and other._type == SmedlType.CHAR:
            return SmedlType.CHAR
        # Otherwise, type mismatch
        else:
            raise TypeMismatch()
    def _comparison_type_check(self, other):
        """Check that the type of this expression is compatible with the other
        for a binary comparison or boolean operation (<, <=, >, >=, &&, ||).
        These are fundamentally different operations, however, their type
        requirements happen to be the same. This expression is
        the left operand and the "other" is the right operand. Return the
        resulting type, or raise TypeMismatch if not compatible."""
        # If both operands are numbers or None, return int
        # (the result of a comparison/boolean op is C-style int truth value)
        if (self._type in [
                SmedlType.INT, SmedlType.FLOAT, SmedlType.CHAR, None] and
                other._type in [
                SmedlType.INT, SmedlType.FLOAT, SmedlType.CHAR, None]):
            return SmedlType.INT
        # Otherwise, type mismatch
        else:
            raise TypeMismatch()
def _equality_type_check(self, other):
"""Check that the type of this expression is compatible with the other
for a binary equality operation (==, !=). This expression is
the left operand and the "other" is the right operand. Return the
resulting type, or raise TypeMismatch if not compatible."""
# If either operand is None, other operand can be anything. Return int
if self._type is None or other._type is None:
return SmedlType.INT
# If both operands are numbers, return int
elif (self._type in [SmedlType.INT, SmedlType.FLOAT, SmedlType.CHAR]
and other._type in [
SmedlType.INT, SmedlType.FLOAT, SmedlType.CHAR]):
return SmedlType.INT
# If either operand is "null", the other can be "null" or pointer.
# Return int
elif (self._type == "null" and other._type in [
SmedlType.POINTER, "null"]) or (
other._type == "null" and self._type == SmedlType.POINTER):
return SmedlType.INT
# If both operands are the same type, return int
elif self._type == other._type:
return SmedlType.INT
# Otherwise, type mismatch
else:
raise | |
import discord
from discord.ext import commands
import asyncio
import aiohttp
from lxml import html
from datetime import datetime
import json
import re
# Map "hour[30]" strings to the nearest clock-face emoji, used to prefix
# kick-off times in the live-scores output. Keys are the hour on a 12-hour
# clock, optionally suffixed with "30" for the half hour; "0"/"030" alias
# "12"/"1230". The entries for "1" and "230" were mojibake from a lost
# encoding round-trip and are restored to U+1F550 / U+1F55D.
timedict = {"0": "🕛", "030": "🕧", "1": "🕐", "130": "🕜", "2": "🕑", "230": "🕝",
            "3": "🕒", "330": "🕞", "4": "🕓", "430": "🕟", "5": "🕔", "530": "🕠",
            "6": "🕕", "630": "🕡", "7": "🕖", "730": "🕢", "8": "🕗", "830": "🕣",
            "9": "🕘", "930": "🕤", "10": "🕙", "1030": "🕥", "11": "🕚", "1130": "🕦",
            "12": "🕛", "1230": "🕧"}
class Live:
""" Get live scores for leagues worldwide """
    def __init__(self,bot):
        """Attach to *bot* and start the live-score background task."""
        self.bot = bot
        # master switch for the background refresh loop in ls()
        self.scoreson = True
        # background task that keeps the live-score channels updated
        self.bot.scorechecker = bot.loop.create_task(self.ls())
        # last snapshot of tracked matches; ticker() diffs against this
        self.matchcache = {}
    def __unload(self):
        """Cog teardown: cancel the background task and stop its loop."""
        self.bot.scorechecker.cancel()
        # also clear the flag so a pending iteration exits cleanly
        self.scoreson = False
# Live Scores task.
async def ls(self):
await self.bot.wait_until_ready()
msglist = []
numservs = 0
while self.scoreson:
# Get date string
tf = "Fixtures, results, and live scores for "
tf += "**%a %d %b %Y** (Refreshed at **%H:%M:%S**)"
today = datetime.now().strftime(tf)
# If we have nothing in msg list, clean channel, create messages.
if msglist == []:
numservs = 0
for i in self.bot.config:
if "scorechannel" in self.bot.config[i]:
ch = self.bot.config[i]["scorechannel"]
if ch is None:
continue
sc = self.bot.get_channel(int(ch))
await sc.purge()
numservs += 1
# Shield from crashes.
try:
c = self.bot.session
url = "http://www.bbc.co.uk/sport/football/scores-fixtures"
async with c.get(url) as resp:
if resp.status != 200:
await asyncio.sleep(60)
continue
tree = html.fromstring(await resp.text())
except Exception as e:
print(f"Livescore channel: Ignored exception {e}")
await asyncio.sleep(60)
continue
# Get each league parent element
sections = tree.xpath('.//div[contains(@class,"gel-layout--center")]/div/div[3]/div/div')
outcomps = [f"{today}\n"]
self.matchlist = {}
for i in sections: # for each league on page
try:
comp = i.xpath('.//h3/text()')[0]
except IndexError:
comp = prevcomp
else:
prevcomp = comp
group = "".join(i.xpath('.//h4/text()'))
if group:
comp = f"**{comp}** ({group})"
else:
comp = f"**{comp}**"
self.matchlist[comp] = {}
matches = i.xpath('.//li')
for j in matches:
url = "".join(j.xpath('.//a/@href'))
xp = './/abbr[contains(@class,"team-name")]/@title'
h = j.xpath(xp)[0]
a = j.xpath(xp)[1]
notes = j.xpath('.//aside/span//text()')
time = j.xpath('.//span[contains(@class,"time")]/text()')
scor = j.xpath('.//span[contains(@class,"number--home")]/text()|.//span[contains(@class,"number--away")]/text()')
if notes:
notes = f"`ℹ {''.join(notes)}`"
if len(time) == 1: # Fuck it let's be daft and convert the times to the nearest emoji
time = time[0]
left,mid,right = time.partition(":")
left = int(left)
right = int(right)
if -1 < right < 15:
right = ""
elif 15 < right < 45:
right = "30"
else:
right = ""
left += 1
if left > 12:
left += -12
newtime = f"{str(left)}{right}"
precol = f"`{timedict[newtime]}{time}`"
midcol = "v"
if len(scor) == 2:
precol = ""
midcol = " - ".join(scor)
if midcol == "P - P":
precol = "`â›”PP`"
miodcol = "v"
if "ET" in notes:
precol = "`âš½ET`"
notes.replace("ET","")
if notes == "`FT`":
notes = ""
precol = "`✅FT`"
elif "FT" in notes:
notes = notes.replace("FT"," ")
precol = "`✅FT`"
elif "AET" in notes:
notes = notes.replace("AET"," ")
precol = "`âš½AET`"
if "HT" in notes:
notes = notes.replace("HT","")
precol = "`â¸HT`"
if "min" in notes:
regex = re.search(r"\d+\smins?",notes)
notes = notes.replace(regex.group(),"")
if "`âš½ET`" in precol:
precol = f"`âš½ET {regex.group()}`"
else:
precol = f"`âš½{regex.group()}`"
if "' +" in notes:
regex = re.search(r"\d+\'\s\+\d+",notes)
notes = notes.replace(regex.group(),"")
precol= f"`âš½{regex.group()}`"
if len(notes) < 6:
notes = ""
self.matchlist[comp][h] = {"timenow":precol,"midcol":midcol,"away":a,
"notes":notes,"league":comp,"url":url}
count = 0
# Send to ticker for update check.
self.bot.loop.create_task(self.ticker())
for i in self.matchlist:
outcomp = f"{i}\n"
for j in self.matchlist[i]:
count += 1
outcomp += f"{self.matchlist[i][j]['timenow']} {j} {self.matchlist[i][j]['midcol']} {self.matchlist[i][j]['away']} {self.matchlist[i][j]['notes']}\n"
outcomp += "\n"
outcomps.append(outcomp)
outlist = []
newchunk = ""
for i in outcomps:
if len(i) + len(newchunk) < 2000:
newchunk += i
else:
if len(i) > 2000:
outlist.append(newchunk)
outlist.append(i[:1999])
outlist.append(i[2000:])
newchunk = ""
else:
outlist.append(newchunk)
newchunk = i
outlist.append(newchunk)
# if previous messages exist to edit:
if msglist != []:
if (len(outlist) * numservs) != len(msglist):
print(f"Old: {len(outlist)} New: {len(msglist)}")
msglist = []
for i in self.bot.config:
chan = self.bot.config[i]["scorechannel"]
ch = self.bot.get_channel(int(chan))
if ch is not None:
await ch.purge()
for j in outlist:
m = await ch.send(j)
msglist.append(m)
else:
outlist = outlist * numservs
editlist = list(zip(msglist,outlist))
for i in editlist:
# Edit if different.
if i[0].content != i[1]:
try:
await i[0].edit(content=i[1])
except discord.HTTPException as e:
print(f"LS edit failed, {e}")
pass
else:
for j in self.bot.config:
if not "scorechannel" in self.bot.config[j]:
continue
id = self.bot.config[j]["scorechannel"]
if id is None:
continue
else:
print(id)
sc = self.bot.get_channel(int(id))
for i in outlist:
if sc is not None:
m = await sc.send(i)
msglist.append(m)
await asyncio.sleep(60)
    # Ticker Task
    async def ticker(self):
        """Diff the freshly scraped matches against the previous snapshot and
        announce kick-offs, goals and full-time results in a hardcoded
        channel. Scheduled by ls() after each refresh of self.matchlist."""
        # Keep only the leagues we announce, excluding similarly named ones.
        filtered = {}
        for (k,v) in self.matchlist.items():
            if any(i in k for i in ["Champions League","Premier League"]) and not any(i in k for i in ["Women","Welsh","Russian"]):
                filtered.update({k:v})
        # Flatten {league: {home_team: match}} into {home_team: match}.
        flattened = {}
        for k,v in filtered.items():
            flattened.update(v)
        # First run after startup only seeds the cache.
        if not self.matchcache:
            self.matchcache = flattened
            return
        # End early if no changes.
        if flattened == self.matchcache:
            return
        for i in flattened:
            try:
                if not flattened[i]["midcol"] == self.matchcache[i]["midcol"]:
                    # Avoid re-sending by verifying score increase..
                    # NOTE(review): this is a lexicographic *string* compare,
                    # so a score reaching 10+ may be treated as "no increase";
                    # also `os` here shadows the usual os-module name.
                    os = self.matchcache[i]["midcol"].split("-")
                    ns = flattened[i]["midcol"].split("-")
                    if not os[0].strip() < ns[0].strip() and not os[1].strip() < ns[1].strip():
                        self.matchcache = flattened
                        return
                    # hardcoded announcement channel id — TODO: make configurable
                    out = self.bot.get_channel(332163136239173632)
                    e = discord.Embed()
                    if "0 - 0" in flattened[i]['midcol']:
                        e.title = "Kick Off"
                        e.color = 0x00ffff
                    else:
                        e.title = "Goal"
                        e.color = 0x00ff00
                    # fetch the match page for the scorer lists
                    # NOTE(review): resp.status is not checked; a non-200 page
                    # just yields empty scorer lists — confirm acceptable
                    async with self.bot.session.get(f"http://www.bbc.co.uk{flattened[i]['url']}") as resp:
                        tree = html.fromstring(await resp.text())
                    hg = "".join(tree.xpath('.//ul[contains(@class,"fixture__scorers")][1]//text()'))
                    hg = hg.replace("minutes","").replace(" )",")")
                    ag = "".join(tree.xpath('.//ul[contains(@class,"fixture__scorers")][2]//text()'))
                    ag = ag.replace("minutes","").replace(" )",")")
                    if hg:
                        hg = f"*{hg}*"
                    if ag:
                        ag = f"*{ag}*"
                    e.description = f"{i} {flattened[i]['midcol']} {flattened[i]['away']}\n{hg}\n{ag}"
                    e.set_footer(text=f"{flattened[i]['timenow']}, {flattened[i]['league']}".replace("*","").replace("`",""))
                    if "FT" in flattened[i]['timenow']:
                        e.title = "Full Time"
                        e.color = 0x00ffff
                    self.matchcache = flattened
                    print(f"Dispatched Ticker Event: {i} {flattened[i]['midcol']} {flattened[i]['away']}\n{hg}\n{ag}")
                    await out.send(embed=e)
            except KeyError:
                # a team appeared/disappeared between refreshes; reset the
                # cache (any falsy value reseeds it on the next pass)
                self.matchcache = ""
                return
        # Save for next comparison.
        self.matchcache = flattened
    @commands.group(invoke_without_command=True)
    async def scores(self,ctx,*,league="Premier League"):
        """ Get the current scores from a league (default is Premier League)"""
        # NOTE(review): self.matchlist is created by the ls() task; invoking
        # this command before the first scrape completes raises AttributeError.
        outcomps = []
        for i in self.matchlist:
            # case-insensitive substring match on the league heading
            if league.lower() in i.lower():
                outcomp = f"{i}\n"
                for j in self.matchlist[i]:
                    outcomp += f"{self.matchlist[i][j]['timenow']} {j} {self.matchlist[i][j]['midcol']} {self.matchlist[i][j]['away']} {self.matchlist[i][j]['notes']}\n"
                outcomp += "\n"
                outcomps.append(outcomp)
        # Pack per-league strings into chunks under Discord's 2000-char limit
        # (same packing as in ls()).
        # NOTE(review): the oversize branch drops the char at index 1999, and
        # with no matches outlist becomes [""] (truthy), sending an empty
        # message — both look like latent bugs.
        outlist = []
        newchunk = ""
        for i in outcomps:
            if len(i) + len(newchunk) < 2000:
                newchunk += i
            else:
                if len(i) > 2000:
                    outlist.append(newchunk)
                    outlist.append(i[:1999])
                    outlist.append(i[2000:])
                    newchunk = ""
                else:
                    outlist.append(newchunk)
                    newchunk = i
        outlist.append(newchunk)
        if outlist:
            for i in outlist:
                await ctx.send(i)
        else:
            await ctx.send(f"Couldn't find scores for {league}")
    @scores.command(name="reload")
    @commands.has_permissions(manage_guild=True)
    async def _reload(self,ctx):
        """ Cancel and restart the live-score background task """
        self.bot.scorechecker.cancel()
        self.bot.scorechecker = self.bot.loop.create_task(self.ls())
        await ctx.send("Restarted score tracker.")
@commands.group(invoke_without_command=True, aliases=["ls"])
@commands.is_owner()
async def livescores(self, ctx):
    """ Check the status of the live score channel """
    e = discord.Embed(title="Live Score Channel Status")
    e.set_thumbnail(url=ctx.guild.icon_url)
    if self.scoreson:
        e.description = "```diff\n+ Enabled```"
        e.color = 0x00ff00
    else:
        e.description = "```diff\n- Disabled```"
        e.color = 0xff0000
    # Guild may have no config entry yet; treat that like "no channel set".
    guild_config = self.bot.config.get(str(ctx.guild.id), {})
    if "scorechannel" in guild_config:
        chan = self.bot.get_channel(guild_config["scorechannel"])
        # get_channel returns None if the channel no longer exists.
        chanval = chan.mention if chan is not None else "None Set"
    else:
        chanval = "None Set"
        e.color = 0xff0000
    e.add_field(name="output Channel", value=chanval, inline=False)
    exc = None
    # Bot.is_owner is a coroutine; it must be awaited (the un-awaited
    # coroutine object was previously always truthy).
    if await self.bot.is_owner(ctx.author):
        state = self.bot.scorechecker._state
        if state == "PENDING":
            v = "✅ Task running."
        elif state == "CANCELLED":
            e.color = 0xff0000
            v = "⚠ Task Cancelled."
        elif state == "FINISHED":
            e.color = 0xff0000
            self.bot.scorechecker.print_stack()
            v = "⚠ Task Finished"
            exc = self.bot.scorechecker.exception()
        else:
            v = f"❔ `{state}`"
        e.add_field(name="Debug Info", value=v, inline=False)
        # Only show the exception field when there actually is one
        # (previously a None value could be passed to add_field).
        if exc is not None:
            e.add_field(name="Exception", value=exc, inline=False)
    await ctx.send(embed=e)
@livescores.command(name="on")
@commands.has_permissions(manage_messages=True)
async def scores_on(self, ctx):
    """ Turn the Live score channel back on """
    if not self.scoreson:
        self.scoreson = True
        await ctx.send("⚽ Live score channel has been enabled.")
        # `bot` was previously referenced as a bare, undefined name.
        self.bot.scorechecker = self.bot.loop.create_task(self.ls())
    elif self.bot.scorechecker._state in ("FINISHED", "CANCELLED"):
        # `== ["FINISHED","CANCELLED"]` compared a string to a list and
        # could never match; a membership test was intended.
        state = self.bot.scorechecker._state
        # Task.exception() raises CancelledError on a cancelled task, so
        # only query it for a finished one.
        exc = self.bot.scorechecker.exception() if state == "FINISHED" else None
        await ctx.send(f"⚽ Restarting {state} task after exception {exc}.")
        self.bot.scorechecker = self.bot.loop.create_task(self.ls())
    else:
        await ctx.send("⚽ Live score channel already enabled.")
@livescores.command(name="off")
@commands.has_permissions(manage_messages=True)
async def scores_off(self, ctx):
    """ Turn off the live score channel """
    # The soccer-ball emoji was mojibake ("âš½", UTF-8 bytes decoded as
    # Latin-1); restored to the intended character.
    if self.scoreson:
        self.scoreson = False
        await ctx.send("⚽ Live score channel has been disabled.")
    else:
        await ctx.send("⚽ Live score channel already disabled.")
@livescores.command(name="unset")
@commands.has_permissions(manage_channels=True)
async def _unset(self, ctx):
    """ Unsets the live score channel for this server """
    guild_key = str(ctx.guild.id)
    self.bot.config[guild_key]["scorechannel"] = None
    # Persist the whole config under the config lock.
    with await self.bot.configlock, open('config.json', "w", encoding='utf-8') as f:
        json.dump(self.bot.config, f, ensure_ascii=True,
                  sort_keys=True, indent=4, separators=(',', ':'))
    await ctx.send(f"Live score channel for {ctx.guild.name} set to None")
@livescores.command(name="set")
@commands.has_permissions(manage_channels=True)
async def _set(self, ctx):
    """ Sets the live score channel for this server """
    guild_key = f"{ctx.guild.id}"
    self.bot.config[guild_key].update({"scorechannel": ctx.channel.id})
    # Persist the whole config under the config lock.
    with await self.bot.configlock, open('config.json', "w", encoding='utf-8') as f:
        json.dump(self.bot.config, f, ensure_ascii=True,
                  sort_keys=True, indent=4, separators=(',', ':'))
    await ctx.send(f"Live score channel for {ctx.guild.name} set to {ctx.channel.mention}")
async def fetch_game(self, ctx, team):
    """Find the BBC live-match page for `team`.

    Returns the absolute match URL, or None if the fixtures page could
    not be fetched or the team has no game listed today.
    """
    async with self.bot.session.get("http://www.bbc.co.uk/sport/football/scores-fixtures") as resp:
        if resp.status != 200:
            # `m` was referenced here but never defined in this scope
            # (NameError on the error path); report through ctx instead.
            await ctx.send(f"HTTP Error: {resp.status}")
            return None
        # We convert to lower case because fuck "USA".
        page = (await resp.text()).lower()
    tree = html.fromstring(page)
    # Search for the team in fixture links.
    nodes = tree.xpath(f".//li/a[.//abbr[contains(@title,'{team.lower()}')]]")
    if not nodes:
        await ctx.send("Could not find specified team")
        return None
    # If multiple nodes match, just take the first.
    node = nodes[0]
    # The host was a redacted "<EMAIL>" placeholder; restored to the same
    # host as the fixtures URL above.
    return f"http://www.bbc.co.uk{node.xpath('./@href')[0]}"
@commands.command(aliases=["substitutes","bench","lineup","lineups"])
async def subs(self, ctx, *, team="Newcastle"):
    """ Show subs & lineups for a team (default is Newcastle)'s current game """
    team = team.title()
    m = await ctx.send(f"Searching for lineups for {team}")
    with ctx.typing():
        # Locate Game
        link = await self.fetch_game(ctx, team)
        if link is None:
            # fetch_game already reported the failure; bail out cleanly
            # instead of passing None to session.get.
            await m.delete()
            return None
        async with self.bot.session.get(link) as resp:
            if resp.status != 200:
                await m.edit(content=f"HTTP Error accessing {link}: {resp.status}")
                print(resp.status)
                return None
            await m.edit(content=f"Fetching lineups from {link}")
            tree = html.fromstring(await resp.text())
        home = tree.xpath('//abbr/@title')[0]
        away = tree.xpath('//abbr/@title')[1]
        # Both XIs live in the same <ul>; the first 11 entries are the
        # home side, the rest the away side.
        players = tree.xpath('.//ul[@class="gs-o-list-ui gs-o-list-ui--top-no-border gel-pica"][1]/li')
        homex = players[:11]
        awayx = players[11:]

        def parse_players(inputlist):
            """Format a list of player <li> nodes as markdown strings."""
            # (Was an async def with no awaits and contained a duplicated
            # `infos = ...` line; now a plain helper.)
            out = []
            for i in inputlist:
                player = i.xpath('.//span[2]/abbr/span/text()')[0]
                # Booking card type is encoded in the <i> element's class.
                infos = "".join(i.xpath('.//span[2]/i/@class'))
                infotime = "".join(i.xpath('.//span[2]/i/span/text()'))
                infotime = infotime.replace('Booked at ', '')
                infotime = infotime.replace('mins', '\'')
                infos = infos.replace('sp-c-booking-card sp-c-booking-card--rotate sp-c-booking-card--yellow gs-u-ml', '\\💛')
                infos = infos.replace('booking-card booking-card--rotate booking-card--red gel-ml', '\\🔴')
                subinfo = i.xpath('.//span[3]/span//text()')
                subbed = subinfo[1] if subinfo else ""
                subtime = subinfo[3].strip() if subinfo else ""
                if subbed:
                    # "♻" was mojibake ("â™»") in the original string.
                    subbed = f"\\♻ {subbed} {subtime}"
                if infos:
                    if subbed:
                        thisplayer = f"**{player}** ({infos}{infotime}, {subbed})"
                    else:
                        thisplayer = f"**{player}** ({infos}{infotime})"
                else:
                    if subbed:
                        thisplayer = f"**{player}** ({subbed})"
                    else:
                        thisplayer = f"**{player}**"
                out.append(thisplayer)
            return out

        homexi = parse_players(homex)
        awayxi = parse_players(awayx)
        # Substitutes: first half of the names are home, second half away.
        subs = tree.xpath('//ul[@class="gs-o-list-ui gs-o-list-ui--top-no-border gel-pica"][2]/li/span[2]/abbr/span/text()')
        sublen = len(subs) // 2
        homesubs = ", ".join(f"*{i}*" for i in subs[:sublen])
        awaysubs = ", ".join(f"*{i}*" for i in subs[sublen:])
        # Generate Embed
        e = discord.Embed()
        e.title = f"Lineups for {home} v {away}"
        e.url = link
        e.color = 0xffdf43
        homesquad = ", ".join(homexi) + f"\n\nSubstitutes:\n{homesubs}"
        awaysquad = ", ".join(awayxi) + f"\n\nSubstitutes:\n{awaysubs}"
        e.add_field(name=f"{home}", value=homesquad)
        e.add_field(name=f"{away}", value=awaysquad)
        e.set_thumbnail(url="http://newsimg.bbc.co.uk/media/images/67165000/jpg/_67165916_67165915.jpg")
    await m.delete()
    await ctx.send(embed=e)
@commands.command(aliases=["livestats"])
async def stats(self,ctx,*,team="Newcastle"):
team = team.title()
""" Get the current stats for a team's game (default is Newcastle) """
with ctx.typing():
link = await self.fetch_game(ctx,team)
m = await ctx.send(f"Found match: <{link}>, parsing...")
async with self.bot.session.get(link) as resp:
if resp.status != 200:
await ctx.send(content=f"HTTP Error accessing this match's page: Code {resp.status}")
tree = html.fromstring(await resp.text())
teams = tree.xpath('//abbr/@title')
try:
home = self.bot.teams[teams[0]]['shortname']
except KeyError:
home = teams[0]
try:
away = self.bot.teams[teams[1]]['shortname']
except KeyError:
away = teams[1]
homegoals = "".join(tree.xpath('.//ul[contains(@class,"fixture__scorers")][1]//text()')).replace(" minutes", "")
awaygoals = "".join(tree.xpath('.//ul[contains(@class,"fixture__scorers")][2]//text()')).replace(" minutes", "")
time = "".join(tree.xpath('//span[@class="fixture__status-wrapper"]/span/span/text()'))
time = time.replace(' mins','th minute')
comp = "".join(tree.xpath('//span[contains (@class,"fixture__title")]/text()'))
statlookup = tree.xpath("//dl[contains(@class,'percentage-row')]")
homestats = awaystats = stats = ""
score = " - ".join(tree.xpath("//span[@class='fixture__block']//text()")[0:2])
for i in statlookup:
stats += f"{''.join(i.xpath('.//dt/text()'))}\n"
homestats += f"{''.join(i.xpath('.//dd[1]/span[2]/text()'))}\n"
awaystats += f"{''.join(i.xpath('.//dd[2]/span[2]/text()'))}\n"
try:
homestats += f"[{self.bot.teams[teams[0]]['subreddit'].replace('https://www.reddit.com','')}]({self.bot.teams[teams[0]]['subreddit']})"
awaystats += f"[{self.bot.teams[teams[1]]['subreddit'].replace('https://www.reddit.com','')}]({self.bot.teams[teams[1]]['subreddit']})"
stats += "Subreddit"
except KeyError:
pass
e = discord.Embed(title=f"Match Stats Card",url=link,color=0xffdf43)
e.description = ""
try:
ven = self.bot.teams[teams[0]]['stadium']
vlink = self.bot.teams[teams[0]]['stadlink']
e.description = f"**Venue:** [{ven}]({vlink})"
except:
pass
if homestats:
e.add_field(name=home,value=homestats,inline=True)
else:
print(homestats)
e.description += f"\n | |
<gh_stars>100-1000
import abc
import logging
import re
import time
from collections import defaultdict
import numpy as np
import pandas as pd
from diamond.solvers.repeated_block_diag import RepeatedBlockDiagonal
from scipy import sparse
from future.utils import iteritems
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
class GLM(object):
"""
Binary or cumulative logistic regression model with arbitrary
crossed random effects and known covariance
"""
__metaclass__ = abc.ABCMeta
def __init__(self, train_df, priors_df, copy=False, test_df=None):
r""" Initialize a diamond model
Args:
train_df (DataFrame): DataFrame to estimate the model parameters
priors_df (DataFrame): Covariance matrix to use for regularization.
Format is | group | var1 | var2 | vcov |
where group represents the grouping factor, var1 and var2
specify the row and column of the covariance matrix,
and vcov is a scalar for that entry of the covariance matrix.
Note that if var2 is NULL, vcov is
interpreted as the diagonal element of the covariance
matrix for var1
copy (boolean): Make a copy of train_df. If False, new columns
will be created and the index will be reset.
test_df (DataFrame): This is used to make predictions.
Returns:
Object (GLM)
"""
self.solver = None # solver will be set by child classes
self.train_df = train_df.copy(deep=True) if copy else train_df
self.test_df = test_df
self.priors_df = priors_df
self.variances = None
# set by self.parse_formula
self.response = None
self.main_effects = []
self.groupings = defaultdict(list)
self.grouping_factors = []
self.group_levels = {}
self.num_levels = {}
self.total_num_interactions = 0
self.num_main = 0
# set by self.create_penalty_matrix
self.sparse_inv_covs = {}
self.fit_order = []
# set by self.create_design_matrix
self.level_maps = {}
self.main_map = None
self.inter_maps = {}
self.main_design = None
self.grouping_designs = {}
# set by self.fit
self.fit_kwargs = None
self.effects = {}
self.results = None
self.results_dict = {}
self.start_time = None
self.obj_fun = None
def initialize(self, formula, **kwargs):
    r""" Get ready to fit the model by parsing the formula,
    checking priors, and creating design, penalty, and Hessian matrices.

    Args:
        formula (string): R-style formula expressing the model to fit.
            eg. :math:`y \sim 1 + x + (1 + x | group)`
    Keyword Args:
        kwargs: additional arguments to pass to fit method
    Returns:
        None
    """
    # Stash kwargs for the subclass fit() implementation and start the clock.
    self.fit_kwargs = kwargs
    self.start_time = time.time()
    # _check_priors_df relies on self.groupings, which _parse_formula
    # populates, so keep this call order.
    self._parse_formula(formula)
    self._check_priors_df()
    self._create_design_matrix()
    # The remaining builders are abstract/defined elsewhere; presumably
    # they depend on the design matrix existing first -- TODO confirm.
    self._create_response_matrix()
    self._create_penalty_matrix()
    self._create_hessians()
@abc.abstractmethod
def _create_response_matrix(self):
    """Build the model's response matrix.

    Called by :meth:`initialize`. Must be implemented by subclasses.
    """
    raise NotImplementedError
@abc.abstractmethod
def _create_hessians(self):
    """Build the Hessian matrices used by the solver.

    Called by :meth:`initialize`. Must be implemented by subclasses.
    """
    raise NotImplementedError
@abc.abstractmethod
def fit(self, formula, **kwargs):
    """Fit the model specified by ``formula``.

    Must be implemented by subclasses.

    Args:
        formula (string): R-style formula expressing the model to fit
    Keyword Args:
        kwargs: solver-specific fitting options
    """
    raise NotImplementedError
@abc.abstractmethod
def _create_output_dict(self):
    """Package the fitted results for output.

    Must be implemented by subclasses.
    """
    raise NotImplementedError
def _check_priors_df(self):
    """Run simple validations on priors data frame.

    This method runs a number of sanity checks on the priors data frame:
    - ensure that the expected columns are present
    - ensure that all rows in priors_df are consistent with the formula
    - ensure that all random effect variables at least have a variance

    Requires :meth:`_parse_formula` to have populated ``self.groupings``.

    Args:
        None
    Returns:
        None
    Raises:
        AssertionError: if any of the checks above fails
    """
    # Append NaN to each grouping's variable list so that diagonal-only
    # entries (var2 == NaN) can be expressed as [var1, nan] pairs below.
    _groupings = {g: f + [np.nan] for g, f in iteritems(self.groupings)}
    # All legal (var1, var2) pairs per group: every strict upper-triangle
    # pair, including [var, nan] (a diagonal/variance entry). The
    # isinstance(v, float) test skips the appended NaN as a row index.
    allowed_covs = {
        g: [[v, w] for i, v in enumerate(f)
            for j, w in enumerate(f)
            if j > i and not isinstance(v, float)]
        for g, f in iteritems(_groupings)
    }
    # Every random-effect variable must at least have a variance row.
    required_covs = {
        g: [[v, np.nan] for i, v in enumerate(f)
            if not isinstance(v, float)] for g, f in iteritems(_groupings)
    }
    # Example frame embedded in error messages to show the expected shape.
    example_priors_data = {
        "group": [g for g, f in iteritems(allowed_covs) for _ in f],
        "var1": [j[0] for _, f in iteritems(allowed_covs) for j in f],
        "var2": [j[1] for _, f in iteritems(allowed_covs) for j in f],
        "vcov": [0.1 for _, f in iteritems(allowed_covs) for _ in f]
    }
    example_priors_df = pd.DataFrame.from_dict(example_priors_data)
    expected_cols = set(["group", "var1", "var2", "vcov"])
    actual_cols = set(self.priors_df.columns)
    if len(expected_cols - actual_cols) != 0:
        raise AssertionError("""
        priors_df is expected to have the following columns:
        | group | var1 | var2 | vcov |
        """)
    # check that all rows in self.priors_df are valid
    for row in self.priors_df.iterrows():
        group = row[1].group
        var1 = row[1].var1
        var2 = row[1].var2
        # we need to standardize nan values
        if str(var2) in ['nan', 'None', 'null']:
            var2 = np.nan
        var_pair = [var1, var2]
        # NOTE(review): np.array_equal coerces mixed str/float pairs to
        # string arrays, so NaN compares as the string 'nan' -- this is
        # what makes diagonal-row matching work; confirm if numpy's
        # casting rules change.
        num_matches = sum([np.array_equal(var_pair, p)
                           for p in allowed_covs[group]])
        if num_matches != 1:
            raise AssertionError("""There is a row in your priors_df which is not expected.
            Unexpected row: {}
            A valid priors_df looks something like:
            {}
            """.format([group] + var_pair, example_priors_df))
        try:
            # Mark this variable's variance requirement as satisfied.
            required_covs[group].remove(var_pair)
        except ValueError:
            # thrown when var_pair not in required_covs[group]
            pass
    # loop through the required_covs and make sure none are remaining
    remaining_required_covs = sum([len(f) for _, f in iteritems(required_covs)])
    if remaining_required_covs > 0:
        raise AssertionError("""Priors_df is missing some required rows.
        If you want an unregularized random effect, include it as a fixed
        effect instead.
        The missing rows are: {}
        """.format(required_covs))
@staticmethod
def _check_formula(formula):
"""
Check that the formula contains all necessary ingredients,
such as a response and at least one group
Args:
formula (string): R-style formula expressing the model to fit.
eg. "y ~ 1 + x + (1 + x | group)"
Returns:
None
"""
valid_formula_str = """
A valid formula looks like:
response ~ 1 + feature1 + feature2 + ... +
(1 + feature1 + feature2 + ... | doctor_id)
"""
if "~" not in formula:
msg = "Formula missing '~'. You need a response. {}"
raise AssertionError(msg.format(valid_formula_str))
if "|" not in formula:
msg = "Formula missing '|'. You need at least 1 group. {}"
raise AssertionError(msg.format(valid_formula_str))
def _parse_formula(self, formula):
"""
Args:
formula (string): R-style formula expressing the model to fit.
eg. "y ~ 1 + x + (1 + x | group)"
Returns:
None
"""
# strip all newlines, tabs, and spaces
formula = re.sub(r'[\n\t ]', '', formula)
# split the response from the formula terms
self.response = formula.split("~")[0]
terms = formula.split("~")[1:][0]
interactions = re.findall(r'\(([A-Za-z0-9_|\+]+)\)', terms)
# parse the interactions. these are terms like (1|doctor_id)
for i in interactions:
# remove interactions from terms list
terms = terms.replace("(%s)" % i, "")
i_terms = i.split('|')[0]
i_group = i.split('|')[1:][0]
for i_term in i_terms.split("+"):
if i_term == "1":
self.groupings[i_group].append("intercept")
elif i_term != '':
self.groupings[i_group].append(i_term)
self.group_levels[i_group] = self.train_df[i_group].unique()
self.grouping_factors.append(i_group)
for g in self.grouping_factors:
self.num_levels[g] = self.train_df[g].nunique()
self.total_num_interactions += self.num_levels[g]
# parse the main terms
for m in terms.split("+"):
if m == "1":
self.main_effects.append("intercept")
elif m != '':
self.main_effects.append(m)
self.num_main = len(self.main_effects)
def _create_penalty_matrix(self):
    """
    Take the provided covariance matrices and transform them into an L2
    penalty (inverse-covariance) matrix per grouping factor.

    NOTE: ``self.variances`` aliases ``self.priors_df``, so the
    '(Intercept)' renaming below also mutates the caller's priors frame.
    Args:
        None
    Returns:
        None
    """
    self.variances = self.priors_df
    # R's lme4 calls the intercept "(Intercept)"; normalize the name.
    # pandas removed DataFrame.ix; use label-based .loc instead.
    self.variances.loc[self.variances['var1'] == '(Intercept)', 'var1'] = 'intercept'
    LOGGER.info("creating covariance matrix")
    # If "grp" is a column in the variances (lme4-style), rename it to "group".
    self.variances.rename(columns={'grp': 'group'}, inplace=True)
    inv_covs = {}
    for g in self.groupings.keys():
        n = len(self.groupings[g])
        cov_mat = np.zeros((n, n))
        var_grp = self.variances[self.variances.group == g]
        if len(var_grp) > 0:  # if no priors, then leave cov_mat as zeros
            for row in var_grp[['var1', 'var2', 'vcov']].iterrows():
                if str(row[1]['var2']) in ['nan', 'None', 'null']:
                    # Null var2 means this is a diagonal (variance) entry.
                    i = self.groupings[g].index(row[1]['var1'])
                    cov_mat[i, i] = row[1]['vcov']
                else:
                    # Off-diagonal covariance: fill symmetrically.
                    i = self.groupings[g].index(row[1]['var1'])
                    j = self.groupings[g].index(row[1]['var2'])
                    cov_mat[i, j] = row[1]['vcov']
                    cov_mat[j, i] = row[1]['vcov']
        # Penalty is the inverse covariance; numpy raises LinAlgError
        # here if cov_mat is singular (e.g. no priors for this group).
        inv_covs[g] = np.linalg.inv(cov_mat)
        self.sparse_inv_covs[g] = \
            RepeatedBlockDiagonal(inv_covs[g], self.num_levels[g])
    # Main effects are unpenalized.
    self.sparse_inv_covs['main'] = None
def _create_main_design(self, **kwargs):
r"""
Create design matrix for main effects
Keyword Args:
* *df* (``DataFrame``). specify a new dataframe to create
design matrix from
Returns:
array_like: design matrix in sparse CSR format
"""
df = kwargs.get('df', self.train_df)
df.reset_index(drop=True, inplace=True)
df['row_index'] = df.index
df['intercept'] = 1.0 # assume intercept is always included
id_cols = ['row_index']
melted_df = pd.melt(df[id_cols + self.main_effects], id_cols)
melted_df = melted_df.merge(self.main_map, on='variable')
melted_df['col_index'] = melted_df['main_idx']
row = melted_df.row_index
col = melted_df.col_index
data = melted_df.value
return sparse.coo_matrix((data, (row, col)),
shape=(max(row) + 1, max(col) + 1)).tocsr()
def _create_inter_design(self, g, **kwargs):
r"""
Create random effects design matrix for grouping factor g
This is straightforward when you create the matrix using the training
DataFrame
But a new DataFrame can have new levels of g which did not exist in
training DF
For these levels, the random coefficients are set to zero
But as a practical matter, it's easier to zero out the values of the
predictors
here than it is to modify the fitted coefficient vector
| |
tick2On=True,
labelcolor=label_color,
color=color)
cbar.set_label(label, size=label_size, color=label_color)
return cbar
except:
report_err(comment='Could not set color bar; please set manually!')
def get_cmap(colors, n=None, r=False, start=0, stop=1, **kwargs):
    """
    Converts a list of colors into a color map or discretizes a registered cmap
    http://matplotlib.org/examples/color/colormaps_reference.html
    http://www.ncl.ucar.edu/Document/Graphics/color_table_gallery.shtml
    :param colors: (list/str) - a list containing RGB or Python/NCL cmap name
    :param n: (int) - number of colors in cmap
    :param r: (boolean) - reverse colormap
    :param start: (scalar) - value to start on the cmap between 0 and 1
    :param stop: (scalar) - value to end on the cmap between 0 and 1
    :param kwargs: (kwargs) - additional keyword arguments
    :return cmap: (mpl.cmap) - color map
    """
    # A trailing "_r" on a cmap NAME means "reversed". Previously any
    # "_r" substring triggered the strip (mangling names such as
    # "gist_rainbow"), and a bare except hid TypeErrors for non-strings.
    if isinstance(colors, str) and colors.endswith('_r'):
        colors = colors[:-2]
        r = True
    if isinstance(colors, str) and colors in NCL_CMAP_NAMES:
        if r:
            color_list = get_color_list(NCL_CMAPS[colors].values[0])[::-1]
            cmap = LinearSegmentedColormap.from_list('cmap',
                                                     colors=color_list)
        else:
            cmap = NCL_CMAPS[colors].values[0]
        if n is None:
            n = NCL_CMAPS[colors].values[1]
    else:
        if isinstance(colors, str):
            if r:
                colors += '_r'
            if n is None:
                n = 10
            cmap = plt.get_cmap(colors, **kwargs)
        elif isinstance(colors, mpl.colors.LinearSegmentedColormap):
            return colors
        else:
            if r:
                colors = colors[::-1]
            if n is None and len(colors) > 2:
                n = len(colors)
            elif n is None:
                n = 10
            if not isinstance(colors[0], str):
                # Assume 0-255 RGB tuples when any channel exceeds 1 and
                # rescale to 0-1. Build a new list rather than mutating
                # the caller's list in place (previous behavior).
                if (np.array(colors) > 1).any():
                    colors = [np.array(tup) / 255. for tup in colors]
            cmap = LinearSegmentedColormap.from_list('mycmap', colors=colors,
                                                     **kwargs)
    # Sample the cmap over [start, stop] and rebuild with n colors.
    colors = cmap(np.linspace(start, stop, cmap.N))
    return LinearSegmentedColormap.from_list('mycmap', colors=colors, N=n)
def get_color_list(cmap, hexcodes=False, **kwargs):
    """
    Converts a registered colormap into a list of RGB tuples or hexcodes
    :param cmap: (mpl.cmap/str) - actual colormap or name of color
    :param hexcodes: (boolean) - whether to return a list of hexcodes
    :param kwargs: (kwargs) - additional keyword arguments
    :return color_list: (list) - list of RGB tuples or hexcodes
    """
    # Resolve a name to an actual colormap (NCL registry first).
    if isinstance(cmap, str):
        if cmap in NCL_CMAP_NAMES:
            cmap = NCL_CMAPS[cmap].values[0]
        else:
            cmap = plt.get_cmap(cmap)
    if hexcodes:
        return [mpl.colors.rgb2hex(cmap(idx)[:3]) for idx in range(cmap.N)]
    return [cmap(idx)[:3] for idx in range(cmap.N)]
def set_latlons(ax,
                color=COLORS['black'],
                alpha=ALPHAS['semi opaque'],
                size=4,
                top=False,
                bottom=True,
                left=True,
                right=False,
                lat_labels='auto',
                lon_labels='auto',
                central_longitude=0,
                **kwargs):
    """
    Set lat lon labels for a map.
    :param ax: (mpl.axes) - plot axis
    :param color: (scalar) - color of lat lon labels
    :param alpha: (scalar/str) - transparency of lat lon labels
    :param size: (scalar) - size of lat lon labels
    :param top: (boolean) - whether to show top lon labels
    :param bottom: (boolean) - whether to show bottom lon labels
    :param left: (boolean) - whether to show left lat labels
    :param right: (boolean) - whether to show right lat labels
    :param lat_labels: (array) - list of latitudes to show on map
    :param lon_labels: (array) - list of longitudes to show on map
    :param central_longitude: (scalar) - central longitude of the projection
    :param kwargs: (kwargs) - additional keyword arguments
    :return gl: (ax.gridlines) - gridlines
    """
    from cartopy.mpl.gridliner import (LONGITUDE_FORMATTER,
                                       LATITUDE_FORMATTER
                                       )
    size = scale_it(ax, size, 1, exp=True)
    # Grow label size / shrink line width with the number of subplots.
    geom = plt.getp(plt.getp(ax, 'subplotspec'), 'geometry')
    nplots = geom[0] * geom[1]
    size += nplots
    linewidth = np.log(nplots + 1) / 85 + 0.35
    gl = ax.gridlines(draw_labels=True,
                      linewidth=linewidth,
                      color=COLORS['black'],
                      alpha=ALPHAS['translucid'],
                      linestyle=(0, (16, 4)), **kwargs)  # dash length, spacing
    # `is not 'auto'` compared identity, not equality (and is a
    # SyntaxWarning on modern Python); use != instead.
    if lon_labels is not None and lon_labels != 'auto':
        gl.xlocator = mticker.FixedLocator(lon_labels)
    elif not lon_labels:
        gl.xlabels_top = False
        gl.xlabels_bottom = False
    if lat_labels is not None and lat_labels != 'auto':
        gl.ylocator = mticker.FixedLocator(lat_labels)
    elif not lat_labels:
        gl.ylabels_left = False
        gl.ylabels_right = False
    else:
        if central_longitude != 0:
            # Shift the default 60-degree graticule to the projection's
            # central longitude and drop the duplicate -180 line.
            base_range = np.arange(-360, 420, 60)
            base_range -= central_longitude
            base_range = np.delete(base_range,
                                   np.where(base_range == -180)[0])
            gl.xlocator = mticker.FixedLocator(base_range)
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    # Longitude labels live on top/bottom, latitude labels on left/right.
    # (Previously bottom/left were assigned to the nonexistent attributes
    # ylabels_bottom / xlabels_left, so those flags had no effect.)
    gl.xlabels_top = top
    gl.xlabels_bottom = bottom
    gl.ylabels_left = left
    gl.ylabels_right = right
    gl.xlabel_style = {'size': size, 'color': color, 'alpha': alpha}
    gl.ylabel_style = {'size': size, 'color': color, 'alpha': alpha}
    return gl
def set_figtext(ax, text, size=12, pad=0,
                loc='bottom center',
                color=COLORS['black'],
                alpha=ALPHAS['translucent'],
                fha=None, fva=None, **kwargs):
    """
    Add text to the side of a figure.
    loc choices - center, center bottom, center left, center right,
    upper left, upper right, bottom left, bottom right.
    :param ax: (mpl.axes) - plot axis
    :param text: (str) - text to put on the figure
    :param loc: (str) - location of the text
    :param size: (int) - size in points
    :param pad: (scalar) - extra offset pushing the text toward the figure
    :param color: (str) - color of text
    :param alpha: (scalar/str) - transparency of text
    :param fha: (str) - force the horizontal alignment to be input str
    :param fva: (str) - force the vertical alignment to be input str
    :param kwargs: (kwargs) - additional keyword arguments
    """
    # Scale text size and padding relative to the axes dimensions.
    size = scale_it(ax, size, 1, exp=True)
    pad = scale_it(ax, pad, 0.005, exp=True)
    # get_loc_keywords presumably splits loc into keywords such as
    # ['lower', 'center'] -- TODO confirm against its definition; if it
    # can return none of lower/upper/left/right/center, x/y/ha below
    # would be unbound.
    loc_keywords = get_loc_keywords(loc)
    # All x/y values below are hand-tuned figure-fraction coordinates.
    if 'lower' in loc_keywords:
        if 'center' in loc_keywords:  # lower center
            ha = 'center'
            va = 'top'
            x = 0.5
            y = -0.09 + pad
        elif 'right' in loc_keywords:
            ha = 'left'
            if 'corner' in loc_keywords:  # lower corner right
                va = 'center'
                x = 0.925
                y = -0.04 + pad
            else:  # lower right
                va = 'bottom'
                x = 0.925 + pad
                y = 0.125
        elif 'left' in loc_keywords:
            ha = 'right'
            if 'corner' in loc_keywords:  # lower corner left
                va = 'center'
                x = 0.855
                y = -0.04 + pad
            else:  # lower left
                va = 'bottom'
                x = 0.05
                y = 0.125
    elif 'upper' in loc_keywords:
        if 'center' in loc_keywords:  # upper center
            ha = 'center'
            va = 'center'
            x = 0.5
            y = 0.975 - pad
        elif 'right' in loc_keywords:  # upper right
            ha = 'left'
            if 'corner' in loc_keywords:  # upper corner right
                va = 'center'
                x = 0.925
                y = 0.975 - pad
            else:  # upper right (side)
                va = 'top'
                x = 0.925 + pad
                y = 0.9
        elif 'left' in loc_keywords:  # upper left
            ha = 'right'
            if 'corner' in loc_keywords:  # upper corner left
                va = 'center'
                x = 0.855
                y = 0.975 - pad
            else:  # upper left (side)
                va = 'top'
                x = 0.05
                y = 0.9
    else:  # vertically centered placements
        va = 'center'
        if 'right' in loc_keywords:  # center right
            x = 0.925 + pad
            y = 0.5
            ha = 'left'
        elif 'left' in loc_keywords:  # center left
            x = 0.05
            y = 0.5
            ha = 'right'
        else:  # dead center
            x = 0.5
            y = 0.5
            ha = 'center'
    # Explicit alignment overrides win over the loc-derived values.
    if fva is not None:
        va = fva
    if fha is not None:
        ha = fha
    plt.figtext(x, y, text,
                ha=ha, va=va,
                wrap=True,
                size=size,
                color=color,
                alpha=alpha,
                **kwargs)
def set_axtext(ax, text, loc='bottom center', xy=None,
size=12, color=COLORS['black'],
xpad=None, ypad=None,
alpha=ALPHAS['translucent'],
fha=None, fva=None,
**kwargs):
"""
:param ax: (mpl.axes) - plot axis
:param text: (str) - text to put on the subplot
:param loc: (str) - location of the text
:param xy: (tup) - coordinate to set text
:param size: (int) - size in points
:param color: (str) - color of text
:param xpad: (scalar) - padding in the x axis direction
:param ypad: (scalar) - padding in the y axis direction
:param alpha: (scalar/str) - transparency of text
:param fha: (boolean) - force the horizontal alignment to be input str
:param fva: (boolean) - force the vertical alignment to be input str
:param kwargs: (kwargs) - additional keyword arguments
"""
size = scale_it(ax, size, 1, exp=True)
if xy is None:
loc_keywords = get_loc_keywords(loc)
xtick_diff = np.average(np.diff(plt.getp(ax, 'xticks')))
ytick_diff = np.average(np.diff(plt.getp(ax, 'yticks')))
if ax.get_xlim()[0] > 700000:
if 'lower' in loc_keywords:
loc_keywords.remove('lower')
va = 'bottom'
ha = ''.join(loc_keywords)
if ha is 'left':
xy = (ax.get_xlim()[0] + xtick_diff * 0.025,
ax.get_ylim()[0] + ytick_diff * 0.025)
elif ha is 'right':
xy = (ax.get_xlim()[1] - xtick_diff * 0.025,
ax.get_ylim()[0] + ytick_diff * 0.025)
else:
xy = ((ax.get_xlim()[0] + ax.get_xlim()[1]) / 2,
ax.get_ylim()[0] + ytick_diff * 0.025)
elif 'upper' in loc_keywords:
loc_keywords.remove('upper')
va = 'top'
ha = ''.join(loc_keywords)
if ha is 'left':
xy = (ax.get_xlim()[0] + xtick_diff * 0.025,
ax.get_ylim()[1])
elif ha is 'right':
xy = (ax.get_xlim()[1] - xtick_diff * 0.025,
ax.get_ylim()[1])
else:
xy = ((ax.get_xlim()[0] + ax.get_xlim()[1]) / 2,
ax.get_ylim()[1])
else:
loc_keywords.remove('center')
va = 'center'
ha = ''.join(loc_keywords)
if ha is 'left':
xy = (ax.get_xlim()[0] + xtick_diff * 0.025,
ax.get_ylim()[1] / 2)
elif ha is 'right':
xy = (ax.get_xlim()[1] - xtick_diff * 0.025,
ax.get_ylim()[1] / 2)
else:
ha = 'center'
xy = ((ax.get_xlim()[0] + ax.get_xlim()[1]) / 2,
ax.get_ylim()[1] / 2)
| |
<gh_stars>1-10
'''
Copyright (C) 2018 CG Cookie
http://cgcookie.com
<EMAIL>
Created by <NAME>, <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from .options import retopoflow_version
# sync help texts with https://github.com/CGCookie/retopoflow-docs (http://docs.retopoflow.com/)
# https://wincent.com/wiki/Unicode_representations_of_modifier_keys
help_firsttime = '''
# Welcome to RetopoFlow {version}!
RetopoFlow is an add-on for Blender that brings together a set of retopology tools within a custom Blender mode to enable you to work more quickly, efficiently, and in a more artist-friendly manner.
The RF tools, which are specifically designed for retopology, create a complete workflow in Blender without the need for additional software.
The RetopoFlow tools automatically generate geometry by drawing on an existing surface, snapping the new mesh to the source surface at all times, meaning you never have to worry about your mesh conforming to the original model---no Shrinkwrap modifier required!
Additionally, all mesh generation is quad-based (except for PolyPen).
## Changelog
Below is a summary of the changes made.
A full summary is available on [Blender Market](https://blendermarket.com/products/retopoflow).
### Changes in 2.0.3
- Hiding RF buttons in 3D View panel to improve overall performance when Region Overlap is disabled
- Visualizing target geometry counts in bottom right corner
- Improved target rendering by constraining normal offset
- Only showing "small clip start" alert once per Blender run rather than once per RetopoFlow run
- By default, the options for unselected tools are hidden (can disable Options > General > Tool Options > Auto Hide Options).
- Overall stability improvements
### Minor Changes from Version 2.0.0
- Can navigate to all help documents through help system.
(Click [All Help Documents](All Help Documents) button below or press `Shift+F1`)
- Fixed bug where navigation broke with internationalization settings
- Improved many UX/UI issues.
For example, now the RetopoFlow panel will explicitly state whether a new target will be created and what meshes are acting as sources.
For another example, RetopoFlow will now gracefully handle registration failures (usually happening when Blender is installed through package manager).
- Squashed many hard-to-find bugs in Loops, PolyPen, Patches, Strokes, Contours
- Better error handling with shader compilation.
- Fixed critical bug with framework.
### Major Changes from Version 1.x
What you see behind this message window is a complete rewrite of the code base.
RetopoFlow 2.x now works like any other Blender mode, like Edit Mode or Sculpt Mode, but it will also feel distinct.
We focused our 2.x development on two main items: stability and user experience.
With an established and solid framework, we will focus more on features in future releases.
- Everything runs within the RF Mode; no more separation of tools!
In fact, the shortcut keys `Q`, `W`, `E`, `R`, `T`, `Y`, `U`, and `I` will switch quickly between the tools.
- Each tool has been simplified to perform its job well.
- All tools use the current selection for their context.
For example, PolyStrips can edit any strip of quads by simply selecting them.
- The selected and active mesh is the Target Mesh, and any other visible meshes are Source Meshes.
- Many options and configurations are sticky, which means that some settings will remain even if you leave RF Mode or quit Blender.
- All tools have similar and consistent visualization, although they each will have their own custom widget (ex: circle cursor in Tweak) and annotations (ex: edge count in Contours).
- Mirroring (X, Y, and/or Z) is now visualized by overlaying a color on all the source meshes.
- Every change automatically commits to the target mesh; geometry is created in real-time!
No more lost work from crashing.
- Auto saves will trigger!
- Undo and redo are universally available within RF Mode.
Press `Ctrl+Z` to roll back any change, or `Ctrl+Shift+Z` to redo.
- The new Strokes tool extends your target mesh with a simple selection and stroke.
## Feedback
We want to know how RetopoFlow has benefited you in your work.
Please consider doing the following:
- Give us a rating with comments on the Blender Market.
(requires purchasing a copy through Blender Market)
- Purchase a copy of RetopoFlow on the Blender Market to help fund future developments.
- Consider donating to our drink funds :)
We have worked hard to make this as production-ready as possible.
We focused on stability and bug handling in addition to new features, improving overall speed, and making RetopoFlow easier to use.
However, if you find a bug or a missing feature, please let us know so that we can fix them!
Be sure to submit screenshots, .blend files, and/or instructions on reproducing the bug to our bug tracker by clicking the "Report Issue" button or visiting [https://github.com/CGCookie/retopoflow/issues](https://github.com/CGCookie/retopoflow/issues).
We have added buttons to open the issue tracker in your default browser and to save screenshots of Blender.

## Known Issues / Future Work
Below is a list of known issues that are currently being addressed.
- Source meshes with very high poly count can cause a delay and stutter at start-up time.
- A target mesh with a high poly count can cause slowness in some tools.
- RF runs _very_ slowly (<1.0 FPS) on a few rare machines.
- Patches supports only rudimentary fills.
- RetopoFlow does not work with Blender 2.80 (beta).
## Final Words
We thank you for using RetopoFlow, and we look forward to hearing back from you!
Cheers!
<br>
---The CG Cookie Tool Development Team
'''.format(version=retopoflow_version)
help_quickstart = '''
RetopoFlow 2.x Quick Start Guide
================================
We wrote this guide to help you get started as quickly as possible with the new RetopoFlow 2.x.
More detailed help is available by pressing `F1` after you start RetopoFlow.
TL;DR
-----
==> When you are retopologizing for the first time, deselect all objects and click one of the RetopoFlow tools.
==> When continuing work on a previous retopology session, select the target object, and click one of the RetopoFlow tools.
Terminology
-----------
Source Object(s)
: The original object(s) that you are re-creating. These meshes typically have a high polygon count with poor topology and edge flow (ex: result of Dyntopo in Sculpt Mode).
Target Object
: The new object that stores the retopologized surface. This mesh typically has a low polygon count with good topology and edge flow.
Target and Source Objects
-------------------------
In RetopoFlow 1.x you were required to select the source and target objects explicitly, but in RetopoFlow 2.x the source and target objects are determined by RetopoFlow based on which mesh objects are selected, active, and visible.
The target object is either:
- the active mesh object if it is also selected and visible (Object Mode)
- the mesh object currently being edited (Edit Mode)
- otherwise, a newly created mesh object
Any mesh object that is visible and not the target object is considered a source object.
This means that you can hide or move objects to hidden layers to change which source objects will be retopologized.
Note: only newly created or edited target geometry will snap to the source.
RetopoFlow Mode
---------------
Notes about earlier version: the tools in RetopoFlow 1.x were set of disjointed tools, where you would need to quit one tool in order to start another.
Also, because we wrote RF 1.x tools separately, the visualizations and settings were not consistent.
Furthermore, the only indication that a tool was running in RetopoFlow 1.x was a small "Click for Help" button in the top-right corner, which is easily missed.
In RetopoFlow 2.x, we completely rewrote the framework so that RF acts like any other Blender Mode (like Edit Mode, Sculpt Mode, Vertex Paint Mode).
Choosing one of the tools from the RetopoFlow panel will start RetopoFlow Mode with the chosen tool selected.
When RetopoFlow Mode is enabled, all parts of Blender outside the 3D view will be darkened (and | |
<reponame>vadian/climate
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ocw import dataset as ds
import datetime
import numpy as np
import numpy.ma as ma
import scipy.interpolate
import scipy.ndimage
from scipy.ndimage import map_coordinates
import netCDF4
import logging
logger = logging.getLogger(__name__)
def temporal_rebin(target_dataset, temporal_resolution):
    """ Rebin a Dataset to a new temporal resolution

    :param target_dataset: Dataset object that needs temporal rebinned
    :type target_dataset: :class:`dataset.Dataset`
    :param temporal_resolution: The new temporal bin size
    :type temporal_resolution: :class:`datetime.timedelta`

    :returns: A new temporally rebinned Dataset
    :rtype: :class:`dataset.Dataset`
    """
    # Translate the timedelta into the string code understood by
    # _rcmes_calc_average_on_new_time_unit_K().
    days = temporal_resolution.days
    if days == 1:
        time_unit = 'daily'
    elif 1 < days <= 31:
        time_unit = 'monthly'
    elif 31 < days <= 366:
        time_unit = 'annual'
    else:
        time_unit = 'full'

    # The averaging helper expects a masked array view of the values.
    masked_values = target_dataset.values.view(ma.MaskedArray)
    new_values, new_dates = _rcmes_calc_average_on_new_time_unit_K(
        masked_values, target_dataset.times, time_unit)

    return ds.Dataset(target_dataset.lats,
                      target_dataset.lons,
                      np.array(new_dates),
                      new_values,
                      target_dataset.variable,
                      target_dataset.name)
def spatial_regrid(target_dataset, new_latitudes, new_longitudes):
    """ Regrid a Dataset using the new latitudes and longitudes

    :param target_dataset: Dataset object that needs spatially regridded
    :type target_dataset: :class:`dataset.Dataset`
    :param new_latitudes: Array of latitudes
    :type new_latitudes: :class:`numpy.ndarray`
    :param new_longitudes: Array of longitudes
    :type new_longitudes: :class:`numpy.ndarray`

    :returns: A new spatially regridded Dataset
    :rtype: :class:`dataset.Dataset`
    """
    num_times = len(target_dataset.times)
    # Output buffer shaped (times, new latitudes, new longitudes).
    regridded_values = ma.zeros((num_times,
                                 len(new_latitudes),
                                 len(new_longitudes)))

    # np.meshgrid() takes inputs (x, y) and returns arrays shaped
    # (rows=lat, cols=lon); hence lons are passed first and lats second.
    old_lons, old_lats = np.meshgrid(target_dataset.lons, target_dataset.lats)
    grid_lons, grid_lats = np.meshgrid(new_longitudes, new_latitudes)

    # The underlying regridder works on masked arrays.
    old_lats = ma.array(old_lats)
    old_lons = ma.array(old_lons)
    grid_lats = ma.array(grid_lats)
    grid_lons = ma.array(grid_lons)
    source_values = ma.array(target_dataset.values)

    # Regrid one time slice at a time; lat/lon axes change, time is constant.
    for t in range(num_times):
        regridded_values[t] = _rcmes_spatial_regrid(source_values[t],
                                                    old_lats,
                                                    old_lons,
                                                    grid_lats,
                                                    grid_lons)

    return ds.Dataset(new_latitudes,
                      new_longitudes,
                      target_dataset.times,
                      regridded_values,
                      target_dataset.variable,
                      target_dataset.name)
def ensemble(datasets):
    """
    Generate a single dataset which is the mean of the input datasets

    :param datasets: Datasets to be used to compose the ensemble dataset from.
        All Datasets must be the same shape.
    :type datasets: :class:`list` of :class:`dataset.Dataset`

    :returns: New Dataset with a name of 'Dataset Ensemble'
    :rtype: :class:`dataset.Dataset`
    """
    # All members must share one shape for an element-wise mean to make sense.
    _check_dataset_shapes(datasets)

    # Element-wise mean across the ensemble members.
    mean_values = np.mean([member.values for member in datasets], axis=0)

    # Lats/lons/times are taken from the first member (all are equal-shaped).
    template = datasets[0]
    return ds.Dataset(template.lats,
                      template.lons,
                      template.times,
                      mean_values,
                      name="Dataset Ensemble")
def subset(subregion, target_dataset):
    '''Subset given dataset(s) with subregion information

    :param subregion: The Bounds with which to subset the target Dataset.
    :type subregion: :class:`dataset.Bounds`
    :param target_dataset: The Dataset object to subset.
    :type target_dataset: :class:`dataset.Dataset`

    :returns: The subset-ed Dataset object
    :rtype: :class:`dataset.Dataset`

    :raises: ValueError
    '''
    # Raises ValueError when the subregion is not fully inside the dataset.
    _are_bounds_contained_by_dataset(subregion, target_dataset)

    # Inclusive index bounds of the subregion within the dataset arrays,
    # converted to slice objects (+1 because slice ends are exclusive).
    indices = _get_subregion_slice_indices(subregion, target_dataset)
    lat_slice = slice(indices["lat_start"], indices["lat_end"] + 1)
    lon_slice = slice(indices["lon_start"], indices["lon_end"] + 1)
    time_slice = slice(indices["time_start"], indices["time_end"] + 1)

    return ds.Dataset(
        target_dataset.lats[lat_slice],
        target_dataset.lons[lon_slice],
        target_dataset.times[time_slice],
        # values are indexed (time, lat, lon)
        target_dataset.values[time_slice, lat_slice, lon_slice],
        target_dataset.variable,
        target_dataset.name
    )
def safe_subset(subregion, target_dataset):
    '''Safely subset given dataset with subregion information

    A standard subset requires that the provided subregion be entirely
    contained within the datasets bounds. `safe_subset` returns the overlap
    of the subregion and dataset without returning an error.

    :param subregion: The Bounds with which to subset the target Dataset.
        NOTE: the bounds are clamped in place, mutating the caller's object.
    :type subregion: :class:`dataset.Bounds`
    :param target_dataset: The Dataset object to subset.
    :type target_dataset: :class:`dataset.Dataset`

    :returns: The subset-ed Dataset object
    :rtype: :class:`dataset.Dataset`
    '''
    lat_min, lat_max, lon_min, lon_max = target_dataset.spatial_boundaries()
    start, end = target_dataset.time_range()

    # Clamp the requested bounds to the dataset's spatial/temporal extent
    # before delegating to the strict subset().
    subregion.lat_min = max(subregion.lat_min, lat_min)
    subregion.lat_max = min(subregion.lat_max, lat_max)
    subregion.lon_min = max(subregion.lon_min, lon_min)
    subregion.lon_max = min(subregion.lon_max, lon_max)
    subregion.start = max(subregion.start, start)
    subregion.end = min(subregion.end, end)

    return subset(subregion, target_dataset)
def normalize_dataset_datetimes(dataset, timestep):
    ''' Normalize Dataset datetime values.

    Force daily to an hour time value of 00:00:00.
    Force monthly data to the first of the month at midnight.

    :param dataset: The Dataset which will have its time value normalized.
    :type dataset: :class:`dataset.Dataset`
    :param timestep: The timestep of the Dataset's values. Either 'daily' or
        'monthly'.
    :type timestep: :mod:`string`

    :returns: A new Dataset with normalized datetime values.
    :rtype: :class:`dataset.Dataset`
    '''
    # Only the time axis changes; every other attribute is carried over.
    normalized_times = np.array(
        _rcmes_normalize_datetimes(dataset.times, timestep))
    return ds.Dataset(
        dataset.lats,
        dataset.lons,
        normalized_times,
        dataset.values,
        dataset.variable,
        dataset.name
    )
def write_netcdf(dataset, path, compress=True):
    ''' Write a dataset to a NetCDF file.

    :param dataset: The dataset to write.
    :type dataset: :class:`dataset.Dataset`
    :param path: The output file path.
    :type path: :mod:`string`
    :param compress: Optional. Whether to create the variables with zlib
        compression. Defaults to True.
    :type compress: :class:`bool`
    '''
    out_file = netCDF4.Dataset(path, 'w', format='NETCDF4')
    # BUG FIX: wrap the writes in try/finally so the file handle is closed
    # even when a write raises, instead of leaking it.
    try:
        # Set attribute lengths
        lat_len = len(dataset.lats)
        lon_len = len(dataset.lons)
        time_len = len(dataset.times)

        # Create attribute dimensions (return values were unused locals).
        out_file.createDimension('lat', lat_len)
        out_file.createDimension('lon', lon_len)
        out_file.createDimension('time', time_len)

        # Create variables
        lats = out_file.createVariable('lat', 'f8', ('lat',), zlib=compress)
        lons = out_file.createVariable('lon', 'f8', ('lon',), zlib=compress)
        times = out_file.createVariable('time', 'f8', ('time',), zlib=compress)

        var_name = dataset.variable if dataset.variable else 'var'
        values = out_file.createVariable(var_name,
                                         'f8',
                                         ('time', 'lat', 'lon'),
                                         zlib=compress)

        # Set the time variable units
        # We don't deal with hourly/minutely/anything-less-than-a-day data so
        # we can safely stick with a 'days since' offset here. Note that the
        # NetCDF4 helper date2num doesn't support 'months' or 'years' instead
        # of days.
        times.units = "days since %s" % dataset.times[0]

        # Store the dataset's values
        lats[:] = dataset.lats
        lons[:] = dataset.lons
        times[:] = netCDF4.date2num(dataset.times, times.units)
        values[:] = dataset.values
    finally:
        out_file.close()
def _rcmes_normalize_datetimes(datetimes, timestep):
    """ Normalize Dataset datetime values.

    Force daily data to a time value of 00:00:00 and monthly data to the
    first of the month at midnight.

    :param datetimes: The datetimes to normalize.
    :type datetimes: List of `datetime` values.
    :param timestep: The flag for how to normalize the datetimes. Either
        'daily' or 'monthly' (case-insensitive). Any other value results in
        an empty list.
    :type timestep: String
    """
    step = timestep.lower()
    normalized = []

    if step == 'monthly':
        for value in datetimes:
            if value.day != 1:
                # Snap to the first of the month at midnight.
                value = datetime.datetime(value.year, value.month, 1)
            normalized.append(value)
    elif step == 'daily':
        for value in datetimes:
            if value.hour != 0 or value.minute != 0 or value.second != 0:
                # Drop the time-of-day component entirely.
                value = datetime.datetime(value.year, value.month, value.day)
            normalized.append(value)

    return normalized
def _rcmes_spatial_regrid(spatial_values, lat, lon, lat2, lon2, order=1):
'''
Spatial regrid from one set of lat,lon values onto a new set (lat2,lon2)
:param spatial_values: Values in a spatial grid that need to be regridded
:type spatial_values: 2d masked numpy array. shape (latitude, longitude)
:param lat: Grid of latitude values which map to the spatial values
:type lat: 2d numpy array. shape(latitudes, longitudes)
:param lon: Grid of longitude values which map to the spatial values
:type lon: 2d numpy array. shape(latitudes, longitudes)
:param lat2: Grid | |
tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_tensorboard_time_series(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, tensorboard_time_series.TensorboardTimeSeries)
for i in results
)
def test_list_tensorboard_time_series_pages():
    """Iterating pager.pages yields one page per mocked response, in order."""
    # BUG FIX: AnonymousCredentials was passed as the class object instead of
    # an instance (missing parentheses); every sibling test instantiates it.
    client = TensorboardServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_tensorboard_time_series), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            tensorboard_service.ListTensorboardTimeSeriesResponse(
                tensorboard_time_series=[
                    tensorboard_time_series.TensorboardTimeSeries(),
                    tensorboard_time_series.TensorboardTimeSeries(),
                    tensorboard_time_series.TensorboardTimeSeries(),
                ],
                next_page_token="abc",
            ),
            tensorboard_service.ListTensorboardTimeSeriesResponse(
                tensorboard_time_series=[], next_page_token="def",
            ),
            tensorboard_service.ListTensorboardTimeSeriesResponse(
                tensorboard_time_series=[
                    tensorboard_time_series.TensorboardTimeSeries(),
                ],
                next_page_token="ghi",
            ),
            tensorboard_service.ListTensorboardTimeSeriesResponse(
                tensorboard_time_series=[
                    tensorboard_time_series.TensorboardTimeSeries(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_tensorboard_time_series(request={}).pages)
        # Each page surfaces its raw response's next_page_token; the final
        # page has an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_tensorboard_time_series_async_pager():
    """Async pager iterates individual items across all mocked pages."""
    # BUG FIX: AnonymousCredentials was passed as the class object instead of
    # an instance (missing parentheses); every sibling test instantiates it.
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_tensorboard_time_series),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            tensorboard_service.ListTensorboardTimeSeriesResponse(
                tensorboard_time_series=[
                    tensorboard_time_series.TensorboardTimeSeries(),
                    tensorboard_time_series.TensorboardTimeSeries(),
                    tensorboard_time_series.TensorboardTimeSeries(),
                ],
                next_page_token="abc",
            ),
            tensorboard_service.ListTensorboardTimeSeriesResponse(
                tensorboard_time_series=[], next_page_token="def",
            ),
            tensorboard_service.ListTensorboardTimeSeriesResponse(
                tensorboard_time_series=[
                    tensorboard_time_series.TensorboardTimeSeries(),
                ],
                next_page_token="ghi",
            ),
            tensorboard_service.ListTensorboardTimeSeriesResponse(
                tensorboard_time_series=[
                    tensorboard_time_series.TensorboardTimeSeries(),
                    tensorboard_time_series.TensorboardTimeSeries(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_tensorboard_time_series(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)

        # Pages contain 3 + 0 + 1 + 2 items -> 6 results in total.
        assert len(responses) == 6
        assert all(
            isinstance(i, tensorboard_time_series.TensorboardTimeSeries)
            for i in responses
        )
@pytest.mark.asyncio
async def test_list_tensorboard_time_series_async_pages():
    """Async .pages iteration yields one page per mocked response, in order."""
    # BUG FIX: AnonymousCredentials was passed as the class object instead of
    # an instance (missing parentheses); every sibling test instantiates it.
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_tensorboard_time_series),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            tensorboard_service.ListTensorboardTimeSeriesResponse(
                tensorboard_time_series=[
                    tensorboard_time_series.TensorboardTimeSeries(),
                    tensorboard_time_series.TensorboardTimeSeries(),
                    tensorboard_time_series.TensorboardTimeSeries(),
                ],
                next_page_token="abc",
            ),
            tensorboard_service.ListTensorboardTimeSeriesResponse(
                tensorboard_time_series=[], next_page_token="def",
            ),
            tensorboard_service.ListTensorboardTimeSeriesResponse(
                tensorboard_time_series=[
                    tensorboard_time_series.TensorboardTimeSeries(),
                ],
                next_page_token="ghi",
            ),
            tensorboard_service.ListTensorboardTimeSeriesResponse(
                tensorboard_time_series=[
                    tensorboard_time_series.TensorboardTimeSeries(),
                    tensorboard_time_series.TensorboardTimeSeries(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (
            await client.list_tensorboard_time_series(request={})
        ).pages:
            pages.append(page_)
        # Each page surfaces its raw response's next_page_token; the final
        # page has an empty token.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_delete_tensorboard_time_series(
    transport: str = "grpc",
    request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest,
):
    """Delete RPC: the stub's Operation response is wrapped in a Future."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tensorboard_time_series), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_tensorboard_time_series(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_delete_tensorboard_time_series_from_dict():
    """Same as the base test, but passing the request as a plain dict."""
    test_delete_tensorboard_time_series(request_type=dict)
def test_delete_tensorboard_time_series_empty_call():
    """Calling with no arguments still sends a default request message."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tensorboard_time_series), "__call__"
    ) as call:
        client.delete_tensorboard_time_series()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest()
@pytest.mark.asyncio
async def test_delete_tensorboard_time_series_async(
    transport: str = "grpc_asyncio",
    request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest,
):
    """Async delete RPC: the awaited Operation is wrapped in a Future."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tensorboard_time_series), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_tensorboard_time_series(request)

        # Establish that the underlying gRPC stub method was called.
        # (Truthiness check only; the sync variant asserts == 1.)
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_tensorboard_time_series_async_from_dict():
    """Same as the async test, but passing the request as a plain dict."""
    await test_delete_tensorboard_time_series_async(request_type=dict)
def test_delete_tensorboard_time_series_field_headers():
    """The request's `name` field must be echoed as a routing header."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.DeleteTensorboardTimeSeriesRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tensorboard_time_series), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_tensorboard_time_series(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_tensorboard_time_series_field_headers_async():
    """Async variant: the `name` field must be echoed as a routing header."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tensorboard_service.DeleteTensorboardTimeSeriesRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tensorboard_time_series), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_tensorboard_time_series(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_tensorboard_time_series_flattened():
    """Flattened kwargs must be packed into the request message."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tensorboard_time_series), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_tensorboard_time_series(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_tensorboard_time_series_flattened_error():
    """Passing both a request object and flattened kwargs must raise."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_tensorboard_time_series(
            tensorboard_service.DeleteTensorboardTimeSeriesRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_tensorboard_time_series_flattened_async():
    """Async variant: flattened kwargs must be packed into the request."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tensorboard_time_series), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # CLEANUP: removed a dead assignment of a plain Operation to
        # call.return_value -- it was immediately overwritten by the
        # FakeUnaryUnaryCall below and never observable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_tensorboard_time_series(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tensorboard_time_series_flattened_error_async():
    """Async variant: request object plus flattened kwargs must raise."""
    client = TensorboardServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_tensorboard_time_series(
            tensorboard_service.DeleteTensorboardTimeSeriesRequest(), name="name_value",
        )
def test_batch_read_tensorboard_time_series_data(
    transport: str = "grpc",
    request_type=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest,
):
    """Batch-read RPC: the stub response type is returned unchanged."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_read_tensorboard_time_series_data), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = (
            tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()
        )
        response = client.batch_read_tensorboard_time_series_data(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert (
            args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest()
        )

    # Establish that the response is the type that we expect.
    assert isinstance(
        response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse
    )
def test_batch_read_tensorboard_time_series_data_from_dict():
    """The RPC must also accept the request supplied as a plain dict."""
    # Re-run the parameterized happy-path test with `dict` as request type.
    test_batch_read_tensorboard_time_series_data(request_type=dict)
def test_batch_read_tensorboard_time_series_data_empty_call():
    """Coverage failsafe: calling with no request and no flattened fields works."""
    client = TensorboardServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the gRPC stub so the bare invocation never reaches the wire.
    with mock.patch.object(
        type(client.transport.batch_read_tensorboard_time_series_data), "__call__"
    ) as grpc_call:
        client.batch_read_tensorboard_time_series_data()
        grpc_call.assert_called()
        _, call_args, _ = grpc_call.mock_calls[0]
        # An omitted request must default to an empty request message.
        expected = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest()
        assert call_args[0] == expected
@pytest.mark.asyncio
async def test_batch_read_tensorboard_time_series_data_async(
transport: str = "grpc_asyncio",
request_type=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest,
):
client = TensorboardServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual | |
from formalchemy.tests import *
from formalchemy.fields import DateTimeFieldRenderer
import datetime
class Dt(Base):
    # Minimal mapped model exposing the three temporal column kinds that
    # the date/time field renderers in the doctests below must handle.
    __tablename__ = 'dts'
    id = Column('id', Integer, primary_key=True)
    foo = Column('foo', Date, nullable=True)  # date-only column
    bar = Column('bar', Time, nullable=True)  # time-only column
    foobar = Column('foobar', DateTime, nullable=True)  # combined date+time column
class DateTimeFieldRendererFr(DateTimeFieldRenderer):
    # French-style widget ordering: day before month before year
    # (exercised by the lang='fr' doctest below).
    edit_format = 'd-m-y'
__doc__ = r"""
>>> fs = FieldSet(Dt)
>>> fs.configure(options=[fs.foobar.with_renderer(DateTimeFieldRendererFr)])
>>> print pretty_html(fs.foobar.with_html(lang='fr').render()) #doctest: +ELLIPSIS
<span id="Dt--foobar">
<select id="Dt--foobar__day" lang="fr" name="Dt--foobar__day">
<option value="DD">
Jour
</option>
...
<select id="Dt--foobar__month" lang="fr" name="Dt--foobar__month">
<option value="MM">
Mois
</option>
<option value="1">
Janvier
</option>
...
>>> fs = FieldSet(Dt)
>>> print pretty_html(fs.foobar.render())
<span id="Dt--foobar">
<select id="Dt--foobar__month" name="Dt--foobar__month">
<option value="MM">
Month
</option>
<option value="1">
January
</option>
<option value="2">
February
</option>
<option value="3">
March
</option>
<option value="4">
April
</option>
<option value="5">
May
</option>
<option value="6">
June
</option>
<option value="7">
July
</option>
<option value="8">
August
</option>
<option value="9">
September
</option>
<option value="10">
October
</option>
<option value="11">
November
</option>
<option value="12">
December
</option>
</select>
<select id="Dt--foobar__day" name="Dt--foobar__day">
<option value="DD">
Day
</option>
<option value="1">
1
</option>
<option value="2">
2
</option>
<option value="3">
3
</option>
<option value="4">
4
</option>
<option value="5">
5
</option>
<option value="6">
6
</option>
<option value="7">
7
</option>
<option value="8">
8
</option>
<option value="9">
9
</option>
<option value="10">
10
</option>
<option value="11">
11
</option>
<option value="12">
12
</option>
<option value="13">
13
</option>
<option value="14">
14
</option>
<option value="15">
15
</option>
<option value="16">
16
</option>
<option value="17">
17
</option>
<option value="18">
18
</option>
<option value="19">
19
</option>
<option value="20">
20
</option>
<option value="21">
21
</option>
<option value="22">
22
</option>
<option value="23">
23
</option>
<option value="24">
24
</option>
<option value="25">
25
</option>
<option value="26">
26
</option>
<option value="27">
27
</option>
<option value="28">
28
</option>
<option value="29">
29
</option>
<option value="30">
30
</option>
<option value="31">
31
</option>
</select>
<input id="Dt--foobar__year" maxlength="4" name="Dt--foobar__year" size="4" type="text" value="YYYY" />
<select id="Dt--foobar__hour" name="Dt--foobar__hour">
<option value="HH">
HH
</option>
<option value="0">
0
</option>
<option value="1">
1
</option>
<option value="2">
2
</option>
<option value="3">
3
</option>
<option value="4">
4
</option>
<option value="5">
5
</option>
<option value="6">
6
</option>
<option value="7">
7
</option>
<option value="8">
8
</option>
<option value="9">
9
</option>
<option value="10">
10
</option>
<option value="11">
11
</option>
<option value="12">
12
</option>
<option value="13">
13
</option>
<option value="14">
14
</option>
<option value="15">
15
</option>
<option value="16">
16
</option>
<option value="17">
17
</option>
<option value="18">
18
</option>
<option value="19">
19
</option>
<option value="20">
20
</option>
<option value="21">
21
</option>
<option value="22">
22
</option>
<option value="23">
23
</option>
</select>
:
<select id="Dt--foobar__minute" name="Dt--foobar__minute">
<option value="MM">
MM
</option>
<option value="0">
0
</option>
<option value="1">
1
</option>
<option value="2">
2
</option>
<option value="3">
3
</option>
<option value="4">
4
</option>
<option value="5">
5
</option>
<option value="6">
6
</option>
<option value="7">
7
</option>
<option value="8">
8
</option>
<option value="9">
9
</option>
<option value="10">
10
</option>
<option value="11">
11
</option>
<option value="12">
12
</option>
<option value="13">
13
</option>
<option value="14">
14
</option>
<option value="15">
15
</option>
<option value="16">
16
</option>
<option value="17">
17
</option>
<option value="18">
18
</option>
<option value="19">
19
</option>
<option value="20">
20
</option>
<option value="21">
21
</option>
<option value="22">
22
</option>
<option value="23">
23
</option>
<option value="24">
24
</option>
<option value="25">
25
</option>
<option value="26">
26
</option>
<option value="27">
27
</option>
<option value="28">
28
</option>
<option value="29">
29
</option>
<option value="30">
30
</option>
<option value="31">
31
</option>
<option value="32">
32
</option>
<option value="33">
33
</option>
<option value="34">
34
</option>
<option value="35">
35
</option>
<option value="36">
36
</option>
<option value="37">
37
</option>
<option value="38">
38
</option>
<option value="39">
39
</option>
<option value="40">
40
</option>
<option value="41">
41
</option>
<option value="42">
42
</option>
<option value="43">
43
</option>
<option value="44">
44
</option>
<option value="45">
45
</option>
<option value="46">
46
</option>
<option value="47">
47
</option>
<option value="48">
48
</option>
<option value="49">
49
</option>
<option value="50">
50
</option>
<option value="51">
51
</option>
<option value="52">
52
</option>
<option value="53">
53
</option>
<option value="54">
54
</option>
<option value="55">
55
</option>
<option value="56">
56
</option>
<option value="57">
57
</option>
<option value="58">
58
</option>
<option value="59">
59
</option>
</select>
:
<select id="Dt--foobar__second" name="Dt--foobar__second">
<option value="SS">
SS
</option>
<option value="0">
0
</option>
<option value="1">
1
</option>
<option value="2">
2
</option>
<option value="3">
3
</option>
<option value="4">
4
</option>
<option value="5">
5
</option>
<option value="6">
6
</option>
<option value="7">
7
</option>
<option value="8">
8
</option>
<option value="9">
9
</option>
<option value="10">
10
</option>
<option value="11">
11
</option>
<option value="12">
12
</option>
<option value="13">
13
</option>
<option value="14">
14
</option>
<option value="15">
15
</option>
<option value="16">
16
</option>
<option value="17">
17
</option>
<option value="18">
18
</option>
<option value="19">
19
</option>
<option value="20">
20
</option>
<option value="21">
21
</option>
<option value="22">
22
</option>
<option value="23">
23
</option>
<option value="24">
24
</option>
<option value="25">
25
</option>
<option value="26">
26
</option>
<option value="27">
27
</option>
<option value="28">
28
</option>
<option value="29">
29
</option>
<option value="30">
30
</option>
<option value="31">
31
</option>
<option value="32">
32
</option>
<option value="33">
33
</option>
<option value="34">
34
</option>
<option value="35">
35
</option>
<option value="36">
36
</option>
<option value="37">
37
</option>
<option value="38">
38
</option>
<option value="39">
39
</option>
<option value="40">
40
</option>
<option value="41">
41
</option>
<option value="42">
42
</option>
<option value="43">
43
</option>
<option value="44">
44
</option>
<option value="45">
45
</option>
<option value="46">
46
</option>
<option value="47">
47
</option>
<option value="48">
48
</option>
<option value="49">
49
</option>
<option value="50">
50
</option>
<option value="51">
51
</option>
<option value="52">
52
</option>
<option value="53">
53
</option>
<option value="54">
54
</option>
<option value="55">
55
</option>
<option value="56">
56
</option>
<option value="57">
57
</option>
<option value="58">
58
</option>
<option value="59">
59
</option>
</select>
</span>
>>> fs = FieldSet(Dt)
>>> dt = fs.model
>>> dt.foo = datetime.date(2008, 6, 3); dt.bar=datetime.time(14, 16, 18); dt.foobar=datetime.datetime(2008, 6, 3, 14, 16, 18)
>>> print pretty_html(fs.foo.render())
<span id="Dt--foo">
<select id="Dt--foo__month" name="Dt--foo__month">
<option value="MM">
Month
</option>
<option value="1">
January
</option>
<option value="2">
February
</option>
<option value="3">
March
</option>
<option value="4">
April
</option>
<option value="5">
May
</option>
<option value="6" selected="selected">
June
</option>
<option value="7">
July
</option>
<option value="8">
August
</option>
<option value="9">
September
</option>
<option value="10">
October
</option>
<option value="11">
November
</option>
<option value="12">
December
</option>
</select>
<select id="Dt--foo__day" name="Dt--foo__day">
<option value="DD">
Day
</option>
<option value="1">
1
</option>
<option value="2">
2
</option>
<option value="3" selected="selected">
3
</option>
<option value="4">
4
</option>
<option value="5">
5
</option>
<option value="6">
6
</option>
<option value="7">
7
</option>
<option value="8">
8
</option>
<option value="9">
9
</option>
<option value="10">
10
</option>
<option value="11">
11
</option>
<option value="12">
12
</option>
<option value="13">
13
</option>
<option value="14">
14
</option>
<option value="15">
15
</option>
<option value="16">
16
</option>
<option value="17">
17
</option>
<option value="18">
18
</option>
<option value="19">
19
</option>
<option value="20">
20
</option>
<option value="21">
21
</option>
<option value="22">
22
</option>
<option value="23">
23
</option>
<option value="24">
24
</option>
<option value="25">
25
</option>
<option value="26">
26
</option>
<option value="27">
27
</option>
<option value="28">
28
</option>
<option value="29">
29
</option>
<option value="30">
30
</option>
<option value="31">
31
</option>
</select>
<input id="Dt--foo__year" maxlength="4" name="Dt--foo__year" size="4" type="text" value="2008" />
</span>
>>> print pretty_html(fs.bar.render())
<span id="Dt--bar">
<select id="Dt--bar__hour" name="Dt--bar__hour">
<option value="HH">
HH
</option>
<option value="0">
0
</option>
<option value="1">
1
</option>
<option value="2">
2
</option>
<option value="3">
3
</option>
<option value="4">
4
</option>
<option value="5">
5
</option>
<option value="6">
6
</option>
<option value="7">
7
</option>
<option value="8">
8
</option>
<option value="9">
9
</option>
<option value="10">
10
</option>
<option value="11">
11
</option>
<option value="12">
12
</option>
<option value="13">
13
</option>
<option value="14" selected="selected">
14
</option>
<option value="15">
15
</option>
<option value="16">
16
</option>
<option value="17">
17
</option>
<option value="18">
18
</option>
<option value="19">
19
</option>
<option value="20">
20
</option>
<option value="21">
21
</option>
<option value="22">
22
</option>
<option value="23">
23
</option>
</select>
:
<select id="Dt--bar__minute" name="Dt--bar__minute">
<option value="MM">
MM
</option>
<option value="0">
0
</option>
<option value="1">
1
</option>
<option value="2">
2
</option>
<option value="3">
3
</option>
<option value="4">
4
</option>
<option value="5">
5
</option>
<option value="6">
6
</option>
<option value="7">
7
</option>
<option value="8">
8
</option>
<option value="9">
9
</option>
<option value="10">
10
</option>
<option value="11">
11
</option>
<option value="12">
12
</option>
<option value="13">
13
</option>
<option value="14">
14
</option>
<option value="15">
15
</option>
<option value="16" selected="selected">
16
</option>
<option value="17">
17
| |
the zones
attached to that specific air loop.
Duplicate groups of Zone name, Design Specification Outdoor Air Object Name,
and Design Specification Zone Air Distribution Object Name to increase allowable number of entries
"""
_schema = {'extensible-fields': OrderedDict([(u'zone 1 name',
{'name': u'Zone 1 Name',
'pyname': u'zone_1_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'design specification outdoor air object name 1',
{'name': u'Design Specification Outdoor Air Object Name 1',
'pyname': u'design_specification_outdoor_air_object_name_1',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'design specification zone air distribution object name 1',
{'name': u'Design Specification Zone Air Distribution Object Name 1',
'pyname': u'design_specification_zone_air_distribution_object_name_1',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'})]),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'availability schedule name',
{'name': u'Availability Schedule Name',
'pyname': u'availability_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'demand controlled ventilation',
{'name': u'Demand Controlled Ventilation',
'pyname': u'demand_controlled_ventilation',
'default': u'No',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Yes',
u'No'],
'autocalculatable': False,
'type': 'alpha'}),
(u'system outdoor air method',
{'name': u'System Outdoor Air Method',
'pyname': u'system_outdoor_air_method',
'default': u'VentilationRateProcedure',
'required-field': False,
'autosizable': False,
'accepted-values': [u'ZoneSum',
u'VentilationRateProcedure',
u'IndoorAirQualityProcedure',
u'ProportionalControlBasedOnDesignOccupancy',
u'ProportionalControlBasedonOccupancySchedule',
u'IndoorAirQualityProcedureGenericContaminant'],
'autocalculatable': False,
'type': 'alpha'}),
(u'zone maximum outdoor air fraction',
{'name': u'Zone Maximum Outdoor Air Fraction',
'pyname': u'zone_maximum_outdoor_air_fraction',
'default': 1.0,
'minimum>': 0.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'dimensionless'})]),
'format': None,
'group': u'Controllers',
'min-fields': 8,
'name': u'Controller:MechanicalVentilation',
'pyname': u'ControllerMechanicalVentilation',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def availability_schedule_name(self):
"""field `Availability Schedule Name`
| If this field is blank, the controller uses the values from the associated Controller:OutdoorAir.
| Schedule values greater than 0 indicate mechanical ventilation is enabled
Args:
value (str): value for IDD Field `Availability Schedule Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `availability_schedule_name` or None if not set
"""
return self["Availability Schedule Name"]
@availability_schedule_name.setter
def availability_schedule_name(self, value=None):
"""Corresponds to IDD field `Availability Schedule Name`"""
self["Availability Schedule Name"] = value
@property
def demand_controlled_ventilation(self):
"""field `Demand Controlled Ventilation`
| Default value: No
Args:
value (str): value for IDD Field `Demand Controlled Ventilation`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `demand_controlled_ventilation` or None if not set
"""
return self["Demand Controlled Ventilation"]
@demand_controlled_ventilation.setter
def demand_controlled_ventilation(self, value="No"):
"""Corresponds to IDD field `Demand Controlled Ventilation`"""
self["Demand Controlled Ventilation"] = value
@property
def system_outdoor_air_method(self):
"""field `System Outdoor Air Method`
| Default value: VentilationRateProcedure
Args:
value (str): value for IDD Field `System Outdoor Air Method`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `system_outdoor_air_method` or None if not set
"""
return self["System Outdoor Air Method"]
@system_outdoor_air_method.setter
def system_outdoor_air_method(self, value="VentilationRateProcedure"):
"""Corresponds to IDD field `System Outdoor Air Method`"""
self["System Outdoor Air Method"] = value
@property
def zone_maximum_outdoor_air_fraction(self):
"""field `Zone Maximum Outdoor Air Fraction`
| Units: dimensionless
| Default value: 1.0
Args:
value (float): value for IDD Field `Zone Maximum Outdoor Air Fraction`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `zone_maximum_outdoor_air_fraction` or None if not set
"""
return self["Zone Maximum Outdoor Air Fraction"]
@zone_maximum_outdoor_air_fraction.setter
def zone_maximum_outdoor_air_fraction(self, value=1.0):
"""Corresponds to IDD field `Zone Maximum Outdoor Air Fraction`"""
self["Zone Maximum Outdoor Air Fraction"] = value
def add_extensible(
self,
zone_1_name=None,
design_specification_outdoor_air_object_name_1=None,
design_specification_zone_air_distribution_object_name_1=None,
):
"""Add values for extensible fields.
Args:
zone_1_name (str): value for IDD Field `Zone 1 Name`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
design_specification_outdoor_air_object_name_1 (str): value for IDD Field `Design Specification Outdoor Air Object Name 1`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
design_specification_zone_air_distribution_object_name_1 (str): value for IDD Field `Design Specification Zone Air Distribution Object Name 1`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
"""
vals = []
zone_1_name = self.check_value("Zone 1 Name", zone_1_name)
vals.append(zone_1_name)
design_specification_outdoor_air_object_name_1 = self.check_value(
"Design Specification Outdoor Air Object Name 1",
design_specification_outdoor_air_object_name_1)
vals.append(design_specification_outdoor_air_object_name_1)
design_specification_zone_air_distribution_object_name_1 = self.check_value(
"Design Specification Zone Air Distribution Object Name 1",
design_specification_zone_air_distribution_object_name_1)
vals.append(design_specification_zone_air_distribution_object_name_1)
self._extdata.append(vals)
@property
def extensibles(self):
"""Get list of all extensibles."""
return self._extdata
@extensibles.setter
def extensibles(self, extensibles):
"""Replaces extensible fields with `extensibles`
Args:
extensibles (list): nested list of extensible values
"""
self._extdata = []
for ext in extensibles:
self.add_extensible(*ext)
class AirLoopHvacControllerList(DataObject):
""" Corresponds to IDD object `AirLoopHVAC:ControllerList`
List controllers in order of control sequence
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'controller 1 object type',
{'name': u'Controller 1 Object Type',
'pyname': u'controller_1_object_type',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Controller:WaterCoil',
u'Controller:OutdoorAir'],
'autocalculatable': False,
'type': 'alpha'}),
(u'controller 1 name',
{'name': u'Controller 1 Name',
'pyname': u'controller_1_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'controller 2 object type',
{'name': u'Controller 2 Object Type',
'pyname': u'controller_2_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Controller:WaterCoil',
u'Controller:OutdoorAir'],
'autocalculatable': False,
'type': 'alpha'}),
(u'controller 2 name',
{'name': u'Controller 2 Name',
'pyname': u'controller_2_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'controller 3 object type',
{'name': u'Controller 3 Object Type',
'pyname': u'controller_3_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Controller:WaterCoil',
u'Controller:OutdoorAir'],
'autocalculatable': False,
'type': 'alpha'}),
(u'controller 3 name',
{'name': u'Controller 3 Name',
'pyname': u'controller_3_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'controller 4 object type',
{'name': u'Controller 4 Object Type',
'pyname': u'controller_4_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Controller:WaterCoil',
u'Controller:OutdoorAir'],
'autocalculatable': False,
'type': 'alpha'}),
(u'controller 4 name',
{'name': u'Controller 4 Name',
'pyname': u'controller_4_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'controller 5 object type',
{'name': u'Controller 5 Object Type',
'pyname': u'controller_5_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Controller:WaterCoil',
u'Controller:OutdoorAir'],
'autocalculatable': False,
'type': 'alpha'}),
(u'controller 5 name',
{'name': u'Controller 5 Name',
'pyname': u'controller_5_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'controller 6 object type',
{'name': u'Controller 6 Object Type',
'pyname': u'controller_6_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Controller:WaterCoil',
u'Controller:OutdoorAir'],
'autocalculatable': False,
'type': 'alpha'}),
(u'controller 6 name',
{'name': u'Controller 6 Name',
'pyname': u'controller_6_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'controller 7 object type',
{'name': u'Controller 7 Object Type',
'pyname': u'controller_7_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Controller:WaterCoil',
u'Controller:OutdoorAir'],
'autocalculatable': False,
'type': 'alpha'}),
(u'controller 7 name',
{'name': u'Controller 7 Name',
'pyname': u'controller_7_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'controller 8 object type',
{'name': u'Controller 8 Object Type',
'pyname': u'controller_8_object_type',
'required-field': False,
'autosizable': False,
'accepted-values': [u'Controller:WaterCoil',
u'Controller:OutdoorAir'],
'autocalculatable': False,
'type': 'alpha'}),
(u'controller 8 name',
{'name': u'Controller 8 Name',
'pyname': u'controller_8_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'})]),
'format': None,
'group': u'Controllers',
'min-fields': 0,
'name': u'AirLoopHVAC:ControllerList',
'pyname': u'AirLoopHvacControllerList',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def controller_1_object_type(self):
"""field `Controller 1 Object Type`
Args:
value (str): value for IDD Field `Controller 1 Object Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `controller_1_object_type` or None if not set
"""
return self["Controller 1 Object Type"]
@controller_1_object_type.setter
def controller_1_object_type(self, value=None):
"""Corresponds to IDD field `Controller 1 Object Type`"""
self["Controller 1 Object Type"] = value
@property
def controller_1_name(self):
"""field `Controller 1 Name`
Args:
value (str): value for IDD Field `Controller 1 Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `controller_1_name` or None if not set
"""
return self["Controller 1 Name"]
@controller_1_name.setter
def controller_1_name(self, value=None):
"""Corresponds to IDD field `Controller 1 Name`"""
self["Controller 1 Name"] = value
@property
def controller_2_object_type(self):
"""field `Controller 2 | |
name = "com_github_rogpeppe_go_internal",
importpath = "github.com/rogpeppe/go-internal",
sum = "h1:Usqs0/lDK/NqTkvrmKSwA/3XkZAs7ZAW/eLeQ2MVBTw=",
version = "v1.5.0",
)
go_repository(
name = "com_github_rs_cors",
importpath = "github.com/rs/cors",
sum = "h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI=",
version = "v1.6.0",
)
go_repository(
name = "com_github_rubenv_sql_migrate",
importpath = "github.com/rubenv/sql-migrate",
sum = "h1:xkBtI5JktwbW/vf4vopBbhYsRFTGfQWHYXzC0/qYwxI=",
version = "v0.0.0-20200212082348-64f95ea68aa3",
)
go_repository(
name = "com_github_russross_blackfriday",
importpath = "github.com/russross/blackfriday",
sum = "h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=",
version = "v1.5.2",
)
go_repository(
name = "com_github_russross_blackfriday_v2",
importpath = "github.com/russross/blackfriday/v2",
sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=",
version = "v2.0.1",
)
go_repository(
name = "com_github_ryanuber_columnize",
importpath = "github.com/ryanuber/columnize",
sum = "h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=",
version = "v2.1.0+incompatible",
)
go_repository(
name = "com_github_samuel_go_zookeeper",
importpath = "github.com/samuel/go-zookeeper",
sum = "h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU=",
version = "v0.0.0-20190923202752-2cc03de413da",
)
go_repository(
name = "com_github_santhosh_tekuri_jsonschema",
importpath = "github.com/santhosh-tekuri/jsonschema",
sum = "h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis=",
version = "v1.2.4",
)
go_repository(
name = "com_github_satori_go_uuid",
importpath = "github.com/satori/go.uuid",
sum = "h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=",
version = "v1.2.0",
)
go_repository(
name = "com_github_sclevine_spec",
importpath = "github.com/sclevine/spec",
sum = "h1:1Jwdf9jSfDl9NVmt8ndHqbTZ7XCCPbh1jI3hkDBHVYA=",
version = "v1.2.0",
)
go_repository(
name = "com_github_sean_seed",
importpath = "github.com/sean-/seed",
sum = "h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=",
version = "v0.0.0-20170313163322-e2103e2c3529",
)
go_repository(
name = "com_github_sergi_go_diff",
importpath = "github.com/sergi/go-diff",
sum = "h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=",
version = "v1.0.0",
)
go_repository(
name = "com_github_shopify_logrus_bugsnag",
importpath = "github.com/Shopify/logrus-bugsnag",
sum = "h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=",
version = "v0.0.0-20171204204709-577dee27f20d",
)
go_repository(
name = "com_github_shopify_sarama",
importpath = "github.com/Shopify/sarama",
sum = "h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s=",
version = "v1.19.0",
)
go_repository(
name = "com_github_shopify_toxiproxy",
importpath = "github.com/Shopify/toxiproxy",
sum = "h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=",
version = "v2.1.4+incompatible",
)
go_repository(
name = "com_github_shopspring_decimal",
importpath = "github.com/shopspring/decimal",
sum = "h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=",
version = "v0.0.0-20180709203117-cd690d0c9e24",
)
go_repository(
name = "com_github_shurcool_httpfs",
importpath = "github.com/shurcooL/httpfs",
sum = "h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=",
version = "v0.0.0-20190707220628-8d4bc4ba7749",
)
go_repository(
name = "com_github_shurcool_sanitized_anchor_name",
importpath = "github.com/shurcooL/sanitized_anchor_name",
sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_shurcool_vfsgen",
importpath = "github.com/shurcooL/vfsgen",
sum = "h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=",
version = "v0.0.0-20181202132449-6a9ea43bcacd",
)
go_repository(
name = "com_github_sirupsen_logrus",
importpath = "github.com/sirupsen/logrus",
sum = "h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=",
version = "v1.6.0",
)
go_repository(
name = "com_github_smartystreets_assertions",
importpath = "github.com/smartystreets/assertions",
sum = "h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=",
version = "v1.0.1",
)
go_repository(
name = "com_github_smartystreets_goconvey",
importpath = "github.com/smartystreets/goconvey",
sum = "h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=",
version = "v1.6.4",
)
go_repository(
name = "com_github_soheilhy_cmux",
importpath = "github.com/soheilhy/cmux",
sum = "h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=",
version = "v0.1.4",
)
go_repository(
name = "com_github_spaolacci_murmur3",
importpath = "github.com/spaolacci/murmur3",
sum = "h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=",
version = "v1.1.0",
)
go_repository(
name = "com_github_spf13_afero",
importpath = "github.com/spf13/afero",
sum = "h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=",
version = "v1.2.2",
)
go_repository(
name = "com_github_spf13_cast",
importpath = "github.com/spf13/cast",
sum = "h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=",
version = "v1.3.1",
)
go_repository(
name = "com_github_spf13_cobra",
importpath = "github.com/spf13/cobra",
sum = "h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=",
version = "v1.0.0",
)
go_repository(
name = "com_github_spf13_jwalterweatherman",
importpath = "github.com/spf13/jwalterweatherman",
sum = "h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_spf13_pflag",
importpath = "github.com/spf13/pflag",
sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=",
version = "v1.0.5",
)
go_repository(
name = "com_github_spf13_viper",
importpath = "github.com/spf13/viper",
sum = "h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=",
version = "v1.4.0",
)
go_repository(
name = "com_github_stoewer_go_strcase",
importpath = "github.com/stoewer/go-strcase",
sum = "h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=",
version = "v1.2.0",
)
go_repository(
name = "com_github_stretchr_objx",
importpath = "github.com/stretchr/objx",
sum = "h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=",
version = "v0.2.0",
)
go_repository(
name = "com_github_stretchr_testify",
importpath = "github.com/stretchr/testify",
sum = "h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=",
version = "v1.5.1",
)
go_repository(
name = "com_github_syndtr_gocapability",
importpath = "github.com/syndtr/gocapability",
sum = "h1:zLV6q4e8Jv9EHjNg/iHfzwDkCve6Ua5jCygptrtXHvI=",
version = "v0.0.0-20170704070218-db04d3cc01c8",
)
go_repository(
name = "com_github_thanos_io_thanos",
importpath = "github.com/thanos-io/thanos",
sum = "h1:UkWLa93sihcxCofelRH/NBGQxFyFU73eXIr2a+dwOFM=",
version = "v0.11.0",
)
go_repository(
name = "com_github_tidwall_pretty",
importpath = "github.com/tidwall/pretty",
sum = "h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_tmc_grpc_websocket_proxy",
importpath = "github.com/tmc/grpc-websocket-proxy",
sum = "h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=",
version = "v0.0.0-20190109142713-0ad062ec5ee5",
)
go_repository(
name = "com_github_tv42_httpunix",
importpath = "github.com/tv42/httpunix",
sum = "h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=",
version = "v0.0.0-20150427012821-b75d8614f926",
)
go_repository(
name = "com_github_uber_jaeger_client_go",
importpath = "github.com/uber/jaeger-client-go",
sum = "h1:HgqpYBng0n7tLJIlyT4kPCIv5XgCsF+kai1NnnrJzEU=",
version = "v2.20.1+incompatible",
)
go_repository(
name = "com_github_uber_jaeger_lib",
importpath = "github.com/uber/jaeger-lib",
sum = "h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=",
version = "v2.2.0+incompatible",
)
go_repository(
name = "com_github_ugorji_go",
importpath = "github.com/ugorji/go",
sum = "h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=",
version = "v1.1.4",
)
go_repository(
name = "com_github_ugorji_go_codec",
importpath = "github.com/ugorji/go/codec",
sum = "h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648=",
version = "v0.0.0-20181204163529-d75b2dcb6bc8",
)
go_repository(
name = "com_github_urfave_cli",
importpath = "github.com/urfave/cli",
sum = "h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=",
version = "v1.20.0",
)
go_repository(
name = "com_github_vektah_gqlparser",
importpath = "github.com/vektah/gqlparser",
sum = "h1:ZsyLGn7/7jDNI+y4SEhI4yAxRChlv15pUHMjijT+e68=",
version = "v1.1.2",
)
go_repository(
name = "com_github_xanzy_go_gitlab",
importpath = "github.com/xanzy/go-gitlab",
sum = "h1:rWtwKTgEnXyNUGrOArN7yyc3THRkpYcKXIXia9abywQ=",
version = "v0.15.0",
)
go_repository(
name = "com_github_xdg_scram",
importpath = "github.com/xdg/scram",
sum = "h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=",
version = "v0.0.0-20180814205039-7eeb5667e42c",
)
go_repository(
name = "com_github_xdg_stringprep",
importpath = "github.com/xdg/stringprep",
sum = "h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_xeipuuv_gojsonpointer",
importpath = "github.com/xeipuuv/gojsonpointer",
sum = "h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=",
version = "v0.0.0-20180127040702-4e3ac2762d5f",
)
go_repository(
name = "com_github_xeipuuv_gojsonreference",
importpath = "github.com/xeipuuv/gojsonreference",
sum = "h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=",
version = "v0.0.0-20180127040603-bd5ef7bd5415",
)
go_repository(
name = "com_github_xeipuuv_gojsonschema",
importpath = "github.com/xeipuuv/gojsonschema",
sum = "h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg=",
version = "v1.1.0",
)
go_repository(
name = "com_github_xiang90_probing",
importpath = "github.com/xiang90/probing",
sum = "h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=",
version = "v0.0.0-20190116061207-43a291ad63a2",
)
go_repository(
name = "com_github_xlab_handysort",
importpath = "github.com/xlab/handysort",
sum = "h1:j2hhcujLRHAg872RWAV5yaUrEjHEObwDv3aImCaNLek=",
version = "v0.0.0-20150421192137-fb3537ed64a1",
)
go_repository(
name = "com_github_xlab_treeprint",
importpath = "github.com/xlab/treeprint",
sum = "h1:YdYsPAZ2pC6Tow/nPZOPQ96O3hm/ToAkGsPLzedXERk=",
version = "v0.0.0-20180616005107-d6fb6747feb6",
)
go_repository(
name = "com_github_xordataexchange_crypt",
importpath = "github.com/xordataexchange/crypt",
sum = "h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=",
version = "v0.0.3-0.20170626215501-b2862e3d0a77",
)
go_repository(
name = "com_github_yuin_goldmark",
importpath = "github.com/yuin/goldmark",
sum = "h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=",
version = "v1.2.1",
)
go_repository(
name = "com_github_yvasiyarov_go_metrics",
importpath = "github.com/yvasiyarov/go-metrics",
sum = "h1:p7OofyZ509h8DmPLh8Hn+EIIZm/xYhdZHJ9GnXHdr6U=",
version = "v0.0.0-20150112132944-c25f46c4b940",
)
go_repository(
name = "com_github_yvasiyarov_gorelic",
importpath = "github.com/yvasiyarov/gorelic",
sum = "h1:4DTF1WOM2ZZS/xMOkTFBOcb6XiHu/PKn3rVo6dbewQE=",
version = "v0.0.7",
)
go_repository(
name = "com_github_yvasiyarov_newrelic_platform_go",
importpath = "github.com/yvasiyarov/newrelic_platform_go",
sum = "h1:AsFN8kXcCVkUFHyuzp1FtYbzp1nCO/H6+1uPSGEyPzM=",
version = "v0.0.0-20160601141957-9c099fbc30e9",
)
go_repository(
name = "com_github_ziutek_mymysql",
importpath = "github.com/ziutek/mymysql",
sum = "h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=",
version = "v1.5.4",
)
go_repository(
name = "com_gitlab_nyarla_go_crypt",
importpath = "gitlab.com/nyarla/go-crypt",
sum = "h1:7gd+rd8P3bqcn/96gOZa3F5dpJr/vEiDQYlNb/y2uNs=",
version = "v0.0.0-20160106005555-d9a5dc2b789b",
)
go_repository(
name = "com_google_cloud_go",
importpath = "cloud.google.com/go",
sum = "h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM=",
version = "v0.51.0",
)
go_repository(
name = "com_google_cloud_go_bigquery",
importpath = "cloud.google.com/go/bigquery",
sum = "h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE=",
version = "v1.3.0",
)
go_repository(
name = "com_google_cloud_go_datastore",
importpath = "cloud.google.com/go/datastore",
sum = "h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM=",
version = "v1.0.0",
)
go_repository(
name = "com_google_cloud_go_pubsub",
importpath = "cloud.google.com/go/pubsub",
sum = "h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8=",
version = "v1.0.1",
)
go_repository(
name = "com_google_cloud_go_storage",
importpath = "cloud.google.com/go/storage",
sum = "h1:2Ze/3nQD5F+HfL0xOPM2EeawDWs+NPRtzgcre+17iZU=",
version = "v1.3.0",
)
go_repository(
name = "com_shuralyov_dmitri_gpu_mtl",
importpath = "dmitri.shuralyov.com/gpu/mtl",
sum = "h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=",
version = "v0.0.0-20190408044501-666a987793e9",
)
go_repository(
name = "in_gopkg_airbrake_gobrake_v2",
importpath = "gopkg.in/airbrake/gobrake.v2",
sum = "h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=",
version = "v2.0.9",
)
go_repository(
name = "in_gopkg_alecthomas_kingpin_v2",
importpath = "gopkg.in/alecthomas/kingpin.v2",
sum = "h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=",
version = "v2.2.6",
)
go_repository(
name = "in_gopkg_check_v1",
importpath = "gopkg.in/check.v1",
sum = "h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=",
version = "v1.0.0-20190902080502-41f04d3bba15",
)
go_repository(
name = "in_gopkg_cheggaaa_pb_v1",
importpath = "gopkg.in/cheggaaa/pb.v1",
sum = "h1:Ev7yu1/f6+d+b3pi5vPdRPc6nNtP1umSfcWiEfRqv6I=",
version = "v1.0.25",
)
go_repository(
name = "in_gopkg_errgo_v2",
importpath = "gopkg.in/errgo.v2",
sum = "h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=",
version = "v2.1.0",
)
go_repository(
name = "in_gopkg_fsnotify_fsnotify_v1",
importpath = "gopkg.in/fsnotify/fsnotify.v1",
sum = "h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo=",
version = "v1.4.7",
)
go_repository(
name = "in_gopkg_fsnotify_v1",
importpath = "gopkg.in/fsnotify.v1",
sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=",
version = "v1.4.7",
)
go_repository(
name = "in_gopkg_gemnasium_logrus_airbrake_hook_v2",
importpath = "gopkg.in/gemnasium/logrus-airbrake-hook.v2",
sum = "h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=",
version = "v2.1.2",
)
go_repository(
name = "in_gopkg_gorp_v1",
importpath = "gopkg.in/gorp.v1",
sum = "h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw=",
version = "v1.7.2",
)
go_repository(
name = "in_gopkg_imdario_mergo_v0",
importpath = "gopkg.in/imdario/mergo.v0",
sum = "h1:QDotlIZtaO/p+Um0ok18HRTpq5i5/SAk/qprsor+9c8=",
version = "v0.3.7",
)
go_repository(
name = "in_gopkg_inf_v0",
importpath = "gopkg.in/inf.v0",
sum = "h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=",
version = "v0.9.1",
)
go_repository(
name = "in_gopkg_ini_v1",
importpath = "gopkg.in/ini.v1",
sum = "h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=",
version = "v1.51.0",
)
go_repository(
name = "in_gopkg_natefinch_lumberjack_v2",
importpath = "gopkg.in/natefinch/lumberjack.v2",
sum = "h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=",
version = "v2.0.0",
)
go_repository(
name = "in_gopkg_op_go_logging_v1",
importpath = "gopkg.in/op/go-logging.v1",
sum = "h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE=",
version = "v1.0.0-20160211212156-b2cb9fa56473",
)
go_repository(
name = "in_gopkg_resty_v1",
importpath = "gopkg.in/resty.v1",
sum = "h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=",
version = "v1.12.0",
)
go_repository(
name = "in_gopkg_square_go_jose_v2",
importpath = "gopkg.in/square/go-jose.v2",
sum = "h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA=",
version = "v2.2.2",
)
go_repository(
name = "in_gopkg_tomb_v1",
importpath = "gopkg.in/tomb.v1",
sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=",
version = "v1.0.0-20141024135613-dd632973f1e7",
)
go_repository(
name = "in_gopkg_yaml_v2",
importpath = "gopkg.in/yaml.v2",
sum = "h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=",
version = "v2.3.0",
)
go_repository(
name = "in_gopkg_yaml_v3",
importpath = "gopkg.in/yaml.v3",
sum = "h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=",
version = "v3.0.0-20200615113413-eeeca48fe776",
)
go_repository(
name = "io_etcd_go_bbolt",
importpath = "go.etcd.io/bbolt",
sum = "h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=",
version = "v1.3.5",
)
go_repository(
name = "io_etcd_go_etcd",
importpath = "go.etcd.io/etcd",
sum = "h1:Gqga3zA9tdAcfqobUGjSoCob5L3f8Dt5EuOp3ihNZko=",
version = "v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5",
)
go_repository(
name = "io_k8s_api",
importpath = "k8s.io/api",
sum = "h1:QemsSLlTqf1zsGMvtEuJ6C0SQxtkGLYro6Zwo6Gy+po=",
version = "v0.19.13",
)
go_repository(
name = "io_k8s_apiextensions_apiserver",
importpath = "k8s.io/apiextensions-apiserver",
sum = "h1:uc8Hh/JgXI3QndiofOicTcBO9LqfA4l9OwYNUlh4yTY=",
version = "v0.19.13",
)
go_repository(
name = "io_k8s_apimachinery",
importpath = "k8s.io/apimachinery",
sum = "h1:7XK494E/orAwJAhk4wvnHl42eMa/jQddM7TjFO7zQaI=",
version = "v0.19.13",
)
go_repository(
name = "io_k8s_apiserver",
importpath = "k8s.io/apiserver",
sum = "h1:xT2n9FKVOVWyPbGy3t+BjUNdTw05Qz621IRHeSjKtYc=",
version = "v0.19.13",
)
go_repository(
name = "io_k8s_autoscaler",
importpath = "k8s.io/autoscaler",
| |
y, tol=2, rel=2))
self.assertTrue(approx_equal(inf, inf, tol=2, rel=2))
self.assertTrue(approx_equal(-inf, -inf, tol=2, rel=2))
class ApproxFloatTest(unittest.TestCase):
    """Tests for the approx_equal function with float arguments."""

    def testExactlyEqual(self):
        # With both tolerances zero, only exact equality should pass.
        for value in (-23.0, 0.0, 1.3e-15, 3.37, 1.7e9, 4.7e15):
            self.assertTrue(
                approx_equal(value, value, tol=0, rel=0),
                'equality failure for x=%r' % value
            )
            self.assertFalse(
                approx_equal(value, value + 1, tol=0, rel=0),
                'inequality failure for x=%r' % value
            )

    def testAbsolute(self):
        # Approximate equality using an absolute error only (rel=0).
        self.assertTrue(approx_equal(4.57, 4.54, tol=0.5, rel=0))
        self.assertTrue(approx_equal(4.57, 4.52, tol=0.5, rel=0))
        self.assertTrue(approx_equal(2.3e12, 2.6e12, tol=0.4e12, rel=0))
        self.assertFalse(approx_equal(2.3e12, 2.6e12, tol=0.2e12, rel=0))
        self.assertTrue(approx_equal(1.01e-9, 1.03e-9, tol=0.05e-9, rel=0))
        self.assertTrue(approx_equal(273.5, 263.9, tol=9.7, rel=0))
        self.assertFalse(approx_equal(273.5, 263.9, tol=9.0, rel=0))

    def testRelative(self):
        # Approximate equality using a relative error only (tol=0).
        self.assertTrue(approx_equal(3.5, 4.1, tol=0, rel=0.147))
        self.assertFalse(approx_equal(3.5, 4.1, tol=0, rel=0.146))
        self.assertTrue(approx_equal(7.2e11, 6.9e11, tol=0, rel=0.042))
        self.assertFalse(approx_equal(7.2e11, 6.9e11, tol=0, rel=0.041))

    def testSpecials(self):
        # NANs are never approximately equal to anything; an infinity
        # only matches an infinity of the same sign.
        nan = float('nan')
        inf = float('inf')
        for other in (nan, inf, -inf, 1.1):
            self.assertFalse(approx_equal(nan, other, tol=2, rel=2))
        for other in (nan, -inf, 1.1):
            self.assertFalse(approx_equal(inf, other, tol=2, rel=2))
        for other in (nan, inf, 1.1):
            self.assertFalse(approx_equal(-inf, other, tol=2, rel=2))
        for other in (nan, inf, -inf):
            self.assertFalse(approx_equal(1.1, other, tol=2, rel=2))
        self.assertTrue(approx_equal(inf, inf, tol=2, rel=2))
        self.assertTrue(approx_equal(-inf, -inf, tol=2, rel=2))

    def testZeroes(self):
        # Signed zeroes compare equal even with zero tolerance.
        negative_zero = math.copysign(0.0, -1)
        self.assertTrue(approx_equal(negative_zero, 0.0, tol=1, rel=1))
        self.assertTrue(approx_equal(0.0, negative_zero, tol=0, rel=0))
class TestNumericTestCase(unittest.TestCase):
    """Tests for the error-message formatting of NumericTestCase.

    The formatting routine that generates the error messages is complex
    enough that it needs its own test. NOTE: avoid comparing against the
    exact error message, since that might change; look only for
    substrings that must be present.
    """

    def _assertFragments(self, msg):
        # Fragments common to both scalar and sequence failure messages.
        for fragment in ('actual value 2.5', 'expected 4.0', 'tol=0.5',
                         'rel=0.25', 'absolute error = 1.5',
                         'relative error = 0.375'):
            self.assertIn(fragment, msg)

    def test_error_msg(self):
        # Message generated for an inexact scalar comparison.
        msg = NumericTestCase._make_std_err_msg(2.5, 4.0, 0.5, 0.25, None)
        self._assertFragments(msg)

    def test_error_msg_sequence(self):
        # Message generated for an inexact sequence comparison.
        msg = NumericTestCase._make_std_err_msg(2.5, 4.0, 0.5, 0.25, 7)
        self.assertIn('differ at index 7', msg)
        self._assertFragments(msg)

    def testNumericTestCaseIsTestCase(self):
        # Ensure that NumericTestCase actually is a TestCase.
        self.assertTrue(issubclass(NumericTestCase, unittest.TestCase))
# === Utility functions ===
def comp_var(data, p):
    """So-called 'computational formula for variance'.

    FOR TESTING AND COMPARISON USE ONLY, DO NOT USE IN PRODUCTION.
    This formula is numerically unstable and can be extremely inaccurate,
    including returning negative results. Use this only for exact values
    (ints, Fractions) or small data sets with very little rounding error.

    Calculate the population variance σ2 = 1/n**2 * (n*Σ(x**2) - (Σx)**2)
    >>> comp_var([1, 1, 3, 7], 0)
    6.0

    Calculate the sample variance s2 = 1/(n*(n-1)) * (n*Σ(x**2) - (Σx)**2)
    >>> comp_var([1, 1, 3, 7], 1)
    8.0
    """
    n = len(data)
    sum_of_squares = sum(x*x for x in data)
    total = sum(data)
    return (n*sum_of_squares - total**2) / (n*(n - p))
class TestCompPVariance(unittest.TestCase):
    """Test the comp_var function (population variance, p=0).

    Note: any tests here should also be tested against the real variance
    function(s); there's no point in confirming that the computational
    formula doesn't give the right answer if we don't also test that we
    can get the right answer!
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Population variance.
        self.func = lambda values: comp_var(values, 0)
        self.data = [1, 2, 4, 5, 8]
        self.expected = 6.0

    def test_variance(self):
        # Exact data should give the exact variance.
        self.assertEqual(self.func(self.data), self.expected)

    def shifted_data(self):
        # Shift far from zero and repeat to magnify rounding error.
        return [value + 1e12 for value in self.data] * 100

    def test_shifted_variance(self):
        # We expect the computational formula to be numerically unstable;
        # if it isn't, we want to know about it!
        variance = self.func(self.shifted_data())
        self.assertTrue(variance < -1e-9)  # Impossible value!
class TestCompVariance(TestCompPVariance):
    """Same checks, but for the sample variance (p=1)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Sample variance.
        self.func = lambda values: comp_var(values, 1)
        self.expected = 7.5
# === Test metadata, exceptions and module globals ===
class MetadataTest(unittest.TestCase):
    """Check that the module exports the expected metadata names."""

    expected_metadata = [
        "__version__", "__date__", "__author__", "__author_email__",
        "__doc__", "__all__",
    ]
    module = calcstats

    def testCheckAll(self):
        # Check everything in __all__ exists and is public.
        for name in self.module.__all__:
            # No private names in __all__:
            self.assertFalse(name.startswith("_"),
                             'private name "%s" in __all__' % name)
            # And anything in __all__ must exist:
            self.assertTrue(hasattr(self.module, name),
                            'missing name "%s" in __all__' % name)

    def testMeta(self):
        # Test for the existence of every metadata attribute.
        for attribute in self.expected_metadata:
            self.assertTrue(hasattr(self.module, attribute),
                            "%s not present" % attribute)
class StatsErrorTest(unittest.TestCase):
    """StatsError must exist and subclass ValueError."""

    def testHasException(self):
        self.assertTrue(hasattr(calcstats, 'StatsError'))
        self.assertTrue(issubclass(calcstats.StatsError, ValueError))
# === Test the utility functions ===
class CoroutineTest(unittest.TestCase):
    """The coroutine decorator should prime the generator automatically."""

    def testDecorator(self):
        @calcstats.coroutine
        def co():
            received = (yield None)  # decorator advances past this yield
            ignored = (yield 42)
        consumer = co()
        # The first send() resumes at the first yield and produces 42.
        self.assertEqual(consumer.send(1), 42)
class AddPartialTest(unittest.TestCase):
    """Tests for the add_partial compensated-summation helper."""

    def testInplace(self):
        # add_partial mutates the list in place and returns None.
        partials = []
        outcome = calcstats.add_partial(partials, 1.5)
        self.assertEqual(partials, [1.5])
        self.assertTrue(outcome is None)

    def testAddInts(self):
        # Integer partials sum exactly and keep the int type.
        add = calcstats.add_partial
        partials = []
        add(partials, 1)
        add(partials, 2)
        self.assertEqual(sum(partials), 3)
        add(partials, 1000)
        total = sum(partials)
        self.assertEqual(total, 1003)
        self.assertTrue(isinstance(total, int))

    def testAddFloats(self):
        # Floats of wildly different magnitude must not lose precision.
        add = calcstats.add_partial
        partials = []
        add(partials, 1.5)
        add(partials, 2.5)
        self.assertEqual(sum(partials), 4.0)
        add(partials, 1e120)
        add(partials, 1e-120)
        add(partials, 0.5)
        self.assertEqual(sum(partials), 1e120)
        add(partials, -1e120)
        self.assertEqual(sum(partials), 4.5)
        add(partials, -4.5)
        self.assertEqual(sum(partials), 1e-120)

    def testAddFracs(self):
        # Fractions sum exactly and keep the Fraction type.
        add = calcstats.add_partial
        partials = []
        add(partials, Fraction(1, 4))
        add(partials, Fraction(2, 3))
        self.assertEqual(sum(partials), Fraction(11, 12))
        add(partials, Fraction(42, 23))
        total = sum(partials)
        self.assertEqual(total, Fraction(757, 276))
        self.assertTrue(isinstance(total, Fraction))

    def testAddDec(self):
        # Decimals of very different magnitude must not lose precision.
        add = calcstats.add_partial
        partials = []
        add(partials, Decimal('1.23456'))
        add(partials, Decimal('6.78901'))
        self.assertEqual(sum(partials), Decimal('8.02357'))
        add(partials, Decimal('1e200'))
        add(partials, Decimal('1e-200'))
        self.assertEqual(sum(partials), Decimal('1e200'))
        add(partials, Decimal('-1e200'))
        self.assertEqual(sum(partials), Decimal('8.02357'))
        add(partials, Decimal('-8.02357'))
        total = sum(partials)
        self.assertEqual(total, Decimal('1e-200'))
        self.assertTrue(isinstance(total, Decimal))

    def testAddFloatSubclass(self):
        # add_partial preserves float subclasses that define __add__.
        class MyFloat(float):
            def __add__(self, other):
                return MyFloat(super().__add__(other))
            __radd__ = __add__
        add = calcstats.add_partial
        partials = []
        add(partials, MyFloat(1.25))
        add(partials, MyFloat(1e-170))
        add(partials, MyFloat(1e200))
        self.assertEqual(sum(partials), 1e200)
        add(partials, MyFloat(5e199))
        add(partials, MyFloat(-1.0))
        add(partials, MyFloat(-2e200))
        add(partials, MyFloat(5e199))
        self.assertEqual(sum(partials), 0.25)
        add(partials, MyFloat(-0.25))
        total = sum(partials)
        self.assertEqual(total, 1e-170)
        self.assertTrue(isinstance(total, MyFloat))
# === Test sums ===
class TestConsumerMixin:
    """Mixin verifying that self.func builds a consumer coroutine."""

    def testIsConsumer(self):
        # A consumer must expose a send() method.
        consumer = self.func()
        self.assertTrue(hasattr(consumer, 'send'))
class RunningSumTest(unittest.TestCase, TestConsumerMixin):
    """Tests for the running_sum consumer coroutine."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.func = calcstats.running_sum

    def testSum(self):
        consumer = self.func()
        data = [3, 5, 0, -2, 0.5, 2.75]
        expected = [3, 8, 8, 6, 6.5, 9.25]
        assert len(data) == len(expected)
        for value, running in zip(data, expected):
            self.assertEqual(consumer.send(value), running)

    def testSumStart(self):
        # With a start value, each running total is offset by start.
        start = 3.5
        consumer = self.func(start)
        data = [2, 5.5, -4, 0, 0.25, 1.25]
        expected = [2, 7.5, 3.5, 3.5, 3.75, 5.0]
        assert len(data) == len(expected)
        for value, running in zip(data, expected):
            self.assertEqual(consumer.send(value), start + running)

    def testSumTortureTest(self):
        # Alternate tiny and huge values; the running sum must stay exact.
        consumer = self.func()
        for i in range(100):
            self.assertEqual(consumer.send(1), 2*i + 1)
            self.assertEqual(consumer.send(1e100), 1e100)
            self.assertEqual(consumer.send(1), 1e100)
            self.assertEqual(consumer.send(-1e100), 2*i + 2)

    def testFractions(self):
        F = Fraction
        data = [F(3, 5), 2, F(1, 4), F(1, 3), F(3, 2)]
        expected = [F(3, 5), F(13, 5), F(57, 20), F(191, 60), F(281, 60)]
        assert len(data) == len(expected)
        start = F(1, 2)
        consumer = self.func(start)
        for value, running in zip(data, expected):
            result = consumer.send(value)
            self.assertEqual(result, start + running)
            self.assertTrue(isinstance(result, Fraction))

    def testDecimals(self):
        D = Decimal
        data = [D('0.2'), 3, -D('1.3'), D('2.7'), D('3.2')]
        expected = [D('0.2'), D('3.2'), D('1.9'), D('4.6'), D('7.8')]
        assert len(data) == len(expected)
        start = D('1.555')
        consumer = self.func(start)
        for value, running in zip(data, expected):
            result = consumer.send(value)
            self.assertEqual(result, start + running)
            self.assertTrue(isinstance(result, Decimal))
class UnivariateMixin:
# Common tests for most univariate functions that take a data argument.
#
# This tests the behaviour of functions of the form func(data [,...])
# without checking the specific value returned. Testing that the return
# value is actually correct is not the responsibility of this class.
def testNoArgs(self):
# Expect no arguments to raise an exception.
self.assertRaises(TypeError, self.func)
def testEmptyData(self):
# Expect no data points to raise an exception.
for empty in ([], (), iter([])):
self.assertRaises(ValueError, self.func, empty)
def testSingleData(self):
# Pass if a single data point doesn't raise an exception.
for data in ([1], [3.3], [1e23]):
assert len(data) == 1
_ = self.func(data)
def testDoubleData(self):
# Pass if two data points doesn't raise an exception.
for data in ([1, 3], [3.3, 5.5], [1e23, 2e23]):
assert len(data) == 2
_ = self.func(data)
def testTripleData(self):
# Pass if three data points doesn't raise an exception.
for data in ([1, 3, 4], | |
<filename>lights/lights/lights.py
# Lights Controller and light patterns
# <NAME>, 22 October 2016
# Copyright Notice
import logging
import os
import random
import signal
import time
import RPi.GPIO as GPIO
import Adafruit_WS2801
import Adafruit_GPIO.SPI as SPI
from multiprocessing import Process
import config
# list of the known patterns
# Each name must match a module-level pattern function defined below,
# because LightsController.start_pattern() resolves them via globals().
patterns = [
    'chase_up',
    'chase_down',
    'fill_up',
    'fill_down',
    'fill_up_and_down',
    'fill_up_chase_up',
    'alternating',
    'random_sets',
    'random_on_off',
    'appear_from_back',
    'fade_in_out',
    'rainbow_colors',  # NOTE(review): no rainbow_colors function is visible in this file -- confirm it exists
    'rainbow_cycle',
]
# list of the lights (pixel indices 0..pixel_count-1) to use in patterns
pattern_lights = range(0, config.pixel_count)
# SPI bus and device number used to drive the WS2801 strip
SPI_PORT = 0
SPI_DEVICE = 0
class LightsController(object):
    """Namespace of static helpers for controlling the WS2801 lights.

    All state is class-level: a single pixel strip (``pixels``) and at
    most one background pattern process (``process``) at a time.
    """
    # the process for running pattern functions (None when nothing runs)
    process = None
    # the Adafruit WS2801 pixels object; populated by setup()
    pixels = None

    @staticmethod
    def setup():
        """Setup the interfaces: open SPI, then blank the strip."""
        LightsController.pixels = Adafruit_WS2801.WS2801Pixels(config.pixel_count, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE), gpio=GPIO)
        LightsController.pixels.clear()
        LightsController.pixels.show()

    @staticmethod
    def off(lights=None, stop_existing=True):
        """Turn off all lights in the given list of pixel indices.

        Default all.
        Set stop_existing to False to skip stopping the existing process.
        """
        if stop_existing:
            LightsController.stop_existing_process()
        if lights is None:
            LightsController.pixels.clear()
        else:
            for i in lights:
                LightsController.pixels.set_pixel_rgb(i, 0, 0, 0)
        LightsController.pixels.show()

    @staticmethod
    def on(lights=None, stop_existing=True):
        """Turn on all lights in the given list using the configured color.

        Default all.
        Set stop_existing to False to skip stopping the existing process.
        """
        if stop_existing:
            LightsController.stop_existing_process()
        if lights is None:
            LightsController.set_color()
        else:
            for i in lights:
                # some strips use R-B-G channel order; swap g/b for those
                if config.is_rbg:
                    LightsController.pixels.set_pixel_rgb(i, config.color['r'], config.color['b'], config.color['g'])
                else:
                    LightsController.pixels.set_pixel_rgb(i, config.color['r'], config.color['g'], config.color['b'])
        LightsController.pixels.show()

    @staticmethod
    def set_color(light=None, show=True):
        """Set a specific led or all leds to the current color.

        Set show to False to batch several updates and skip pixels.show().
        """
        if config.is_rbg:
            if light is None:
                LightsController.pixels.set_pixels_rgb(config.color['r'], config.color['b'], config.color['g'])
            else:
                LightsController.pixels.set_pixel_rgb(light, config.color['r'], config.color['b'], config.color['g'])
        else:
            if light is None:
                LightsController.pixels.set_pixels_rgb(config.color['r'], config.color['g'], config.color['b'])
            else:
                LightsController.pixels.set_pixel_rgb(light, config.color['r'], config.color['g'], config.color['b'])
        if show:
            LightsController.pixels.show()

    @staticmethod
    def get_random_color():
        """Gets a random color as a dict of r/g/b values in 0..255."""
        color = {
            'r': random.randint(0, 255),
            'g': random.randint(0, 255),
            'b': random.randint(0, 255)
        }
        return color

    @staticmethod
    def brightness_decrease(wait=0.01, step=1):
        """Decrease the brightness of every pixel by `step` per pass until black."""
        for j in range(int(256 // step)):
            for i in range(LightsController.pixels.count()):
                r, g, b = LightsController.pixels.get_pixel_rgb(i)
                r = int(max(0, r - step))
                g = int(max(0, g - step))
                b = int(max(0, b - step))
                # we don't need to check the is_rbg flag here because this decreases from the current values
                LightsController.pixels.set_pixel_rgb(i, r, g, b)
            LightsController.pixels.show()
            # if we have reached black, then we are done
            # (r/g/b still hold the last pixel's values here; assumes all
            # pixels decay together -- TODO confirm)
            if r == 0 and g == 0 and b == 0:
                break
            if wait > 0:
                time.sleep(wait)

    @staticmethod
    def brightness_increase(wait=0.01, step=1):
        """Increase the brightness each pass until the configured color is reached."""
        for j in range(int(256 // step)):
            for i in range(LightsController.pixels.count()):
                # each channel ramps with j but is clamped at its target value
                r = int(min(j, config.color['r']))
                g = int(min(j, config.color['g']))
                b = int(min(j, config.color['b']))
                if config.is_rbg:
                    LightsController.pixels.set_pixel_rgb(i, r, b, g)
                else:
                    LightsController.pixels.set_pixel_rgb(i, r, g, b)
            LightsController.pixels.show()
            # if we have reached the full color, then we are done
            if r == config.color['r'] and g == config.color['g'] and b == config.color['b']:
                break
            if wait > 0:
                time.sleep(wait)

    @staticmethod
    def wheel(pos):
        """The wheel function to interpolate between different hues.

        pos is expected in 0..255 and is mapped onto three 85-step
        segments of the color wheel.
        """
        if pos < 85:
            return Adafruit_WS2801.RGB_to_color(pos * 3, 255 - pos * 3, 0)
        elif pos < 170:
            pos -= 85
            return Adafruit_WS2801.RGB_to_color(255 - pos * 3, 0, pos * 3)
        else:
            pos -= 170
            return Adafruit_WS2801.RGB_to_color(0, pos * 3, 255 - pos * 3)

    @staticmethod
    def start_pattern(pattern='fill_up', delay=0.1, pause=0.5, rounds=0):
        """Start a running pattern. This is done by forking a new process to
        run the pattern so that it will run indefinitely.

        `pattern` must be the name of a module-level pattern function
        (it is resolved via globals()). There can only be one forked
        process at a time.
        """
        LightsController.stop_existing_process()
        LightsController.process = Process(target=globals()[pattern], args=(delay, pause, rounds))
        LightsController.process.start()

    @staticmethod
    def stop_existing_process():
        """Stop an existing pattern process if there is one.

        Running through uwsgi it seems that SIGKILL is necessary as
        .terminate() did not work. It seems the process ignores the SIGTERM signal.
        """
        if LightsController.process is not None and LightsController.process.is_alive():
            #LightsController.process.terminate()
            try:
                os.kill(LightsController.process.pid, signal.SIGKILL)
                # wait for the process in order to reap it (avoids having defunct zombie processes leftover)
                os.waitpid(LightsController.process.pid, 0)
            except OSError:
                # the process may already have exited; nothing to clean up
                pass
            except Exception as e:
                logging.exception('Error in stop_existing_process')
# Define all light patterns
# Each pattern function should take three parameters:
# delay - length of the delay used in the pattern
# pause - length of the pause used in the pattern (i.e. in between rounds)
# rounds - the number of rounds to do before finishing
#
# A pattern should run indefinitely if the number of rounds is 0.
def all_random(delay=0, pause=0.5, rounds=0):
    """Run randomly chosen patterns with random timings; rounds == 0 runs forever."""
    LightsController.off(stop_existing=False)
    remaining = rounds
    try:
        while True:
            # pick a pattern and randomised parameters for this round
            chosen = random.sample(patterns, 1)[0]
            chosen_delay = random.uniform(0.005, 0.05)
            chosen_pause = random.uniform(0.5, 5)
            chosen_rounds = random.randint(3, 6)
            logging.debug("Doing %s delay %s pause %s rounds %d", chosen, str(chosen_delay), str(chosen_pause), chosen_rounds)
            globals()[chosen](chosen_delay, chosen_pause, chosen_rounds)
            time.sleep(delay)
            if rounds > 0:
                remaining -= 1
                if remaining <= 0:
                    break
    except Exception:
        logging.exception('Error in all_random')
def chase_up(delay=0.1, pause=0.5, rounds=0):
    """Chase a single lit pixel from the bottom of the strip to the top."""
    LightsController.off(stop_existing=False)
    remaining = rounds
    while True:
        for pixel in pattern_lights:
            LightsController.on([pixel], stop_existing=False)
            time.sleep(delay)
            LightsController.off([pixel], stop_existing=False)
        time.sleep(pause)
        if rounds > 0:
            remaining -= 1
            if remaining <= 0:
                break
def chase_down(delay=0.1, pause=0.5, rounds=0):
    """Chase a single lit pixel from the top of the strip to the bottom."""
    LightsController.off(stop_existing=False)
    remaining = rounds
    while True:
        for pixel in reversed(pattern_lights):
            LightsController.on([pixel], stop_existing=False)
            time.sleep(delay)
            LightsController.off([pixel], stop_existing=False)
        time.sleep(pause)
        if rounds > 0:
            remaining -= 1
            if remaining <= 0:
                break
def fill_up(delay=0.1, pause=0.5, rounds=0):
    """Light the strip pixel by pixel from the bottom, then blank it."""
    LightsController.off(stop_existing=False)
    remaining = rounds
    while True:
        for pixel in pattern_lights:
            LightsController.on([pixel], stop_existing=False)
            time.sleep(delay)
        time.sleep(pause)
        LightsController.off(stop_existing=False)
        time.sleep(delay)
        if rounds > 0:
            remaining -= 1
            if remaining <= 0:
                break
def fill_down(delay=0.1, pause=0.5, rounds=0):
    """Light the strip pixel by pixel from the top, then blank it."""
    LightsController.off(stop_existing=False)
    remaining = rounds
    while True:
        for pixel in reversed(pattern_lights):
            LightsController.on([pixel], stop_existing=False)
            time.sleep(delay)
        time.sleep(pause)
        LightsController.off(stop_existing=False)
        time.sleep(delay)
        if rounds > 0:
            remaining -= 1
            if remaining <= 0:
                break
def fill_up_and_down(delay=0.1, pause=0.5, rounds=0):
    """Fill the strip upwards, pause, then empty it downwards."""
    LightsController.off(stop_existing=False)
    remaining = rounds
    while True:
        for pixel in pattern_lights:
            LightsController.on([pixel], stop_existing=False)
            time.sleep(delay)
        time.sleep(pause)
        for pixel in reversed(pattern_lights):
            LightsController.off([pixel], stop_existing=False)
            time.sleep(delay)
        time.sleep(pause)
        if rounds > 0:
            remaining -= 1
            if remaining <= 0:
                break
def fill_up_chase_up(delay=0.1, pause=0.5, rounds=0):
    """Fill the strip upwards, then switch it off upwards in the same order."""
    LightsController.off(stop_existing=False)
    remaining = rounds
    while True:
        for pixel in pattern_lights:
            LightsController.on([pixel], stop_existing=False)
            time.sleep(delay)
        for pixel in pattern_lights:
            LightsController.off([pixel], stop_existing=False)
            time.sleep(delay)
        time.sleep(pause)
        LightsController.off(stop_existing=False)
        time.sleep(delay)
        if rounds > 0:
            remaining -= 1
            if remaining <= 0:
                break
def alternating(delay=0.1, pause=0.5, rounds=0):
    """Flash the odd-indexed and even-indexed pixels alternately."""
    LightsController.off(stop_existing=False)
    remaining = rounds
    odd_pixels = [i for i in pattern_lights if i % 2]
    even_pixels = [i for i in pattern_lights if i % 2 == 0]
    while True:
        LightsController.off(even_pixels, stop_existing=False)
        time.sleep(delay)
        LightsController.on(odd_pixels, stop_existing=False)
        time.sleep(pause)
        LightsController.on(even_pixels, stop_existing=False)
        time.sleep(delay)
        LightsController.off(odd_pixels, stop_existing=False)
        time.sleep(pause)
        if rounds > 0:
            remaining -= 1
            if remaining <= 0:
                break
def random_sets(delay=0.1, pause=0.5, rounds=0):
    """Light a random half of the strip, hold for `pause`, then switch it off."""
    LightsController.off(stop_existing=False)
    remaining = rounds
    while True:
        # get a random half from the available lights
        chosen = random.sample(pattern_lights, config.pixel_count // 2)
        LightsController.on(chosen, stop_existing=False)
        time.sleep(pause)
        LightsController.off(chosen, stop_existing=False)
        if rounds > 0:
            remaining -= 1
            if remaining <= 0:
                break
def random_on_off(delay=0.1, pause=0.5, rounds=0):
    """Switch every pixel on, then off again, both in a random order."""
    LightsController.off(stop_existing=False)
    remaining = rounds
    shuffled = list(pattern_lights)
    while True:
        random.shuffle(shuffled)
        for pixel in shuffled:
            LightsController.on([pixel], stop_existing=False)
            time.sleep(delay)
        time.sleep(pause)
        random.shuffle(shuffled)
        for pixel in shuffled:
            LightsController.off([pixel], stop_existing=False)
            time.sleep(delay)
        time.sleep(pause)
        if rounds > 0:
            remaining -= 1
            if remaining <= 0:
                break
def appear_from_back(delay=0.1, pause=0.5, rounds=0):
    """Slide pixels in from the far end until the strip is filled."""
    LightsController.off(stop_existing=False)
    remaining = rounds
    while True:
        # in order to speed up this pattern it goes in "blocks" of 10
        for block_start in range(0, config.pixel_count, 10):
            for head in reversed(range(block_start, config.pixel_count)):
                LightsController.pixels.clear()
                # pixels already "parked" at the beginning of the strip
                for parked in range(block_start):
                    LightsController.set_color(parked, show=False)
                # the moving block: pixel `head` plus the 9 preceding pixels
                for moving in range(max(head - 9, 0), head + 1):
                    LightsController.set_color(moving, show=False)
                LightsController.pixels.show()
                time.sleep(delay)
        time.sleep(pause)
        if rounds > 0:
            remaining -= 1
            if remaining <= 0:
                break
def fade_in_out(delay=0.1, pause=0.5, rounds=0):
    """Pulse the whole strip by ramping brightness up and then back down.

    Args:
        delay: Seconds between brightness steps (passed as ``wait``).
        pause: Seconds to hold after each ramp.
        rounds: Number of pulses; 0 runs until stopped.
    """
    LightsController.off(stop_existing=False)
    remaining = rounds
    while True:
        LightsController.brightness_increase(wait=delay)
        time.sleep(pause)
        LightsController.brightness_decrease(wait=delay)
        time.sleep(pause)
        # rounds == 0 means endless; otherwise count down and stop at zero.
        if rounds > 0:
            remaining -= 1
            if remaining <= 0:
                break
def rainbow_cycle(delay=0.005, pause=0.5, rounds=0):
LightsController.off(stop_existing=False)
done | |
Why: #3599 in Alexa global
'http://www.autodesk.com/',
# Why: #3600 in Alexa global
'http://www.vertitechnologygroup.com/',
# Why: #3601 in Alexa global
'http://www.leaseweb.com/',
# Why: #3602 in Alexa global
'http://www.yoox.com/',
# Why: #3603 in Alexa global
'http://www.papajohns.com/',
# Why: #3604 in Alexa global
'http://www.unmillondeutilidades.com/',
# Why: #3605 in Alexa global
'http://www.webmasters.ru/',
# Why: #3606 in Alexa global
'http://www.seoclerks.com/',
# Why: #3607 in Alexa global
'http://www.yootheme.com/',
# Why: #3608 in Alexa global
'http://www.google.com.py/',
# Why: #3609 in Alexa global
'http://www.beemp3.com/',
# Why: #3610 in Alexa global
'http://www.yepme.com/',
# Why: #3611 in Alexa global
'http://www.alef.ir/',
# Why: #3613 in Alexa global
'http://www.gotowebinar.com/',
# Why: #3614 in Alexa global
'http://www.onec.dz/',
# Why: #3615 in Alexa global
'http://www.bonprix.de/',
# Why: #3616 in Alexa global
'http://www.landsend.com/',
# Why: #3617 in Alexa global
'http://www.libertatea.ro/',
# Why: #3618 in Alexa global
'http://www.timeout.com/',
# Why: #3619 in Alexa global
'http://www.appnexus.com/',
# Why: #3620 in Alexa global
'http://www.uproxx.com/',
# Why: #3622 in Alexa global
'http://www.alohatube.com/',
# Why: #3623 in Alexa global
'http://www.citilink.ru/',
# Why: #3624 in Alexa global
'http://www.askubuntu.com/',
# Why: #3625 in Alexa global
'http://www.freemake.com/',
# Why: #3626 in Alexa global
'http://www.rockettheme.com/',
# Why: #3627 in Alexa global
'http://www.tupaki.com/',
# Why: #3628 in Alexa global
'http://www.53.com/',
# Why: #3629 in Alexa global
'http://www.tune.pk/',
# Why: #3630 in Alexa global
'http://www.standardchartered.com/',
# Why: #3631 in Alexa global
'http://www.video-i365.com/',
# Why: #3632 in Alexa global
'http://www.knowyourmeme.com/',
# Why: #3633 in Alexa global
'http://www.gofeminin.de/',
# Why: #3634 in Alexa global
'http://www.vmware.com/',
# Why: #3635 in Alexa global
'http://www.vbox7.com/',
# Why: #3636 in Alexa global
'http://www.webfail.com/',
# Why: #3637 in Alexa global
'http://www.onewebsearch.com/',
# Why: #3638 in Alexa global
'http://www.xnxxmovies.com/',
# Why: #3639 in Alexa global
'http://www.blogspot.hk/',
# Why: #3640 in Alexa global
'http://www.hgtv.com/',
# Why: #3641 in Alexa global
'http://www.findagrave.com/',
# Why: #3642 in Alexa global
'http://www.yoast.com/',
# Why: #3643 in Alexa global
'http://www.audiopoisk.com/',
# Why: #3644 in Alexa global
'http://www.sexytube.me/',
# Why: #3645 in Alexa global
'http://www.centerblog.net/',
# Why: #3646 in Alexa global
'http://www.webpronews.com/',
# Why: #3647 in Alexa global
'http://www.prnewswire.com/',
# Why: #3648 in Alexa global
'http://www.vietnamnet.vn/',
# Why: #3649 in Alexa global
'http://www.groupon.co.in/',
# Why: #3650 in Alexa global
'http://www.bom.gov.au/',
# Why: #3651 in Alexa global
'http://www.loxblog.com/',
# Why: #3652 in Alexa global
'http://www.llnw.com/',
# Why: #3653 in Alexa global
'http://www.jcrew.com/',
# Why: #3654 in Alexa global
'http://www.carsensor.net/',
# Why: #3655 in Alexa global
'http://www.aukro.cz/',
# Why: #3656 in Alexa global
'http://www.zoomby.ru/',
# Why: #3657 in Alexa global
'http://www.wallstcheatsheet.com/',
# Why: #3658 in Alexa global
'http://www.17k.com/',
# Why: #3659 in Alexa global
'http://www.secondlife.com/',
# Why: #3660 in Alexa global
'http://www.marmiton.org/',
# Why: #3661 in Alexa global
'http://www.zorpia.com/',
# Why: #3662 in Alexa global
'http://www.searchya.com/',
# Why: #3663 in Alexa global
'http://www.rtl2.de/',
# Why: #3664 in Alexa global
'http://www.wiocha.pl/',
# Why: #3665 in Alexa global
'http://www.28tui.com/',
# Why: #3666 in Alexa global
'http://www.shopzilla.com/',
# Why: #3667 in Alexa global
'http://www.google.com.ni/',
# Why: #3668 in Alexa global
'http://www.lycos.com/',
# Why: #3669 in Alexa global
'http://www.gucheng.com/',
# Why: #3670 in Alexa global
'http://www.rajanews.com/',
# Why: #3671 in Alexa global
'http://www.blackhatteam.com/',
# Why: #3672 in Alexa global
'http://www.mp3.es/',
# Why: #3673 in Alexa global
'http://www.forums.wordpress.com/',
# Why: #3674 in Alexa global
'http://www.micromaxinfo.com/',
# Why: #3675 in Alexa global
'http://www.sub.jp/',
# Why: #3676 in Alexa global
'http://www.duden.de/',
# Why: #3677 in Alexa global
'http://www.nyc.gov/',
# Why: #3679 in Alexa global
'http://www.monova.org/',
# Why: #3680 in Alexa global
'http://www.al-wlid.com/',
# Why: #3681 in Alexa global
'http://www.dastelefonbuch.de/',
# Why: #3682 in Alexa global
'http://www.cam4ultimate.com/',
# Why: #3683 in Alexa global
'http://www.inps.it/',
# Why: #3684 in Alexa global
'http://www.nazwa.pl/',
# Why: #3685 in Alexa global
'http://www.beatport.com/',
# Why: #3686 in Alexa global
'http://www.wizzair.com/',
# Why: #3687 in Alexa global
'http://www.thomann.de/',
# Why: #3688 in Alexa global
'http://www.juntadeandalucia.es/',
# Why: #3689 in Alexa global
'http://www.oficialsurveyscenter.co/',
# Why: #3690 in Alexa global
'http://www.zaluu.com/',
# Why: #3691 in Alexa global
'http://www.videarn.com/',
# Why: #3692 in Alexa global
'http://www.azcentral.com/',
# Why: #3693 in Alexa global
'http://www.xvideosmovie.com/',
# Why: #3694 in Alexa global
'http://www.eforosh.com/',
# Why: #3696 in Alexa global
'http://www.movie25.com/',
# Why: #3697 in Alexa global
'http://www.creditkarma.com/',
# Why: #3698 in Alexa global
'http://upi.com/',
# Why: #3699 in Alexa global
'http://www.mozook.com/',
# Why: #3700 in Alexa global
'http://www.heavy.com/',
# Why: #3701 in Alexa global
'http://www.worldoftanks.com/',
# Why: #3702 in Alexa global
'http://www.vkrugudruzei.ru/',
# Why: #3704 in Alexa global
'http://www.hourlyrevshare.net/',
# Why: #3705 in Alexa global
'http://www.walkerplus.com/',
# Why: #3706 in Alexa global
'http://www.btyou.com/',
# Why: #3707 in Alexa global
'http://www.adzibiz.com/',
# Why: #3708 in Alexa global
'http://www.tryflirting.com/',
# Why: #3709 in Alexa global
'http://www.moi.gov.sa/',
# Why: #3710 in Alexa global
'http://www.cooltext.com/',
# Why: #3711 in Alexa global
'http://www.dawanda.com/',
# Why: #3712 in Alexa global
'http://www.travian.com.sa/',
# Why: #3713 in Alexa global
'http://www.va.gov/',
# Why: #3714 in Alexa global
'http://www.sunmaker.com/',
# Why: #3715 in Alexa global
'http://www.aaa.com/',
# Why: #3716 in Alexa global
'http://www.dinodirect.com/',
# Why: #3717 in Alexa global
'http://www.cima4u.com/',
# Why: #3718 in Alexa global
'http://www.huaban.com/',
# Why: #3719 in Alexa global
'http://www.nzherald.co.nz/',
# Why: #3720 in Alexa global
'http://www.plotek.pl/',
# Why: #3722 in Alexa global
'http://www.chow.com/',
# Why: #3723 in Alexa global
'http://www.rincondelvago.com/',
# Why: #3724 in Alexa global
'http://uzai.com/',
# Why: #3725 in Alexa global
'http://www.dbw.cn/',
# Why: #3727 in Alexa global
'http://www.stayfriends.de/',
# Why: #3728 in Alexa global
'http://www.reed.co.uk/',
# Why: #3729 in Alexa global
'http://www.rainpow.com/',
# Why: #3730 in Alexa global
'http://www.dallasnews.com/',
# Why: #3731 in Alexa global
'http://www.ntvspor.net/',
# Why: #3732 in Alexa global
'http://www.fonearena.com/',
# Why: #3733 in Alexa global
'http://www.forocoches.com/',
# Why: #3734 in Alexa global
'http://www.myfonts.com/',
# Why: #3735 in Alexa global
'http://www.fenopy.se/',
# Why: #3736 in Alexa global
'http://www.animefreak.tv/',
# Why: #3737 in Alexa global
'http://www.websitewelcome.com/',
# Why: #3738 in Alexa global
'http://www.indonetwork.co.id/',
# Why: #3739 in Alexa global
'http://www.mapsofindia.com/',
# Why: #3740 in Alexa global
'http://www.newlook.com/',
# Why: #3741 in Alexa global
'http://www.holiday-weather.com/',
# Why: #3742 in Alexa global
'http://zhe800.com/',
# Why: #3743 in Alexa global
'http://www.recipesfinder.com/',
# Why: #3744 in Alexa global
'http://www.bankrate.com.cn/',
# Why: #3745 in Alexa global
'http://www.bbom.com.br/',
# Why: #3746 in Alexa global
'http://www.dahe.cn/',
# Why: #3747 in Alexa global
'http://www.jalopnik.com/',
# Why: #3748 in Alexa global
'http://www.canon.com/',
# Why: #3750 in Alexa global
'http://www.freshbooks.com/',
# Why: #3751 in Alexa global
'http://www.clickcompare.info/',
# Why: #3752 in Alexa global
'http://www.aprod.hu/',
# Why: #3753 in Alexa global
'http://www.thisav.com/',
# Why: #3754 in Alexa global
'http://www.boerse.bz/',
# Why: #3755 in Alexa global
'http://www.orange.es/',
# Why: #3756 in Alexa global
'http://www.forobeta.com/',
# Why: #3757 in Alexa global
'http://www.surfactif.fr/',
# Why: #3758 in Alexa global
'http://www.listverse.com/',
# Why: #3759 in Alexa global
'http://www.feedjit.com/',
# Why: #3760 in Alexa global
'http://www.ntv.co.jp/',
# Why: #3761 in Alexa global
'http://www.bni.co.id/',
# Why: #3762 in Alexa global
'http://www.gamemazing.com/',
# Why: #3763 in Alexa global
'http://www.mbalib.com/',
# Why: #3764 in Alexa global
'http://www.topsy.com/',
# Why: #3765 in Alexa global
'http://www.torchbrowser.com/',
# Why: #3766 in Alexa global
'http://www.ieee.org/',
# Why: #3767 in Alexa global
'http://www.tinydeal.com/',
# Why: #3768 in Alexa global
'http://www.playdom.com/',
# Why: #3769 in Alexa global
'http://www.redorbit.com/',
# Why: #3770 in Alexa global
'http://www.inboxdollars.com/',
# Why: #3771 in Alexa global
'http://www.google.com.bh/',
# Why: #3772 in Alexa global
'http://www.pcanalysis.net/',
# Why: #3773 in Alexa global
'http://www.acer.com/',
# Why: #3774 in Alexa global
'http://www.jizzbell.com/',
# Why: #3775 in Alexa global
'http://www.google.com.kh/',
# Why: #3776 in Alexa global
'http://www.mappy.com/',
# Why: #3777 in Alexa global
'http://www.day.az/',
# Why: #3778 in Alexa global
'http://www.euronews.com/',
# Why: #3779 in Alexa global
'http://www.wikidot.com/',
# Why: #3780 in Alexa global
'http://www.creativecommons.org/',
# Why: #3781 in Alexa global
'http://www.quantcast.com/',
# Why: #3782 in Alexa global
'http://www.iconarchive.com/',
# Why: #3783 in Alexa global
'http://www.iyaya.com/',
# Why: #3784 in Alexa global
'http://www.jetstar.com/',
# Why: #3786 in Alexa global
'http://diandian.com/',
# Why: #3787 in Alexa global
'http://www.winzip.com/',
# Why: #3788 in Alexa global
'http://www.clixzor.com/',
# Why: #3789 in Alexa global
'http://www.teebik.com/',
# Why: #3790 in Alexa global
'http://meilele.com/',
| |
<reponame>gordon-quad/matrix-nio<filename>nio/store.py<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2018 Zil0
# Copyright © 2018, 2019 <NAME> <<EMAIL>>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import attr
import time
from builtins import bytes, super
from logbook import Logger
from typing import List, Optional, DefaultDict, Iterator, Dict
from datetime import datetime
from functools import wraps
from atomicwrites import atomic_write
from .exceptions import OlmTrustError
from .log import logger_group
from .crypto import (
OlmAccount,
Session,
InboundGroupSession,
OlmDevice,
SessionStore,
GroupSessionStore,
DeviceStore
)
from peewee import (
SqliteDatabase,
Model,
TextField,
BlobField,
BooleanField,
ForeignKeyField,
CompositeKey,
DoesNotExist
)
try:
    FileNotFoundError  # type: ignore
except NameError:  # pragma: no cover
    # Python 2 has no FileNotFoundError builtin; fall back to IOError there.
    FileNotFoundError = IOError
# Module-level logger, registered with the package-wide logger group.
logger = Logger("nio.cryptostore")
logger_group.add_logger(logger)
class Key(object):
    """A single device key record: user id, device id, and key material."""

    def __init__(self, user_id, device_id, key):
        # type: (str, str, str) -> None
        self.user_id = user_id
        self.device_id = device_id
        self.key = key

    @classmethod
    def from_line(cls, line):
        # type: (str) -> Optional[Key]
        """Parse one store-file line; return None for malformed/unknown lines."""
        fields = line.split(" ")
        if len(fields) < 4:
            return None
        user_id, device_id, key_type, key = (
            fields[0],
            fields[1],
            fields[2],
            fields[3],
        )
        if key_type != "matrix-ed25519":
            # Only ed25519 fingerprint keys are understood.
            return None
        return Ed25519Key(user_id.strip(), device_id.strip(), key.strip())

    def to_line(self):
        # type: () -> str
        """Serialize this key to its one-line file representation."""
        if isinstance(self, Ed25519Key):
            key_type = "matrix-ed25519"
        else:  # pragma: no cover
            raise NotImplementedError(
                "Invalid key type {}".format(type(self.key))
            )
        return "{} {} {} {}\n".format(
            self.user_id, self.device_id, key_type, str(self.key)
        )

    @classmethod
    def from_olmdevice(cls, device):
        # type: (OlmDevice) -> Ed25519Key
        """Build an Ed25519Key from an OlmDevice's identity key."""
        return Ed25519Key(device.user_id, device.id, device.ed25519)
class Ed25519Key(Key):
    """Key subclass marking matrix ed25519 fingerprint keys."""

    def __eq__(self, value):
        # type: (object) -> bool
        # Only comparable with other Ed25519Key instances.
        if not isinstance(value, Ed25519Key):
            return NotImplemented
        return (
            self.user_id == value.user_id
            and self.device_id == value.device_id
            and self.key == value.key
        )
class KeyStore(object):
    """File-backed store of trusted device keys.

    Keys are persisted one per line in a plain-text file; blank lines,
    comment lines and unparseable lines are skipped on load.
    """

    def __init__(self, filename):
        # type: (str) -> None
        self._entries = []  # type: List[Key]
        self._filename = filename  # type: str
        self._load(filename)

    def __iter__(self):
        # type: () -> Iterator[Key]
        for entry in self._entries:
            yield entry

    def __repr__(self):
        # type: () -> str
        return "KeyStore object, file: {}".format(self._filename)

    def _load(self, filename):
        # type: (str) -> None
        """Populate the store from *filename*; a missing file is not an error."""
        try:
            with open(filename, "r") as f:
                for line in f:
                    line = line.strip()
                    # Skip blanks and "#" comment lines.
                    if not line or line.startswith("#"):
                        continue
                    entry = Key.from_line(line)
                    if not entry:
                        continue
                    self._entries.append(entry)
        except FileNotFoundError:
            pass

    def get_key(self, user_id, device_id):
        # type: (str, str) -> Optional[Key]
        """Return the stored key for (user_id, device_id), or None."""
        for entry in self._entries:
            if user_id == entry.user_id and device_id == entry.device_id:
                return entry
        return None

    def _save_store(f):
        """Decorator: persist the store to disk after the wrapped mutator runs."""
        @wraps(f)
        def decorated(*args, **kwargs):
            self = args[0]
            ret = f(*args, **kwargs)
            self._save()
            return ret

        return decorated

    def _save(self):
        # type: () -> None
        # Atomic write so a crash can't leave a half-written trust store.
        with atomic_write(self._filename, overwrite=True) as f:
            for entry in self._entries:
                f.write(entry.to_line())

    @_save_store
    def add(self, key):
        # type: (Key) -> bool
        """Add *key* to the store.

        Raises:
            OlmTrustError: if a key of the same type already exists for this
                user/device but with a different fingerprint.
        """
        existing_key = self.get_key(key.user_id, key.device_id)
        if existing_key:
            if (
                existing_key.user_id == key.user_id
                and existing_key.device_id == key.device_id
                and type(existing_key) is type(key)
            ):
                if existing_key.key != key.key:
                    message = (
                        "Error: adding existing device to trust store "
                        "with mismatching fingerprint {} {}".format(
                            key.key, existing_key.key
                        )
                    )
                    logger.error(message)
                    raise OlmTrustError(message)
                # BUGFIX: the identical key is already stored; previously a
                # duplicate entry was appended here. Adding is now idempotent.
                return True
        self._entries.append(key)
        # No explicit _save() here: @_save_store persists after we return
        # (previously the store was written twice per add).
        return True

    @_save_store
    def remove(self, key):
        # type: (Key) -> bool
        """Remove *key*; return True if it was present."""
        if key in self._entries:
            self._entries.remove(key)
            # @_save_store persists after return (was saved twice before).
            return True
        return False

    def check(self, key):
        # type: (Key) -> bool
        """Return True if *key* is present in the store."""
        return key in self._entries
class ByteField(BlobField):
    """BlobField variant that normalizes values to ``bytes`` on both sides."""

    def python_value(self, value):  # pragma: no cover
        # Database -> Python: make sure callers always receive bytes.
        return value if isinstance(value, bytes) else bytes(value, "utf-8")

    def db_value(self, value):  # pragma: no cover
        # Python -> database: peewee may hand us a bytearray; store bytes.
        return bytes(value) if isinstance(value, bytearray) else value
# Please don't remove this.
# This is a workaround for this bug: https://bugs.python.org/issue27400
class DateField(TextField):
    """Datetime column stored as text (workaround for bpo-27400, see note above)."""

    def python_value(self, value):  # pragma: no cover
        fmt = "%Y-%m-%d %H:%M:%S.%f"
        try:
            return datetime.strptime(value, fmt)
        except TypeError:
            # strptime can raise TypeError here; rebuild via time.strptime.
            return datetime(*(time.strptime(value, fmt)[:6]))

    def db_value(self, value):  # pragma: no cover
        return value.strftime("%Y-%m-%d %H:%M:%S.%f")
class Accounts(Model):
    # Table "accounts": one row per local (user_id, device_id) pair;
    # `account` holds the pickled OlmAccount blob.
    account = ByteField()
    device_id = TextField(unique=True)
    shared = BooleanField()
    user_id = TextField(primary_key=True)
    class Meta:
        table_name = "accounts"
class DeviceKeys(Model):
    # Table "device_keys": identity keys of remote devices known to a local
    # account; composite-keyed on (device, user_device_id, user_id).
    curve_key = TextField()
    deleted = BooleanField()
    device = ForeignKeyField(
        column_name="device_id",
        field="device_id",
        model=Accounts,
        on_delete="CASCADE"
    )
    ed_key = TextField()
    user_device_id = TextField()
    user_id = TextField()
    class Meta:
        table_name = "device_keys"
        indexes = (
            (("device", "user_id", "user_device_id"), True),
        )
        primary_key = CompositeKey("device", "user_device_id", "user_id")
class MegolmInboundSessions(Model):
    # Table "megolm_inbound_sessions": pickled Megolm inbound group
    # sessions, keyed by session_id and tied to the owning local account.
    curve_key = TextField()
    device = ForeignKeyField(
        column_name="device_id",
        field="device_id",
        model=Accounts,
        on_delete="CASCADE"
    )
    ed_key = TextField()
    room_id = TextField()
    session = ByteField()
    session_id = TextField(primary_key=True)
    class Meta:
        table_name = "megolm_inbound_sessions"
class ForwardedChains(Model):
    # Curve25519 keys forming the forwarding chain of a Megolm inbound
    # session (reachable via the session's `forwarded_chains` backref).
    curve_key = TextField()
    session = ForeignKeyField(
        MegolmInboundSessions,
        backref="forwarded_chains",
        on_delete="CASCADE"
    )
class OlmSessions(Model):
    # Table "olm_sessions": pickled one-to-one Olm sessions per local
    # account, keyed by session_id.
    creation_time = DateField()
    curve_key = TextField()
    device = ForeignKeyField(
        column_name="device_id",
        field="device_id",
        model=Accounts,
        on_delete="CASCADE"
    )
    session = ByteField()
    session_id = TextField(primary_key=True)
    class Meta:
        table_name = "olm_sessions"
class OutgoingKeyRequests(Model):
    # Session ids per account — presumably Megolm sessions for which a key
    # request was sent (confirm against client usage).
    session_id = TextField()
    device = ForeignKeyField(
        Accounts,
        on_delete="CASCADE",
        backref="key_requests",
    )
class SyncTokens(Model):
    # One token per account (device is the primary key, so at most one
    # row per account) — presumably the Matrix sync token.
    token = TextField()
    device = ForeignKeyField(
        model=Accounts,
        primary_key=True,
        on_delete="CASCADE"
    )
class TrackedUsers(Model):
    # User ids tracked by an account — presumably for device-list tracking;
    # confirm against the caller.
    user_id = TextField()
    device = ForeignKeyField(
        Accounts,
        on_delete="CASCADE"
    )
def use_database(fn):
    """Decorator for MatrixStore methods: bind the store's model classes to
    its database for the duration of the wrapped call."""
    @wraps(fn)
    def wrapper(store, *args, **kwargs):
        ctx = store.database.bind_ctx(store.models)
        with ctx:
            return fn(store, *args, **kwargs)
    return wrapper
@attr.s
class MatrixStore(object):
"""Storage class for matrix state."""
models = [
Accounts,
OlmSessions,
MegolmInboundSessions,
ForwardedChains,
DeviceKeys,
]
user_id = attr.ib(type=str)
device_id = attr.ib(type=str)
store_path = attr.ib(type=str)
pickle_key = attr.ib(type=str, default="")
database_name = attr.ib(type=str, default="")
database_path = attr.ib(type=str, init=False)
database = attr.ib(type=SqliteDatabase, init=False)
    def __attrs_post_init__(self):
        """Open (creating tables if needed) the per-device SQLite database.

        The file name defaults to "<user_id>_<device_id>.db" inside
        ``store_path`` when ``database_name`` was not given.
        """
        self.database_name = self.database_name or "{}_{}.db".format(
            self.user_id,
            self.device_id
        )
        self.database_path = os.path.join(self.store_path, self.database_name)
        self.database = SqliteDatabase(
            self.database_path,
            pragmas={
                # Enforce FK constraints and overwrite deleted pages.
                "foreign_keys": 1,
                "secure_delete": 1,
            }
        )
        # Models are bound per-call (see use_database); bind here once to
        # connect and create the schema.
        with self.database.bind_ctx(self.models):
            self.database.connect()
            self.database.create_tables(self.models)
    @use_database
    def load_account(self):
        # type: () -> Optional[OlmAccount]
        """Load the Olm account from the database.
        Returns:
            ``OlmAccount`` object, or ``None`` if it wasn't found for the
            current device_id.
        """
        try:
            account = Accounts.get(
                Accounts.user_id == self.user_id,
                Accounts.device_id == self.device_id
            )
        except DoesNotExist:
            return None
        # Unpickle with our key, restoring the account's shared flag.
        return OlmAccount.from_pickle(
            account.account,
            self.pickle_key,
            account.shared
        )
    @use_database
    def save_account(self, account):
        """Save the provided Olm account to the database.
        Args:
            account (OlmAccount): The olm account that will be pickled and
                saved in the database.
        """
        # Upsert in two steps: insert (ignored when the row already
        # exists), then unconditionally update the pickle and shared flag.
        Accounts.insert(
            user_id=self.user_id,
            device_id=self.device_id,
            shared=account.shared,
            account=account.pickle(self.pickle_key)
        ).on_conflict_ignore().execute()
        Accounts.update(
            {
                Accounts.account: account.pickle(self.pickle_key),
                Accounts.shared: account.shared
            }
        ).where(
            (Accounts.user_id == self.user_id)
            & (Accounts.device_id == self.device_id)
        ).execute()
    @use_database
    def load_sessions(self):
        # type: () -> SessionStore
        """Load all Olm sessions from the database.
        Returns:
            ``SessionStore`` object, containing all the loaded sessions.
        """
        session_store = SessionStore()
        # Only sessions belonging to this device's account.
        sessions = OlmSessions.select().join(Accounts).where(
            Accounts.device_id == self.device_id
        )
        for s in sessions:
            session = Session.from_pickle(
                s.session,
                s.creation_time,
                self.pickle_key
            )
            # Sessions are stored under the peer's curve25519 key.
            session_store.add(s.curve_key, session)
        return session_store
    @use_database
    def save_session(self, curve_key, session):
        """Save the provided Olm session to the database.
        Args:
            curve_key (str): The curve key that owns the Olm session.
            session (Session): The Olm session that will be pickled and
                saved in the database.
        """
        # replace() is an insert-or-replace keyed on the session_id
        # primary key, so re-saving an existing session updates it.
        OlmSessions.replace(
            device=self.device_id,
            curve_key=curve_key,
            session=session.pickle(self.pickle_key),
            session_id=session.id,
            creation_time=session.creation_time
        ).execute()
    @use_database
    def load_inbound_group_sessions(self):
        # type: () -> GroupSessionStore
        """Load all Megolm inbound group sessions from the database.
        Returns:
            ``GroupSessionStore`` object, containing all the loaded sessions.
        """
        store = GroupSessionStore()
        # Only sessions belonging to this device's account.
        sessions = MegolmInboundSessions.select().join(Accounts).where(
            Accounts.device_id == self.device_id
        )
        for s in sessions:
            session = InboundGroupSession.from_pickle(
                s.session,
                s.ed_key,
                s.curve_key,
                s.room_id,
                self.pickle_key,
                # Restore the session's key-forwarding chain as well.
                [chain.curve_key for chain in s.forwarded_chains]
            )
            store.add(session)
        return store
    @use_database
    def save_inbound_group_session(self, session):
        """Save the provided Megolm inbound group session to the database.
        Args:
            session (InboundGroupSession): The session to save.
        """
        # Upsert in two steps: insert (ignored when the session_id row
        # already exists), then update the pickled session blob.
        MegolmInboundSessions.insert(
            curve_key=session.sender_key,
            device=self.device_id,
            ed_key=session.ed25519,
            room_id=session.room_id,
            session=session.pickle(self.pickle_key),
            session_id=session.id
        ).on_conflict_ignore().execute()
        MegolmInboundSessions.update(
            {
                MegolmInboundSessions.session: session.pickle(self.pickle_key)
            }
        ).where(
            MegolmInboundSessions.session_id == session.id
        ).execute()
        # TODO, use replace many here
        for chain in session.forwarding_chain:
            ForwardedChains.replace(
                curve_key=chain,
                session=session.id
            ).execute()
@use_database
def load_device_keys(self):
# type: () -> DeviceStore
store = DeviceStore()
device_keys = DeviceKeys.select().join(Accounts).where(
Accounts.device_id == self.device_id
)
for d in device_keys:
store.add(OlmDevice(
d.user_id,
d.user_device_id,
d.ed_key,
d.curve_key,
d.deleted,
))
return store
@use_database
def save_device_keys(self, device_keys):
"""Save the provided device keys to the database.
Args:
device_keys (Dict[str, Dict[str, OlmDevice]]): A dictionary
containing a mapping from an user id to a dictionary containing
a mapping of a device id to a OlmDevice.
"""
rows = []
for user_id, devices_dict in device_keys.items():
for device_id, device in devices_dict.items():
rows.append(
{
"curve_key": device.curve25519,
"deleted": device.deleted,
"device": self.device_id,
"ed_key": device.ed25519,
"user_device_id": device_id,
"user_id": user_id,
}
)
if not rows:
return
# TODO this needs to be batched
DeviceKeys.replace_many(rows).execute()
| |
to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
#### Calculate the neutral spectrum
# phi for the equilibrium ancestral population
phiN = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phiN = dadi.Integration.one_pop(phiN, xx, Tp, nu=nuA)
# Now do the divergence event
phiN = dadi.PhiManip.phi_1D_to_2D(xx, phiN)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to zero
phiN = dadi.Integration.two_pops(phiN, xx, Ts, nu1, nu2, m12=0, m21=0)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to m12 and m21
phiN = dadi.Integration.two_pops(phiN, xx, Tsc, nu1, nu2, m12=m12, m21=m21)
###
## calculate the spectrum.
fsN = dadi.Spectrum.from_phi(phiN, (n1,n2), (xx,xx))
#### Calculate the genomic island spectrum
# phi for the equilibrium ancestral population
phiI = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phiI = dadi.Integration.one_pop(phiI, xx, Tp, nu=nuA)
# Now do the divergence event
phiI = dadi.PhiManip.phi_1D_to_2D(xx, phiI)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to zero
phiI = dadi.Integration.two_pops(phiI, xx, Ts, nu1, nu2, m12=0, m21=0)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to me12 and me21
phiI = dadi.Integration.two_pops(phiI, xx, Tsc, nu1, nu2, m12=me12, m21=me21)
###
## calculate the spectrum.
fsI = dadi.Spectrum.from_phi(phiI, (n1,n2), (xx,xx))
#### Calculate the pectrum in normally-recombining regions
# phi for the equilibrium ancestral population
phinr = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phinr = dadi.Integration.one_pop(phinr, xx, Tp, nu=nuA)
# Now do the divergence event
phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
# We set the population sizes after the split and isolation to nu1 and nu2 and set the migration rates to zero
phinr = dadi.Integration.two_pops(phinr, xx, Ts, nu1, nu2, m12=0, m21=0)
# We keep the population sizes after the split and isolation to nu1 and nu2 and set the migration rates to zero
phinr = dadi.Integration.two_pops(phinr, xx, Tsc, nu1, nu2, m12=m12, m21=m21)
###
## calculate the spectrum.
fsnr = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
#### Spectrum of low-recombining regions
# phi for the equilibrium ancestral population
philr = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
philr = dadi.Integration.one_pop(philr, xx, Tp, nu=nuA)
# Now do the divergence event
philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
# We set the population sizes after the split and isolation to nu1 and nu2 and set the migration rates to zero
philr = dadi.Integration.two_pops(philr, xx, Ts, nu1*hrf, nu2*hrf, m12=0, m21=0)
# We keep the population sizes after the split and isolation to nu1 and nu2 and set the migration rate to m12 and m21
philr = dadi.Integration.two_pops(philr, xx, Tsc, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
###
## calculate the spectrum.
fslr = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))
### Sum the spectra
fs = (Q*fslr+(1-Q)*fsnr+P*fsN+(1-P)*fsI)
return fs
# NOTE: Python 2 only — "(n1,n2)" in the signature is tuple parameter
# unpacking, which was removed in Python 3 (PEP 3113).
def SCA2NG(params, (n1,n2), pts):
    nuA, nu1, nu2, b1, b2, hrf, m12, m21, Tp, Ts, Tsc, Q = params
    # NOTE: the triple-quoted block below is not a real docstring (it follows
    # the unpacking statement); it documents the parameters in dadi style.
    """
    Model of semi permeability with split, complete isolation, followed by secondary contact with 2 migration rates
    nu1: Size of population 1 after split.
    nu2: Size of population 2 after split.
    b1: Population growth coefficient of population 1
    b2: Population growth coefficient of population 2
    hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to the effects of background selection and selective sweep effects
    m12: Migration from pop 2 to pop 1 (2*Na*m12).
    m21: Migration from pop 1 to pop 2.
    Ts: The scaled time between the split and the secondary contact (in units of 2*Na generations).
    Tsc: The scale time between the secondary contact and present.
    Q: The proportion of the genome with a reduced effective size due to selection at linked sites
    n1,n2: Size of fs to generate.
    pts: Number of points to use in grid for evaluation.
    """
    # Define the grid we'll use
    xx = dadi.Numerics.default_grid(pts)
    #### Calculate the spectrum in normally-recombining regions
    # phi for the equilibrium ancestral population
    phinr = dadi.PhiManip.phi_1D(xx)
    # Now do the population growth event.
    phinr = dadi.Integration.one_pop(phinr, xx, Tp, nu=nuA)
    # Now do the divergence event
    phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
    # We set the population sizes after the split to nu1 and nu2 and set the migration rates to zero
    phinr = dadi.Integration.two_pops(phinr, xx, Ts, nu1, nu2, m12=0, m21=0)
    # Population sizes then change independently between pops (exponential-style
    # growth by factors b1/b2 over Tsc) during secondary contact with migration.
    bnu1_func = lambda t: nu1 * b1**(t/Tsc)
    bnu2_func = lambda t: nu2 * b2**(t/Tsc)
    phinr = dadi.Integration.two_pops(phinr, xx, Tsc, bnu1_func, bnu2_func, m12=m12, m21=m21)
    ###
    ## calculate the spectrum.
    fsnr = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
    #### Spectrum of low-recombining regions
    # phi for the equilibrium ancestral population
    philr = dadi.PhiManip.phi_1D(xx)
    # Now do the population growth event.
    philr = dadi.Integration.one_pop(philr, xx, Tp, nu=nuA)
    # Now do the divergence event
    philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
    # We set the population sizes after the split to nu1*hrf and nu2*hrf and set the migration rates to zero
    philr = dadi.Integration.two_pops(philr, xx, Ts, nu1*hrf, nu2*hrf, m12=0, m21=0)
    # Same independent size change, scaled by the Hill-Robertson factor, with migration m12 & m21
    bnu1hrf_func = lambda t: (nu1 * b1**(t/Tsc)) * hrf
    bnu2hrf_func = lambda t: (nu2 * b2**(t/Tsc)) * hrf
    philr = dadi.Integration.two_pops(philr, xx, Tsc, bnu1hrf_func, bnu2hrf_func, m12=m12, m21=m21)
    ###
    ## calculate the spectrum.
    fslr = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))
    ### Sum the spectra, weighting by the linked-selection fraction Q
    fs = ((1-Q)*fsnr+Q*fslr)
    return fs
def SCA2mG(params, (n1,n2), pts):
nuA, nu1, nu2, b1, b2, m12, m21, me12, me21, Tp, Ts, Tsc, P = params
"""
Model of semi permeability with split, complete isolation, followed by secondary contact with 2 migration rates and exponential growth
nu1: Size of pop 1 after split.
nu2: Size of pop 2 after split.
b1: Population growth coefficient of population 1
b2: Population growth coefficient of population 2
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
me12: Effective migration from pop 2 to pop 1 in genomic islands.
me21: Effective migration from pop 1 to pop 2 in genomic islands.
Ts: The scaled time between the split and the secondary contact (in units of 2*Na generations).
Tsc: The scale time between the secondary contact and present.
P: The porportion of the genome evolving neutrally.
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
### Calculate the neutral spectrum
# phi for the equilibrium ancestral population
phiN = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phiN = dadi.Integration.one_pop(phiN, xx, Tp, nu=nuA)
# Now do the divergence event
phiN = dadi.PhiManip.phi_1D_to_2D(xx, phiN)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to zero
phiN = dadi.Integration.two_pops(phiN, xx, Ts, nu1, nu2, m12=0, m21=0)
# We start the population size change after the split and set the migration rates to m12 and m21
bnu1_func = lambda t: nu1 * b1**(t/Ts)
bnu2_func = lambda t: nu2 * b2**(t/Ts)
phiN = dadi.Integration.two_pops(phiN, xx, Ts, bnu1_func, bnu2_func, m12=m12, m21=m21)
## calculate the spectrum.
fsN = dadi.Spectrum.from_phi(phiN, (n1,n2), (xx,xx))
### Calculate the genomic island spectrum
# phi for the equilibrium ancestral population
phiI = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phiI = dadi.Integration.one_pop(phiI, xx, Tp, nu=nuA)
# Now do the divergence event
phiI = dadi.PhiManip.phi_1D_to_2D(xx, phiI)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to zero
phiI = dadi.Integration.two_pops(phiI, xx, Ts, nu1, nu2, m12=0, m21=0)
# We start the population size change after the split and set the migration rates to | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2017-12
from typing import List
from PyQt5 import QtGui
import bitcoin
import functools
import re
from PyQt5.QtCore import pyqtSlot, QAbstractTableModel, QVariant, Qt, QPoint
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QDialog, QMenu, QApplication, QLineEdit, QShortcut, QMessageBox
from terracoin_utils import bip32_path_string_to_n, pubkey_to_address
from hw_common import HardwareWalletCancelException
from ui import ui_initialize_hw_dlg
from doc_dlg import show_doc_dlg
from hw_intf import *
from mnemonic import Mnemonic
ACTION_RECOVER_FROM_WORDS_CONV = 1
ACTION_RECOVER_FROM_WORDS_SAFE = 2
ACTION_RECOVER_FROM_ENTROPY = 3
ACTION_INITIALIZE_NEW_SAFE = 4
ACTION_WIPE_DEVICE = 5
STEP_SELECT_DEVICE_TYPE = 0
STEP_SELECT_DEVICE_INSTANCE = 1
STEP_SELECT_ACTION = 2
STEP_INPUT_NUMBER_OF_WORDS = 3
STEP_INPUT_ENTROPY = 4
STEP_INPUT_WORDS = 5
STEP_INPUT_HW_OPTIONS = 6
STEP_FINISHED = 7
class HwInitializeDlg(QDialog, ui_initialize_hw_dlg.Ui_HwInitializeDlg, WndUtils):
def __init__(self, parent) -> None:
QDialog.__init__(self, parent)
ui_initialize_hw_dlg.Ui_HwInitializeDlg.__init__(self)
WndUtils.__init__(self, parent.config)
self.main_ui = parent
self.current_step = STEP_SELECT_DEVICE_TYPE
self.action_type: Optional[int] = None # numeric value represting the action type from the first step
self.word_count: int = 24
self.mnemonic_words: List[str] = [""] * 24
self.entropy: str = '' # current entropy (entered by the user or converted from mnemonic words)
self.mnemonic = Mnemonic('english')
self.grid_model = MnemonicModel(self, self.mnemonic_words, self.mnemonic.wordlist)
self.address_preview_model = PreviewAddressesModel(self)
self.hw_options_details_visible = False
self.step_history: List[int] = []
self.hw_type: Optional[HWType] = None # HWType
self.hw_device_id_selected = Optional[str] # device id of the hw client selected
self.hw_device_instances: List[List[str]] = [] # list of 2-element list: hw desc, ref to trezor/keepey/ledger client object
self.act_paste_words = None
self.hw_action_mnemonic_words: Optional[str] = None
self.hw_action_use_pin: Optional[bool] = None
self.hw_action_pin: Optional[str] = None
self.hw_action_use_passphrase: Optional[bool] = None
self.hw_action_passphrase: Optional[str] = None # only for Ledger
self.hw_action_secondary_pin: Optional[str] = None # only for Ledger
self.hw_action_label: Optional[str] = None
self.setupUi()
    def setupUi(self):
        """Build the dialog UI and wire up all signal/slot connections."""
        ui_initialize_hw_dlg.Ui_HwInitializeDlg.setupUi(self, self)
        self.setWindowTitle("Hardware wallet initialization/recovery")
        # compact row height for the mnemonic word grid
        self.viewMnemonic.verticalHeader().setDefaultSectionSize(
            self.viewMnemonic.verticalHeader().fontMetrics().height() + 6)
        self.set_word_count(self.word_count)
        # device-type radio buttons
        self.rbDeviceTrezor.toggled.connect(self.on_device_type_changed)
        self.rbDeviceKeepkey.toggled.connect(self.on_device_type_changed)
        self.rbDeviceLedger.toggled.connect(self.on_device_type_changed)
        # action-type radio buttons
        self.rbActRecoverWordsSafe.toggled.connect(self.on_rbActionType_changed)
        self.rbActRecoverMnemonicWords.toggled.connect(self.on_rbActionType_changed)
        self.rbActRecoverHexEntropy.toggled.connect(self.on_rbActionType_changed)
        self.rbActInitializeNewSeed.toggled.connect(self.on_rbActionType_changed)
        self.rbActWipeDevice.toggled.connect(self.on_rbActionType_changed)
        # NOTE(review): QAbstractButton.toggled passes its 'checked' bool to the
        # slot, so these call set_word_count(24, checked) etc.; set_word_count
        # presumably tolerates the extra positional argument - confirm.
        self.rbWordsCount24.toggled.connect(functools.partial(self.set_word_count, 24))
        self.rbWordsCount18.toggled.connect(functools.partial(self.set_word_count, 18))
        self.rbWordsCount12.toggled.connect(functools.partial(self.set_word_count, 12))
        self.chbHwOptionsUsePIN.toggled.connect(self.update_current_tab)
        self.chbHwOptionsUsePassphrase.toggled.connect(self.update_current_tab)
        # "reveal secret" buttons: show the field text while pressed, re-mask on release
        self.btnShowPIN.setText("\u29BF")
        self.btnShowPassphrase.setText("\u29BF")
        self.btnShowSecondaryPIN.setText("\u29BF")
        self.btnShowPIN.pressed.connect(functools.partial(self.edtHwOptionsPIN.setEchoMode, QLineEdit.Normal))
        self.btnShowPIN.released.connect(functools.partial(self.edtHwOptionsPIN.setEchoMode, QLineEdit.Password))
        self.btnShowPassphrase.pressed.connect(
            functools.partial(self.edtHwOptionsLedgerPassphrase.setEchoMode, QLineEdit.Normal))
        self.btnShowPassphrase.released.connect(
            functools.partial(self.edtHwOptionsLedgerPassphrase.setEchoMode, QLineEdit.Password))
        self.btnShowSecondaryPIN.pressed.connect(
            functools.partial(self.edtHwOptionsLedgerSecondaryPIN.setEchoMode, QLineEdit.Normal))
        self.btnShowSecondaryPIN.released.connect(
            functools.partial(self.edtHwOptionsLedgerSecondaryPIN.setEchoMode, QLineEdit.Password))
        self.tabSteps.setCurrentIndex(0)
        self.btnBack.setEnabled(False)
        # address preview table
        self.viewAddresses.setModel(self.address_preview_model)
        self.viewAddresses.setColumnWidth(0, 150)
        self.viewAddresses.verticalHeader().setDefaultSectionSize(
            self.viewAddresses.verticalHeader().fontMetrics().height() + 6)
        # words grid context menu
        self.popMenuWords = QMenu(self)
        # copy action
        self.actCopyWords = self.popMenuWords.addAction("\u274f Copy all words")
        self.actCopyWords.triggered.connect(self.on_actCopyWords_triggered)
        self.actCopyWords.setShortcut(QKeySequence("Ctrl+C"))  # menu shortcut doesn't fire on Mac; set here
        # only so the shortcut text is displayed in the menu item
        QShortcut(QKeySequence("Ctrl+C"), self.viewMnemonic).activated.connect(self.on_actCopyWords_triggered)
        # paste action
        self.act_paste_words = self.popMenuWords.addAction("\u23ce Paste")
        self.act_paste_words.triggered.connect(self.on_actPasteWords_triggered)
        self.act_paste_words.setShortcut(QKeySequence("Ctrl+V"))
        QShortcut(QKeySequence("Ctrl+V"), self.viewMnemonic).activated.connect(self.on_actPasteWords_triggered)
        self.fraDetails.setVisible(False)  # "details" panel starts collapsed
        self.resize(self.size().width(), 350)
        self.apply_current_step_to_ui()
        self.update_current_tab()
def read_action_type_from_ui(self):
if self.rbActRecoverWordsSafe.isChecked():
self.action_type = ACTION_RECOVER_FROM_WORDS_SAFE # recover safe (onlline)
elif self.rbActRecoverMnemonicWords.isChecked():
self.action_type = ACTION_RECOVER_FROM_WORDS_CONV # recover convenient (safe only when offline)
elif self.rbActRecoverHexEntropy.isChecked():
self.action_type = ACTION_RECOVER_FROM_ENTROPY
elif self.rbActInitializeNewSeed.isChecked():
self.action_type = ACTION_INITIALIZE_NEW_SAFE
elif self.rbActWipeDevice.isChecked():
self.action_type = ACTION_WIPE_DEVICE
else:
raise Exception('Invalid action')
def apply_current_step_to_ui(self):
if self.current_step == STEP_SELECT_DEVICE_TYPE:
idx = 0
elif self.current_step == STEP_SELECT_DEVICE_INSTANCE:
idx = 1
elif self.current_step == STEP_SELECT_ACTION:
idx = 2
elif self.current_step == STEP_INPUT_NUMBER_OF_WORDS:
idx = 3
elif self.current_step == STEP_INPUT_ENTROPY:
idx = 4
elif self.current_step == STEP_INPUT_WORDS:
idx = 5
elif self.current_step == STEP_INPUT_HW_OPTIONS:
idx = 6
elif self.current_step == STEP_FINISHED:
idx = 7
else:
raise Exception('Invalid step.')
self.tabSteps.setCurrentIndex(idx)
def set_next_step(self, step):
if step != self.current_step:
self.step_history.append(self.current_step)
self.current_step = step
self.apply_current_step_to_ui()
if self.current_step == STEP_FINISHED:
self.btnNext.setText('Close')
def apply_step_select_device_type(self) -> bool:
"""Moves forward from the 'device type selection' step."""
success = True
if not self.hw_type:
self.errorMsg('Select your hardware wallet type.')
success = False
else:
self.set_next_step(STEP_SELECT_ACTION)
return success
    def apply_step_select_action(self) -> bool:
        """Moves forward from the 'select action' step.

        Reads the chosen action from the radio buttons and routes the wizard
        to the matching next step.  The 'wipe device' action is executed
        immediately (after user confirmation) rather than via further steps.
        :return: True when the wizard may advance, False otherwise.
        """
        success = True
        self.read_action_type_from_ui()
        if self.action_type in (ACTION_RECOVER_FROM_WORDS_CONV, ACTION_RECOVER_FROM_WORDS_SAFE,
                                ACTION_INITIALIZE_NEW_SAFE):
            self.set_next_step(STEP_INPUT_NUMBER_OF_WORDS)
        elif self.action_type == ACTION_RECOVER_FROM_ENTROPY:
            self.set_next_step(STEP_INPUT_ENTROPY)
        elif self.action_type == ACTION_WIPE_DEVICE:
            # wiping is only implemented for Trezor/Keepkey here
            # NOTE(review): when hw_type is not Trezor/Keepkey this branch
            # falls through with success still True and no step change -
            # confirm the UI cannot offer 'wipe' for other device types.
            if self.hw_type in (HWType.trezor, HWType.keepkey):
                if self.queryDlg('Do you really want to wipe your %s device?' % self.hw_type,
                                 buttons=QMessageBox.Yes | QMessageBox.Cancel,
                                 default_button=QMessageBox.Cancel, icon=QMessageBox.Warning) == QMessageBox.Yes:
                    try:
                        self.load_hw_devices()
                        cnt = len(self.hw_device_instances)
                        if cnt == 0:
                            self.errorMsg('Couldn\'t find any %s devices connected to your computer.' %
                                          HWType.get_desc(self.hw_type))
                            success = False
                        elif cnt == 1:
                            # there is only one instance of this device type
                            self.hw_device_id_selected = self.hw_device_instances[0][1]
                            success = self.apply_action_on_hardware_wallet()
                            if success:
                                self.set_next_step(STEP_FINISHED)
                        else:
                            # there is more than one instance of this device type; go to the device instance selection tab
                            self.set_next_step(STEP_SELECT_DEVICE_INSTANCE)
                            success = True
                    except HardwareWalletCancelException:
                        # the user aborted the operation on the device itself
                        self.warnMsg('Operation cancelled.')
                        success = False
                else:
                    # the user declined the confirmation dialog
                    success = False
        else:
            raise Exception('Not implemented')
        return success
def apply_step_select_number_of_words(self) -> bool:
"""Moves forward from the 'select number of words' step."""
if self.action_type == ACTION_RECOVER_FROM_WORDS_CONV:
self.set_next_step(STEP_INPUT_WORDS)
elif self.action_type == ACTION_RECOVER_FROM_WORDS_SAFE:
self.set_next_step(STEP_INPUT_HW_OPTIONS)
elif self.action_type == ACTION_INITIALIZE_NEW_SAFE:
self.set_next_step(STEP_INPUT_HW_OPTIONS)
else:
raise Exception('Invalid action type.')
return True
def apply_step_input_entropy(self) -> bool:
"""Moves forward from the 'input entropy' step."""
success = True
ent_str = self.edtHexEntropy.text()
try:
entropy = bytes.fromhex(ent_str)
if len(entropy) not in (32, 24, 16):
self.warnMsg('The entropy hex-string can only have 16, 24 or 32 bytes.')
success = False
else:
self.entropy = entropy
words = self.entropy_to_mnemonic(entropy)
self.set_words(words)
self.set_word_count(len(words))
self.set_next_step(STEP_INPUT_WORDS)
except Exception as e:
self.warnMsg(str(e))
success = False
return success
    def apply_step_input_words(self) -> bool:
        """Moves forward from the 'input words' step.

        For the 'convenient recovery' action every entered word is validated
        against the BIP-39 word list and the whole set is verified by its
        checksum; the resulting entropy is stored in ``self.entropy``.
        :return: True when the word list is complete and valid.
        """
        success = True
        if self.action_type == ACTION_RECOVER_FROM_WORDS_CONV:
            # verify all the seed words entered by the user
            wl = self.mnemonic.wordlist
            invalid_indexes = []
            suppress_error_message = False
            for idx, word in enumerate(self.get_cur_mnemonic_words()):
                if not word:
                    self.errorMsg('Cannot continue - not all words are entered.')
                    success = False
                    suppress_error_message = True
                    break
                if word not in wl:
                    success = False
                    invalid_indexes.append(idx)
            if not success:
                # report which words were rejected (1-based positions), unless
                # the 'missing words' message was already shown above
                if not suppress_error_message:
                    self.errorMsg('Cannot continue - invalid word(s): %s.' %
                                  ','.join(['#' + str(x + 1) for x in invalid_indexes]))
            else:
                # verify the whole word-set entered by the user (checksum)
                try:
                    ws = self.get_cur_mnemonic_words()
                    self.entropy = self.mnemonic.to_entropy(ws)
                except Exception as e:
                    success = False
                    # Mnemonic.to_entropy signals a bad checksum with this exact message
                    if str(e) == 'Failed checksum.':
                        self.errorMsg('Invalid checksum of the provided words. You\'ve probably mistyped some'
                                      ' words or changed their order.')
                    else:
                        self.errorMsg('There was an error in the provided word-list. Error details: ' + str(e))
        elif self.action_type == ACTION_RECOVER_FROM_ENTROPY:
            # entropy was already captured in the previous step - nothing to verify
            pass
        else:
            raise Exception('Invalid action type.')
        if success:
            self.set_next_step(STEP_INPUT_HW_OPTIONS)
        return success
    def apply_step_input_hw_options(self) -> bool:
        """Moves forward from the 'input hardware wallet options' step.

        Validates the PIN / passphrase / label options, then enumerates the
        connected devices: with exactly one instance the action is executed
        immediately, with several the wizard goes to the instance-selection
        step.
        :return: True when validation passed and the wizard advanced.
        """
        self.hw_action_use_pin = self.chbHwOptionsUsePIN.isChecked()
        self.hw_action_use_passphrase = self.chbHwOptionsUsePassphrase.isChecked()
        self.hw_action_label = self.edtHwOptionsDeviceLabel.text()
        success = True
        if not self.hw_action_label:
            # fall back to a default label, e.g. 'My Trezor'
            self.hw_action_label = 'My %s' % HWType.get_desc(self.hw_type)
        if self.action_type in (ACTION_RECOVER_FROM_WORDS_CONV, ACTION_RECOVER_FROM_ENTROPY):
            # the mnemonic will be sent to the device as one space-separated string
            self.hw_action_mnemonic_words = ' '.join(self.get_cur_mnemonic_words())
            self.hw_action_pin = ''
            self.hw_action_secondary_pin = ''
            self.hw_action_passphrase = ''
            if self.hw_action_use_pin:
                self.hw_action_pin = self.edtHwOptionsPIN.text()
                if len(self.hw_action_pin) not in (4, 8):
                    self.errorMsg('Invalid PIN length. It can only have 4 or 8 characters.')
                    success = False
                else:
                    # Ledger PINs may contain any digit; Trezor/Keepkey only 1-9
                    # (presumably because their PIN matrix has no 0 - confirm)
                    if self.hw_type == HWType.ledger_nano_s:
                        if not re.match("^[0-9]+$", self.hw_action_pin):
                            self.errorMsg('Invalid PIN. Allowed characters: 0-9.')
                            success = False
                    else:
                        if not re.match("^[1-9]+$", self.hw_action_pin):
                            self.errorMsg('Invalid PIN. Allowed characters: 1-9.')
                            success = False
                if not success:
                    self.edtHwOptionsPIN.setFocus()
                else:
                    # Ledger stores the passphrase on-device, protected by a
                    # secondary PIN, so both must be validated together
                    if self.hw_type == HWType.ledger_nano_s:
                        if self.hw_action_use_passphrase:
                            self.hw_action_passphrase = self.edtHwOptionsLedgerPassphrase.text()
                            if not self.hw_action_passphrase:
                                self.errorMsg('For Ledger Nano S you need to provide your passphrase - it will be '
                                              'stored in the device and secured by secondary PIN.')
                                self.edtHwOptionsLedgerPassphrase.setFocus()
                                success = False
                            else:
                                # validate secondary PIN
                                self.hw_action_secondary_pin = self.edtHwOptionsLedgerSecondaryPIN.text()
                                if not self.hw_action_secondary_pin:
                                    self.errorMsg('Secondary PIN is required if you want to save passphrase '
                                                  'in your Ledger Nano S.')
                                    self.edtHwOptionsLedgerSecondaryPIN.setFocus()
                                    success = False
                                else:
                                    if len(self.hw_action_secondary_pin) not in (4, 8):
                                        self.errorMsg('Invalid secondary PIN length. '
                                                      'It can only have 4 or 8 characters.')
                                        success = False
                                    elif not re.match("^[0-9]+$", self.hw_action_secondary_pin):
                                        self.errorMsg('Invalid secondary PIN. Allowed characters: 0-9.')
                                        success = False
                                    if not success:
                                        self.edtHwOptionsLedgerSecondaryPIN.setFocus()
        elif self.action_type in (ACTION_RECOVER_FROM_WORDS_SAFE, ACTION_INITIALIZE_NEW_SAFE):
            # nothing to validate: words/PIN are entered on the device itself
            pass
        else:
            raise Exception('Invalid action.')
        if success:
            # try to load devices
            self.load_hw_devices()
            cnt = len(self.hw_device_instances)
            if cnt == 0:
                # NOTE(review): unlike the wipe path, success is NOT set to
                # False here, so the method reports success even though no
                # device was found - probably a bug; left unchanged.
                self.errorMsg('Couldn\'t find any %s devices connected to your computer.' %
                              HWType.get_desc(self.hw_type))
            elif cnt == 1:
                # there is only one instance of this device type
                self.hw_device_id_selected = self.hw_device_instances[0][1]
                success = self.apply_action_on_hardware_wallet()
                if success:
                    self.set_next_step(STEP_FINISHED)
            else:
                # there is more than one instance of this device type; go to the device instance selection tab
                self.set_next_step(STEP_SELECT_DEVICE_INSTANCE)
                success = True
        return success
def apply_step_select_device_id(self) -> bool:
"""Moves forward from the 'select device instance' step."""
idx = self.cboDeviceInstance.currentIndex()
if idx >= 0 and idx < len(self.hw_device_instances):
self.hw_device_id_selected = self.hw_device_instances[idx][1]
success = self.apply_action_on_hardware_wallet()
if success:
self.set_next_step(STEP_FINISHED)
else:
self.errorMsg('No %s device instances.' % HWType.get_desc(self.hw_type))
success = False
return success
def apply_action_on_hardware_wallet(self) -> bool:
"""Executes command on hardware wallet device related to the selected actions."""
if self.action_type in (ACTION_RECOVER_FROM_WORDS_CONV, ACTION_RECOVER_FROM_ENTROPY):
device_id, cancelled = load_device_by_mnemonic(
self.hw_type, self.hw_device_id_selected, self.hw_action_mnemonic_words, self.hw_action_pin,
self.hw_action_use_passphrase, self.hw_action_label,
self.hw_action_passphrase, self.hw_action_secondary_pin, | |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Copyright (c) 2020 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE.chromium file.
import sys
import struct
from enum import Enum, IntEnum
from typing import TypeVar, Generic, NewType, Callable, Iterable, Any, Tuple
import const
const.kPayloadUnit = 64
const.kCapacityReadOnly = sys.maxsize
# NOTE(review): in Chromium kuint32max is 2**32 - 1, whereas sys.maxsize is
# 2**63 - 1 on 64-bit builds - confirm this value is never range-checked.
const.kuint32max = sys.maxsize
#------------------------------------------------------------------------------
# https://docs.python.org/dev/library/stdtypes.html#memoryview
class SizeOf(IntEnum):
    """Byte widths of the primitive values serialized in session files."""
    BOOL = 1
    INT = 4
    LONG = 4
    UINT8 = 1
    UINT16 = 2
    UINT32 = 4
    INT32 = 4
    INT64 = 8
    UINT64 = 8
    FLOAT = 4
    DOUBLE = 8
    HEADER = 4
    FILEHEADER = 8
    SIZE_TYPE = 2
    ID_TYPE = 1
# Identifies the type of session service this is. This is used by the
# backend to determine the name of the files.
class SessionType(IntEnum):
    """Which session service produced the file (affects on-disk file names)."""
    SESSION_RESTORE = 0
    TAB_RESTORE = 1
# https://chromium.googlesource.com/external/WebKit/Source/Platform/chromium/public/+/ad66491450101178db06dc094cb1836fb3d80825/WebReferrerPolicy.h
class WebKitWebReferrerPolicy(IntEnum):
    """Referrer policy values as defined by WebKit's WebReferrerPolicy.h."""
    WebReferrerPolicyAlways = 0
    WebReferrerPolicyDefault = 1
    WebReferrerPolicyNever = 2
    WebReferrerPolicyOrigin = 3
# Types of transitions between pages. These are stored in the history
# database to separate visits, and are reported by the renderer for page
# navigations.
#
# WARNING: don't change these numbers. They are written directly into the
# history database, so future versions will need the same values to match
# the enums.
#
# A type is made of a core value and a set of qualifiers. A type has one
# core value and 0 or more qualifiers.
class PageTransition(IntEnum):
    """Core page-transition types plus qualifier bit flags.

    The numeric values are written directly into the history database and
    must therefore never change.  A stored value is a core value OR-ed with
    zero or more qualifiers; split it with PAGE_TRANSITION_CORE_MASK /
    PAGE_TRANSITION_QUALIFIER_MASK.
    """
    # User got to this page by clicking a link on another page.
    PAGE_TRANSITION_LINK = 0
    # User got this page by typing the URL in the URL bar. This should not be
    # used for cases where the user selected a choice that didn't look at all
    # like a URL; see GENERATED below.
    #
    # We also use this for other "explicit" navigation actions.
    PAGE_TRANSITION_TYPED = 1
    # User got to this page through a suggestion in the UI, for example,
    # through the destinations page.
    PAGE_TRANSITION_AUTO_BOOKMARK = 2
    # This is a subframe navigation. This is any content that is automatically
    # loaded in a non-toplevel frame. For example, if a page consists of
    # several frames containing ads, those ad URLs will have this transition
    # type. The user may not even realize the content in these pages is a
    # separate frame, so may not care about the URL (see MANUAL below).
    PAGE_TRANSITION_AUTO_SUBFRAME = 3
    # For subframe navigations that are explicitly requested by the user and
    # generate new navigation entries in the back/forward list. These are
    # probably more important than frames that were automatically loaded in
    # the background because the user probably cares about the fact that this
    # link was loaded.
    PAGE_TRANSITION_MANUAL_SUBFRAME = 4
    # User got to this page by typing in the URL bar and selecting an entry
    # that did not look like a URL. For example, a match might have the URL
    # of a Google search result page, but appear like "Search Google for ...".
    # These are not quite the same as TYPED navigations because the user
    # didn't type or see the destination URL.
    # See also KEYWORD.
    PAGE_TRANSITION_GENERATED = 5
    # This is a toplevel navigation. This is any content that is automatically
    # loaded in a toplevel frame. For example, opening a tab to show the ASH
    # screen saver, opening the devtools window, opening the NTP after the safe
    # browsing warning, opening web-based dialog boxes are examples of
    # AUTO_TOPLEVEL navigations.
    PAGE_TRANSITION_AUTO_TOPLEVEL = 6
    # The user filled out values in a form and submitted it. NOTE that in
    # some situations submitting a form does not result in this transition
    # type. This can happen if the form uses script to submit the contents.
    PAGE_TRANSITION_FORM_SUBMIT = 7
    # The user "reloaded" the page, either by hitting the reload button or by
    # hitting enter in the address bar. NOTE: This is distinct from the
    # concept of whether a particular load uses "reload semantics" (i.e.
    # bypasses cached data). For this reason, lots of code needs to pass
    # around the concept of whether a load should be treated as a "reload"
    # separately from their tracking of this transition type, which is mainly
    # used for proper scoring for consumers who care about how frequently a
    # user typed/visited a particular URL.
    #
    # SessionRestore and undo tab close use this transition type too.
    PAGE_TRANSITION_RELOAD = 8
    # The url was generated from a replaceable keyword other than the default
    # search provider. If the user types a keyword (which also applies to
    # tab-to-search) in the omnibox this qualifier is applied to the transition
    # type of the generated url. TemplateURLModel then may generate an
    # additional visit with a transition type of KEYWORD_GENERATED against the
    # url 'http://' + keyword. For example, if you do a tab-to-search against
    # wikipedia the generated url has a transition qualifier of KEYWORD, and
    # TemplateURLModel generates a visit for 'wikipedia.org' with a transition
    # type of KEYWORD_GENERATED.
    PAGE_TRANSITION_KEYWORD = 9
    # Corresponds to a visit generated for a keyword. See description of
    # KEYWORD for more details.
    PAGE_TRANSITION_KEYWORD_GENERATED = 10
    # ADDING NEW CORE VALUE? Be sure to update the LAST_CORE and CORE_MASK
    # values below. Also update CoreTransitionString().
    # (enum alias of KEYWORD_GENERATED, not a distinct member)
    PAGE_TRANSITION_LAST_CORE = PAGE_TRANSITION_KEYWORD_GENERATED
    PAGE_TRANSITION_CORE_MASK = 0xFF
    # Qualifiers
    # Any of the core values above can be augmented by one or more qualifiers.
    # These qualifiers further define the transition.
    # User used the Forward or Back button to navigate among browsing history.
    PAGE_TRANSITION_FORWARD_BACK = 0x01000000
    # User used the address bar to trigger this navigation.
    PAGE_TRANSITION_FROM_ADDRESS_BAR = 0x02000000
    # User is navigating to the home page.
    PAGE_TRANSITION_HOME_PAGE = 0x04000000
    # The beginning of a navigation chain.
    PAGE_TRANSITION_CHAIN_START = 0x10000000
    # The last transition in a redirect chain.
    PAGE_TRANSITION_CHAIN_END = 0x20000000
    # Redirects caused by JavaScript or a meta refresh tag on the page.
    PAGE_TRANSITION_CLIENT_REDIRECT = 0x40000000
    # Redirects sent from the server by HTTP headers. It might be nice to
    # break this out into 2 types in the future, permanent or temporary, if we
    # can get that information from WebKit.
    PAGE_TRANSITION_SERVER_REDIRECT = 0x80000000
    # Used to test whether a transition involves a redirect.
    PAGE_TRANSITION_IS_REDIRECT_MASK = 0xC0000000
    # General mask defining the bits used for the qualifiers.
    PAGE_TRANSITION_QUALIFIER_MASK = 0xFFFFFF00
# Fixed-width integer aliases mirroring the C++ types of the on-disk format.
# These are annotation-only: Python ints remain unbounded at runtime.
int16 = NewType('int16', int)
uint16 = NewType('uint16', int)
int32 = NewType('int32', int)
uint32 = NewType('uint32', int)
int64 = NewType('int64', int)
uint64 = NewType('uint64', int)
# These get written to disk, so we define types for them.
# Type for the identifier.
id_type = NewType('id_type', int)
# Type for writing the size.
size_type = NewType('size_type', int)
# File version number.
const.kFileCurrentVersion = 1
# The signature at the beginning of the file = SSNS (Sessions).
const.kFileSignature = 0x53534E53
const.kFileReadBufferSize = 1024
# chromium/chrome/browser/sessions/session_service.cc
# # Identifier for commands written to file.
# const.kCommandSetTabWindow = 0
# # OBSOLETE Superseded by kCommandSetWindowBounds3.
# # const.kCommandSetWindowBounds = 1
# const.kCommandSetTabIndexInWindow = 2
# # Original kCommandTabClosed/kCommandWindowClosed. See comment in
# # MigrateClosedPayload for details on why they were replaced.
# const.kCommandTabClosedObsolete = 3
# const.kCommandWindowClosedObsolete = 4
# const.kCommandTabNavigationPathPrunedFromBack = 5
# const.kCommandUpdateTabNavigation = 6
# const.kCommandSetSelectedNavigationIndex = 7
# const.kCommandSetSelectedTabInIndex = 8
# const.kCommandSetWindowType = 9
# # OBSOLETE Superseded by kCommandSetWindowBounds3. Except for data migration.
# # const.kCommandSetWindowBounds2 = 10;
# const.kCommandTabNavigationPathPrunedFromFront = 11
# const.kCommandSetPinnedState = 12
# const.kCommandSetExtensionAppID = 13
# const.kCommandSetWindowBounds3 = 14
# const.kCommandSetWindowAppName = 15
# const.kCommandTabClosed = 16
# const.kCommandWindowClosed = 17
# const.kCommandSetTabUserAgentOverride = 18
# const.kCommandSessionStorageAssociated = 19
# Tab Navigation
const.TabNavigation_kMaxEntries = 25
# chromium/chrome/browser/sessions/tab_restore_service.cc
# Identifier for commands written to file.
# The ordering in the file is as follows:
# . When the user closes a tab a command of type
# kCommandSelectedNavigationInTab is written identifying the tab and
# the selected index, then a kCommandPinnedState command if the tab was
# pinned and kCommandSetExtensionAppID if the tab has an app id and
# the user agent override if it was using one. This is
# followed by any number of kCommandUpdateTabNavigation commands (1 | |
and ceiling lines
# The reason it is done this way is that it allows me to track physical
# location of each pixel (which is needed for zBuffer
# and correct overlapping of objects
floorLine = getLinePixels((x1, y1), (x2, y2))
ceilingLine = getLinePixels((x4, y4), (x3, y3))
newW = max(abs(x2 - x1) + 1, len(floorLine))
newH = abs(y4 - y1) + 1
for i in range(min(len(floorLine), len(ceilingLine))):
newH = max(newH, len(getLinePixels(ceilingLine[i], floorLine[i])))
# Wall Image is resized to the number of pixel in those lines
imres = fgim.resize((newW, newH), Image.LANCZOS)
# Checks if the wall faces away (from isomeric view)
# set isTransparent if it is
isTransparent = False
if x2 <= x1 and y2 >= y1 \
or hCoefY != 0 and x2 < x1 and (x1 - x2) / (y1 - y2) > hCoefX / hCoefY \
or hCoefY != 0 and y2 > y1 and (x2 - x1) / (y2 - y1) < hCoefX / hCoefY:
isTransparent = True
# for midtextures - only show one side
# back if viewd from the back
if wall.position == "mid" and wall.isBack:
return
else:
# and front - if viewed from the front
if wall.position == "mid" and not wall.isBack:
return
# for walls made from back SideDefs, it is the other way round
if wall.isBack:
isTransparent = not isTransparent
    # For mid-textures: they are never viewed from the back
if wall.position == "mid":
isTransparent = False
# Here the actual copying of pixel begins
px = imres.load()
for i in range(min(len(floorLine), len(ceilingLine))):
line = getLinePixels(ceilingLine[i], floorLine[i])
for j in range(len(line)):
# Check if we are within the image
            # Now obsolete, as we now have margins calculated
            # including all possible heights and lows
if line[j][0] < 0 or line[j][1] < 0\
or line[j][0] >= zBuffer.shape[0]\
or line[j][1] >= zBuffer.shape[1]:
continue
# get the value from the zBuffer
# actually only y matters in this implementation
lastZ = zBuffer[line[j][0], line[j][1]]
height = int((j / newH) * fgim.size[1] + wall.floor)
x = int((i / len(floorLine)) * (wall.ex - wall.sx) +
wall.sx + offsetX)
y = int((i / len(floorLine)) * (wall.ey - wall.sy) +
wall.sy + offsetY)
# if y is bigger (closer to viewer): draw the pixel
if lastZ is None or y > lastZ:
                # use the transparency from the image
opacity = px[i, j][3]
# if it is transparent - skip it
if opacity == 0:
continue
# or 80 for facing away walls
if isTransparent:
opacity = 80
mixedpix = []
for k in range(3):
mixedpix.append((bgpx[line[j]][k] * (255 - opacity) +
px[i, j][k] * opacity) // 255)
bgpx[line[j]] = tuple(mixedpix)
# Keep tracking latest value in zBuffer
if opacity >= 80:
zBuffer[line[j][0], line[j][1]] = y
    # Probably redundant; done in an attempt to save memory
fgim.close()
imres.close()
def makeTransparentSprite(sprite, px2, x, y, colorConversion):
    '''Build the "fuzzy" sprite used for Spectres.

    Instead of drawing the monster's own pixels, every opaque pixel of the
    sprite is replaced by a nearby background pixel picked with a small
    vertical jitter, reproducing Doom's "fuzz" effect.
    sprite: the regular (Pinky) sprite used as an opacity mask
    px2: pixel-access object of the background image
    x, y: bottom-centre anchor of the sprite on the image
    colorConversion: accepted for signature compatibility; unused here
    Returns a new RGBA image of the same size as *sprite*.
    '''
    # Jitter offsets taken from the actual Doom source code.
    fuzz = [1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1,
            -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1,
            -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1]
    width, height = sprite.size
    # blank canvas of the same size as the source sprite
    spectre = Image.new("RGBA", (width, height), (0, 0, 0, 0))
    out = spectre.load()
    mask = sprite.load()
    fuzz_pos = 0  # index into the fuzz table; advances once per copied pixel
    for col in range(width):
        for row in range(height):
            # only fully opaque mask pixels take part in the effect
            if mask[col, row][3] != 255:
                continue
            src_x = x - width // 2 + col
            src_y = y - height + row + fuzz[fuzz_pos]
            out[col, row] = px2[src_x, src_y]
            fuzz_pos = (fuzz_pos + 1) % len(fuzz)
    # The original game applied colormap #6, but that was barely visible in
    # dark areas, so a mild gamma lift is used instead.
    gammaCorrection(spectre, 1.3)
    return spectre
def pasteThing(px2, x, y, atHeight, light, thing, sprites, zBuffer,
               offsetX, offsetY, colorConversion):
    ''' Draw a thing (monster/object sprite) onto the final image.

    px2: pixel-access object of the output image
    x, y: bottom-centre anchor of the sprite on the image
    atHeight: physical height of the thing (kept for interface
        compatibility; the depth test below only uses physical y)
    light: sector light level used to pick the shading level
    thing: map thing; provides .sprite, .mirrored, .type and .y
    sprites: mapping of sprite name -> PIL image
    zBuffer: per-pixel depth buffer storing the physical y of the last
        pixel drawn (None where nothing was drawn yet)
    offsetX, offsetY: map-to-image coordinate offsets
    colorConversion: palette conversion table passed through to helpers
    '''
    if thing.sprite not in sprites:
        return
    sprite = sprites[thing.sprite].copy()
    # Mirror if needed
    if thing.mirrored:
        sprite = sprite.transpose(Image.FLIP_LEFT_RIGHT)
    if thing.type == 58:
        # This is a Spectre: make a special sprite from distorted background
        sprite = makeTransparentSprite(sprite, px2, x, y, colorConversion)
    else:
        # not a Spectre: shade the sprite according to the sector light
        lightLevel = 31 - light // 8
        sprite = lightImage(sprite, lightLevel, colorConversion)
    spx = sprite.load()
    # Physical y of the thing is loop-invariant: hoisted out of the pixel
    # loop (the original also computed unused 'height'/'physX' per pixel).
    physY = thing.y + offsetY
    # Copy every non-transparent sprite pixel, honouring the z-buffer
    for i in range(sprite.size[0]):
        for j in range(sprite.size[1]):
            if spx[i, j][3] == 0:
                continue  # fully transparent pixel
            # position on the output image (sprite anchored bottom-centre)
            picX = x - sprite.size[0] // 2 + i
            picY = y - sprite.size[1] + j
            # Check if the sprite is still within the picture
            if picX < 0 or picX >= zBuffer.shape[0] or \
                    picY < 0 or picY >= zBuffer.shape[1]:
                continue
            # if it is closer to the viewer (larger y) than what was drawn
            # there before - draw it and update the z-buffer
            lastZ = zBuffer[picX, picY]
            if lastZ is None or physY > lastZ:
                px2[picX, picY] = spx[i, j]
                zBuffer[picX, picY] = physY
    sprite.close()
def drawMap(vertexes, linedefs, sidedefs, sectors, flats, walls,
textures, thingsList, sprites, colorConversion, options):
''' Main drawing function
receives all prepared data, returns the image
'''
    def floodFill(sector, startPix, erase=False):
        ''' Flood-fill the blueprint image starting from startPix.

        Each filled pixel is also recorded in the sectorData array so that
        every image coordinate can be mapped back to its sector.
        sector: sector number written into sectorData
        startPix: (x, y) seed pixel
        erase: when True, roll back a previous fill (cyan -> black and
            sectorData -> -1) instead of filling (black -> cyan)
        Returns False if the fill spills over the image boundary (i.e. the
        sector outline is not closed), True otherwise.
        '''
        nonlocal im
        nonlocal draw
        nonlocal px
        nonlocal sectorData
        # erase flag is used to roll back an overspilt flood fill:
        # reverse the colors, reset sectorData back to -1
        if erase:
            currColor, fillColor = (0, 255, 255), (0, 0, 0)
        else:
            currColor, fillColor = (0, 0, 0), (0, 255, 255)
        toGo = []
        # if starting point is cyan (already filled) or white (border),
        # don't do anything (the while loop is skipped and we return True)
        if px[startPix] == currColor:
            toGo.append(startPix)
        # Naive flood-fill: push eligible neighbours onto the to-go stack
        # and keep popping until it is empty
        while len(toGo) > 0:
            thisPix = toGo.pop()
            px[thisPix] = fillColor
            if erase:
                sectorData[thisPix[0], thisPix[1]] = -1
            else:
                sectorData[thisPix[0], thisPix[1]] = sector
            for dx, dy in [(-1, 0), (0, -1), (1, 0), (0, 1)]:
                nextPix = (thisPix[0] + dx, thisPix[1] + dy)
                # If we reached the border, something is wrong (the sector
                # leaks outside the image), so report failure
                if nextPix[0] < 0 or nextPix[0] == im.size[0] \
                        or nextPix[1] < 0 or nextPix[1] == im.size[1]:
                    return False
                if px[nextPix] == currColor \
                        and nextPix[0] >= 0 and nextPix[1] >= 0 \
                        and nextPix[0] < im.size[0] \
                        and nextPix[1] < im.size[1]:
                    toGo.append(nextPix)
        return True
def fillSeams(sectorData):
''' Expand SecorData by 1 pix (to eliminate seams between sectors)
'''
nonlocal im
nonlocal px
# Go thorugh pixels on the blueprint, if it is white (border),
# Look at surrounding pixels.
# Replace this pixel with the first valid neighbour sector.
for i in range(im.size[0]):
for j in range(im.size[1]):
if px[i, j] == (255, 255, 255):
maxNeighbour = -1
for di, dj in [(1, 0), (0, 1), (-1, 0), (0, -1)]:
if sectorData[i + di, j + dj] is not None and \
px[i + di, j | |
<reponame>PennyLaneAI/pennylane-pq<gh_stars>1-10
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=expression-not-assigned
r"""
Devices
=======
.. currentmodule:: pennylane_pq.devices
This plugin offers access to the following ProjectQ backends by providing
corresponding PennyLane devices:
.. autosummary::
:nosignatures:
ProjectQSimulator
ProjectQIBMBackend
ProjectQClassicalSimulator
See below for a description of the devices and the supported Operations and Observables.
ProjectQSimulator
#################
.. autoclass:: ProjectQSimulator
ProjectQIBMBackend
##################
.. autoclass:: ProjectQIBMBackend
ProjectQClassicalSimulator
##########################
.. autoclass:: ProjectQClassicalSimulator
"""
import numpy as np
import projectq as pq
from projectq.setups.ibm import get_engine_list
from projectq.ops import (
HGate,
XGate,
YGate,
ZGate,
SGate,
TGate,
SqrtXGate,
SwapGate,
Rx,
Ry,
Rz,
R,
SqrtSwapGate,
)
from pennylane import Device, DeviceError
from .pqops import CNOT, CZ, Rot, QubitUnitary, BasisState
from ._version import __version__
# Mapping from PennyLane operation names to the ProjectQ gate classes (or the
# pqops wrapper classes) that implement them on the ProjectQ backends.
PROJECTQ_OPERATION_MAP = {
    # native PennyLane operations also native to ProjectQ
    "PauliX": XGate,
    "PauliY": YGate,
    "PauliZ": ZGate,
    "CNOT": CNOT,
    "CZ": CZ,
    "SWAP": SwapGate,
    "RX": Rx,
    "RY": Ry,
    "RZ": Rz,
    "PhaseShift": R,
    "Hadamard": HGate,
    # operations not natively implemented in ProjectQ but provided in pqops.py
    "Rot": Rot,
    "QubitUnitary": QubitUnitary,
    "BasisState": BasisState,
    "S": SGate,
    "T": TGate,
    # additional operations not native to PennyLane but present in ProjectQ
    "SqrtX": SqrtXGate,
    "SqrtSwap": SqrtSwapGate,
    # operations/expectations of ProjectQ that do not work with PennyLane
    #'AllPauliZ': AllZGate, #todo: enable when multiple return values are supported
    # operations/expectations of PennyLane that do not work with ProjectQ
    #'QubitStateVector': StatePreparation,
    # In addition we support the Identity Expectation, but only as an expectation
    # and not as an Operation, which is why we don't put it here.
}
class _ProjectQDevice(Device): # pylint: disable=abstract-method
    """ProjectQ device for PennyLane.

    Shared base class for the concrete ProjectQ-backed devices; subclasses
    select the actual backend via the ``backend`` keyword argument and supply
    the ``_operation_map`` / ``_observable_map`` / ``_backend_kwargs`` members.

    Args:
        wires (int or Iterable[Number, str]]): Number of subsystems represented by the device,
            or iterable that contains unique labels for the subsystems as numbers (i.e., ``[-1, 0, 2]``)
            or strings (``['ancilla', 'q1', 'q2']``). Default 1 if not specified.
        shots (None, int): How many times the circuit should be evaluated (or sampled) to estimate
            the expectation values. Defaults to ``None`` if not specified, which means that the device
            returns analytical results.

    Keyword Args:
        backend (string): Name of the backend, i.e., either "Simulator",
            "ClassicalSimulator", or "IBMBackend".
        verbose (bool): If True, log messages are printed and exceptions are more verbose.

    Keyword Args for Simulator backend:
        gate_fusion (bool): If True, operations are cached and only executed once a
            certain number of operations has been reached (only has an effect for the c++ simulator).
        rnd_seed (int): Random seed (uses random.randint(0, 4294967295) by default).

    Keyword Args for IBMBackend backend:
        use_hardware (bool): If True, the code is run on the IBM quantum chip (instead of using
            the IBM simulator)
        num_runs (int): Number of runs to collect statistics (default is 1024). Is equivalent
            to but takes preference over the shots parameter.
        user (string): IBM Quantum Experience user name
        password (string): IBM Quantum Experience password
        device (string): Device to use (e.g., 'ibmqx4' or 'ibmqx5') if use_hardware is set to
            True. Default is ibmqx4.
        retrieve_execution (int): Job ID to retrieve instead of re-running the circuit
            (e.g., if previous run timed out).
    """

    # PennyLane plugin metadata
    name = "ProjectQ PennyLane plugin"
    short_name = "projectq"
    pennylane_requires = ">=0.15.0"
    version = "0.4.2"
    plugin_version = __version__
    author = "<NAME> and Xanadu"
    _capabilities = {
        "backend": list(["Simulator", "ClassicalSimulator", "IBMBackend"]),
        "model": "qubit",
    }

    @property
    def _operation_map(self):
        # mapping of PennyLane operation names to ProjectQ gates;
        # must be provided by each concrete subclass
        raise NotImplementedError

    @property
    def _observable_map(self):
        # mapping of PennyLane observable names to ProjectQ gates;
        # must be provided by each concrete subclass
        raise NotImplementedError

    @property
    def _backend_kwargs(self):
        # names of the keyword arguments understood by the chosen backend;
        # must be provided by each concrete subclass
        raise NotImplementedError

    def __init__(self, wires=1, shots=None, *, backend, **kwargs):
        # overwrite shots with num_runs if given
        if "num_runs" in kwargs:
            shots = kwargs["num_runs"]
            del kwargs["num_runs"]
        super().__init__(wires=wires, shots=shots)
        if "verbose" not in kwargs:
            kwargs["verbose"] = False
        self._backend = backend
        self._kwargs = kwargs
        self._eng = None
        self._reg = None
        # tracks whether any operation has been applied yet; needed because
        # BasisState is only valid as the first operation of a circuit
        self._first_operation = True
        self.reset() # the actual initialization is done in reset()

    def reset(self):
        """Reset/initialize the device by allocating qubits.

        Subclasses create ``self._eng`` before delegating here.
        """
        self._reg = self._eng.allocate_qureg(self.num_wires)
        self._first_operation = True

    def __repr__(self):
        return super().__repr__() + "Backend: " + self._backend + "\n"

    def __str__(self):
        return super().__str__() + "Backend: " + self._backend + "\n"

    def post_measure(self):
        """Deallocate the qubits after expectation values have been retrieved."""
        self._deallocate()

    def apply(self, operation, wires, par):
        """Apply a quantum operation.

        For plugin developers: this function should apply the operation on the device.

        Args:
            operation (str): name of the operation
            wires (Sequence[int]): subsystems the operation is applied on
            par (tuple): parameters for the operation

        Raises:
            DeviceError: if a BasisState is applied after other operations
        """
        operation = self._operation_map[operation](*par)
        if isinstance(operation, BasisState) and not self._first_operation:
            raise DeviceError(
                "Operation {} cannot be used after other Operations have already "
                "been applied on a {} device.".format(operation, self.short_name)
            )
        self._first_operation = False
        # translate wires to reflect labels on the device
        device_wires = self.map_wires(wires)
        qureg = [self._reg[i] for i in device_wires.labels]
        if isinstance(
            operation,
            (
                pq.ops._metagates.ControlledGate, # pylint: disable=protected-access
                pq.ops._gates.SqrtSwapGate, # pylint: disable=protected-access
                pq.ops._gates.SwapGate, # pylint: disable=protected-access
            ),
        ): # pylint: disable=protected-access
            # these gate classes expect a tuple of qubits rather than a list
            qureg = tuple(qureg)
        operation | qureg # pylint: disable=pointless-statement

    def _deallocate(self):
        """Deallocate all qubits to make ProjectQ happy.

        See also: https://github.com/ProjectQ-Framework/ProjectQ/issues/2

        Drawback: This is probably rather resource intensive.
        """
        if self._eng is not None and self._backend == "Simulator":
            # avoid an "unfriendly error message":
            # https://github.com/ProjectQ-Framework/ProjectQ/issues/2
            pq.ops.All(pq.ops.Measure) | self._reg # pylint: disable=expression-not-assigned

    def filter_kwargs_for_backend(self, kwargs):
        """Filter the given kwargs for those relevant for the respective device/backend."""
        return {key: value for key, value in kwargs.items() if key in self._backend_kwargs}

    @property
    def operations(self):
        """Get the supported set of operations.

        Returns:
            set[str]: the set of PennyLane operation names the device supports
        """
        return set(self._operation_map.keys())

    @property
    def observables(self):
        """Get the supported set of observables.

        Returns:
            set[str]: the set of PennyLane observable names the device supports
        """
        return set(self._observable_map.keys())
class ProjectQSimulator(_ProjectQDevice):
"""A PennyLane :code:`projectq.simulator` device for the `ProjectQ Simulator
<https://projectq.readthedocs.io/en/latest/projectq.backends.html#projectq.backends.Simulator>`_
backend.
Args:
wires (int or Iterable[Number, str]]): Number of subsystems represented by the device,
or iterable that contains unique labels for the subsystems as numbers (i.e., ``[-1, 0, 2]``)
or strings (``['ancilla', 'q1', 'q2']``).
shots (None, int): How many times the circuit should be evaluated (or sampled) to estimate
the expectation values. Defaults to ``None`` if not specified, which means that the device
returns analytical results.
Keyword Args:
gate_fusion (bool): If True, operations are cached and only executed once a
certain number of operations has been reached (only has an effect for the c++ simulator).
rnd_seed (int): Random seed (uses random.randint(0, 4294967295) by default).
verbose (bool): If True, log messages are printed and exceptions are more verbose.
This device can, for example, be instantiated from PennyLane as follows:
.. code-block:: python
import pennylane as qml
dev = qml.device('projectq.simulator', wires=XXX)
Supported PennyLane Operations:
:class:`pennylane.PauliX`,
:class:`pennylane.PauliY`,
:class:`pennylane.PauliZ`,
:class:`pennylane.CNOT`,
:class:`pennylane.CZ`,
:class:`pennylane.SWAP`,
:class:`pennylane.RX`,
:class:`pennylane.RY`,
:class:`pennylane.RZ`,
:class:`pennylane.PhaseShift`,
:class:`pennylane.Hadamard`,
:class:`pennylane.Rot`,
:class:`pennylane.QubitUnitary`,
:class:`pennylane.BasisState`,
:class:`pennylane_pq.S <pennylane_pq.ops.S>`,
:class:`pennylane_pq.T <pennylane_pq.ops.T>`,
Supported PennyLane observables:
:class:`pennylane.PauliX`,
:class:`pennylane.PauliY`,
:class:`pennylane.PauliZ`,
:class:`pennylane.Hadamard`,
:class:`pennylane.Identity`
Extra Operations:
:class:`pennylane_pq.SqrtX <pennylane_pq.ops.SqrtX>`,
:class:`pennylane_pq.SqrtSwap <pennylane_pq.ops.SqrtSwap>`
"""
short_name = "projectq.simulator"
# the simulator supports the full operation map
_operation_map = PROJECTQ_OPERATION_MAP
# observables: the single-qubit Pauli/Hadamard gates from the operation map,
# plus Identity (expectation only, no gate class)
_observable_map = dict(
    {key: val for key, val in _operation_map.items() if val in [XGate, YGate, ZGate, HGate]},
    **{"Identity": None}
)
_circuits = {}
# backend keyword arguments understood by the ProjectQ Simulator
_backend_kwargs = ["gate_fusion", "rnd_seed"]
def __init__(self, wires=1, shots=None, **kwargs):
    """Construct the device, pinning the backend choice to "Simulator"."""
    kwargs.update(backend="Simulator")
    super().__init__(wires=wires, shots=shots, **kwargs)
def reset(self):
    """Re-create the backend and engine, then allocate the qubit register."""
    backend_kwargs = self.filter_kwargs_for_backend(self._kwargs)
    simulator = pq.backends.Simulator(**backend_kwargs)
    self._eng = pq.MainEngine(simulator, verbose=self._kwargs["verbose"])
    super().reset()
def pre_measure(self):
    """Flush all queued operations to the backend before measuring.

    The qubits are kept allocated, since expectation values are computed
    on the live register afterwards.
    """
    self._eng.flush(deallocate_qubits=False)
def expval(self, observable, wires, par):
"""Retrieve the requested observable expectation value."""
device_wires = self.map_wires(wires)
if observable in ["PauliX", "PauliY", "PauliZ"]:
expval = self._eng.backend.get_expectation_value(
pq.ops.QubitOperator(str(observable)[-1] + "0"), [self._reg[device_wires.labels[0]]]
)
elif observable == "Hadamard":
expval = self._eng.backend.get_expectation_value(
1 / np.sqrt(2) * pq.ops.QubitOperator("X0")
+ 1 / np.sqrt(2) * pq.ops.QubitOperator("Z0"),
[self._reg[device_wires.labels[0]]],
)
elif observable == "Identity":
expval = 1
# elif observable == 'AllPauliZ':
# expval = [self._eng.backend.get_expectation_value(
# pq.ops.QubitOperator("Z"+'0'), [qubit])
# for qubit in self._reg]
if not self.shots is None and observable != "Identity":
p0 = (expval + 1) / 2
p0 = max(min(p0, 1), 0)
n0 = np.random.binomial(self.shots, p0)
| |
Return ``self/value``.
Availability
--------
Multiple GPUs, Multiple CPUs
"""
from ._ufunc import true_divide
return true_divide(self, rhs)
def __xor__(self, rhs):
"""a.__xor__(value, /)
Return ``self^value``.
Availability
--------
Multiple GPUs, Multiple CPUs
"""
from ._ufunc import bitwise_xor
return bitwise_xor(self, rhs)
@add_boilerplate()
def all(
self,
axis=None,
out=None,
keepdims=False,
initial=None,
where=True,
):
"""a.all(axis=None, out=None, keepdims=False, initial=None, where=True)
Returns True if all elements evaluate to True.
Refer to :func:`cunumeric.all` for full documentation.
See Also
--------
cunumeric.all : equivalent function
Availability
--------
Multiple GPUs, Multiple CPUs
"""
return self._perform_unary_reduction(
UnaryRedCode.ALL,
self,
axis=axis,
res_dtype=bool,
out=out,
keepdims=keepdims,
initial=initial,
where=where,
)
@add_boilerplate()
def any(
self,
axis=None,
out=None,
keepdims=False,
initial=None,
where=True,
):
"""a.any(axis=None, out=None, keepdims=False, initial=None, where=True)
Returns True if any of the elements of `a` evaluate to True.
Refer to :func:`cunumeric.any` for full documentation.
See Also
--------
cunumeric.any : equivalent function
Availability
--------
Multiple GPUs, Multiple CPUs
"""
return self._perform_unary_reduction(
UnaryRedCode.ANY,
self,
axis=axis,
res_dtype=bool,
out=out,
keepdims=keepdims,
initial=initial,
where=where,
)
@add_boilerplate()
def argmax(self, axis=None, out=None, keepdims=False):
"""a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to :func:`cunumeric.argmax` for full documentation.
See Also
--------
cunumeric.argmax : equivalent function
Availability
--------
Multiple GPUs, Multiple CPUs
"""
if out is not None and out.dtype != np.int64:
raise ValueError("output array must have int64 dtype")
if axis is not None and not isinstance(axis, int):
raise ValueError("axis must be an integer")
return self._perform_unary_reduction(
UnaryRedCode.ARGMAX,
self,
axis=axis,
res_dtype=np.dtype(np.int64),
out=out,
keepdims=keepdims,
)
@add_boilerplate()
def argmin(self, axis=None, out=None, keepdims=False):
"""a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis.
Refer to :func:`cunumeric.argmin` for detailed documentation.
See Also
--------
cunumeric.argmin : equivalent function
Availability
--------
Multiple GPUs, Multiple CPUs
"""
if out is not None and out.dtype != np.int64:
raise ValueError("output array must have int64 dtype")
if axis is not None and not isinstance(axis, int):
raise ValueError("axis must be an integer")
return self._perform_unary_reduction(
UnaryRedCode.ARGMIN,
self,
axis=axis,
res_dtype=np.dtype(np.int64),
out=out,
keepdims=keepdims,
)
def astype(
self, dtype, order="C", casting="unsafe", subok=True, copy=True
):
"""a.astype(dtype, order='C', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or data-type
Typecode or data-type to which the array is cast.
order : ``{'C', 'F', 'A', 'K'}``, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : ``{'no', 'equiv', 'safe', 'same_kind', 'unsafe'}``, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default),
otherwise the returned array will be forced to be a base-class
array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the
input array are satisfied (see description for `copy` input
parameter), `arr_t` is a new array of the same shape as the input
array, with dtype, order given by `dtype`, `order`.
Availability
--------
Multiple GPUs, Multiple CPUs
"""
dtype = np.dtype(dtype)
if self.dtype == dtype:
return self
casting_allowed = np.can_cast(self.dtype, dtype, casting)
if casting_allowed:
# For numeric to non-numeric casting, the dest dtype should be
# retrived from 'promote_types' to preserve values
# e.g. ) float 64 to str, np.dtype(dtype) == '<U'
# this dtype is not safe to store
if dtype == np.dtype("str"):
dtype = np.promote_types(self.dtype, dtype)
else:
raise TypeError(
f"Cannot cast array data"
f"from '{self.dtype}' to '{dtype}' "
f"to the rule '{casting}'"
)
result = ndarray(self.shape, dtype=dtype, inputs=(self,))
result._thunk.convert(self._thunk, warn=False)
return result
    @add_boilerplate()
    def take(self, indices, axis=None, out=None, mode="raise"):
        """a.take(indices, axis=None, out=None, mode="raise")

        Take elements from an array along an axis.

        Refer to :func:`cunumeric.take` for full documentation.

        See Also
        --------
        cunumeric.take : equivalent function

        Availability
        --------
        Multiple GPUs, Multiple CPUs
        """
        if not np.isscalar(indices):
            # if indices is a tuple or list, bring sub-tuples to the same shape
            # and concatenate them
            indices = convert_to_cunumeric_ndarray(indices)
        if axis is None:
            # flatten first so a single axis-0 take covers the whole array
            self = self.ravel()
            axis = 0
        elif axis < 0:
            # normalize a negative axis to its positive equivalent
            axis = self.ndim + axis
        if axis < 0 or axis >= self.ndim:
            raise ValueError("axis argument is out of bounds")
        # TODO remove "raise" logic when bounds check for advanced
        # indexing is implemented
        if mode == "raise":
            # explicit bounds check; scalar and array indices need different tests
            if np.isscalar(indices):
                if (indices < -self.shape[axis]) or (
                    indices >= self.shape[axis]
                ):
                    raise ValueError("invalid entry in indices array")
            else:
                if (indices < -self.shape[axis]).any() or (
                    indices >= self.shape[axis]
                ).any():
                    raise ValueError("invalid entry in indices array")
        elif mode == "wrap":
            # wrap out-of-range indices around the axis length
            indices = indices % self.shape[axis]
        elif mode == "clip":
            # clamp indices into [0, axis_length - 1]
            if np.isscalar(indices):
                if indices >= self.shape[axis]:
                    indices = self.shape[axis] - 1
                if indices < 0:
                    indices = 0
            else:
                indices = indices.clip(0, self.shape[axis] - 1)
        else:
            raise ValueError(
                "Invalid mode '{}' for take operation".format(mode)
            )
        if self.shape[axis] == 0:
            # taking anything from an empty axis is an error; an empty take
            # just yields a copy of the (empty) array
            if indices.size != 0:
                raise IndexError(
                    "Cannot do a non-empty take() from an empty axis."
                )
            return self.copy()
        # full slices for all axes before `axis`, then the indices themselves
        point_indices = tuple(slice(None) for i in range(0, axis))
        point_indices += (indices,)
        if out is not None:
            if out.dtype != self.dtype:
                raise ValueError("Type mismatch: out array has wrong type")
            out[:] = self[point_indices]
            return out
        else:
            res = self[point_indices]
            if np.isscalar(indices):
                # force a copy so the result does not alias self's storage
                res = res.copy()
            return res
def choose(self, choices, out=None, mode="raise"):
"""a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to :func:`cunumeric.choose` for full documentation.
See Also
--------
cunumeric.choose : equivalent function
Availability
--------
Multiple GPUs, Multiple CPUs
"""
a = self
if out is not None:
out = convert_to_cunumeric_ndarray(out, share=True)
if isinstance(choices, list):
choices = tuple(choices)
is_tuple = isinstance(choices, tuple)
if is_tuple:
n = len(choices)
dtypes = [ch.dtype for ch in choices]
ch_dtype = np.find_common_type(dtypes, [])
choices = tuple(
convert_to_cunumeric_ndarray(choices[i]).astype(ch_dtype)
for i in range(n)
)
else:
choices = convert_to_cunumeric_ndarray(choices)
n = choices.shape[0]
ch_dtype = choices.dtype
choices = tuple(choices[i, ...] for i in range(n))
if not np.issubdtype(self.dtype, np.integer):
raise TypeError("a array should be integer type")
if self.dtype is not np.int64:
a = a.astype(np.int64)
if mode == "raise":
if (a < 0).any() | (a >= n).any():
raise ValueError("invalid entry in choice array")
elif mode == "wrap":
a = a % n
elif mode == "clip":
a = a.clip(0, n - 1)
else:
raise ValueError(
f"mode={mode} not understood. Must "
"be 'raise', 'wrap', or 'clip'"
)
# we need to broadcast all arrays in choices with
# input and output arrays
if out is not None:
out_shape = np.broadcast_shapes(
a.shape, choices[0].shape, out.shape
)
else:
out_shape = np.broadcast_shapes(a.shape, choices[0].shape)
for c in choices:
out_shape = np.broadcast_shapes(out_shape, c.shape)
# if output is provided, it shape should be the same as out_shape
if out is not None and out.shape != out_shape:
raise ValueError(
f"non-broadcastable output operand with shape "
f" {str(out.shape)}"
f" doesn't match the broadcast shape {out_shape}"
)
if out is not None and out.dtype == ch_dtype:
out_arr = out
else:
# no output, create one
out_arr = ndarray(
shape=out_shape,
dtype=ch_dtype,
inputs=(a, choices),
)
ch = tuple(c._thunk for c in choices) #
out_arr._thunk.choose(
*ch,
rhs=a._thunk,
)
if out is not None and out.dtype != ch_dtype:
out._thunk.convert(out_arr._thunk)
return out
else:
return out_arr
@add_boilerplate()
def compress(self, condition, axis=None, out=None):
"""a.compress(self, condition, axis=None, out=None)
Return selected slices of an array along given axis..
Refer to :func:`cunumeric.compress` for full documentation.
See Also
| |
342989, 343019, 343037, 343051, 343061,
343073, 343081, 343087, 343127, 343141, 343153, 343163, 343169,
343177, 343193, 343199, 343219, 343237, 343243, 343253, 343261,
343267, 343289, 343303, 343307, 343309, 343313, 343327, 343333,
343337, 343373, 343379, 343381, 343391, 343393, 343411, 343423,
343433, 343481, 343489, 343517, 343529, 343531, 343543, 343547,
343559, 343561, 343579, 343583, 343589, 343591, 343601, 343627,
343631, 343639, 343649, 343661, 343667, 343687, 343709, 343727,
343769, 343771, 343787, 343799, 343801, 343813, 343817, 343823,
343829, 343831, 343891, 343897, 343901, 343913, 343933, 343939,
343943, 343951, 343963, 343997, 344017, 344021, 344039, 344053,
344083, 344111, 344117, 344153, 344161, 344167, 344171, 344173,
344177, 344189, 344207, 344209, 344213, 344221, 344231, 344237,
344243, 344249, 344251, 344257, 344263, 344269, 344273, 344291,
344293, 344321, 344327, 344347, 344353, 344363, 344371, 344417,
344423, 344429, 344453, 344479, 344483, 344497, 344543, 344567,
344587, 344599, 344611, 344621, 344629, 344639, 344653, 344671,
344681, 344683, 344693, 344719, 344749, 344753, 344759, 344791,
344797, 344801, 344807, 344819, 344821, 344843, 344857, 344863,
344873, 344887, 344893, 344909, 344917, 344921, 344941, 344957,
344959, 344963, 344969, 344987, 345001, 345011, 345017, 345019,
345041, 345047, 345067, 345089, 345109, 345133, 345139, 345143,
345181, 345193, 345221, 345227, 345229, 345259, 345263, 345271,
345307, 345311, 345329, 345379, 345413, 345431, 345451, 345461,
345463, 345473, 345479, 345487, 345511, 345517, 345533, 345547,
345551, 345571, 345577, 345581, 345599, 345601, 345607, 345637,
345643, 345647, 345659, 345673, 345679, 345689, 345701, 345707,
345727, 345731, 345733, 345739, 345749, 345757, 345769, 345773,
345791, 345803, 345811, 345817, 345823, 345853, 345869, 345881,
345887, 345889, 345907, 345923, 345937, 345953, 345979, 345997,
346013, 346039, 346043, 346051, 346079, 346091, 346097, 346111,
346117, 346133, 346139, 346141, 346147, 346169, 346187, 346201,
346207, 346217, 346223, 346259, 346261, 346277, 346303, 346309,
346321, 346331, 346337, 346349, 346361, 346369, 346373, 346391,
346393, 346397, 346399, 346417, 346421, 346429, 346433, 346439,
346441, 346447, 346453, 346469, 346501, 346529, 346543, 346547,
346553, 346559, 346561, 346589, 346601, 346607, 346627, 346639,
346649, 346651, 346657, 346667, 346669, 346699, 346711, 346721,
346739, 346751, 346763, 346793, 346831, 346849, 346867, 346873,
346877, 346891, 346903, 346933, 346939, 346943, 346961, 346963,
347003, 347033, 347041, 347051, 347057, 347059, 347063, 347069,
347071, 347099, 347129, 347131, 347141, 347143, 347161, 347167,
347173, 347177, 347183, 347197, 347201, 347209, 347227, 347233,
347239, 347251, 347257, 347287, 347297, 347299, 347317, 347329,
347341, 347359, 347401, 347411, 347437, 347443, 347489, 347509,
347513, 347519, 347533, 347539, 347561, 347563, 347579, 347587,
347591, 347609, 347621, 347629, 347651, 347671, 347707, 347717,
347729, 347731, 347747, 347759, 347771, 347773, 347779, 347801,
347813, 347821, 347849, 347873, 347887, 347891, 347899, 347929,
347933, 347951, 347957, 347959, 347969, 347981, 347983, 347987,
347989, 347993, 348001, 348011, 348017, 348031, 348043, 348053,
348077, 348083, 348097, 348149, 348163, 348181, 348191, 348209,
348217, 348221, 348239, 348241, 348247, 348253, 348259, 348269,
348287, 348307, 348323, 348353, 348367, 348389, 348401, 348407,
348419, 348421, 348431, 348433, 348437, 348443, 348451, 348457,
348461, 348463, 348487, 348527, 348547, 348553, 348559, 348563,
348571, 348583, 348587, 348617, 348629, 348637, 348643, 348661,
348671, 348709, 348731, 348739, 348757, 348763, 348769, 348779,
348811, 348827, 348833, 348839, 348851, 348883, 348889, 348911,
348917, 348919, 348923, 348937, 348949, 348989, 348991, 349007,
349039, 349043, 349051, 349079, 349081, 349093, 349099, 349109,
349121, 349133, 349171, 349177, 349183, 349187, 349199, 349207,
349211, 349241, 349291, 349303, 349313, 349331, 349337, 349343,
349357, 349369, 349373, 349379, 349381, 349387, 349397, 349399,
349403, 349409, 349411, 349423, 349471, 349477, 349483, 349493,
349499, 349507, 349519, 349529, 349553, 349567, 349579, 349589,
349603, 349637, 349663, 349667, 349697, 349709, 349717, 349729,
349753, 349759, 349787, 349793, 349801, 349813, 349819, 349829,
349831, 349837, 349841, 349849, 349871, 349903, 349907, 349913,
349919, 349927, 349931, 349933, 349939, 349949, 349963, 349967,
349981, 350003, 350029, 350033, 350039, 350087, 350089, 350093,
350107, 350111, 350137, 350159, 350179, 350191, 350213, 350219,
350237, 350249, 350257, 350281, 350293, 350347, 350351, 350377,
350381, 350411, 350423, 350429, 350431, 350437, 350443, 350447,
350453, 350459, 350503, 350521, 350549, 350561, 350563, 350587,
350593, 350617, 350621, 350629, 350657, 350663, 350677, 350699,
350711, 350719, 350729, 350731, 350737, 350741, 350747, 350767,
350771, 350783, 350789, 350803, 350809, 350843, 350851, 350869,
350881, 350887, 350891, 350899, 350941, 350947, 350963, 350971,
350981, 350983, 350989, 351011, 351023, 351031, 351037, 351041,
351047, 351053, 351059, 351061, 351077, 351079, 351097, 351121,
351133, 351151, 351157, 351179, 351217, 351223, 351229, 351257,
351259, 351269, 351287, 351289, 351293, 351301, 351311, 351341,
351343, 351347, 351359, 351361, 351383, 351391, 351397, 351401,
351413, 351427, 351437, 351457, 351469, 351479, 351497, 351503,
351517, 351529, 351551, 351563, 351587, 351599, 351643, 351653,
351661, 351667, 351691, 351707, 351727, 351731, 351733, 351749,
351751, 351763, 351773, 351779, 351797, 351803, 351811, 351829,
351847, 351851, 351859, 351863, 351887, 351913, 351919, 351929,
351931, 351959, 351971, 351991, 352007, 352021, 352043, 352049,
352057, 352069, 352073, 352081, 352097, 352109, 352111, 352123,
352133, 352181, 352193, 352201, 352217, 352229, 352237, 352249,
352267, 352271, 352273, 352301, 352309, 352327, 352333, 352349,
352357, 352361, 352367, 352369, 352381, 352399, 352403, 352409,
352411, 352421, 352423, 352441, 352459, 352463, 352481, 352483,
352489, 352493, 352511, 352523, 352543, 352549, 352579, 352589,
352601, 352607, 352619, 352633, 352637, 352661, 352691, 352711,
352739, 352741, 352753, 352757, 352771, 352813, 352817, 352819,
352831, 352837, 352841, 352853, 352867, 352883, 352907, 352909,
352931, 352939, 352949, 352951, 352973, 352991, 353011, 353021,
353047, 353053, 353057, 353069, 353081, 353099, 353117, 353123,
353137, 353147, 353149, 353161, 353173, 353179, 353201, 353203,
353237, 353263, 353293, 353317, 353321, 353329, 353333, 353341,
353359, 353389, 353401, 353411, 353429, 353443, 353453, 353459,
353471, 353473, 353489, 353501, 353527, 353531, 353557, 353567,
353603, 353611, 353621, 353627, 353629, 353641, 353653, 353657,
353677, 353681, 353687, 353699, 353711, 353737, 353747, 353767,
353777, 353783, 353797, 353807, 353813, 353819, 353833, 353867,
353869, 353879, 353891, 353897, 353911, 353917, 353921, 353929,
353939, 353963, 354001, 354007, 354017, 354023, 354031, 354037,
354041, 354043, 354047, 354073, 354091, 354097, 354121, 354139,
354143, 354149, 354163, 354169, 354181, 354209, 354247, 354251,
354253, 354257, 354259, 354271, 354301, 354307, 354313, 354317,
354323, 354329, 354337, 354353, 354371, 354373, 354377, 354383,
354391, 354401, 354421, 354439, 354443, 354451, 354461, 354463,
354469, 354479, 354533, 354539, 354551, 354553, 354581, 354587,
354619, 354643, 354647, 354661, 354667, 354677, 354689, 354701,
354703, 354727, 354737, 354743, 354751, 354763, 354779, 354791,
354799, 354829, 354833, 354839, 354847, 354869, 354877, 354881,
354883, 354911, 354953, 354961, 354971, 354973, 354979, 354983,
354997, 355007, 355009, 355027, 355031, 355037, 355039, 355049,
355057, 355063, 355073, 355087, 355093, 355099, 355109, 355111,
355127, 355139, 355171, 355193, 355211, 355261, 355297, 355307,
355321, 355331, 355339, 355343, 355361, 355363, 355379, 355417,
355427, 355441, 355457, 355463, 355483, 355499, 355501, 355507,
355513, 355517, 355519, 355529, 355541, 355549, 355559, 355571,
355573, 355591, 355609, 355633, 355643, 355651, 355669, 355679,
355697, 355717, 355721, 355723, 355753, 355763, 355777, 355783,
355799, 355811, 355819, 355841, 355847, 355853, 355867, 355891,
355909, 355913, 355933, 355937, 355939, 355951, 355967, 355969,
356023, 356039, 356077, 356093, 356101, 356113, 356123, 356129,
356137, 356141, 356143, 356171, 356173, 356197, 356219, 356243,
356261, 356263, 356287, 356299, 356311, 356327, 356333, 356351,
356387, 356399, 356441, 356443, 356449, 356453, 356467, 356479,
356501, 356509, 356533, 356549, 356561, 356563, 356567, 356579,
356591, 356621, 356647, 356663, 356693, 356701, 356731, 356737,
356749, 356761, 356803, 356819, 356821, 356831, 356869, 356887,
356893, 356927, 356929, 356933, 356947, 356959, 356969, 356977,
356981, 356989, 356999, 357031, 357047, 357073, 357079, 357083,
357103, 357107, 357109, 357131, 357139, 357169, 357179, 357197,
357199, 357211, 357229, 357239, 357241, 357263, 357271, 357281,
357283, 357293, 357319, 357347, 357349, 357353, 357359, 357377,
357389, 357421, 357431, 357437, 357473, 357503, 357509, 357517,
357551, 357559, 357563, 357569, 357571, 357583, 357587, 357593,
357611, 357613, 357619, 357649, 357653, 357659, 357661, 357667,
357671, 357677, 357683, 357689, 357703, 357727, 357733, 357737,
357739, 357767, 357779, 357781, 357787, 357793, 357809, 357817,
357823, 357829, 357839, 357859, 357883, 357913, 357967, 357977,
357983, 357989, 357997, 358031, 358051, 358069, 358073, 358079,
358103, 358109, 358153, 358157, 358159, 358181, 358201, 358213,
358219, 358223, 358229, 358243, 358273, 358277, 358279, 358289,
358291, 358297, 358301, 358313, 358327, 358331, 358349, 358373,
358417, 358427, 358429, 358441, 358447, 358459, 358471, 358483,
358487, 358499, 358531, 358541, 358571, 358573, 358591, 358597,
358601, 358607, 358613, 358637, 358667, 358669, 358681, 358691,
358697, 358703, 358711, 358723, 358727, 358733, 358747, 358753,
358769, 358783, 358793, 358811, 358829, 358847, 358859, 358861,
358867, 358877, 358879, 358901, 358903, 358907, 358909, 358931,
358951, 358973, 358979, 358987, 358993, 358999, 359003, 359017,
359027, 359041, 359063, 359069, 359101, 359111, 359129, 359137,
359143, 359147, 359153, | |
<reponame>bronreichardtchu/koffee
"""
NAME:
koffee.py
KOFFEE - Keck Outflow Fitter For Emission linEs
AUTHOR:
<NAME>
Swinburne
2019
EMAIL:
<<EMAIL>>
PURPOSE:
To fit gaussians to emission lines in 3D data cubes.
Written on MacOS Mojave 10.14.5, with Python 3
FUNCTIONS INCLUDED:
mock_data
check_blue_chi_square
plot_fit
fit_cube
read_output_files
DICTIONARIES INCLUDED:
all_the_lines - holds the wavelengths of emission lines to fit
dodgy_spaxels - holds location of spaxels saturated in OIII 5007
MODIFICATION HISTORY:
v.1.0 - first created May 2019
v.1.0.1 - thinking about renaming this KOFFEE - Keck Outflow Fitter For Emission linEs
v.1.0.2 - Added a continuum, being the average of the first 10 pixels in the input spectrum, which is then subtracted from the entire data spectrum so that the Gaussians fit properly, and aren't trying to fit the continuum as well (5th June 2019)
    v.1.0.3 - added a loop over the entire cube, with an exception if the parameters object comes out weird, and a progress bar
v.1.0.4 - added a continuum to the mock data, and also added a feature so that the user can define what S/N they want the mock data to have
v.1.0.5 - adding functions to combine data cubes either by pixel, or by regridding the wavelength (ToDo: include variance cubes in this too)
"""
import pickle
import pathlib
import numpy as np
from datetime import date
from tqdm import tqdm #progress bar module
#make sure matplotlib doesn't create any windows
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import prepare_cubes as pc
import koffee_fitting_functions as kff
from astropy.modeling import models
from lmfit import Parameters
from lmfit import Model
import importlib
importlib.reload(kff)
#===============================================================================
#MOCK DATA
#===============================================================================
def mock_data(amp, mean, stddev, snr):
    """
    Creates a mock data set with gaussians. A list of values for each gaussian
    property is input; each property must have the same length.

    Parameters
    ----------
    amp : list of floats
        amplitudes of the Gaussians
    mean : list of floats
        means of the Gaussians
    stddev : list of floats
        standard deviations of the Gaussians
    snr : float
        the desired signal-to-noise ratio

    Returns
    -------
    x : :obj:'~numpy.ndarray'
        the wavelength vector
    y : :obj:'~numpy.ndarray'
        the flux/intensity
    """
    # fixed seed so the mock spectrum is reproducible between runs
    np.random.seed(42)
    #create 'wavelengths'
    x = np.linspace(-40., 40., 800)
    # Sum the Gaussian components directly. a*exp(-0.5*((x-m)/s)**2) is exactly
    # what astropy's models.Gaussian1D(a, m, s)(x) evaluates to, but this avoids
    # building a model object per component and the [0]*len(amp) placeholder list.
    g = np.zeros_like(x)
    for a, m, s in zip(amp, mean, stddev):
        g += a * np.exp(-0.5 * ((x - m) / s) ** 2)
    #add noise assuming the mean value of the spectrum continuum is 1.0
    noise = 1.0 / snr
    y = g + np.random.normal(0., noise, x.shape)
    return x, y
#===============================================================================
#USEFUL DICTIONARIES
#===============================================================================
#wavelengths of emission lines at rest in vacuum, taken from
#http://classic.sdss.org/dr6/algorithms/linestable.html
#values are vacuum rest wavelengths (the SDSS line table lists Angstroms)
all_the_lines = {
    "Hdelta" : 4102.89,
    "Hgamma" : 4341.68,
    "Hbeta" : 4862.68,
    "Halpha": 6564.61,
    "OII_1" : 3727.092,
    "OII_2" : 3729.875,
    "HeI" : 3889.0,
    "SII" : 4072.3,
    "OIII_1" : 4364.436,
    "OIII_2" : 4932.603,
    "OIII_3" : 4960.295,
    "OIII_4" : 5008.240
}

#dictionary of spaxels which are saturated in OIII_4 and need to fit OIII_3 instead
#keys are galaxy names, values are lists of (x, y) spaxel coordinates
dodgy_spaxels = {
    "IRAS08" : [(27,9), (28,9), (29,9), (30,9), (31,9), (32,9)]
}
#===============================================================================
#MAKING KOFFEE SMARTER
#===============================================================================
def check_blue_chi_square(wavelength, flux, best_fit, g_model, OII_doublet_fit=False):
    """
    Checks the chi squared value of the blue side of the fit. If there's a large
    residual, KOFFEE will shift the starting point for the flow gaussian mean to
    the blue in fit_cube().

    Parameters
    ----------
    wavelength : :obj:'~numpy.ndarray'
        the wavelength vector
    flux : :obj:'~numpy.ndarray'
        the data vector (currently unused; kept for signature compatibility)
    best_fit : class
        the best_fit object; needs .residual and .best_values (lmfit ModelResult)
    g_model :
        the gaussian model object (lmfit Model or CompositeModel)
    OII_doublet_fit : boolean
        whether it was a fit to the OII doublet or not (default = False)

    Returns
    -------
    chi_square : float
        the chi squared residual of the blue side of the fit

    Raises
    ------
    ValueError
        if g_model is neither an lmfit Model nor a CompositeModel
    """
    #get the residuals
    residual = best_fit.residual
    #we only want them for the window from 1A blueward of (mean - sigma)
    #to 4A blueward of that point
    if str(type(g_model)) == "<class 'lmfit.model.Model'>":
        one_sigma_blue = (best_fit.best_values['gauss_mean'] - best_fit.best_values['gauss_sigma']) - 1.0
    elif str(type(g_model)) == "<class 'lmfit.model.CompositeModel'>":
        if OII_doublet_fit:
            one_sigma_blue = (best_fit.best_values['Galaxy_red_mean'] - best_fit.best_values['Galaxy_red_sigma']) - 1.0
        else:
            #two-component fits usually prefix the systemic component with
            #'Galaxy_'; fall back to the plain 'gauss_' naming otherwise
            try:
                one_sigma_blue = (best_fit.best_values['Galaxy_mean'] - best_fit.best_values['Galaxy_sigma']) - 1.0
            except KeyError:
                one_sigma_blue = (best_fit.best_values['gauss_mean'] - best_fit.best_values['gauss_sigma']) - 1.0
    else:
        #previously this fell through silently and crashed later with
        #UnboundLocalError; fail loudly with a clear message instead
        raise ValueError("Unrecognised model type: %s" % type(g_model))
    blue_left_bound = one_sigma_blue - 4.0
    #create the wavelength mask
    lam_mask = (wavelength > blue_left_bound) & (wavelength < one_sigma_blue)
    #calculate the chi squared
    chi_square = np.sum(residual[lam_mask]**2)
    return chi_square
#===============================================================================
#PLOTTING
#===============================================================================
def plot_fit(wavelength, flux, g_model, pars, best_fit, plot_initial=False, include_const=False):
    """
    Plots the fit to the data with residuals.

    Parameters
    ----------
    wavelength : :obj:'~numpy.ndarray'
        wavelength vector
    flux : :obj:'~numpy.ndarray'
        the flux of the spectrum
    g_model :
        the gaussian model object (lmfit Model or CompositeModel)
    pars :
        the lmfit Parameters used as the initial guess
    best_fit : class
        the best fitting model (lmfit ModelResult)
    plot_initial : boolean
        Default is False. Whether to plot the initial guess or not.
    include_const : boolean
        Default is False. Currently unused here; kept for interface
        compatibility with callers.

    Returns
    -------
    A matplotlib figure of the fit to the data, with a panel below showing
    the residuals
    """
    #create a more finely sampled wavelength space so model curves look smooth
    fine_sampling = np.linspace(min(wavelength), max(wavelength), 1000)
    #get parameters from the best_fit
    best_fit_pars = best_fit.params
    #plot the stuff
    fig1 = plt.figure(figsize=(9,5))
    #add_axes has (xstart, ystart, xend, yend)
    frame1 = fig1.add_axes((.1,.3,.8,.6))
    plt.step(wavelength, flux, where='mid', label='Data')
    #get initial guess for fit
    if plot_initial:
        initial_fit = g_model.eval(pars, x=wavelength)
        plt.step(wavelength, initial_fit, where='mid', label='Initial Guess')
    #if the model was a 1-component Gaussian, plot the best fit gaussian
    if str(type(g_model)) == "<class 'lmfit.model.Model'>":
        label = "Best Fit (Amp: {:.2f}, Mean: {:.2f}, \n Sigma: {:.2f})".format(best_fit.best_values['gauss_amp'], best_fit.best_values['gauss_mean'], best_fit.best_values['gauss_sigma'])
        plt.step(fine_sampling, best_fit.eval(x=fine_sampling), where='mid', label=label)
    #if the original model was a multiple-component Gaussian fit, plot the two gaussians and constant separately
    if str(type(g_model)) == "<class 'lmfit.model.CompositeModel'>":
        plt.step(fine_sampling, best_fit.eval(x=fine_sampling), where='mid', label='Best Fit')
        for i in range(len(best_fit.components)):
            try:
                #get the parameters for the amp, center and sigma
                amp_par = best_fit.params[best_fit.components[i].prefix+'amp']
                mean_par = best_fit.params[best_fit.components[i].prefix+'mean']
                sigma_par = best_fit.params[best_fit.components[i].prefix+'sigma']
                #put the parameter values into a string for the graph label
                label = best_fit.components[i].prefix[:-1]+" (Amp: {:.2f}, Mean: {:.2f}, \n Sigma: {:.2f})".format(amp_par.value, mean_par.value, sigma_par.value)
                #plot the curves
                plt.step(fine_sampling, best_fit.components[i].eval(best_fit_pars, x=fine_sampling), where='mid', label=label)
            except KeyError:
                #a component without amp/mean/sigma parameters should be the
                #constant continuum, so use this label/line instead
                label = best_fit.components[i].prefix[:-1]+": {:.2f}".format(best_fit.best_values['Constant_Continuum_c'])
                #plot the constant line
                plt.step(fine_sampling, np.full_like(fine_sampling, best_fit.components[i].eval(best_fit_pars, x=fine_sampling)), where='mid', label=label)
    #plt.xlim(best_fit.params[best_fit.components[0].prefix+'mean'].value-8.0, best_fit.params[best_fit.components[0].prefix+'mean'].value+8.0)
    #raw strings so the LaTeX \AA escape is not mangled by Python
    plt.ylabel(r'Flux ($10^{-16}$ erg s$^{-1}$ cm$^{-2}$ $\AA^{-1}$)')
    frame1.axes.get_xaxis().set_ticks([])
    plt.legend(loc='upper right', fontsize=8, frameon=False)
    #create frame for residual plot
    frame2 = fig1.add_axes((.1,.1,.8,.2))
    difference = best_fit.best_fit - flux
    plt.scatter(wavelength, difference, c='r', s=2)
    #plt.xlim(best_fit.params[best_fit.components[0].prefix+'mean'].value-8.0, best_fit.params[best_fit.components[0].prefix+'mean'].value+8.0)
    plt.ylabel('Residuals')
    plt.xlabel(r'Wavelength ($\AA$)')
    #plt.show()
    return fig1
#===============================================================================
#MAIN FUNCTION - APPLY TO WHOLE CUBE
def fit_cube(galaxy_name, redshift, emission_line, output_folder_loc, emission_line2=None, OII_doublet=False, filename=None, filename2=None, var_filename=None, data_cube_stuff=None, emission_dict=all_the_lines, cont_subtract=False, include_const=False, plotting=True, method='leastsq', correct_bad_spaxels=False, koffee_checks=True):
"""
Fits the entire cube, and checks whether one or two gaussians fit the
emission line best. Must have either the filename to the fits file, or the
data_cube_stuff.
Parameters
----------
galaxy_name : str
name of the galaxy
redshift : int
redshift of the galaxy
emission_line : str
the emission line to be fit. Options:
"Hdelta", "Hgamma", "Hbeta", "Halpha", "OII_1", "OII_2", "HeI", "SII",
"OIII_1", "OIII_2", "OIII_3", "OIII_4"
output_folder_loc : str
file path to where to put the results and plots output folder
emission_line2 : str
the second emission line to be fit using the results from the first.
Default is None. Options:
"Hdelta", "Hgamma", "Hbeta", "Halpha", "OII_1", "OII_2", "HeI", "SII",
"OIII_1", "OIII_2", "OIII_3", "OIII_4"
OII_doublet : boolean
whether to fit the OII doublet using the results from the first fit.
Default is False. Uses "OII_1", from the dictionary
filename : str
the file path to the data cube - if data_cube_stuff is not given
filename2 : str
the file path to the second data cube - generally the continuum subtracted
cube. If this is not None, the first cube is used to create the S/N mask,
and this cube is used for fitting.
var_filename : str or None
the filepath to the variance cube, if it's separate to the data cube file
and the fit should be weighted. Default is None.
data_cube_stuff : list of :obj:'~numpy.ndarray'
[lamdas, data] if the filename is not given
emission_dict : dict
dictionary of emission lines
cont_subtract : bool
when True, use the first 10 pixels in the spectrum to define the continuum
and subtracts it. Use False when continuum has already been fit and
subtracted.
include_const : bool
when True, a constant is included in the gaussian fit. Default is False.
plotting : bool
when True, each best_fit is plotted with its residuals and saved
method : str
the | |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`ZhaoEtAl2006Asc`, :class:`ZhaoEtAl2006SInter`,
:class:`ZhaoEtAl2006SSlab`, :class:`ZhaoEtAl2006SInterNSHMP2008` and
:class:`ZhaoEtAl2006SSlabNSHMP2014`
"""
from __future__ import division
import numpy as np
# standard acceleration of gravity in m/s**2
from scipy.constants import g
import copy
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA
class ZhaoEtAl2006Asc(GMPE):
    """
    Implements GMPE developed by <NAME> et al. and published as
    "Attenuation Relations of Strong Ground Motion in Japan Using Site
    Classification Based on Predominant Period" (2006, Bulletin of the
    Seismological Society of America, Volume 96, No. 3, pages 898-913).
    This class implements the equations for 'Active Shallow Crust'
    (that's why the class name ends with 'Asc').
    """
    #: Supported tectonic region type is active shallow crust, this means
    #: that factors SI, SS and SSL are assumed 0 in equation 1, p. 901.
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST

    #: Supported intensity measure types are spectral acceleration,
    #: and peak ground acceleration, see paragraph 'Development of Base Model'
    #: p. 901.
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
        PGA,
        SA
    ])

    #: Supported intensity measure component is geometric mean
    #: of two horizontal components :
    #: attr:`~openquake.hazardlib.const.IMC.AVERAGE_HORIZONTAL`, see paragraph
    #: 'Development of Base Model', p. 901.
    DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL

    #: Supported standard deviation types are inter-event, intra-event
    #: and total, see equation 3, p. 902.
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
        const.StdDev.TOTAL,
        const.StdDev.INTER_EVENT,
        const.StdDev.INTRA_EVENT
    ])

    #: Required site parameters is Vs30.
    #: See table 2, p. 901.
    REQUIRES_SITES_PARAMETERS = set(('vs30', ))

    #: Required rupture parameters are magnitude, rake, and focal depth.
    #: See paragraph 'Development of Base Model', p. 901.
    REQUIRES_RUPTURE_PARAMETERS = set(('mag', 'rake', 'hypo_depth'))

    #: Required distance measure is Rrup.
    #: See paragraph 'Development of Base Model', p. 902.
    REQUIRES_DISTANCES = set(('rrup', ))

    def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.
        """
        # extracting dictionary of coefficients specific to required
        # intensity measure type.
        C = self.COEFFS_ASC[imt]

        # mean value as given by equation 1, p. 901, without considering the
        # interface and intraslab terms (that is SI, SS, SSL = 0) and the
        # inter and intra event terms, plus the magnitude-squared term
        # correction factor (equation 5 p. 909).
        # Each private helper computes one additive term of the equation.
        mean = self._compute_magnitude_term(C, rup.mag) +\
            self._compute_distance_term(C, rup.mag, dists.rrup) +\
            self._compute_focal_depth_term(C, rup.hypo_depth) +\
            self._compute_faulting_style_term(C, rup.rake) +\
            self._compute_site_class_term(C, sites.vs30) +\
            self._compute_magnitude_squared_term(P=0.0, M=6.3, Q=C['QC'],
                                                 W=C['WC'], mag=rup.mag)

        # convert from cm/s**2 to g (mean stays in natural-log space)
        mean = np.log(np.exp(mean) * 1e-2 / g)

        stddevs = self._get_stddevs(C['sigma'], C['tauC'], stddev_types,
                                    num_sites=len(sites.vs30))

        return mean, stddevs

    def _get_stddevs(self, sigma, tau, stddev_types, num_sites):
        """
        Return standard deviations as defined in equation 3 p. 902.
        """
        stddevs = []
        for stddev_type in stddev_types:
            assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
            if stddev_type == const.StdDev.TOTAL:
                # total = intra- and inter-event combined in quadrature
                sigma_t = np.sqrt(sigma ** 2 + tau ** 2)
                stddevs.append(sigma_t + np.zeros(num_sites))
            elif stddev_type == const.StdDev.INTRA_EVENT:
                stddevs.append(sigma + np.zeros(num_sites))
            elif stddev_type == const.StdDev.INTER_EVENT:
                stddevs.append(tau + np.zeros(num_sites))
        return stddevs

    def _compute_magnitude_term(self, C, mag):
        """
        Compute first term in equation 1, p. 901.
        """
        return C['a'] * mag

    def _compute_distance_term(self, C, mag, rrup):
        """
        Compute second and third terms in equation 1, p. 901.
        """
        term1 = C['b'] * rrup
        term2 = - np.log(rrup + C['c'] * np.exp(C['d'] * mag))
        return term1 + term2

    def _compute_focal_depth_term(self, C, hypo_depth):
        """
        Compute fourth term in equation 1, p. 901.
        """
        # p. 901. "(i.e, depth is capped at 125 km)".
        focal_depth = hypo_depth
        if focal_depth > 125.0:
            focal_depth = 125.0

        # p. 902. "We used the value of 15 km for the
        # depth coefficient hc ...".
        hc = 15.0

        # p. 901. "When h is larger than hc, the depth terms takes
        # effect ...". The next sentence specifies h>=hc.
        # float(...) turns the boolean gate into a 0/1 multiplier.
        return float(focal_depth >= hc) * C['e'] * (focal_depth - hc)

    def _compute_faulting_style_term(self, C, rake):
        """
        Compute fifth term in equation 1, p. 901.
        """
        # p. 900. "The differentiation in focal mechanism was
        # based on a rake angle criterion, with a rake of +/- 45
        # as demarcation between dip-slip and strike-slip."
        return float(rake > 45.0 and rake < 135.0) * C['FR']

    def _compute_site_class_term(self, C, vs30):
        """
        Compute nine-th term in equation 1, p. 901.
        """
        # map vs30 value to site class, see table 2, p. 901.
        site_term = np.zeros(len(vs30))

        # hard rock
        site_term[vs30 > 1100.0] = C['CH']

        # rock
        site_term[(vs30 > 600) & (vs30 <= 1100)] = C['C1']

        # hard soil
        site_term[(vs30 > 300) & (vs30 <= 600)] = C['C2']

        # medium soil
        site_term[(vs30 > 200) & (vs30 <= 300)] = C['C3']

        # soft soil
        site_term[vs30 <= 200] = C['C4']

        return site_term

    def _compute_magnitude_squared_term(self, P, M, Q, W, mag):
        """
        Compute magnitude squared term, equation 5, p. 909.
        """
        return P * (mag - M) + Q * (mag - M) ** 2 + W

    #: Coefficient table obtained by joining table 4 (except columns for
    #: SI, SS, SSL), table 5 (both at p. 903) and table 6 (only columns for
    #: QC WC TauC), p. 907.
    COEFFS_ASC = CoeffsTable(sa_damping=5, table="""\
    IMT a b c d e FR CH C1 C2 C3 C4 sigma QC WC tauC
    pga 1.101 -0.00564 0.0055 1.080 0.01412 0.251 0.293 1.111 1.344 1.355 1.420 0.604 0.0 0.0 0.303
    0.05 1.076 -0.00671 0.0075 1.060 0.01463 0.251 0.939 1.684 1.793 1.747 1.814 0.640 0.0 0.0 0.326
    0.10 1.118 -0.00787 0.0090 1.083 0.01423 0.240 1.499 2.061 2.135 2.031 2.082 0.694 0.0 0.0 0.342
    0.15 1.134 -0.00722 0.0100 1.053 0.01509 0.251 1.462 1.916 2.168 2.052 2.113 0.702 0.0 0.0 0.331
    0.20 1.147 -0.00659 0.0120 1.014 0.01462 0.260 1.280 1.669 2.085 2.001 2.030 0.692 0.0 0.0 0.312
    0.25 1.149 -0.00590 0.0140 0.966 0.01459 0.269 1.121 1.468 1.942 1.941 1.937 0.682 0.0 0.0 0.298
    0.30 1.163 -0.00520 0.0150 0.934 0.01458 0.259 0.852 1.172 1.683 1.808 1.770 0.670 0.0 0.0 0.300
    0.40 1.200 -0.00422 0.0100 0.959 0.01257 0.248 0.365 0.655 1.127 1.482 1.397 0.659 0.0 0.0 0.346
    0.50 1.250 -0.00338 0.0060 1.008 0.01114 0.247 -0.207 0.071 0.515 0.934 0.955 0.653 -0.0126 0.0116 0.338
    0.60 1.293 -0.00282 0.0030 1.088 0.01019 0.233 -0.705 -0.429 -0.003 0.394 0.559 0.653 -0.0329 0.0202 0.349
    0.70 1.336 -0.00258 0.0025 1.084 0.00979 0.220 -1.144 -0.866 -0.449 -0.111 0.188 0.652 -0.0501 0.0274 0.351
    0.80 1.386 -0.00242 0.0022 1.088 0.00944 0.232 -1.609 -1.325 -0.928 -0.620 -0.246 0.647 -0.0650 0.0336 0.356
    0.90 1.433 -0.00232 0.0020 1.109 0.00972 0.220 -2.023 -1.732 -1.349 -1.066 -0.643 0.653 -0.0781 0.0391 0.348
    1.00 1.479 -0.00220 0.0020 1.115 0.01005 0.211 -2.451 -2.152 -1.776 -1.523 -1.084 0.657 -0.0899 0.0440 0.338
    1.25 1.551 -0.00207 0.0020 1.083 0.01003 0.251 -3.243 -2.923 -2.542 -2.327 -1.936 0.660 -0.1148 0.0545 0.313
    1.50 1.621 -0.00224 0.0020 1.091 0.00928 0.248 -3.888 -3.548 -3.169 -2.979 -2.661 0.664 -0.1351 0.0630 0.306
    2.00 1.694 -0.00201 0.0025 1.055 0.00833 0.263 -4.783 -4.410 -4.039 -3.871 -3.640 0.669 -0.1672 0.0764 0.283
    2.50 1.748 -0.00187 0.0028 1.052 0.00776 0.262 -5.444 -5.049 -4.698 -4.496 -4.341 0.671 -0.1921 0.0869 0.287
    3.00 1.759 -0.00147 0.0032 1.025 0.00644 0.307 -5.839 -5.431 -5.089 -4.893 -4.758 0.667 -0.2124 0.0954 0.278
    4.00 1.826 -0.00195 0.0040 1.044 0.00590 0.353 -6.598 -6.181 -5.882 -5.698 -5.588 0.647 -0.2445 0.1088 0.273
    5.00 1.825 -0.00237 0.0050 1.065 0.00510 0.248 -6.752 -6.347 -6.051 -5.873 -5.798 0.643 -0.2694 0.1193 0.275
    """)
class ZhaoEtAl2006SInter(ZhaoEtAl2006Asc):
"""
Implements GMPE developed by <NAME> et al and published as
"Attenuation Relations of Strong Ground Motion in Japan Using Site
Classification Based on Predominant | |
False
except TypeError as e:
logger.debug("make_transfer: money of wrong type")
self.make_report(
from_acc = payment['from_acc'],
to_acc = payment['to_acc'],
value = 0,
kind = payment['kind'],
name = payment['name'],
code = C_transfer_ERR,
message = e.message(),
meta = payment['meta']
)
return False
# first, try to get the money from the sender account, tm = TransferMessage()
tm_sender = payment['from_acc'].payment_output(
account_str = payment['to_acc'].name,
payment = -money,
kind = payment['kind'],
description = payment['name'],
meta = payment['meta']
)
# if sending money succeeded, try the receiver side
if tm_sender.code == C_transfer_OK:
logger.debug("make_transfer: sender code is OK")
# in the wired case that money is less than what has been returned by the sender,
# throw an error message
if money < (-tm_sender.money):
raise ValueError("%f was requested from account '%s' but %f returned" % (money,
payment['from_acc'].name,
-tm_sender.money))
if money > (-tm_sender.money):
# if payment is fixed, throw an error, otherwise proceed
if payment['fixed']:
raise ValueError("%f was requested from account '%s' but %f returned" % (money,
payment['from_acc'].name,
-tm_sender.money))
else:
money = -tm_sender.money
tm_receiver = payment['to_acc'].payment_input(
account_str = payment['from_acc'].name,
payment = money,
kind = payment['kind'],
description = payment['name'],
meta = payment['meta']
)
# if receiving succeeded, return success
if tm_receiver.code == C_transfer_OK:
# in the wired case that money is less than what has been returned by the sender,
# throw an error message
if money < tm_receiver.money:
raise ValueError("%f was submitted to account '%s' but %f returned" % (money,
payment['to_acc'].name,
tm_receiver.money))
# if the receiver does not accept the entir money
if money > tm_receiver.money:
# check, whether payment is fixed
if payment['fixed']:
raise ValueError("%f was submitted to account '%s' but %f returned because it is fixed" % (money,
payment['to_acc'].name,
tm_receiver.money))
else:
# if payment is not fixed, we need to transfer the difference back to
# the sender account
payment['from_acc'].return_money( money - tm_receiver.money)
logger.debug("make_transfer: receiver code is OK")
self.make_report(
from_acc = payment['from_acc'],
to_acc = payment['to_acc'],
value = tm_receiver.money,
kind = payment['kind'],
name = payment['name'],
code = C_transfer_OK,
message = '',
meta = payment['meta']
)
return True
else:
# if an error on the receiver side happened,
# return the money back and report that
logger.debug("make_transfer: receiver code is not ok")
payment['from_acc'].return_money(money)
self.make_report(
from_acc = payment['from_acc'],
to_acc = payment['to_acc'],
value = tm_sender.money,
kind = payment['kind'],
name = payment['name'],
code = tm_receiver.code,
message = tm_receiver.message,
meta = payment['meta']
)
return False
else:
# if an error occured on the sending side, report this and return false
logger.debug("make_transfer: sending code is not OK")
self.make_report(
from_acc = payment['from_acc'],
to_acc = payment['to_acc'],
value = money,
kind = payment['kind'],
name = payment['name'],
code = tm_sender.code,
message = tm_sender.message,
meta = payment['meta']
)
return False
    def simulate(self, date_stop = None, delta = None, last_report = True):
        """ Simulation routine for the entire simulation.

        Advances the simulation one day at a time until either date_stop is
        reached, `delta` days have been simulated in this call, or the global
        C_max_time cap on total simulated days is hit.

        date_stop -- date to stop at (validated by validate.valid_date_stop)
        delta -- maximum number of days to simulate in this call
        last_report -- NOTE(review): unused in this method body; confirm
                       whether it is consumed elsewhere or can be dropped
        """
        # Initialization
        date_stop = validate.valid_date_stop(date_stop)
        if (not self._payments_iter):
            self._payments_iter = self._payments.payment(self._current_date)
        if (not self._next_pay):
            try:
                self._next_pay = next(self._payments_iter, C_default_payment)
            except StopIteration:
                # if there are no payments, create a date for a payment
                # that lies in the distant future
                # NOTE(review): next() with a default never raises
                # StopIteration, so this branch looks unreachable -- confirm
                self._next_pay = [{'date': Bank_Date.max}]
        delta = validate.valid_delta(delta)
        temp_delta = 0
        while ((self._current_date < date_stop) and # ...stop-date is reached
               (temp_delta < delta.days) and # and delta has not been exceeded
               ((self._current_date - self._date_start).days < C_max_time)): # ...number of simulated days exceeds max
            # 0. set the current day
            # only accounts that have already started participate in any step
            for account in self._accounts:
                if account._date_start <= self._current_date:
                    account.set_date(self._current_date)
            # 1. execute start-of-day function
            # everything that should happen before the money transfer
            for account in self._accounts:
                if account._date_start <= self._current_date:
                    account.start_of_day()
            # 2. execute all controller functions
            for controller in self._controller:
                controller(self)
            # 3. apply all payments for the day in correct temporal order
            if self._next_pay[0]['date'].date() == self._current_date.date():
                for payment in self._next_pay:
                    self.make_transfer(payment)
                self._next_pay = next(self._payments_iter, C_default_payment)
            # 4. execute end-of-day function
            # everything that should happen after the money transfer
            for account in self._accounts:
                if account._date_start <= self._current_date:
                    account.end_of_day()
            # go to the next day within the simulation
            self._day += 1
            self._current_date = self._date_start + timedelta(days = self._day)
            temp_delta += 1
def reports(self, interval='yearly'):
""" Returns a tuple of reports for a given interval """
return (account.report.create_report(interval) for account in self._accounts)
    def plt_summary(self, interval='yearly'):
        """ plots a summary of the simulation for the given report interval
        by handing one report per account to the project's plt.summary() """
        reports = self.reports(interval=interval)
        plt.summary(*reports)
def report_sum_of(self, semantic):
""" creates the sum for every report.sum_of(semantic) of each account """
return sum([a.report.sum_of(semantic) for a in self._accounts])
def print_reports(self, interval):
""" Creates for every account a report for a given interval """
for a in self._accounts:
print(a.name)
print(a.report.create_report(interval))
print(' ')
class Account(object):
""" Basic class for all types of accounts with reporting and simulation
functionality
obligatory methods for each account to be part of a simulation
- set_date
- start_of_day
- end_of_day
- payment_output
- payment_input
- return_money
"""
def __init__(self, amount, interest, date=None, name = None, meta = {}):
self._date_start = validate.valid_date(date)
self._name = validate.valid_name(name)
self._meta = meta
# check for problems
assert((isinstance(amount, int) or (isinstance(amount, float))))
if interest > 1.:
interest = interest / 100.
## generic variables, which are basically used in any class ##
## that inherits from account ##
# setting up the report and the semantics
self._report = Report(name = self._name)
self._account = int(amount * 100) # amount of money to start with
self._interest = interest # interest rate
self._current_date = self._date_start # current date of the simulation
self._caccount = self._account # current account, this variable
# is used in all subclasses
# sum helper variables for interest calculation and keep
self._sum_interest = 0
    def __str__(self):
        # the human-readable representation is simply the account's name
        return self._name
    @property
    def date(self):
        # NOTE(review): self._date is never assigned in the code visible here
        # (__init__ sets _date_start/_current_date); presumably set_date()
        # assigns it elsewhere -- confirm, otherwise this raises AttributeError
        return self._date
    @property
    def date_start(self):
        # first day this account participates in the simulation
        return self._date_start
    @property
    def name(self):
        # account name as validated/normalized at construction time
        return self._name
    @name.setter
    def name(self, name):
        # renaming also refreshes the report title, which embeds the name
        # and the formatted start date (C_format_date is a module constant)
        self._name = name
        self._report.name = self._name + ' - ' + str(self._date_start.strftime(C_format_date))
    @property
    def meta(self):
        # free-form metadata dict supplied at construction
        return self._meta
    @property
    def account(self):
        # balances are stored internally as integer cents; expose currency units
        return self._caccount / 100
    def get_account(self):
        """ alternative method to get the current account value. this method
        can be used, e.g. in payment-definitions to transfer the amount of
        money that a specific account has in the moment this payment is done.
        Instead of using an actual value, this method is called, evaluated and
        the return value is used """
        # delegate to the `account` property so both stay consistent
        return self.account
    @property
    def interest(self):
        # NOTE(review): __init__ already normalizes percent input to a
        # fraction, so dividing by 100 again returns rate/100, not the rate.
        # Suspected bug -- confirm the intended units before relying on this.
        return self._interest / 100
    @property
    def payments(self):
        # NOTE(review): self._payments is not assigned in the visible
        # __init__; presumably a subclass sets it -- confirm
        return self._payments
    @property
    def current_date(self):
        # date the simulation has advanced this account to
        return self._current_date
    @property
    def report(self):
        # per-account Report object collecting the daily bookkeeping entries
        return self._report
    def as_df(self):
        # convenience shortcut: export the report via its own as_df()
        return self.report.as_df()
    def report_time(self, date):
        """ returns true, if the requirements for a report are met """
        # base implementation always reports; subclasses can restrict this
        return True
def get_table_json(self, report):
""" Creates a table for a given report """
return {'header': [], 'rows': []}
def get_all_tables_json(self):
""" Creates tables for all intervals in report """
# create all intervals
daily = self._report
monthly = daily.create_report(interval='monthly')
yearly = monthly.create_report(interval='yearly')
return [{'category': 'Yearly',
'data': self.get_table_json(yearly)},
{'category': 'Monthly',
'data': self.get_table_json(monthly)},
{'category': 'Daily',
'data': self.get_table_json(daily)} ]
def get_report_json(self, interval="yearly"):
""" creates a data-structure of the report data that can be used for
displaying the report as table in html files (in jinja2 templates).
interval can be one of the common intervals of the report class (e.g.
yearly, monthly, daily) or None. If None, thee raw data are exported.
If interval is 'all', all intervals will be returned with a
different json structure """
if interval is 'all':
# create all intervals
return self.get_all_tables_json()
else:
if interval is None:
report = self._report
else:
report = self._report.create_report(interval)
return self.get_table_json(report)
    def payment_input(self, account_str, payment, kind, description, meta):
        """ Input function for payments. This account is the receiver
        of a transfer. This function, if derived from,
        can account for special checks for input operations """
        # base class accepts any incoming payment in full
        return TransferMessage(C_transfer_OK, money = payment)
    def payment_output(self, account_str, payment, kind, description, meta):
        """ Output function for payments. This account is the sender
        of a transfer. This function, if derived from,
        can account for special checks for output operations """
        # base class allows any outgoing payment in full
        return TransferMessage(C_transfer_OK, money = payment)
def return_money(self, money):
""" this is a hard return of transfer-money, in case the receiving side
| |
#!/usr/bin/python
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
# Usage:
#
# $ INSTANCE=minnesota ./statewide_munis_lookup.py --help
#
script_name = ('Populate MN cities lookup')
script_version = '1.0'
__version__ = script_version
__author__ = 'Cyclopath <<EMAIL>>'
__date__ = '2013-10-24'
# ***
# SYNC_ME: Search: Scripts: Load pyserver.
import os
import sys
sys.path.insert(0, os.path.abspath('%s/../../util'
% (os.path.abspath(os.curdir),)))
import pyserver_glue
import conf
import g
# ***
# NOTE: Make sure this always comes before other Ccp imports
import logging
from util_ import logging2
from util_.console import Console
log_level = logging.DEBUG
#log_level = logging2.VERBOSE1
#log_level = logging2.VERBOSE2
#log_level = logging2.VERBOSE4
#log_level = logging2.VERBOSE
conf.init_logging(True, True, Console.getTerminalSize()[0]-1, log_level)
log = g.log.getLogger('stwd_munis_l')
# ***
try:
from osgeo import ogr
from osgeo import osr
except ImportError:
import ogr
import osr
import os
import sys
import re
import time
import traceback
import conf
import g
from grax.access_level import Access_Level
from grax.access_scope import Access_Scope
from grax.access_style import Access_Style
from item import item_versioned
from item import item_user_access
from item import geofeature
from item.feat import region
from item.grac import group
from item.grac import new_item_policy
from item.util import revision
from util_ import db_glue
from util_ import misc
from util_.script_args import Ccp_Script_Args
from util_.script_base import Ccp_Script_Base
from util_.shapefile_wrapper import Shapefile_Wrapper
from util_.streetaddress import addressconf
from util_.streetaddress import streetaddress
from merge.ccp_merge_layer_base import Ccp_Merge_Layer_Base
# ***
debug_skip_commit = False
#debug_skip_commit = True
# *** Cli Parser class
class ArgParser_Script(Ccp_Script_Args):
    """Command-line parser for the MN cities/counties lookup-populate script."""

    #
    def __init__(self):
        Ccp_Script_Args.__init__(self, script_name, script_version)

    # ***

    #
    def prepare(self):
        # register this script's arguments on top of the common Ccp ones
        Ccp_Script_Args.prepare(self)

        self.add_argument(
            '--citys_state', dest='citys_state', action='store', type=str,
            default=conf.admin_district_abbrev, # default='MN',
            help='The U.S. state in which the cities exist.')

        # Download the city polygon Shapefile from:
        # http://www.dot.state.mn.us/maps/gdma/gis-data.html
        # http://www.dot.state.mn.us/maps/gdma/data/metadata/muni.htm
        # http://www.dot.state.mn.us/maps/gdma/data/datafiles/statewide/muni.zip
        self.add_argument(
            '--shapefile-cities', dest='shp_cities', action='store', type=str,
            default='/ccp/var/shapefiles/greatermn/muni_city_names/muni.shp',
            help='The path to the Shapefile of cities to import')

        # Download the county polygon Shapefile from:
        # http://www.dot.state.mn.us/maps/gdma/gis-data.html
        # http://www.dot.state.mn.us/maps/gdma/data/metadata/county.htm
        #http://www.dot.state.mn.us/maps/gdma/data/datafiles/statewide/county.zip
        self.add_argument(
            '--shapefile-counties', dest='shp_counties', action='store', type=str,
            default='/ccp/var/shapefiles/greatermn/county/county.shp',
            help='The path to the Shapefile of counties to import')
# *** Statewide_Munis_Lookup_Populate
class Statewide_Munis_Lookup_Populate(Ccp_Script_Base):
# *** Constructor
    def __init__(self):
        # wire in our argument parser; the base class drives everything else
        Ccp_Script_Base.__init__(self, ArgParser_Script)
# ***
#
    def query_builder_prepare(self):
        # no extra query-builder setup beyond the base class's
        Ccp_Script_Base.query_builder_prepare(self)
# ***
# This script's main() is very simple: it makes one of these objects and
# calls go(). Our base class reads the user's command line arguments and
# creates a query_builder object for us at self.qb before thunking to
# go_main().
#
    def go_main(self):
        """Reset and repopulate the lookup tables inside one RW transaction;
        commit only if every step succeeds (and debug_skip_commit is off)."""
        do_commit = False
        try:
            self.qb.db.transaction_begin_rw()
            self.lookup_tables_reset()
            self.lookup_tables_populate()
            self.report_on_popular_street_types()
            log.debug('Committing transaction')
            if debug_skip_commit:
                # deliberately abort so the finally-block rolls back
                raise Exception('DEBUG: Skipping commit: Debugging')
            do_commit = True
        except Exception, e:
            # FIXME: g.assurt()s that are caught here have empty msgs?
            log.error('Exception!: "%s" / %s' % (str(e), traceback.format_exc(),))
        finally:
            # close_query commits when do_commit is True, else rolls back
            self.cli_args.close_query(do_commit)
# ***
#
    def lookup_tables_reset(self):
        # drop and recreate each lookup table so repeated runs start clean
        self.lookup_tables_reset_state_cities()
        self.lookup_tables_reset_state_city_abbrev()
        self.lookup_tables_reset_state_counties()
        self.lookup_tables_reset_state_name_abbrev()
#
def lookup_tables_reset_state_cities(self):
log.debug('Dropping name lookup table')
drop_sql = "DROP TABLE IF EXISTS public.state_cities CASCADE"
self.qb.db.sql(drop_sql)
log.debug('Creating city lookup table')
create_sql = (
"""
CREATE TABLE public.state_cities (
state_city_id SERIAL PRIMARY KEY
, state_name TEXT -- E.g., "MN"
, municipal_name TEXT -- MUNI_NAME
, population INTEGER -- POPULATION
, area REAL -- AREA
, perimeter REAL -- PERIMETER
--, id_fips INT -- FIPS (Federal Information Processing Standard)
--, id_mcd INTEGER -- MCD (Minor Civil Division)
--, id_mun_ INTEGER -- MUN_
, mun_id INTEGER -- MUN_ID
)
""")
self.qb.db.sql(create_sql)
index_sql = (
"""
CREATE INDEX state_cities_state_name
ON state_cities (state_name)
""")
self.qb.db.sql(index_sql)
index_sql = (
"""
CREATE INDEX state_cities_municipal_name
ON state_cities (municipal_name)
""")
self.qb.db.sql(index_sql)
add_geom_sql = (
#"""
#SELECT AddGeometryColumn(
# 'state_cities', 'geometry', %d, 'MULTIPOLYGON', 2)
#""" % (conf.default_srid,))
"""
SELECT AddGeometryColumn(
'state_cities', 'geometry', %d, 'GEOMETRY', 2)
""" % (conf.default_srid,))
self.qb.db.sql(add_geom_sql)
#
drop_index_sql = (
"""
DROP INDEX IF EXISTS state_cities_geometry;
""")
#
create_index_sql = (
"""
CREATE INDEX state_cities_geometry ON state_cities
USING GIST (geometry);
""")
#
def lookup_tables_reset_state_city_abbrev(self):
log.debug('Dropping city name abbreviation table')
drop_sql = "DROP TABLE IF EXISTS public.state_city_abbrev CASCADE"
self.qb.db.sql(drop_sql)
log.debug('Creating city name abbreviation table')
create_sql = (
"""
CREATE TABLE public.state_city_abbrev (
state_name TEXT
, municipal_name TEXT
, municipal_abbrev TEXT
)
""")
self.qb.db.sql(create_sql)
pkey_sql = (
"""
ALTER TABLE state_city_abbrev
ADD CONSTRAINT state_city_abbrev_pkey
PRIMARY KEY (state_name, municipal_name, municipal_abbrev)
""")
self.qb.db.sql(pkey_sql)
#
def lookup_tables_reset_state_counties(self):
log.debug('Dropping county name lookup table')
drop_sql = "DROP TABLE IF EXISTS public.state_counties CASCADE"
self.qb.db.sql(drop_sql)
log.debug('Creating county name lookup table')
# The county_num is the alphanumberic order of the county name.
# The county_id is also in the MnDOT Shapefile, but other Shapefiles
# use county_num exclusively...
create_sql = (
"""
CREATE TABLE public.state_counties (
county_id INTEGER PRIMARY KEY
, state_name TEXT
, county_name TEXT
, county_num INTEGER
, area REAL
, perimeter REAL
)
""")
self.qb.db.sql(create_sql)
index_sql = (
"""
CREATE INDEX state_counties_state_name
ON state_counties (state_name)
""")
self.qb.db.sql(index_sql)
index_sql = (
"""
CREATE INDEX state_counties_county_name
ON state_counties (county_name)
""")
self.qb.db.sql(index_sql)
add_geom_sql = (
#"""
#SELECT AddGeometryColumn(
# 'state_counties', 'geometry', %d, 'MULTIPOLYGON', 2)
#""" % (conf.default_srid,))
"""
SELECT AddGeometryColumn(
'state_counties', 'geometry', %d, 'GEOMETRY', 2)
""" % (conf.default_srid,))
self.qb.db.sql(add_geom_sql)
#
drop_index_sql = (
"""
DROP INDEX IF EXISTS state_counties_geometry;
""")
#
create_index_sql = (
"""
CREATE INDEX state_counties_geometry ON state_counties
USING GIST (geometry);
""")
#
def lookup_tables_reset_state_name_abbrev(self):
log.debug('Dropping state name abbreviation table')
drop_sql = "DROP TABLE IF EXISTS public.state_name_abbrev CASCADE"
self.qb.db.sql(drop_sql)
log.debug('Creating state name abbreviation table')
create_sql = (
"""
CREATE TABLE public.state_name_abbrev (
state_name TEXT
, state_abbrev TEXT
)
""")
self.qb.db.sql(create_sql)
pkey_sql = (
"""
ALTER TABLE state_name_abbrev
ADD CONSTRAINT state_name_abbrev_pkey
PRIMARY KEY (state_name, state_abbrev)
""")
self.qb.db.sql(pkey_sql)
#
def lookup_tables_populate(self):
self.state_abbrev = self.validate_state_name_and_abbrev()
self.populate_table_city_geoms()
self.populate_table_city_abbrevs()
self.populate_table_county_geoms()
self.populate_table_state_abbrevs()
#
def validate_state_name_and_abbrev(self):
# *** Check that the state is valid.
state_abbrev = self.cli_opts.citys_state.upper()
if state_abbrev not in addressconf.States.STATE_NAMES:
try:
state_name = self.cli_opts.citys_state.lower()
state_abbrev = addressconf.States.STATE_CODES[state_name].upper()
except KeyError:
err_s = ('Please specify a valid statename, not: %s'
% (self.cli_opts.citys_state,))
log.error(err_s)
raise Exception(err_s)
return state_abbrev
#
def populate_table_city_geoms(self):
# *** Open the Shapefile.
self.shpw = Shapefile_Wrapper(self.cli_opts.shp_cities, 'MUN_ID')
self.shpw.source_open()
# *** Iterate through the layer features and make rows to insert.
log.debug('Compiling city lookup insert statement')
rows_to_insert = []
self.shpw.gdb_layer.ResetReading()
for feat in self.shpw.gdb_layer:
#geoms = self.shpw.get_polygon_geoms(feat)
#g.assurt(len(geoms) == 1)
geom = feat.GetGeometryRef()
g.assurt(geom is not None)
rows_to_insert.append(
#"('%s', %d, '%s', %d, %.5f, %.5f, ST_Multi('SRID=%d;%s'))"
"('%s', %d, '%s', %d, %.5f, %.5f, 'SRID=%d;%s')"
% (self.state_abbrev, # E.g., "MN"
int(feat.GetFieldAsString('MUN_ID')),
feat.GetFieldAsString('MUNI_NAME').lower(),
int(feat.GetFieldAsString('POPULATION')),
# MAYBE: Now that we store the geometry, these are just
# redundant calculated values...
float(feat.GetFieldAsString('AREA')),
float(feat.GetFieldAsString('PERIMETER')),
conf.default_srid,
#geoms[0].ExportToWkt(),
geom.ExportToWkt(),
# Skipping:
# int(feat.GetFieldAsString('MUN_')),
# int(feat.GetFieldAsString('MUN_ID')),
# int(feat.GetFieldAsString('FIPS')),
# int(feat.GetFieldAsString('MCD')),
))
self.shpw.source_close()
# END: C.f. Shapefile_Wrapper.source_open
# *** Populate the database table.
log.debug('Populating city lookup table')
insert_sql = (
"""
INSERT INTO public.state_cities (
state_name
, mun_id
, municipal_name
, population
, area
, perimeter
, geometry
) VALUES
%s
""" % (','.join(rows_to_insert),))
self.qb.db.sql(insert_sql)
#
def populate_table_city_abbrevs(self):
# *** Populate the city abbreviations table.
# FIXME: This code does not belong here...
if self.state_abbrev == 'MN':
# https://en.wikipedia.org/wiki/List_of_city_nicknames_in_Minnesota
insert_sql = (
"""
INSERT INTO public.state_city_abbrev (
state_name
, municipal_name
, municipal_abbrev
) VALUES
('MN', 'alexandria', 'alex'),
('MN', 'appleton', 'app'),
('MN', 'arden hills', 'a hills'),
('MN', 'austin', 'spamtown'),
('MN', 'austin', 'spamtown usa'),
('MN', 'cannon falls', 'cann'),
('MN', 'detroit lakes', 'troit'),
('MN', 'east bethel', 'eb'),
('MN', 'eden prairie', 'ep'),
('MN', 'eden prairie', 'e.p.'),
('MN', 'edina', 'bubble'),
('MN', 'golden valley', 'gv'),
('MN', 'marine on saint croix', 'marine'),
('MN', 'marine on saint croix', 'mosc'),
('MN', 'minneapolis', 'city of lakes'),
('MN', 'minneapolis', 'mill city'),
('MN', 'minneapolis', 'mini apple'),
('MN', 'minneapolis', 'mpls'),
('MN', 'minnesota city', 'mn city'),
('MN', 'minnesota lake', 'mn lake'),
('MN', 'minnetonka', 'mtka'),
('MN', 'minnetonka beach', 'mtka beach'),
('MN', 'mountain iron', 'mtn iron'),
('MN', 'mountain lake', 'mtn lake'),
('MN', 'north saint paul', 'north stp'),
('MN', 'north saint paul', 'nstp'),
('MN', 'north saint paul', 'nsp'),
('MN', 'norwood young america', 'norwood'),
('MN', 'norwood young america', 'young america'),
('MN', 'new york mills', 'ny mills'),
('MN', 'park rapids', 'prap'),
('MN', 'robbinsdale', 'birdtown'),
('MN', 'rochester', 'med town'),
('MN', 'saint louis park', 'slp'),
('MN', 'saint paul', 'pigs eye'),
('MN', 'saint paul', 'stp'),
('MN', 'saint paul', 'st p'),
('MN', 'two harbors', '2harb'),
('MN', 'warroad', 'hockeytown'),
('MN', 'west saint paul', 'west stp'),
('MN', 'west saint paul', 'wstp'),
('MN', 'west saint paul', 'wsp'),
('MN', 'worthington', 'turkey capital of the world')
""")
self.qb.db.sql(insert_sql)
__verify_sql__ = (
"""
SELECT * FROM public.state_cities AS sc
JOIN public.state_city_abbrev AS sca
ON (sc.municipal_name = sca.municipal_abbrev)
""")
rows = self.qb.db.sql(__verify_sql__)
g.assurt(len(rows) == 0)
#
def populate_table_county_geoms(self):
# *** Open the Shapefile.
self.shpw = Shapefile_Wrapper(self.cli_opts.shp_counties, 'COUNTY_ID')
self.shpw.source_open()
# *** Iterate through the layer features and make rows | |
mutated AA
# ### got conflicts, engerineered mutation and expression tag examples
# cache_dir = ['pdbe', 'mutated_AA_or_NA']
# url = 'https://www.ebi.ac.uk/pdbe/api/pdb/entry/mutated_AA_or_NA/$index'
# pdbe_mut = fetch_from_web_api(url, pdbname, cache_dir)
# d['links'].append(Template(url).substitute(index=quote(str(pdbname), safe='')))
# if pdbe_mut: #success
# r = pdbe_mut[pdbname.lower()]
# d['mutations_pdbe'] = []
# for mut in r:
# mut_from = mut['mutation_details']['from']
# mut_to = mut['mutation_details']['to']
# mut_type = mut['mutation_details']['type']
# construct_seq_number = mut['residue_number']
# wt_seq_number = mut['author_residue_number']
# t = {'wt':mut_from,'mut':mut_to,'type':mut_type,'c_seq_nr':construct_seq_number,'pos':wt_seq_number}
# d['mutations_pdbe'].append(t)
# else:
# print('failed pdbe_mut')
#https://www.rcsb.org/pdb/rest/das/pdb_uniprot_mapping/alignment?query=2RH1
## uniprot mappings
### seems to be IDs of stuff then use:
# http://www.uniprot.org/uniprot/P00720.xml
cache_dir = ['rcsb', 'pdb_uniprot_mapping']
url = 'https://www.rcsb.org/pdb/rest/das/pdb_uniprot_mapping/alignment?query=$index'
uniprot_map = fetch_from_web_api(url, pdbname, cache_dir, xml = True)
d['links'].append(Template(url).substitute(index=quote(str(pdbname), safe='')))
if uniprot_map: #success
inserts = {}
inserts_fixed = {}
for block in uniprot_map[0]:
if block.tag[-5:]!='block':
continue #only interested in the blocks...
i = 0
for segment in block:
if i==0:
construct_range = [segment.attrib['start'],segment.attrib['end']]
else:
insert_range = [segment.attrib['start'],segment.attrib['end']]
insert_id = segment.attrib['intObjectId']
prev_block = segment
i += 1
i = inserts.setdefault(insert_id, [])
i.append({'c':construct_range,'i':insert_range})
for insert,blocks in inserts.items():
if insert in uniprot_mapping:
insert = uniprot_mapping[insert][0]
inserts_fixed[insert] = {}
cache_dir = ['uniprot', 'id']
url = 'http://www.uniprot.org/uniprot/$index.xml'
insert_info = fetch_from_web_api(url, insert, cache_dir, xml = True)
d['links'].append(Template(url).substitute(index=quote(str(insert), safe='')))
if insert_info:
for elm in insert_info.findall('.//{http://uniprot.org/uniprot}recommendedName'):
inserts_fixed[insert]['alt_name'] = elm.find('{http://uniprot.org/uniprot}fullName').text
else:
inserts_fixed[insert]['alt_name'] = insert
# print(insert_info.findall('.//.'))
blocks_num = len(blocks)
prev_block = None
temp = []
for i, b in enumerate(blocks): #for each block, to glue them together
if i==0:
start = [b['i'][0],b['c'][0]]
end = [b['i'][1],b['c'][1]]
# print(i,b)
if i<blocks_num-1: #if not last
# print('cur',b,'next',blocks[i+1])
if int(b['i'][1])==int(blocks[i+1]['i'][0])-1 and int(b['c'][1])==int(blocks[i+1]['c'][0])-1:
#if insert is a contination #if construct continues
end = [blocks[i+1]['i'][1],blocks[i+1]['c'][1]]
else:
#gap
temp.append({'i_start':start[0],'i_end':end[0],'c_start':start[1],'c_end':end[1]})
# temp.append([start,end])
start = [blocks[i+1]['i'][0],blocks[i+1]['c'][0]]
end = [blocks[i+1]['i'][1],blocks[i+1]['c'][1]]
temp.append({'i_start':start[0],'i_end':end[0],'c_start':start[1],'c_end':end[1]})
i = inserts_fixed[insert].setdefault('positions', [])
i.append(temp)
d['inserts'] = inserts_fixed
else:
pass
# print('failed uniprot_map')
return d
def add_construct(d):
    """Persist one annotated construct dict ``d`` into the database.

    Replaces any existing Construct of the same name, then recreates it
    together with its crystal info, contributor, mutations, deletions,
    insertions (auxiliary proteins), modifications, expression system,
    solubilization/purification, and crystallization records.

    Assumes the referenced Protein and Structure rows already exist
    (the .get() calls below raise otherwise).
    """
    #delete if already name there
    Construct.objects.filter(name = d['construct_crystal']['pdb_name']).delete()
    protein = Protein.objects.filter(entry_name=d['construct_crystal']['uniprot']).get()
    structure = Structure.objects.filter(pdb_code__index=d['construct_crystal']['pdb'].upper()).get()
    protein_conformation = structure.protein_conformation
    construct = Construct()
    construct.protein = protein
    construct.name = d['construct_crystal']['pdb_name'].strip()
    # Keep the full annotation dict as pretty-printed JSON for audit/debug.
    construct.json = json.dumps(d, indent=4, separators=(',', ': '))
    construct.structure = structure
    #CrystalInfo
    crystal = CrystalInfo()
    crystal.resolution = structure.resolution
    crystal.pdb_data = structure.pdb_data
    crystal.pdb_code = structure.pdb_code.index
    crystal.save()
    construct.crystal = crystal
    #Contact INFO
    if 'contact_info' in d:
        construct.contributor, created = ContributorInfo.objects.get_or_create(name = d['contact_info']['name_cont'],
                                           pi_email = d['contact_info']['pi_email'],
                                           pi_name = d['contact_info']['pi_name'],
                                           urls = d['contact_info']['url'],
                                           date = datetime.datetime.strptime(d['contact_info']['date'], '%m/%d/%Y').strftime('%Y-%m-%d'),
                                           address = d['contact_info']['address'])
    construct.save()
    #MUTATIONS
    for mutation in d['mutations']:
        # Default optional annotation fields so the ORM calls below never KeyError.
        if 'type' not in mutation:
            mutation['type'] = ''
        if 'remark' not in mutation:
            mutation['remark'] = ''
        # Look up the wild-type residue on the parent (wild-type) protein.
        res_wt = Residue.objects.get(protein_conformation__protein=protein_conformation.protein.parent, sequence_number=mutation['pos'])
        # if res_wt.amino_acid != mutation['wt']:
        #     print('aa dont match',construct,mutation['pos'],"annotated wt:", mutation['wt'], "DB wt:",res_wt.amino_acid, "Annotated Mut",mutation['mut'])
        mutation_type, created = ConstructMutationType.objects.get_or_create(slug=slugify(mutation['type']),name=mutation['type'], effect=None)
        #construct=construct, TODO: create a unique one for each mutation per construct to avoid unambiguity
        mut = ConstructMutation.objects.create(construct=construct, sequence_number=mutation['pos'],wild_type_amino_acid=mutation['wt'],mutated_amino_acid=mutation['mut'],remark=mutation['remark'], residue=res_wt)
        mut.effects.add(mutation_type)
        #construct.mutations.add(mut)
    #print(d['raw_data'])
    #make sure to order auxiliary correctly
    # Re-key the 'auxiliary' entries as aux0..auxN in sort_pos_* order and
    # remember old-id -> new-id in ip_lookup for the deletion/insert matching.
    ip_lookup = {}
    if 'raw_data' in d:
        for name,aux in d['auxiliary'].items():
            id = name.replace('aux','')
            aux['sort'] = 0
            # NOTE(review): bare except silently ignores a missing or
            # non-numeric sort_pos_* entry -- presumably deliberate
            # best-effort; confirm before tightening.
            try:
                aux['sort'] = int(d['raw_data']["sort_pos_"+id])
            except:
                pass
        d['auxiliary'] = OrderedDict(sorted(d['auxiliary'].items(), key=lambda x: (x[1]['sort'], x[0])))
        temp = OrderedDict()
        for i, (name,aux) in enumerate(d['auxiliary'].items()):
            old_id = name.replace('aux','')
            temp['aux'+str(i)] = aux
            ip_lookup[old_id] = str(i)
        d['auxiliary'] = temp
    #DELETIONS
    insert_deletions = {}
    for deletion in d['deletions']:
        # if a 'deletion' is a single type and of non-user origin, assume its an insert and the pos is not actually deleted (3odu)
        dele = False
        if 'start' in deletion:
            dele, created = ConstructDeletion.objects.get_or_create(construct=construct, start=deletion['start'],end=deletion['end'])
        else:
            if deletion['origin']=='user':
                dele, created = ConstructDeletion.objects.get_or_create(construct=construct, start=deletion['pos'],end=deletion['pos'])
        # if dele:
        #     construct.deletions.add(dele)
        # Non-user deletions mark where an auxiliary insert replaced sequence;
        # remember them keyed by the (renumbered) aux id for the insert loop.
        if deletion['origin']!='user':
            id = deletion['origin'].split('_')[1]
            if id in ip_lookup:
                id = ip_lookup[id]
            insert_deletions[id] = deletion
    #INSERTIONS (AUX)
    for name,aux in d['auxiliary'].items():
        id = name.replace('aux','')
        aux_type, created = ConstructInsertionType.objects.get_or_create(name=aux['type'],subtype=aux['subtype'])
        insert = ConstructInsertion.objects.create(construct=construct, insert_type=aux_type,presence=aux['presence'],position=aux['position']+"_"+id)
        if insert.presence == 'YES' and insert.position.startswith('Within Receptor'):
            #need to fetch range
            if 'start' in aux:
                insert.start = aux['start']
                insert.end = aux['start']
            else:
                # Fall back to the matching non-user 'deletion' recorded above.
                if insert_deletions[id]['type'] == 'range':
                    insert.start = insert_deletions[id]['start']
                    insert.end = insert_deletions[id]['end']
                else:
                    insert.start = insert_deletions[id]['pos']
                    insert.end = insert_deletions[id]['pos']
            insert.save()
        # construct.insertions.add(insert)
    #MODIFICATIONS
    for modification in d['modifications']:
        mod, created = ConstructModification.objects.get_or_create(construct=construct, modification=modification['type'],position_type=modification['position'][0],
                                                                   pos_start=modification['position'][1][0],
                                                                   pos_end=modification['position'][1][1],remark=modification['remark'] )
        # construct.modifications.add(mod)
    #EXPRESSION
    if 'expression' in d:
        if 'expr_method' in d['expression']:
            if 'expr_remark' not in d['expression']:
                d['expression']['expr_remark'] = ''
            # Resolve the 'other [...]' placeholder values to the free-text fields.
            if d['expression']['host_cell'] == 'other [See next field]':
                d['expression']['host_cell'] = d['expression']['other_host_cell']
            if d['expression']['host_cell_type'] == 'other [See next field]':
                d['expression']['host_cell_type'] = d['expression']['other_host']
            if d['expression']['expr_method'] == 'Other [In case of E.Coli or Yeast recombinant expression]':
                d['expression']['expr_method'] = d['expression']['expr_other']
            construct.expression,created = ExpressionSystem.objects.get_or_create(expression_method=d['expression']['expr_method'],
                                                                                  host_cell_type=d['expression']['host_cell_type'],
                                                                                  host_cell=d['expression']['host_cell'],
                                                                                  remarks=d['expression']['expr_remark'])
    #solubilization
    if 'solubilization' in d:
        if 'deterg_type' in d['solubilization']:
            c_list = ChemicalList()
            list_name,created = ChemicalListName.objects.get_or_create(name='Solubilization')
            c_list.name = list_name
            c_list.save()
            for item,value in d['solubilization'].items():
                if item.startswith(('deterg_type')):
                    d_id = ''
                    if item != 'deterg_type': #if it has deterg_type_2 index
                        d_id = "_" + item.split('_')[2]
                    if value == 'other [See next field]':
                        value = d['raw_data']['other_deterg_type'+ d_id]
                    ct, created = ChemicalType.objects.get_or_create(name='detergent')
                    chem, created = Chemical.objects.get_or_create(name=value, chemical_type=ct)
                    if 'deterg_concentr' + d_id in d['solubilization']:
                        cc, created = ChemicalConc.objects.get_or_create(concentration=d['solubilization']['deterg_concentr' + d_id], concentration_unit=d['solubilization']['deterg_concentr_unit' + d_id], chemical=chem)
                    else: #if no concentration in dictionary, then it was inputted before capturing concentration for additional chemicals
                        cc, created = ChemicalConc.objects.get_or_create(concentration='', concentration_unit='',chemical=chem)
                    c_list.chemicals.add(cc)
            ct, created = ChemicalType.objects.get_or_create(name='additive')
            chem, created = Chemical.objects.get_or_create(name=d['solubilization']['solub_additive'], chemical_type=ct)
            cc, created = ChemicalConc.objects.get_or_create(concentration=d['solubilization']['additive_concentr'], concentration_unit=d['solubilization']['addit_concentr_unit'], chemical=chem)
            c_list.chemicals.add(cc)
            solubilization = Solubilization.objects.create(chemical_list = c_list)
            construct.solubilization = solubilization
            construct.save()
            #Purification
            purification = Purification.objects.create()
            for puri,step in d['solubilization'].items():
                if not puri.startswith(('chem_enz_treatment','sol_remark')):
                    continue
                else:
                    if step == 'other [See next field]':
                        continue #there will be sol_remark instead
                    if step == 'None':
                        continue #dont put in none step
                    s,created = PurificationStep.objects.get_or_create(name=step)
                    purification.steps.add(s)
            construct.purification = purification
            construct.save()
    #CRYSTALLIZATION
    if 'crystallization' in d:
        if 'crystal_type' in d['crystallization']:
            c = Crystallization()
            # Resolve the 'other [...]' placeholder values to the free-text fields.
            if d['crystallization']['crystal_method'] == 'other [See next field]':
                d['crystallization']['crystal_method'] = d['raw_data']['other_method']
            if d['crystallization']['crystal_type'] == 'other [See next field]':
                d['crystallization']['crystal_type'] = d['raw_data']['other_crystal_type']
            sub_name = "" if 'lcp_lipid' not in d['crystallization'] else d['crystallization']['lcp_lipid']
            c_type, created = CrystallizationTypes.objects.get_or_create(name=d['crystallization']['crystal_type'], sub_name=sub_name)
            c_method, created = CrystallizationMethods.objects.get_or_create(name=d['crystallization']['crystal_method'])
            c.crystal_type = c_type
            c.crystal_method = c_method
            if 'crystal_remark' in d['crystallization']:
                c.remarks = d['crystallization']['crystal_remark']
            c.temp = d['crystallization']['temperature']
            # A single pH is stored as a degenerate range (start == end).
            if d['crystallization']['ph']=='single_ph':
                c.ph_start = d['crystallization']['ph_single']
                c.ph_end = d['crystallization']['ph_single']
            else:
                c.ph_start = d['crystallization']['ph_range_one']
                c.ph_end = d['crystallization']['ph_range_two']
            c.protein_conc = d['crystallization']['protein_concentr']
            c.protein_conc_unit = d['crystallization']['protein_conc_unit']
            c.save()
            #MAKE LISTS
            c_list = ChemicalList()
            list_name,created = ChemicalListName.objects.get_or_create(name='Additional')
            c_list.name = list_name
            c_list.save()
            if 'chemical_components' in d['crystallization']:
                for chemical in d['crystallization']['chemical_components']:
                    if 'type' not in chemical: #to fix legacy json files
                        chemical['type'] = 'unknown'
                    ct, created = ChemicalType.objects.get_or_create(name=chemical['type'])
                    chem, created = Chemical.objects.get_or_create(name=chemical['component'], chemical_type=ct)
                    cc, created = ChemicalConc.objects.get_or_create(concentration=chemical['value'], concentration_unit=chemical['unit'], chemical=chem)
                    c_list.chemicals.add(cc)
            c.chemical_lists.add(c_list)
            if d['crystallization']['crystal_type']=='lipidic cubic phase': #make list of LCP stuff
                c_list = ChemicalList()
                # c_list.name = d['crystallization']['lcp_lipid']
                list_name,created = ChemicalListName.objects.get_or_create(name='LCP')
                c_list.name = list_name
                c_list.save()
                ct, created = ChemicalType.objects.get_or_create(name='LCP Lipid additive')
                chem, created = Chemical.objects.get_or_create(name=d['crystallization']['lcp_add'], chemical_type=ct)
                cc, created = ChemicalConc.objects.get_or_create(concentration=d['crystallization']['lcp_conc'], concentration_unit=d['crystallization']['lcp_conc_unit'], chemical=chem)
                c_list.chemicals.add(cc)
                c.chemical_lists.add(c_list)
            #DETERGENT
            if 'detergent' in d['crystallization']:
                c_list = ChemicalList()
                list_name,created = ChemicalListName.objects.get_or_create(name='Detergent')
                c_list.name = list_name
                c_list.save()
                ct, created = ChemicalType.objects.get_or_create(name='detergent')
                chem, created = Chemical.objects.get_or_create(name=d['crystallization']['detergent'], chemical_type=ct)
                cc, created = ChemicalConc.objects.get_or_create(concentration=d['crystallization']['deterg_conc'], concentration_unit=d['crystallization']['deterg_conc_unit'], chemical=chem)
                c_list.chemicals.add(cc)
                c.chemical_lists.add(c_list)
            #LIPID
            if 'lipid' in d['crystallization']:
                c_list = ChemicalList()
                list_name,created = ChemicalListName.objects.get_or_create(name='Lipid')
                c_list.name = list_name
                c_list.save()
                ct, created = ChemicalType.objects.get_or_create(name='lipid')
                chem, created = Chemical.objects.get_or_create(name=d['crystallization']['lipid'], chemical_type=ct)
                cc, created = ChemicalConc.objects.get_or_create(concentration=d['crystallization']['lipid_concentr'], concentration_unit=d['crystallization']['lipid_concentr_unit'], chemical=chem)
                c_list.chemicals.add(cc)
                c.chemical_lists.add(c_list)
            #Use ligand function to get ligand if it exists or otherwise create. Lots of checks for inchi/smiles/name
            ligand = get_or_make_ligand(d['construct_crystal']['ligand_id'],d['construct_crystal']['ligand_id_type'],d['construct_crystal']['ligand_name'])
            if 'ligand_activity' not in d['construct_crystal']:
                d['construct_crystal']['ligand_activity'] = 'unknown'
            if ligand and 'ligand_activity' in d['construct_crystal']:
                role_slug = slugify(d['construct_crystal']['ligand_activity'])
                # get_or_create can race on slug uniqueness; fall back to a plain get.
                try:
                    lr, created = LigandRole.objects.get_or_create(slug=role_slug,
                                        defaults={'name': d['construct_crystal']['ligand_activity']})
                except IntegrityError:
                    lr = LigandRole.objects.get(slug=role_slug)
            # NOTE(review): 'lr' is only bound inside the preceding if; safe
            # today because 'ligand_activity' is always defaulted above, but
            # fragile if that default is ever removed.
            if ligand:
                ligand_c = CrystallizationLigandConc()
                ligand_c.construct_crystallization = c
                ligand_c.ligand = ligand
                if lr:
                    ligand_c.ligand_role = lr
                if 'ligand_conc' in d['construct_crystal']:
                    ligand_c.ligand_conc = d['construct_crystal']['ligand_conc']
                if 'ligand_conc_unit' in d['construct_crystal']:
                    ligand_c.ligand_conc_unit = d['construct_crystal']['ligand_conc_unit']
                ligand_c.save()
                c.ligands.add(ligand_c)
            construct.crystallization = c
    construct.save()
def convert_ordered_to_disordered_annotation(d):
if 'raw_data' not in d:
d['raw_data'] = {}
d['raw_data']['pdb'] = d['construct_crystal']['pdb']
d['raw_data']['uniprot'] = d['construct_crystal']['uniprot']
if 'pdb_name' not in d['construct_crystal']:
d['raw_data']['pdb_name'] = d['construct_crystal']['uniprot']+"_construct"
else:
d['raw_data']['pdb_name'] = d['construct_crystal']['pdb_name']
#contributor
for k,v in d['contact_info'].items():
d['raw_data'][k] = v
#growth information
if 'crystal_growth' in d:
d['raw_data']['crystal_type'] = d['crystal_growth'][0]['grow_method']
i = 2 #starts with two for some reason, ask Anna
insert_starts = {}
for aux,v in d['auxiliary'].items():
# print(aux)
d['raw_data']['protein_type_'+str(i)] = v['type']
d['raw_data']['position_'+str(i)] = v['position']
d['raw_data']['presence_'+str(i)] = v['presence']
d['raw_data']['fusion_prot_'+str(i)] = "Please Select"
d['raw_data']['aux'+str(i)+'_subtype'] = v['subtype']
if 'start' in v:
| |
forAnalysis:
jobStatBrokerClouds.setdefault(previousCloud, {})
# use number of jobs in the cloud
jobStatBroker = jobStatBrokerClouds[previousCloud]
if site not in jobStatBroker:
jobStatBroker[site] = {}
if tmpProGroup not in jobStatBroker[site]:
jobStatBroker[site][tmpProGroup] = {'assigned':0,'activated':0,'running':0,'transferring':0}
# count # of assigned and activated jobs for prod by taking priorities in to account
nRunJobsPerGroup = None
if not forAnalysis and prevSourceLabel in ['managed','test']:
jobStatBrokerCloudsWithPrio.setdefault(prevPriority,
taskBuffer.getJobStatisticsBrokerage(
prevPriority,
prevPriority+prioInterval))
jobStatBrokerCloudsWithPrio[prevPriority].setdefault(previousCloud, {})
jobStatBrokerCloudsWithPrio[prevPriority][previousCloud].setdefault(site, {})
jobStatBrokerCloudsWithPrio[prevPriority][previousCloud][site].setdefault(
tmpProGroup, {'assigned':0,'activated':0,'running':0,'transferring':0})
nAssJobs = jobStatBrokerCloudsWithPrio[prevPriority][previousCloud][site][tmpProGroup]['assigned']
nActJobs = jobStatBrokerCloudsWithPrio[prevPriority][previousCloud][site][tmpProGroup]['activated']
nRunJobsPerGroup = jobStatBrokerCloudsWithPrio[prevPriority][previousCloud][site][tmpProGroup]['running']
# add newly assigned jobs
for tmpNewPriority in newJobStatWithPrio:
if tmpNewPriority < prevPriority:
continue
if previousCloud not in newJobStatWithPrio[tmpNewPriority]:
continue
if site not in newJobStatWithPrio[tmpNewPriority][previousCloud]:
continue
if tmpProGroup not in newJobStatWithPrio[tmpNewPriority][previousCloud][site]:
continue
nAssJobs += newJobStatWithPrio[tmpNewPriority][previousCloud][site][tmpProGroup]
else:
nAssJobs = jobStatBroker[site][tmpProGroup]['assigned']
if forAnalysis and 'defined' in jobStatBroker[site][tmpProGroup]:
nAssJobs += jobStatBroker[site][tmpProGroup]['defined']
nActJobs = jobStatBroker[site][tmpProGroup]['activated']
# number of jobs per node
if site not in nWNmap:
nJobsPerNode = 1
elif jobStatistics[site]['running']==0 or nWNmap[site]['updateJob']==0:
nJobsPerNode = 1
else:
if nRunJobsPerGroup is None:
nJobsPerNode = float(jobStatistics[site]['running'])/float(nWNmap[site]['updateJob'])
else:
if nRunJobsPerGroup == 0:
nJobsPerNode = 1.0/float(nWNmap[site]['updateJob'])
else:
nJobsPerNode = float(nRunJobsPerGroup)/float(nWNmap[site]['updateJob'])
# limit of the number of transferring jobs
if tmpSiteSpec.transferringlimit == 0:
maxTransferring = 2000
else:
maxTransferring = tmpSiteSpec.transferringlimit
            # get ratio of transferring to running
if not forAnalysis and tmpSiteSpec.cloud not in ['ND']:
nTraJobs = 0
nRunJobs = 0
for tmpGroupForTra in jobStatBroker[site]:
tmpCountsForTra = jobStatBroker[site][tmpGroupForTra]
if 'running' in tmpCountsForTra:
nRunJobs += tmpCountsForTra['running']
if 'transferring' in tmpCountsForTra:
nTraJobs += tmpCountsForTra['transferring']
tmpLog.debug(' running=%s transferring=%s max=%s' % (nRunJobs,nTraJobs,maxTransferring))
if max(maxTransferring,2*nRunJobs) < nTraJobs:
tmpLog.debug(" skip: %s many transferring=%s > max(%s,2*running=%s)" % (site,nTraJobs,maxTransferring,nRunJobs))
resultsForAnal['transferring'].append(site)
if prevSourceLabel in ['managed','test']:
# make message
message = '%s - too many transferring' % site
if message not in loggerMessages:
loggerMessages.append(message)
continue
# get ratio of running jobs = run(cloud)/run(all) for multi cloud (disabled)
multiCloudFactor = 1
# country preference
preferredCountryWeight = 1.0
preferredCountryWeightStr = ''
if forAnalysis:
if preferredCountries != [] and tmpSiteSpec.countryGroup != []:
for tmpCountry in preferredCountries:
if tmpCountry in tmpSiteSpec.countryGroup:
# avoid negative weight or zero-divide
if tmpSiteSpec.availableCPU >= tmpSiteSpec.pledgedCPU and tmpSiteSpec.pledgedCPU > 0:
preferredCountryWeight = float(tmpSiteSpec.availableCPU) / float(tmpSiteSpec.pledgedCPU)
preferredCountryWeightStr = "*(%s/%s)" % (tmpSiteSpec.availableCPU,tmpSiteSpec.pledgedCPU)
resultsForAnal['prefcountry'].append((site,tmpCountry))
break
tmpLog.debug(' country preference=%s' % preferredCountryWeightStr[1:])
# calculate weight
if specialWeight != {}:
if not pd2pT1:
# weight for T2 PD2P
nSubs = 1
if site in specialWeight:
nSubs = specialWeight[site]
tmpLog.debug(' %s nSubs:%s assigned:%s activated:%s running:%s nWNsG:%s nWNsU:%s' % \
(site,nSubs,nAssJobs,nActJobs,nRunningMap[site],nPilotsGet,nPilotsUpdate))
winv = float(nSubs) * float(nAssJobs+nActJobs) / float(1+nRunningMap[site]) / (1.0+float(nPilotsGet)/float(1+nPilotsUpdate))
if getWeight:
weightUsedByBrokerage[site] = "(1+%s/%s)*%s/%s/%s" % (nPilotsGet,1+nPilotsUpdate,1+nRunningMap[site],nAssJobs+nActJobs,nSubs)
else:
# weight for T1 PD2P
tmpLog.debug(' %s MoU:%s' % (site,specialWeight[site]))
winv = 1.0 / float(specialWeight[site])
if getWeight:
weightUsedByBrokerage[site] = "%s" % specialWeight[site]
else:
if not forAnalysis:
if nRunJobsPerGroup is None:
tmpLog.debug(' %s assigned:%s activated:%s running:%s nPilotsGet:%s nPilotsUpdate:%s multiCloud:%s' %
(site,nAssJobs,nActJobs,jobStatistics[site]['running'],nPilotsGet,nPilotsUpdate,multiCloudFactor))
else:
tmpLog.debug(' %s assigned:%s activated:%s runningGroup:%s nPilotsGet:%s nPilotsUpdate:%s multiCloud:%s' %
(site,nAssJobs,nActJobs,nRunJobsPerGroup,nPilotsGet,nPilotsUpdate,multiCloudFactor))
else:
tmpLog.debug(' %s assigned:%s activated:%s running:%s nWNsG:%s nWNsU:%s' %
(site,nAssJobs,nActJobs,nRunningMap[site],nPilotsGet,nPilotsUpdate))
if forAnalysis:
winv = float(nAssJobs+nActJobs) / float(1+nRunningMap[site]) / (1.0+float(nPilotsGet)/float(1+nPilotsUpdate))
else:
if nRunJobsPerGroup is None:
winv = float(nAssJobs+nActJobs) / float(1+jobStatistics[site]['running']) / (float(1+nPilotsGet)/float(1+nPilotsUpdate))
else:
winv = float(nAssJobs+nActJobs) / float(1+nRunJobsPerGroup) / (float(1+nPilotsGet)/float(1+nPilotsUpdate))
winv *= float(multiCloudFactor)
# send jobs to T1 when they require many or large inputs
if _isTooManyInput(nFilesPerJob,inputSizePerJob):
if site == siteMapper.getCloud(previousCloud)['source'] or \
(site=='NIKHEF-ELPROD' and previousCloud=='NL' and prevProType=='reprocessing') or \
(previousCloud in hospitalQueueMap and site in hospitalQueueMap[previousCloud]):
cloudT1Weight = 2.0
# use weight in cloudconfig
try:
tmpCloudT1Weight = float(siteMapper.getCloud(previousCloud)['weight'])
if tmpCloudT1Weight != 0.0:
cloudT1Weight = tmpCloudT1Weight
except Exception:
pass
winv /= cloudT1Weight
tmpLog.debug(' special weight for %s : nInputs/Job=%s inputSize/Job=%s weight=%s' %
(site,nFilesPerJob,inputSizePerJob,cloudT1Weight))
# found at least one candidate
foundOneCandidate = True
tmpLog.debug('Site:%s 1/Weight:%s' % (site, winv))
if forAnalysis and trustIS and reportLog:
resultsForAnal['weight'].append((site,'(1+%s/%s)*%s/%s%s' % (nPilotsGet,1+nPilotsUpdate,1+nRunningMap[site],
nAssJobs+nActJobs,preferredCountryWeightStr)))
# choose largest nMinSites weights
minSites[site] = winv
if len(minSites) > nMinSites:
maxSite = site
maxWinv = winv
for tmpSite in minSites:
tmpWinv = minSites[tmpSite]
if tmpWinv > maxWinv:
maxSite = tmpSite
maxWinv = tmpWinv
                    # delete max one
del minSites[maxSite]
# remove too different weights
if len(minSites) >= 2:
# look for minimum
minSite = list(minSites)[0]
minWinv = minSites[minSite]
for tmpSite in minSites:
tmpWinv = minSites[tmpSite]
if tmpWinv < minWinv:
minSite = tmpSite
minWinv = tmpWinv
# look for too different weights
difference = 2
removeSites = []
for tmpSite in minSites:
tmpWinv = minSites[tmpSite]
if tmpWinv > minWinv*difference:
removeSites.append(tmpSite)
# remove
for tmpSite in removeSites:
del minSites[tmpSite]
# set default
if len(minSites) == 0:
# cloud's list
if forAnalysis or siteMapper.checkCloud(previousCloud):
minSites[scanSiteList[0]] = 0
else:
minSites[panda_config.def_sitename] = 0
# release not found
if forAnalysis and trustIS:
candidateForAnal = False
# use only one site for prod_test to skip LFC scan
if prevProType in skipBrokerageProTypes:
if len(minSites) > 1:
minSites = {list(minSites)[0]:0}
# choose site
tmpLog.debug('Min Sites:%s' % minSites)
if len(fileList) ==0 or prevIsJEDI is True:
# choose min 1/weight
minSite = list(minSites)[0]
minWinv = minSites[minSite]
for tmpSite in minSites:
tmpWinv = minSites[tmpSite]
if tmpWinv < minWinv:
minSite = tmpSite
minWinv = tmpWinv
chosenCE = siteMapper.getSite(minSite)
else:
# compare # of files in LRC
maxNfiles = -1
for site in minSites:
tmp_chosen_ce = siteMapper.getSite(site)
# search LRC
if site in _disableLRCcheck:
tmpOKFiles = {}
else:
# get files from LRC
tmpOKFiles = _getOkFiles(tmp_chosen_ce, fileList, allLFNs, allOkFilesMap,
job.proSourceLabel, job.job_label, tmpLog, allScopes)
nFiles = len(tmpOKFiles)
tmpLog.debug('site:%s - nFiles:%s/%s %s' % (site,nFiles,len(fileList),str(tmpOKFiles)))
# choose site holding max # of files
if nFiles > maxNfiles:
chosenCE = tmp_chosen_ce
maxNfiles = nFiles
okFiles = tmpOKFiles
# set job spec
tmpLog.debug('indexJob : %s' % indexJob)
tmpLog.debug('nInputs/Job : %s' % nFilesPerJob)
tmpLog.debug('inputSize/Job : %s' % inputSizePerJob)
for tmpJob in jobs[indexJob-iJob-1:indexJob-1]:
# set computingSite
if (not candidateForAnal) and forAnalysis and trustIS:
resultsForAnalStr = 'ERROR : No candidate. '
if resultsForAnal['rel'] != []:
if prevCmtConfig in ['','NULL',None]:
resultsForAnalStr += 'Release:%s was not found at %s. ' % (prevRelease,str(resultsForAnal['rel']))
else:
resultsForAnalStr += 'Release:%s/%s was not found at %s. ' % (prevRelease,prevCmtConfig,str(resultsForAnal['rel']))
if resultsForAnal['pilot'] != []:
resultsForAnalStr += '%s are inactive (no pilots for last 3 hours). ' % str(resultsForAnal['pilot'])
if resultsForAnal['disk'] != []:
resultsForAnalStr += 'Disk shortage < %sGB at %s. ' % (diskThresholdAna,str(resultsForAnal['disk']))
if resultsForAnal['memory'] != []:
resultsForAnalStr += 'Insufficient RAM at %s. ' % str(resultsForAnal['memory'])
if resultsForAnal['maxtime'] != []:
resultsForAnalStr += 'Shorter walltime limit than maxCpuCount:%s at ' % prevMaxCpuCount
for tmpItem in resultsForAnal['maxtime']:
if siteMapper.checkSite(tmpItem):
resultsForAnalStr += '%s:%s,' % (tmpItem,siteMapper.getSite(tmpItem).maxtime)
resultsForAnalStr = resultsForAnalStr[:-1]
resultsForAnalStr += '. '
if resultsForAnal['status'] != []:
resultsForAnalStr += '%s are not online. ' % str(resultsForAnal['status'])
if resultsForAnal['reliability'] != []:
resultsForAnalStr += 'Insufficient reliability at %s. ' % str(resultsForAnal['reliability'])
resultsForAnalStr = resultsForAnalStr[:-1]
tmpJob.computingSite = resultsForAnalStr
else:
tmpJob.computingSite = chosenCE.sitename
tmpLog.debug('PandaID:%s -> site:%s' % (tmpJob.PandaID,tmpJob.computingSite))
# fail jobs if no sites have the release
if (not foundRelease or (tmpJob.relocationFlag != 1 and not foundOneCandidate)) and (tmpJob.prodSourceLabel in ['managed','test']):
# reset
if tmpJob.relocationFlag not in [1,2]:
tmpJob.computingSite = None
tmpJob.computingElement = None
# go to waiting
tmpJob.jobStatus = 'waiting'
tmpJob.brokerageErrorCode = ErrorCode.EC_Release
if tmpJob.relocationFlag in [1,2]:
try:
if resultsForAnal['pilot'] != []:
tmpJob.brokerageErrorDiag = '%s no pilots' % tmpJob.computingSite
elif resultsForAnal['disk'] != []:
tmpJob.brokerageErrorDiag = 'SE full at %s' % tmpJob.computingSite
elif resultsForAnal['memory'] != []:
tmpJob.brokerageErrorDiag = 'RAM shortage at %s' % tmpJob.computingSite
elif resultsForAnal['status'] != []:
tmpJob.brokerageErrorDiag = '%s not online' % tmpJob.computingSite
elif resultsForAnal['share'] != []:
tmpJob.brokerageErrorDiag = '%s zero share' % tmpJob.computingSite
elif resultsForAnal['cpucore'] != []:
tmpJob.brokerageErrorDiag = "CPU core mismatch at %s" % tmpJob.computingSite
elif resultsForAnal['maxtime'] != []:
tmpJob.brokerageErrorDiag = "short walltime at %s" % tmpJob.computingSite
elif resultsForAnal['transferring'] != []:
tmpJob.brokerageErrorDiag = 'too many transferring at %s' % tmpJob.computingSite
elif resultsForAnal['scratch'] != []:
tmpJob.brokerageErrorDiag = 'small scratch disk at %s' % tmpJob.computingSite
elif useCacheVersion:
tmpJob.brokerageErrorDiag = '%s/%s not found at %s' % (tmpJob.homepackage,tmpJob.cmtConfig,tmpJob.computingSite)
else:
tmpJob.brokerageErrorDiag = '%s/%s not found at %s' % (tmpJob.AtlasRelease,tmpJob.cmtConfig,tmpJob.computingSite)
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error("failed to set diag for %s: %s %s" % (tmpJob.PandaID,errtype,errvalue))
tmpJob.brokerageErrorDiag = 'failed to set diag. see brokerage log in the panda server'
elif prevBrokergageSiteList not in [[],None]:
try:
# make message
tmpJob.brokerageErrorDiag = makeCompactDiagMessage(prevBrokerageNote,resultsForAnal)
except Exception:
errtype,errvalue | |
print("to_reject", type(to_reject))
print("to_reject ...(len)", len(to_reject))
print(np.take(peaklist, to_reject, axis=0))
print("to_reject2 ...(len)", len(to_reject2))
print(np.take(peaklist, to_reject2, axis=0))
print(np.take(peaklist, to_reject3, axis=0))
print("After fitting, {}/{} peaks have been rejected\n due to (final - initial position)> FitPixelDev = {}".format(
len(to_reject3), len(peaklist), FitPixelDev))
print("{} spots have been rejected\n due to negative baseline".format(len(to_reject2)))
print("{} spots have been rejected\n due to much intensity ".format(len(to_reject4)))
print("{} spots have been rejected\n due to weak intensity ".format(len(to_reject5)))
print("{} spots have been rejected\n due to small peak size".format(len(to_reject6)))
print("{} spots have been rejected\n due to large peak size".format(len(to_reject7)))
# spots indices to reject
ToR = (set(to_reject)
| set(to_reject2)
| set(to_reject3)
| set(to_reject4)
| set(to_reject5)
| set(to_reject6)
| set(to_reject7)) # to reject
# spot indices to take
ToTake = set(np.arange(len(peaklist))) - ToR
if verbose:
print("index ToTake", ToTake)
print("nb indices in ToTake", len(ToTake))
if len(ToTake) < 1:
return None, par, peaklist
# print "Ipixmax",Ipixmax
if Ipixmax is None:
Ipixmax = peak_I
else:
# ask for maximum intensity in ROI, see
pass
# all peaks list building
tabpeak = np.array([peak_X, peak_Y, peak_I, peak_fwaxmaj, peak_fwaxmin, peak_inclination,
Xdev, Ydev, peak_bkg, Ipixmax]).T
# print("Results of all fits in tabpeak", tabpeak)
tabpeak = np.take(tabpeak, list(ToTake), axis=0)
# print "tabpeak.shape",tabpeak.shape
if len(tabpeak.shape) > 1: # several peaks
intense_rank = np.argsort(tabpeak[:, 2])[::-1] # sort by decreasing intensity-bkg
# print "intense_rank", intense_rank
tabIsorted = tabpeak[intense_rank]
# print "tabIsorted.shape case 1",tabIsorted.shape
else: # single peak
# tabIsorted = np.array(tabpeak)[:,0]
print("tabIsorted.shape case 2", tabIsorted.shape)
if position_definition == 1: # XMAS offset
tabIsorted[:, :2] = tabIsorted[:, :2] + np.array([1, 1])
if verbose>1:
print("\n\nIntensity sorted\n\n")
print(tabIsorted[:10])
print("X,Y", tabIsorted[:10, :2])
if verbose:
print("\n{} fitted peak(s)\n".format(len(tabIsorted)))
if purgeDuplicates and len(tabIsorted) > 2:
if verbose: print("Removing duplicates from fit")
# remove duplicates (close points), the most intense pixel is kept
# minimum distance fit solutions
pixeldistance = boxsize
# tabXY, index_todelete
_, index_todelete = GT.purgeClosePoints2(tabIsorted[:, :2], pixeldistance)
# print tabXY
# print index_todelete
tabIsorted = np.delete(tabIsorted, tuple(index_todelete), axis=0)
if verbose:
print(
"\n{} peaks found after removing duplicates minimum intermaxima distance = {})".format(
len(tabIsorted), pixeldistance))
return tabIsorted, par, peaklist
def PeakSearch(filename, stackimageindex=-1, CCDLabel="PRINCETON", center=None,
boxsizeROI=(200, 200), # use only if center != None
PixelNearRadius=5,
removeedge=2,
IntensityThreshold=400,
thresholdConvolve=200,
paramsHat=(4, 5, 2),
boxsize=15,
verbose=0,
position_definition=1,
local_maxima_search_method=1,
peakposition_definition="max",
fit_peaks_gaussian=1,
xtol=0.00001,
return_histo=1,
FitPixelDev=25, # to_reject3 parameter
write_execution_time=1,
Saturation_value=65535, # to be merged in CCDLabel
Saturation_value_flatpeak=65535,
MinIntensity=0,
PeakSizeRange=(0, 200),
Data_for_localMaxima=None,
Fit_with_Data_for_localMaxima=False,
Remove_BlackListedPeaks_fromfile=None,
maxPixelDistanceRejection=15.0,
NumberMaxofFits=5000,
reject_negative_baseline=True,
formulaexpression="A-1.1*B",
listrois=None,
outputIpixmax=True):
r"""
    Find local intensity maxima as starting position for fitting and return peaklist.
:param filename: string, full path to image data file
:param stackimageindex: integer, index corresponding to the position of image data on a stacked images file
if -1 means single image data w/o stacking
:param CCDLabel: string, label for CCD 2D detector used to read the image data file see dict_LaueTools.py
:param center: position
.. todo:: to be removed: position of the ROI center in CCD frame
:param boxsizeROI: dimensions of the ROI to crop the data array
only used if center != None
:param boxsize: half length of the selected ROI array centered on each peak, used for:
- fitting a peak
- estimating the background around a peak
- shifting array in second method of local maxima search (shifted arrays)
:param IntensityThreshold: integer, pixel intensity level above which potential peaks are kept for fitting position procedure. For local maxima method 0 and 1, this level is relative to zero intensity. For local maxima method 2, this level is relative to lowest intensity in the ROI (local background).
.. note:: Start with high value, because if too high, few peaks are found (only the most important), and if too low, too many local maxima are found leading to time consuming fitting procedure.
:param thresholdConvolve: integer, pixel intensity level in convolved image above which potential peaks are kept for fitting position procedure. This threshold step on convolved image is applied prior to the local threshold step with IntensityThreshold on initial image (with respect to the local background)
:param paramsHat: mexican hat kernel parameters (see :func:`LocalMaxima_ndimage`)
:param PixelNearRadius: integer, pixel distance between two regions considered as peaks.
.. note:: Start rather with a large value. If too low, there are very much peaks duplicates and this is very time consuming.
:param local_maxima_search_method: integer, Select method for find the local maxima, each of them will fitted
- 0 extract all pixel above intensity threshold
- 1 find pixels are highest than their neighbours in horizontal, vertica and diagonal direction (up to a given pixel distance)
- 2 find local hot pixels which after numerical convolution give high intensity above threshold (thresholdConvolve) then threshold (IntensityThreshold) on raw data
:param peakposition_definition: 'max' or 'center' for local_maxima_search_method == 2 to assign to the blob position its hottest pixel position or its center (no weight)
:param Saturation_value_flatpeak: saturation value of detector for local maxima search method 1
:param Remove_BlackListedPeaks_fromfile:
- None
- file fullpath, str, to a peaklist file containing peaks that will be deleted in peak list resulting from
the local maxima search procedure (prior to peak refinement)
- ndarray of nx2 X Y pixels cooordinates (avoid reading file in peaksearch series)
:param maxPixelDistanceRejection: maximum distance between black listed peaks and current peaks
(found by peak search) to be rejected
:param NumberMaxofFits: highest acceptable number of local maxima peak to be refined with a 2D modelPeakSearch
:param fit_peaks_gaussian:
- 0 no position and shape refinement procedure performed from local maxima (or blob) result
- 1 2D gaussian peak refinement
- 2 2D lorentzian peak refinement
:param xtol: relative error on solution (x vector) see args for leastsq in scipy.optimize
:param FitPixelDev: largest pixel distance between initial (from local maxima search)
and refined peak position
:param position_definition: due to various conventional habits when reading array, add some offset to fitdata XMAS or fit2d peak search values:
- 0 no offset (python numpy convention)
- 1 XMAS offset (first pixel is counted as located at 1 instead of 0)
- 2 fit2d offset (obsolete)
:param return_histo: - 0 3 output elements
- 1 4 elemts, last one is histogram of data
- 2 4 elemts, last one is the nb of raw blob found after convolution and threshold
:param Data_for_localMaxima: object to be used only for initial step of finding local maxima (blobs) search
(and not necessarly for peaks fitting procedure):
- ndarray = array data
- 'auto_background' = calculate and remove background computed from image data itself (read in file 'filename')
- path to image file (string) = B image to be used in a mathematical operation with Ato current image
:param Fit_with_Data_for_localMaxima: use 'Data_for_localMaxima' object as image when refining peaks position and shape
with initial peak position guess from local maxima search
:param formulaexpression: string containing A (raw data array image) and B (other data array image)
expressing mathematical operation,e.g:
'A-3.2*B+10000'
for simple background substraction (with B as background data):
'A-B' or 'A-alpha*B' with alpha > 1.
:param reject_negative_baseline: True reject refined peak result if intensity baseline (local background) is negative
(2D model is maybe not suitable)
:param outputIpixmax: compute maximal pixel intensity for all peaks found
:return: - peak list sorted by decreasing (integrated intensity - fitted bkg)
-peak_X,peak_Y,peak_I,peak_fwaxmaj,peak_fwaxmin,peak_inclination,Xdev,Ydev,peak_bkg
for fit_peaks_gaussian == 0 (no fitdata) and local_maxima_search_method==2 (convolution)
if peakposition_definition ='max' then X,Y,I are from the hottest pixels
if peakposition_definition ='center' then X,Y are blob center and I the hottest blob pixel
.. warning:: nb of output elements depends on 'return_histo' argument
"""
if return_histo in (0, 1):
return_nb_raw_blobs = 0
if return_histo in (2,):
return_nb_raw_blobs = 1
if write_execution_time:
t0 = ttt.time()
# user input its own shaped Data array
if isinstance(Data_for_localMaxima, np.ndarray):
# if verbose:
# print("Using 'Data_for_localMaxima' ndarray for finding local maxima")
Data = Data_for_localMaxima
# print "min, max intensity", np.amin(Data), np.amax(Data)
# TODO to test with VHR
framedim = Data.shape
ttread = ttt.time()
# Data | |
<gh_stars>1-10
# coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ElementStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'description': 'str',
'type': 'str',
'successful_campaigns_count': 'int',
'errored_campaigns_count': 'int',
'inactive_campaigns_count': 'int',
'needs_approval_campaigns_count': 'int',
'channel_types': 'list[str]',
'first_ran': 'datetime',
'last_ran': 'datetime',
'statistics_timestamp': 'datetime',
'path': 'list[ElementKey]'
}
attribute_map = {
'id': 'id',
'description': 'description',
'type': 'type',
'successful_campaigns_count': 'successfulCampaignsCount',
'errored_campaigns_count': 'erroredCampaignsCount',
'inactive_campaigns_count': 'inactiveCampaignsCount',
'needs_approval_campaigns_count': 'needsApprovalCampaignsCount',
'channel_types': 'channelTypes',
'first_ran': 'firstRan',
'last_ran': 'lastRan',
'statistics_timestamp': 'statisticsTimestamp',
'path': 'path'
}
def __init__(self, id=None, description=None, type=None, successful_campaigns_count=None, errored_campaigns_count=None, inactive_campaigns_count=None, needs_approval_campaigns_count=None, channel_types=None, first_ran=None, last_ran=None, statistics_timestamp=None, path=None): # noqa: E501
"""ElementStatus - a model defined in OpenAPI""" # noqa: E501
self._id = None
self._description = None
self._type = None
self._successful_campaigns_count = None
self._errored_campaigns_count = None
self._inactive_campaigns_count = None
self._needs_approval_campaigns_count = None
self._channel_types = None
self._first_ran = None
self._last_ran = None
self._statistics_timestamp = None
self._path = None
self.discriminator = None
self.id = id
self.description = description
self.type = type
if successful_campaigns_count is not None:
self.successful_campaigns_count = successful_campaigns_count
if errored_campaigns_count is not None:
self.errored_campaigns_count = errored_campaigns_count
if inactive_campaigns_count is not None:
self.inactive_campaigns_count = inactive_campaigns_count
if needs_approval_campaigns_count is not None:
self.needs_approval_campaigns_count = needs_approval_campaigns_count
if channel_types is not None:
self.channel_types = channel_types
if first_ran is not None:
self.first_ran = first_ran
if last_ran is not None:
self.last_ran = last_ran
if statistics_timestamp is not None:
self.statistics_timestamp = statistics_timestamp
if path is not None:
self.path = path
@property
def id(self):
"""Gets the id of this ElementStatus. # noqa: E501
The element's id # noqa: E501
:return: The id of this ElementStatus. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ElementStatus.
The element's id # noqa: E501
:param id: The id of this ElementStatus. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def description(self):
"""Gets the description of this ElementStatus. # noqa: E501
The element's description # noqa: E501
:return: The description of this ElementStatus. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ElementStatus.
The element's description # noqa: E501
:param description: The description of this ElementStatus. # noqa: E501
:type: str
"""
if description is None:
raise ValueError("Invalid value for `description`, must not be `None`") # noqa: E501
self._description = description
@property
def type(self):
"""Gets the type of this ElementStatus. # noqa: E501
The element's type # noqa: E501
:return: The type of this ElementStatus. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ElementStatus.
The element's type # noqa: E501
:param type: The type of this ElementStatus. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["Unknown", "Diagram", "Programme", "Area", "Campaign", "Message", "Group", "Audience", "Content", "Delivery", "Pool", "Responses", "Transition", "PauseAction"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def successful_campaigns_count(self):
"""Gets the successful_campaigns_count of this ElementStatus. # noqa: E501
The number of campaigns that currently have a success status within this element # noqa: E501
:return: The successful_campaigns_count of this ElementStatus. # noqa: E501
:rtype: int
"""
return self._successful_campaigns_count
@successful_campaigns_count.setter
def successful_campaigns_count(self, successful_campaigns_count):
"""Sets the successful_campaigns_count of this ElementStatus.
The number of campaigns that currently have a success status within this element # noqa: E501
:param successful_campaigns_count: The successful_campaigns_count of this ElementStatus. # noqa: E501
:type: int
"""
self._successful_campaigns_count = successful_campaigns_count
@property
def errored_campaigns_count(self):
"""Gets the errored_campaigns_count of this ElementStatus. # noqa: E501
The number of campaigns that currently have an errored status within this element # noqa: E501
:return: The errored_campaigns_count of this ElementStatus. # noqa: E501
:rtype: int
"""
return self._errored_campaigns_count
@errored_campaigns_count.setter
def errored_campaigns_count(self, errored_campaigns_count):
"""Sets the errored_campaigns_count of this ElementStatus.
The number of campaigns that currently have an errored status within this element # noqa: E501
:param errored_campaigns_count: The errored_campaigns_count of this ElementStatus. # noqa: E501
:type: int
"""
self._errored_campaigns_count = errored_campaigns_count
@property
def inactive_campaigns_count(self):
"""Gets the inactive_campaigns_count of this ElementStatus. # noqa: E501
The number of campaigns that currently have an inactive status within this element # noqa: E501
:return: The inactive_campaigns_count of this ElementStatus. # noqa: E501
:rtype: int
"""
return self._inactive_campaigns_count
@inactive_campaigns_count.setter
def inactive_campaigns_count(self, inactive_campaigns_count):
"""Sets the inactive_campaigns_count of this ElementStatus.
The number of campaigns that currently have an inactive status within this element # noqa: E501
:param inactive_campaigns_count: The inactive_campaigns_count of this ElementStatus. # noqa: E501
:type: int
"""
self._inactive_campaigns_count = inactive_campaigns_count
@property
def needs_approval_campaigns_count(self):
"""Gets the needs_approval_campaigns_count of this ElementStatus. # noqa: E501
The number of campaigns that currently have a message that needs approval within this element # noqa: E501
:return: The needs_approval_campaigns_count of this ElementStatus. # noqa: E501
:rtype: int
"""
return self._needs_approval_campaigns_count
@needs_approval_campaigns_count.setter
def needs_approval_campaigns_count(self, needs_approval_campaigns_count):
"""Sets the needs_approval_campaigns_count of this ElementStatus.
The number of campaigns that currently have a message that needs approval within this element # noqa: E501
:param needs_approval_campaigns_count: The needs_approval_campaigns_count of this ElementStatus. # noqa: E501
:type: int
"""
self._needs_approval_campaigns_count = needs_approval_campaigns_count
@property
def channel_types(self):
"""Gets the channel_types of this ElementStatus. # noqa: E501
The different types of channel that have been used by deliveries within this element # noqa: E501
:return: The channel_types of this ElementStatus. # noqa: E501
:rtype: list[str]
"""
return self._channel_types
@channel_types.setter
def channel_types(self, channel_types):
"""Sets the channel_types of this ElementStatus.
The different types of channel that have been used by deliveries within this element # noqa: E501
:param channel_types: The channel_types of this ElementStatus. # noqa: E501
:type: list[str]
"""
allowed_values = ["Unknown", "Control", "Broadcast", "File", "Ftp", "Facebook", "MicrosoftDynamics", "SalesForce", "PushNotification", "Twitter", "Google", "LinkedIn", "Composite"] # noqa: E501
if not set(channel_types).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `channel_types` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(channel_types) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._channel_types = channel_types
@property
def first_ran(self):
    """Gets the first_ran of this ElementStatus.  # noqa: E501

    The first time that any deliveries ran within this element  # noqa: E501

    :return: The first_ran of this ElementStatus.  # noqa: E501
    :rtype: datetime
    """
    # Read-only view of the backing field.
    return self._first_ran
@first_ran.setter
def first_ran(self, first_ran):
    """Sets the first_ran of this ElementStatus.

    The first time that any deliveries ran within this element  # noqa: E501

    :param first_ran: The first_ran of this ElementStatus.  # noqa: E501
    :type: datetime
    """
    # Stored as-is; no validation is performed.
    self._first_ran = first_ran
@property
def last_ran(self):
    """Gets the last_ran of this ElementStatus.  # noqa: E501

    The last time that any deliveries ran within this element  # noqa: E501

    :return: The last_ran of this ElementStatus.  # noqa: E501
    :rtype: datetime
    """
    # Read-only view of the backing field.
    return self._last_ran
@last_ran.setter
def last_ran(self, last_ran):
    """Sets the last_ran of this ElementStatus.

    The last time that any deliveries ran within this element  # noqa: E501

    :param last_ran: The last_ran of this ElementStatus.  # noqa: E501
    :type: datetime
    """
    # Stored as-is; no validation is performed.
    self._last_ran = last_ran
@property
def statistics_timestamp(self):
    """Gets the statistics_timestamp of this ElementStatus.  # noqa: E501

    The date and time that the statistics were calculated  # noqa: E501

    :return: The statistics_timestamp of this ElementStatus.  # noqa: E501
    :rtype: datetime
    """
    # Read-only view of the backing field.
    return self._statistics_timestamp
@statistics_timestamp.setter
def statistics_timestamp(self, statistics_timestamp):
    """Sets the statistics_timestamp of this ElementStatus.

    The date and time that the statistics were calculated  # noqa: E501

    :param statistics_timestamp: The statistics_timestamp of this ElementStatus.  # noqa: E501
    :type: datetime
    """
    # Stored as-is; no validation is performed.
    self._statistics_timestamp = statistics_timestamp
@property
def | |
float]]]
) -> np.float64:
"""
Function for calculating the log-likelihood for the sampled
parameter cube.
Parameters
----------
params : np.ndarray, pymultinest.run.LP_c_double
Cube with physical parameters.
prior : dict(str, tuple(float, float))
Dictionary with Gaussian priors for one or multiple
parameters. The prior can be set for any of the atmosphere
or calibration parameters, e.g.
``prior={'teff': (1200., 100.)}``. Additionally, a prior
can be set for the mass, e.g. ``prior={'mass': (13., 3.)}``
for an expected mass of 13 Mjup with an uncertainty of
3 Mjup.
Returns
-------
float
Log-likelihood.
"""
# Initilize dictionaries for different parameter types
spec_scaling = {}
phot_scaling = {}
err_scaling = {}
corr_len = {}
corr_amp = {}
dust_param = {}
disk_param = {}
veil_param = {}
param_dict = {}
for item in self.bounds:
# Add the parameters from the params to their dictionaries
if item[:8] == "scaling_" and item[8:] in self.spectrum:
spec_scaling[item[8:]] = params[self.cube_index[item]]
elif item[:6] == "error_" and item[6:] in self.spectrum:
err_scaling[item[6:]] = params[self.cube_index[item]]
elif item[:9] == "corr_len_" and item[9:] in self.spectrum:
corr_len[item[9:]] = 10.0 ** params[self.cube_index[item]] # (um)
elif item[:9] == "corr_amp_" and item[9:] in self.spectrum:
corr_amp[item[9:]] = params[self.cube_index[item]]
elif item[-6:] == "_error" and item[:-6] in self.filter_name:
phot_scaling[item[:-6]] = params[self.cube_index[item]]
elif item[-6:] == "_error" and item[:-6] in self.instr_name:
phot_scaling[item[:-6]] = params[self.cube_index[item]]
elif item[:8] == "lognorm_":
dust_param[item] = params[self.cube_index[item]]
elif item[:9] == "powerlaw_":
dust_param[item] = params[self.cube_index[item]]
elif item[:4] == "ism_":
dust_param[item] = params[self.cube_index[item]]
elif item == "disk_teff":
disk_param["teff"] = params[self.cube_index[item]]
elif item == "disk_radius":
disk_param["radius"] = params[self.cube_index[item]]
elif item == "veil_a":
veil_param["veil_a"] = params[self.cube_index[item]]
elif item == "veil_b":
veil_param["veil_b"] = params[self.cube_index[item]]
elif item == "veil_ref":
veil_param["veil_ref"] = params[self.cube_index[item]]
elif item == "spec_weight":
pass
else:
param_dict[item] = params[self.cube_index[item]]
# Add the distance manually because it should
# not be provided in the bounds dictionary
distance = params[self.cube_index["distance"]]
for item in self.fix_param:
# Add the fixed parameters to their dictionaries
if item[:8] == "scaling_" and item[8:] in self.spectrum:
spec_scaling[item[8:]] = self.fix_param[item]
elif item[:6] == "error_" and item[6:] in self.spectrum:
err_scaling[item[6:]] = self.fix_param[item]
elif item[:9] == "corr_len_" and item[9:] in self.spectrum:
corr_len[item[9:]] = self.fix_param[item] # (um)
elif item[:9] == "corr_amp_" and item[9:] in self.spectrum:
corr_amp[item[9:]] = self.fix_param[item]
elif item[:8] == "lognorm_":
dust_param[item] = self.fix_param[item]
elif item[:9] == "powerlaw_":
dust_param[item] = self.fix_param[item]
elif item[:4] == "ism_":
dust_param[item] = self.fix_param[item]
elif item == "disk_teff":
disk_param["teff"] = self.fix_param[item]
elif item == "disk_radius":
disk_param["radius"] = self.fix_param[item]
elif item == "spec_weight":
pass
else:
param_dict[item] = self.fix_param[item]
if self.model == "planck" and self.n_planck > 1:
for i in range(self.n_planck - 1):
if param_dict[f"teff_{i+1}"] > param_dict[f"teff_{i}"]:
return -np.inf
if param_dict[f"radius_{i}"] > param_dict[f"radius_{i+1}"]:
return -np.inf
if disk_param:
if disk_param["teff"] > param_dict["teff"]:
return -np.inf
if disk_param["radius"] < param_dict["radius"]:
return -np.inf
if self.model != "powerlaw":
if "radius_0" in param_dict and "radius_1" in param_dict:
flux_scaling_0 = (param_dict["radius_0"] * constants.R_JUP) ** 2 / (
distance * constants.PARSEC
) ** 2
flux_scaling_1 = (param_dict["radius_1"] * constants.R_JUP) ** 2 / (
distance * constants.PARSEC
) ** 2
# The scaling is applied manually because of the interpolation
del param_dict["radius_0"]
del param_dict["radius_1"]
else:
flux_scaling = (param_dict["radius"] * constants.R_JUP) ** 2 / (
distance * constants.PARSEC
) ** 2
# The scaling is applied manually because of the interpolation
del param_dict["radius"]
for item in self.spectrum:
if item not in spec_scaling:
spec_scaling[item] = 1.0
if item not in err_scaling:
err_scaling[item] = None
if self.param_interp is not None:
# Sort the parameters in the correct order for
# spectrum_interp because spectrum_interp creates
# a list in the order of the keys in param_dict
param_tmp = param_dict.copy()
param_dict = {}
for item in self.param_interp:
param_dict[item] = param_tmp[item]
ln_like = 0.0
for key, value in prior.items():
if key == "mass":
mass = read_util.get_mass(
params[self.cube_index["logg"]],
params[self.cube_index["radius"]],
)
ln_like += -0.5 * (mass - value[0]) ** 2 / value[1] ** 2
else:
ln_like += (
-0.5
* (params[self.cube_index[key]] - value[0]) ** 2
/ value[1] ** 2
)
if "lognorm_ext" in dust_param:
cross_tmp = self.cross_sections["Generic/Bessell.V"](
dust_param["lognorm_sigma"], 10.0 ** dust_param["lognorm_radius"]
)[0]
n_grains = (
dust_param["lognorm_ext"] / cross_tmp / 2.5 / np.log10(np.exp(1.0))
)
elif "powerlaw_ext" in dust_param:
cross_tmp = self.cross_sections["Generic/Bessell.V"](
dust_param["powerlaw_exp"], 10.0 ** dust_param["powerlaw_max"]
)
n_grains = (
dust_param["powerlaw_ext"] / cross_tmp / 2.5 / np.log10(np.exp(1.0))
)
for i, obj_item in enumerate(self.objphot):
# Get filter name
phot_filter = self.modelphot[i].filter_name
# Shortcut for weight
weight = self.weights[phot_filter]
if self.model == "planck":
readplanck = read_planck.ReadPlanck(filter_name=phot_filter)
phot_flux = readplanck.get_flux(param_dict, synphot=self.modelphot[i])[
0
]
elif self.model == "powerlaw":
powerl_box = read_util.powerlaw_spectrum(
self.modelphot[i].wavel_range, param_dict
)
phot_flux = self.modelphot[i].spectrum_to_flux(
powerl_box.wavelength, powerl_box.flux
)[0]
else:
if self.binary:
# Star 0
param_0 = read_util.binary_to_single(param_dict, 0)
phot_flux_0 = self.modelphot[i].spectrum_interp(
list(param_0.values())
)[0][0]
# Scale the spectrum by (radius/distance)^2
if "radius" in self.modelpar:
phot_flux_0 *= flux_scaling
elif "radius_0" in self.modelpar:
phot_flux_0 *= flux_scaling_0
# Star 1
param_1 = read_util.binary_to_single(param_dict, 1)
phot_flux_1 = self.modelphot[i].spectrum_interp(
list(param_1.values())
)[0][0]
# Scale the spectrum by (radius/distance)^2
if "radius" in self.modelpar:
phot_flux_1 *= flux_scaling
elif "radius_1" in self.modelpar:
phot_flux_1 *= flux_scaling_1
# Weighted flux of two stars
phot_flux = (
params[self.cube_index["spec_weight"]] * phot_flux_0
+ (1.0 - params[self.cube_index["spec_weight"]]) * phot_flux_1
)
else:
phot_flux = self.modelphot[i].spectrum_interp(
list(param_dict.values())
)[0][0]
phot_flux *= flux_scaling
if disk_param:
phot_tmp = self.diskphot[i].spectrum_interp([disk_param["teff"]])[0][0]
phot_flux += (
phot_tmp
* (disk_param["radius"] * constants.R_JUP) ** 2
/ (distance * constants.PARSEC) ** 2
)
if "lognorm_ext" in dust_param:
cross_tmp = self.cross_sections[phot_filter](
dust_param["lognorm_sigma"], 10.0 ** dust_param["lognorm_radius"]
)[0]
phot_flux *= np.exp(-cross_tmp * n_grains)
elif "powerlaw_ext" in dust_param:
cross_tmp = self.cross_sections[phot_filter](
dust_param["powerlaw_exp"], 10.0 ** dust_param["powerlaw_max"]
)[0]
phot_flux *= np.exp(-cross_tmp * n_grains)
elif "ism_ext" in dust_param:
read_filt = read_filter.ReadFilter(phot_filter)
filt_wavel = np.array([read_filt.mean_wavelength()])
ism_reddening = dust_param.get("ism_red", 3.1)
ext_filt = dust_util.ism_extinction(
dust_param["ism_ext"], ism_reddening, filt_wavel
)
phot_flux *= 10.0 ** (-0.4 * ext_filt[0])
if obj_item.ndim == 1:
phot_var = obj_item[1] ** 2
# Get the telescope/instrument name
instr_check = phot_filter.split(".")[0]
if phot_filter in phot_scaling:
# Inflate photometric error for filter
phot_var += phot_scaling[phot_filter] ** 2 * obj_item[0] ** 2
elif instr_check in phot_scaling:
# Inflate photometric error for instrument
phot_var += phot_scaling[instr_check] ** 2 * obj_item[0] ** 2
ln_like += -0.5 * weight * (obj_item[0] - phot_flux) ** 2 / phot_var
# Only required when fitting an error inflation
ln_like += -0.5 * weight * np.log(2.0 * np.pi * phot_var)
else:
for j in range(obj_item.shape[1]):
phot_var = obj_item[1, j] ** 2
if (
self.model == "powerlaw"
and f"{phot_filter}_error" in param_dict
):
phot_var += (
param_dict[f"{phot_filter}_error"] ** 2
* obj_item[0, j] ** 2
)
ln_like += (
-0.5 * weight * (obj_item[0, j] - phot_flux) ** 2 / phot_var
)
# Only required when fitting an error inflation
ln_like += -0.5 * weight * np.log(2.0 * np.pi * phot_var)
for i, item in enumerate(self.spectrum.keys()):
# Calculate or interpolate the model spectrum
# Shortcut for the weight
weight = self.weights[item]
if self.model == "planck":
# Calculate a blackbody spectrum
readplanck = read_planck.ReadPlanck(
(
0.9 * self.spectrum[item][0][0, 0],
1.1 * self.spectrum[item][0][-1, 0],
)
)
model_box = readplanck.get_spectrum(param_dict, 1000.0, smooth=True)
# Resample the spectrum to the observed wavelengths
model_flux = spectres.spectres(
self.spectrum[item][0][:, 0], model_box.wavelength, model_box.flux
)
else:
# Interpolate the model spectrum from the grid
if self.binary:
# Star 1
param_0 = read_util.binary_to_single(param_dict, 0)
model_flux_0 = self.modelspec[i].spectrum_interp(
list(param_0.values())
)[0, :]
# Scale the spectrum by (radius/distance)^2
if "radius" in self.modelpar:
model_flux_0 *= flux_scaling
elif "radius_1" in self.modelpar:
model_flux_0 *= flux_scaling_0
# Star 2
param_1 = read_util.binary_to_single(param_dict, 1)
model_flux_1 = self.modelspec[i].spectrum_interp(
list(param_1.values())
)[0, :]
# Scale the spectrum by (radius/distance)^2
if "radius" in self.modelpar:
model_flux_1 *= flux_scaling
elif "radius_1" in self.modelpar:
model_flux_1 *= flux_scaling_1
# Weighted flux of two stars
model_flux = (
params[self.cube_index["spec_weight"]] * model_flux_0
+ (1.0 - params[self.cube_index["spec_weight"]]) * model_flux_1
)
else:
model_flux = self.modelspec[i].spectrum_interp(
list(param_dict.values())
)[0, :]
# Scale the spectrum by (radius/distance)^2
model_flux *= flux_scaling
# Veiling
if (
"veil_a" in veil_param
and "veil_b" in veil_param
and "veil_ref" in veil_param
):
if item == "MUSE":
lambda_ref = 0.5 # (um)
veil_flux = veil_param["veil_ref"] + veil_param["veil_b"] * (
self.spectrum[item][0][:, 0] - lambda_ref
)
model_flux = veil_param["veil_a"] * model_flux + veil_flux
# Scale the spectrum data
data_flux = | |
:return: N x (T-1) x 7
"""
vos = []
for p in poses:
pvos = [calc_vo_relative_logq(p[i].unsqueeze(0), p[i+1].unsqueeze(0))
for i in range(len(p)-1)]
vos.append(torch.cat(pvos, dim=0))
vos = torch.stack(vos, dim=0)
return vos
def calc_vos_safe(poses):
    """
    Compute relative VOs between each pair of consecutive poses, using the
    numerically safe log-quaternion helper.

    :param poses: N x T x 7
    :return: N x (T-1) x 7
    """
    per_sequence = []
    for seq in poses:
        rel = [
            calc_vo_logq_safe(seq[idx].unsqueeze(0), seq[idx + 1].unsqueeze(0))
            for idx in range(len(seq) - 1)
        ]
        per_sequence.append(torch.cat(rel, dim=0))
    return torch.stack(per_sequence, dim=0)
def calc_vos_safe_fc(poses):
    """
    Compute relative VOs between every ordered pair of poses in a sequence
    (fully connected), using the numerically safe log-quaternion helper.

    :param poses: N x T x 7
    :return: N x TC2 x 7
    """
    per_sequence = []
    for seq in poses:
        n_frames = seq.size(0)
        pair_vos = []
        for a in range(n_frames):
            for b in range(a + 1, n_frames):
                pair_vos.append(
                    calc_vo_logq_safe(seq[a].unsqueeze(0), seq[b].unsqueeze(0)))
        per_sequence.append(torch.cat(pair_vos, dim=0))
    return torch.stack(per_sequence, dim=0)
# NUMPY
def qlog(q):
    """
    Applies the logarithm map to a unit quaternion.

    :param q: (4,) quaternion (w, x, y, z)
    :return: (3,) log-quaternion (zero vector for the identity rotation)
    """
    vec = q[1:]
    norm = np.linalg.norm(vec)
    if norm == 0:
        # Identity rotation: no well-defined axis, log is the zero vector.
        return np.zeros(3)
    return np.arccos(q[0]) * vec / norm
def qexp(q):
    """
    Applies the exponential map to a log-quaternion.

    :param q: (3,)
    :return: (4,) unit quaternion (w, x, y, z)
    """
    angle = np.linalg.norm(q)
    scalar_part = np.cos(angle)
    # np.sinc(x) = sin(pi*x)/(pi*x), so sinc(angle/pi) = sin(angle)/angle,
    # which is well defined (== 1) at angle == 0.
    vector_part = np.sinc(angle / np.pi) * q
    return np.hstack((scalar_part, vector_part))
def process_poses(poses_in, mean_t, std_t, align_R, align_t, align_s):
    """
    processes the 1x12 raw pose from dataset by aligning and then normalizing
    :param poses_in: N x 12 (flattened 3x4 [R|t] matrices)
    :param mean_t: 3
    :param std_t: 3
    :param align_R: 3 x 3
    :param align_t: 3
    :param align_s: 1
    :return: processed poses (translation + log quaternion) N x 6
    """
    # NOTE: output is N x 6 (3 translation + 3 log-quaternion components),
    # not N x 7 as an earlier docstring claimed.
    poses_out = np.zeros((len(poses_in), 6))
    # Columns 3, 7, 11 of the flattened 3x4 matrix are the translation.
    poses_out[:, 0:3] = poses_in[:, [3, 7, 11]]
    # align
    for i in range(len(poses_out)):
        R = poses_in[i].reshape((3, 4))[:3, :3]
        q = txq.mat2quat(np.dot(align_R, R))
        q *= np.sign(q[0])  # constrain to hemisphere
        q = qlog(q)
        poses_out[i, 3:] = q
        t = poses_out[i, :3] - align_t
        poses_out[i, :3] = align_s * \
            np.dot(align_R, t[:, np.newaxis]).squeeze()
    # normalize translation
    poses_out[:, :3] -= mean_t
    poses_out[:, :3] /= std_t
    return poses_out
def log_quaternion_angular_error(q1, q2):
    """Angular error (degrees) between two log-quaternions (3,): both are
    mapped back to unit quaternions with qexp() before comparison."""
    return quaternion_angular_error(qexp(q1), qexp(q2))
def quaternion_angular_error(q1, q2):
    """
    Angular error (in degrees) between two unit quaternions.

    :param q1: (4, )
    :param q2: (4, )
    :return: rotation angle in degrees
    """
    # abs() folds the double cover (q and -q are the same rotation);
    # clamping guards arccos against tiny numerical overshoot.
    dot = min(1.0, max(-1.0, abs(np.dot(q1, q2))))
    return 2 * np.arccos(dot) * 180 / np.pi
def skew(x):
    """
    Return the skew-symmetric (cross-product) matrix of a vector.

    :param x: 3 x 1
    :return: 3 x 3
    """
    a, b, c = x[0], x[1], x[2]
    return np.asarray([[0, -c, b],
                       [c, 0, -a],
                       [-b, a, 0]])
def dpq_q(p):
    """
    Jacobian of the quaternion product p*q with respect to q.

    :param p: 4 x 1
    :return: 4 x 4
    """
    scalar = p[0]
    vec = p[1:]
    J = np.zeros((4, 4))
    J[0, 0] = scalar
    J[0, 1:] = -vec.squeeze()
    J[1:, 0] = vec.squeeze()
    J[1:, 1:] = scalar * np.eye(3) + skew(vec)
    return J
def dpsq_q(p):
    """
    Jacobian of the quaternion product (p*)q with respect to q,
    where p* is the conjugate of p.

    :param p: 4 x 1
    :return: 4 x 4
    """
    scalar = p[0]
    vec = p[1:]
    J = np.zeros((4, 4))
    J[0, 0] = scalar
    J[0, 1:] = -vec.squeeze()
    J[1:, 0] = -vec.squeeze()
    J[1:, 1:] = scalar * np.eye(3) - skew(vec)
    return J
def dpsq_p(q):
    """
    Jacobian of the quaternion product (p*)q with respect to p.

    :param q: 4 x 1
    :return: 4 x 4
    """
    w = q[0]
    v = q[1:]
    J = np.zeros((4, 4))
    J[0, 0] = w
    J[0, 1:] = v.squeeze()
    J[1:, 0] = v.squeeze()
    J[1:, 1:] = -w * np.eye(3) + skew(v)
    return J
def dqstq_q(q, t):
    """
    jacobian of q* t q w.r.t. q
    :param q: 4 x 1 column vector (the outer products below need 2-D input)
    :param t: 3 x 1 column vector
    :return: 3 x 4
    """
    J = np.zeros((3, 4))
    # column for the scalar component q0
    J[:, :1] = q[0]*t - np.cross(q[1:], t, axis=0)
    # columns for the vector components q1..q3
    J[:, 1:] = -np.dot(t, q[1:].T) + np.dot(t.T, q[1:])*np.eye(3) + \
        np.dot(q[1:], t.T) + q[0]*skew(t)
    J *= 2
    return J
def dqstq_t(q):
    """
    jacobian of q* t q w.r.t. t
    :param q: 4 x 1 column vector (the dot products below need 2-D input)
    :return: 3 x 3
    """
    # Expansion of the rotation applied by the conjugate quaternion;
    # NOTE(review): relies on skew() accepting a 3 x 1 column — confirm
    # with the numpy version in use.
    J = (q[0]*q[0] - np.dot(q[1:].T, q[1:])) * np.eye(3) + 2*np.dot(q[1:], q[1:].T) -\
        2*q[0]*skew(q[1:])
    return J
def m_rot(x):
    """
    returns Jacobian of exponential map w.r.t. manifold increment
    :param x: part of state vector affected by increment, 4 x 1
    :return: 4 x 3
    """
    # jacobian of full q wrt qm (quaternion update on manifold),
    # evaluated at qv = (0, 0, 0)
    # full q is derived using either the exponential map or q0 = sqrt(1-qm^2)
    jm = np.vstack((np.zeros((1, 3)), np.eye(3)))  # 4 x 3
    # Chain rule: d(x * dq)/dqm = dpq_q(x) . d(dq)/dqm, with d(dq)/dqm = jm
    # at the identity increment.
    m = np.dot(dpq_q(p=x), jm)
    return m
class PoseGraph:
def __init__(self):
    """
    implements pose graph optimization from
    "Hybrid Hessians for Optimization of Pose Graphs" - <NAME> et al
    and "A Tutorial on Graph-Based SLAM" - W. Burgard et al
    """
    # Number of poses in the graph; set by optimize().
    self.N = 0
    # State vector: the N poses stacked as one column vector of
    # 7-vectors (3 translation + 4 quaternion); shaped by optimize().
    self.z = np.zeros((0, 0))
def jacobian(self, L_ax, L_aq, L_rx, L_rq):
    """Assemble the whitened Jacobian of all constraints w.r.t. the 6N-dim
    manifold increment (3 translation + 3 rotation-manifold dims per pose).

    :param L_ax: 3 x 3 sqrt-information for absolute translation
    :param L_aq: 4 x 4 sqrt-information for absolute rotation
    :param L_rx: 3 x 3 sqrt-information for relative translation
    :param L_rq: 4 x 4 sqrt-information for relative rotation
    :return: stacked Jacobian, one row block per constraint
    """
    # 6 because updates for rotation are on manifold
    J = np.zeros((0, 6*self.N))
    # unary constraints
    for i in range(self.N):
        # translation constraint
        jt = np.zeros((3, J.shape[1]))
        jt[:, 6*i: 6*i+3] = np.eye(3)
        J = np.vstack((J, np.dot(L_ax, jt)))
        # rotation constraint
        jr = np.zeros((4, J.shape[1]))
        jr[:, 6*i+3: 6*i+6] = m_rot(x=self.z[7*i+3: 7*i+7])
        J = np.vstack((J, np.dot(L_aq, jr)))
    # pairwise constraints
    for i in range(self.N-1):
        # translation constraint
        jt = np.zeros((3, J.shape[1]))
        dt = dqstq_t(q=self.z[7*i+3: 7*i+7])
        # dt = np.eye(3)
        jt[:, 6*i: 6*i+3] = -dt
        jt[:, 6*(i+1): 6*(i+1)+3] = dt
        # m = m_rot(x=self.z[7*i+3 : 7*i+7])
        # a = dqstq_q(q=self.z[7*i+3 : 7*i+7],
        #             t=self.z[7*(i+1) : 7*(i+1)+3]-self.z[7*i : 7*i+3])
        # jt[:, 6*i+3 : 6*i+6] = np.dot(a, m)
        J = np.vstack((J, np.dot(L_rx, jt)))
        # rotation constraint: residual qvo = q_i^-1 * q_{i+1}, so the
        # Jacobian has one block per involved pose, chained through m_rot.
        jr = np.zeros((4, J.shape[1]))
        m = m_rot(x=self.z[7*i+3: 7*i+7])
        a = dpsq_p(q=self.z[7*(i+1)+3: 7*(i+1)+7])
        jr[:, 6*i+3: 6*i+6] = np.dot(a, m)
        m = m_rot(x=self.z[7*(i+1)+3: 7*(i+1)+7])
        b = dpsq_q(p=self.z[7*i+3: 7*i+7])
        jr[:, 6*(i+1)+3: 6*(i+1)+6] = np.dot(b, m)
        J = np.vstack((J, np.dot(L_rq, jr)))
    return J
def residuals(self, poses, vos, L_ax, L_aq, L_rx, L_rq):
    """
    computes the whitened residuals of all constraints
    :param poses: N x 7 absolute pose measurements
    :param vos: (N-1) x 7 relative (visual odometry) measurements
    :param L_ax: 3 x 3 sqrt-information, absolute translation
    :param L_aq: 4 x 4 sqrt-information, absolute rotation
    :param L_rx: 3 x 3 sqrt-information, relative translation
    :param L_rq: 4 x 4 sqrt-information, relative rotation
    :return: stacked residual column vector
    """
    r = np.zeros((0, 1))
    # unary residuals: state minus measured pose, whitened with a
    # block-diagonal 7x7 built from L_ax and L_aq
    L = np.zeros((7, 7))
    L[:3, :3] = L_ax
    L[3:, 3:] = L_aq
    for i in range(self.N):
        rr = self.z[7*i: 7*(i+1)] - np.reshape(poses[i], (-1, 1))
        r = np.vstack((r, np.dot(L, rr)))
    # pairwise residuals
    for i in range(self.N-1):
        # translation residual: inter-pose displacement rotated into the
        # frame of pose i, minus the measured relative translation
        v = self.z[7*(i+1):7*(i+1)+3, 0]-self.z[7*i:7*i+3, 0]
        q = txq.qinverse(self.z[7*i+3:7*i+7, 0])
        rt = txq.rotate_vector(v, q)
        rt = rt[:, np.newaxis] - vos[i, :3].reshape((-1, 1))
        # rt = self.z[7*(i+1) : 7*(i+1)+3] - self.z[7*i : 7*i+3] - \
        #     vos[i, :3].reshape((-1, 1))
        r = np.vstack((r, np.dot(L_rx, rt)))
        # rotation residual: q_i^-1 * q_{i+1} minus measured relative rotation
        q0 = self.z[7*i+3: 7*i+7].squeeze()
        q1 = self.z[7*(i+1)+3: 7*(i+1)+7].squeeze()
        qvo = txq.qmult(txq.qinverse(q0), q1).reshape((-1, 1))
        rq = qvo - vos[i, 3:].reshape((-1, 1))
        r = np.vstack((r, np.dot(L_rq, rq)))
    return r
def update_on_manifold(self, x):
    """
    Updates the state vector on manifold
    :param x: manifold increment (6N x 1), column vector
    :return:
    """
    for i in range(self.N):
        # update translation: plain additive update
        t = x[6*i: 6*i+3]
        self.z[7*i: 7*i+3] += t
        # update rotation
        qm = x[6*i+3: 6*i+6]  # quaternion on the manifold
        dq = np.zeros(4)
        # method in Burgard paper
        # dq[1:] = qm.squeeze()
        # dq[0] = math.sqrt(1 - sum(np.square(qm)))  # incremental quaternion
        # method of exponential map: dq = (cos|qm|, sin|qm| * qm/|qm|),
        # with np.sinc handling the |qm| -> 0 limit
        n = np.linalg.norm(qm)
        dq[0] = math.cos(n)
        dq[1:] = np.sinc(n/np.pi) * qm.squeeze()
        # right-multiply the increment onto the current quaternion
        q = self.z[7*i+3: 7*i+7].squeeze()
        q = txq.qmult(q, dq).reshape((-1, 1))
        self.z[7*i+3: 7*i+7] = q
def optimize(self, poses, vos, sax=1, saq=1, srx=1, srq=1, n_iters=10):
"""
run PGO, with init = poses
:param poses:
:param vos:
:param sax: sigma for absolute translation
:param saq: sigma for absolute rotation
:param srx: sigma for relative translation
:param srq: sigma for relative rotation
:param n_iters:
:return:
"""
self.N = len(poses)
# init state vector with the predicted poses
self.z = np.reshape(poses.copy(), (-1, 1))
# construct the information matrices
L_ax = np.linalg.cholesky(np.eye(3) / sax)
L_aq = np.linalg.cholesky(np.eye(4) / saq)
L_rx = np.linalg.cholesky(np.eye(3) / srx)
L_rq = np.linalg.cholesky(np.eye(4) / srq)
for n_iter in range(n_iters):
J = self.jacobian(L_ax.T, L_aq.T, L_rx.T, L_rq.T)
r = self.residuals(poses.copy(), vos.copy(), L_ax.T, L_aq.T, L_rx.T,
L_rq.T)
H = np.dot(J.T, J) # hessian
b = np.dot(J.T, r) # residuals
# solve Hx = -b for x
R = slin.cholesky(H) # H = R' | |
import os, sys
parentPath = os.path.abspath("..")
if parentPath not in sys.path:
sys.path.insert(0, parentPath)
from model.title import extractTitle
from converter.pdfextractor import PDFContainer
from model.location import Location
from config.dictionary import refWords, Coves, Seas, Bays, Islands
from model.keywords import KeywordExtractor
from model.reference import ExtracrReference
from model.ner import NERExtractor
from converter.dataContainer import DataPerson, DataLocation, DataKeyword, DataRef
from converter.load import insertData, insertDataFromFile
import datetime
import string
import random
import nltk.data
import codecs
import re
class Extractor:
def __init__(self, config):
    """Create an extractor bound to `config`; all result fields start empty.

    :param config: project configuration object (paths, models, limits)
    """
    # Page range to process (-1 means "not set"; see extractRange()).
    self.pStart = -1
    self.pEnd = -1
    # Flags selecting which metadata categories to extract.
    self.metaTitle = False
    self.metaContent = False
    self.metaName = False
    self.metaLocation = False
    self.metaKeyWord = False
    self.metaRef = False
    self.metaOrg = False
    self.metaMisc = False
    self.metaAll = False
    self.config = config
    # Input/output file names; callers overwrite these before running.
    self.INFilename = 'in.pdf'
    self.OUTFilename = 'out.txt'
    self.pdf = None
    # Extracted document-level metadata.
    self.title = ''
    self.origin = ''
    self.descriptAbstract = ''
    self.descriptPurpose = ''
    self.descriptSupplemental = ''
    self.dateBegin = ''
    self.dateEnd = ''
    self.statusProgress = ''
    self.statusUpdate = ''
    self.access = ''
    # Output format: 'txt', 'iso19115v2', 'fgdc' or 'dublin'.
    self.typeOut = "txt"
    # Extraction results.
    self.refs = []
    # Fix: `contact` was assigned twice in a row; a single assignment
    # (with the same value) is kept.
    self.contact = DataPerson('')
    self.namesData = []
    self.keywordsData = []
    self.keywordsLocData = []
    self.locationsData = []
    # When True, extractTitle() also generates a fresh identifier.
    self.genUUID = True
    self.uuid = ""
    self.miscData = []
    self.orgData = []
    self.locData = []
def reinit(self):
    """Reset the main extraction result lists so the instance can be reused.

    Fix: the original definition was missing ``self``, so any call of
    ``instance.reinit()`` raised TypeError (and ``self`` was undefined
    inside the body).

    NOTE(review): keywordsLocData/locData/orgData/miscData/refs are NOT
    cleared here, matching the original field list — confirm whether they
    should be.
    """
    self.namesData = []
    self.keywordsData = []
    self.locationsData = []
def addName(self):
    # Append an empty DataPerson placeholder to namesData.
    self.namesData.append(DataPerson(''))
def addKeyword(self):
    # Append an empty DataKeyword placeholder to keywordsData.
    self.keywordsData.append(DataKeyword(''))
def addKeywordLoc(self):
    # Append an empty DataKeyword placeholder to keywordsLocData.
    self.keywordsLocData.append(DataKeyword(''))
def addLocation(self):
    # Append an empty DataLocation placeholder to locationsData.
    self.locationsData.append(DataLocation(''))
def addReference(self):
    # Append an empty DataRef placeholder to refs.
    self.refs.append(DataRef(''))
def delName(self, id):
    # Remove entry `id` (1-based) from namesData; the slice delete is a
    # no-op when the index is out of range.
    self.namesData[id-1:id] = []
def delKeyword(self, id):
    # Remove entry `id` (1-based) from keywordsData (no-op if out of range).
    self.keywordsData[id-1:id] = []
def delKeywordLoc(self, id):
    # Remove entry `id` (1-based) from keywordsLocData (no-op if out of range).
    self.keywordsLocData[id-1:id] = []
def delLocation(self, id):
    # Remove entry `id` (1-based) from locationsData (no-op if out of range).
    self.locationsData[id-1:id] = []
def delReference(self, id):
    """Remove the reference with 1-based index ``id`` from ``refs``.

    Fix: copy-paste bug — the original deleted from ``locationsData``
    instead of ``refs``, so references could never be removed and a
    location was silently dropped instead.
    """
    self.refs[id-1:id] = []
def extractRange(self):
    """Extract metadata from pages [pStart, pEnd] only and write a plain-text
    report to OUTFilename.

    Which categories run is controlled by the meta* flags (metaAll enables
    everything). NOTE(review): here namesData/locData/orgData/miscData
    receive plain strings from extractTags(), unlike extract()/addName()
    which store Data* wrapper objects — the report loops below depend on
    that.
    """
    self.typeOut = "txt"
    self.pdf = PDFContainer(format=self.config.outPDFFormat, codec=self.config.fileCodec)
    if self.pdf.format == "filter":
        self.pdf.convertPDFFilter(self.INFilename)
    else:
        self.pdf.convertPDFAlternative(self.INFilename)
    self.tokenizer = nltk.data.load(self.config.sentencesSplitterModel)
    self.extractorNer = NERExtractor(self.config)
    self.extractorLoc = Location(self.config.minTanimoto)
    # Single-page request: close the range.
    if self.pEnd == -1:
        self.pEnd = self.pStart
    # extract ner
    txt = self.pdf.getPages(self.pStart, self.pEnd)
    sents = txt.split('\n')  # tokenizer.tokenize(txt)
    if self.metaName or self.metaAll:
        names = self.extractTags(sents, ["I-PER", "B-PER"])
        for s in names:
            self.namesData.append(s)
    if self.metaLocation or self.metaAll:
        loc = self.extractTags(sents, ["I-LOC", "B-LOC"])
        for s in loc:
            self.locData.append(s)
    if self.metaOrg or self.metaAll:
        org = self.extractTags(sents, ["I-ORG", "B-ORG"])
        for s in org:
            self.orgData.append(s)
    if self.metaMisc or self.metaAll:
        misc = self.extractTags(sents, ["I-MISC", "B-MISC"])
        for s in misc:
            self.miscData.append(s)
    if self.metaLocation or self.metaAll:
        # extract locations with coords
        sents = self.tokenizer.tokenize(txt)
        self.extractLocation(sents)
    if self.metaKeyWord or self.metaAll:
        # extract key words
        self.extractKeyWords(txt)
    if self.metaRef or self.metaAll:
        # extract refs
        self.extractRefs(txt)
    #SAVE
    print(self.OUTFilename+' - OUT_FILE')
    res = ""
    if self.metaName or self.metaAll:
        res += 'NAMES\n'
        for s in self.namesData:
            res += s + '\n'
    if self.metaLocation or self.metaAll:
        res += 'LOCATIONS\n'
        for s in self.locationsData:
            res += s.genText()+'\n'
        res += 'OTHER LOCATIONS\n'
        for s in self.locData:
            res += s + '\n'
    if self.metaOrg or self.metaAll:
        res += 'ORGANISATION\n'
        for s in self.orgData:
            res += s + '\n'
    if self.metaMisc or self.metaAll:
        res += 'MISC\n'
        for s in self.miscData:
            res += s + '\n'
    if self.metaKeyWord or self.metaAll:
        res += 'KEY WORDS\n'
        i = 0
        # Emit at most countKeyPhrases phrases, skipping over-long/short ones.
        for s in self.keywordsData:
            kp = s.genText()
            if i >= self.config.countKeyPhrases:
                break
            if len(kp.split()) > self.config.maxKeyPhraseLength or len(kp) < 4:
                continue
            res += kp+'\n'
            i += 1
        # Location-derived keywords are appended without any limit.
        for s in self.keywordsLocData:
            kp = s.genText()
            res += kp+'\n'
    if self.metaRef or self.metaAll:
        res += 'REFS\n'
        for s in self.refs:
            res += s.genText()+'\n'
    self.saveFile(self.OUTFilename, res)
def extract(self):
    """Run the full pipeline on the whole document: convert the PDF, then
    extract title, person names, locations, keywords and references."""
    self.pdf = PDFContainer(format=self.config.outPDFFormat, codec=self.config.fileCodec)
    if self.pdf.format == "filter":
        self.pdf.convertPDFFilter(self.INFilename)
    else:
        self.pdf.convertPDFAlternative(self.INFilename)
    self.tokenizer = nltk.data.load(self.config.sentencesSplitterModel)
    self.extractorNer = NERExtractor(self.config)
    self.extractorLoc = Location(self.config.minTanimoto)
    # extract title from the first pages only
    txt = self.pdf.getPages(0, 3)
    self.extractTitle(txt)
    # extract names from the first 10 pages
    txt = self.pdf.getPages(0, 10)
    sents = txt.split('\n')  # tokenizer.tokenize(txt)
    self.extractName(sents)
    # extract locations with coords over the whole document
    txt = self.pdf.getAllPages()
    sents = self.tokenizer.tokenize(txt)
    self.extractLocation(sents)
    # extract key words
    self.extractKeyWords(txt)
    # extract refs
    self.extractRefs(txt)
def save(self):
    """Serialize the collected metadata in the format selected by typeOut
    and write it to OUTFilename ('dublin' handles its own output)."""
    print(self.typeOut)
    if self.typeOut == 'dublin':
        self.saveToDublin()
        return
    builders = {
        'txt': self.saveToTXT,
        'iso19115v2': self.saveToISO19115v2,
        'fgdc': self.saveToFGDC,
    }
    builder = builders.get(self.typeOut)
    if builder is not None:
        self.saveFile(self.OUTFilename, builder())
def load(self):
    """Serialize the metadata in the configured format and push it to the
    remote catalog.

    :return: status code from insertData
    :raises ValueError: if typeOut is not 'iso19115v2' or 'fgdc'

    Fix: previously any other typeOut fell through with ``code`` unbound,
    raising UnboundLocalError at the return; now it fails with a clear
    ValueError. The duplicated insertData call is also hoisted.
    """
    print(self.typeOut)
    if self.typeOut == 'iso19115v2':
        text = self.saveToISO19115v2()
    elif self.typeOut == 'fgdc':
        text = self.saveToFGDC()
    else:
        raise ValueError("unsupported output type for load: %r" % self.typeOut)
    code, ans = insertData(self.config.protocol, self.config.url,
                           self.config.user, self.config.passwd, text)
    return code
def loadFromFile(self, infile):
    """Upload an already-serialized metadata file to the remote catalog and
    return the status code."""
    print(infile)
    cfg = self.config
    code, ans = insertDataFromFile(cfg.protocol, cfg.url, cfg.user,
                                   cfg.passwd, infile)
    return code
def extractRefs(self, txt):
    """Extract bibliographic references from `txt` and append them to
    self.refs as DataRef objects."""
    extractor = ExtracrReference(txt)
    self.refs.extend(DataRef(raw) for raw in extractor.extract())
def extractTitle(self, txt):
    """Extract the document title from `txt`, take the first 4-digit run in
    it as the begin date, and (optionally) generate an identifier."""
    self.title = extractTitle(txt)
    # First 4-digit sequence in the title is treated as the start year.
    match = re.search(r'[0-9]{4}', self.title)
    self.dateBegin = match.group(0) if match is not None else ""
    self.dateEnd = "present"
    if self.genUUID:
        self.uuid = self.genIdentifier()
def extractName(self, sentences):
    """Run NER over `sentences`, drop person candidates matching obvious
    non-name patterns, and store the rest as DataPerson entries."""
    blacklist = r'[/0-9()]|(University)|(Database)|(Ecology)|(No\.)'
    for candidate in self.extractTags(sentences, ["I-PER", "B-PER"]):
        if re.search(blacklist, candidate, re.IGNORECASE | re.UNICODE) is None:
            self.namesData.append(DataPerson(candidate))
def extractTags(self, sentences, tags):
    """Run NER over `sentences` and collect multi-word entities whose
    predicted tag is in `tags` (e.g. ["I-PER", "B-PER"]).

    :param sentences: iterable of raw sentence strings
    :param tags: accepted NER tag labels
    :return: set of entity strings (only phrases of 2+ words are kept)

    Improvements: the duplicated end-of-entity flush logic is factored into
    a helper, and the unused `test` local is removed. Behavior unchanged.
    """
    def _flush(buf, out):
        # Split a finished entity buffer on commas, keep multi-word pieces.
        buf = buf.strip()
        if buf == '':
            return
        for part in buf.split(','):
            part = part.strip()
            if len(part.split(' ')) > 1:
                out.add(part)

    names = set()
    for sentence in sentences:
        wordsRaw, preds = self.extractorNer.extractFromSentence(sentence)
        res = ''
        for i, w in enumerate(wordsRaw):
            if preds[i] in tags:
                # Still inside an entity: accumulate the word.
                res += w + ' '
            else:
                # Entity ended: flush what was accumulated.
                _flush(res, names)
                res = ''
        # Flush a trailing entity at the end of the sentence.
        _flush(res, names)
    return names
def extractLocation(self, sents):
    """Find named locations with known coordinates.

    Sentences containing a reference word are NER-tagged; LOC spans are
    fuzzy-matched against the coordinate dictionaries (Coves, Seas, Bays,
    Islands). Matches are stored as DataLocation entries ("name+coord+...")
    and their names also as location keywords.
    """
    # Merge all coordinate dictionaries into one lookup table.
    locmap = {}
    locmap.update(Coves)
    locmap.update(Seas)
    locmap.update(Bays)
    locmap.update(Islands)
    # Pre-filter: keep only sentences mentioning a reference word.
    candidates = []
    for s in sents:
        ls = s.lower()
        for w in refWords:
            if ls.find(w) != -1:
                candidates.append(s)
                break
    locations = set()
    for sentence in candidates:
        wordsRaw, preds = self.extractorNer.extractFromSentence(sentence)
        res = ""
        for i, w in enumerate(wordsRaw):
            if preds[i] == "I-LOC" or preds[i] == "B-LOC":
                res += w + ' '
                continue
            else:
                # Allow a connecting "of" inside a location name.
                if w == 'of' and res != '':
                    res += w + ' '
                    continue
                res = res.strip()
                if res != '':
                    # get coords Coves, Seas, Bays, Islands
                    for key in locmap:
                        if self.extractorLoc.isFuzzyEqual(res, key, 3):
                            r = key + '+' + ''.join(str(x)+'+' for x in locmap[key]) + '\n'
                            locations.add(r)
                            break
                res = ''
        # NOTE(review): a location span that ends exactly at the sentence
        # end is never flushed/matched here — confirm if intentional.
    for s in locations:
        self.locationsData.append(DataLocation(s))
    for s in locations:
        kw = s.split('+')[0]
        self.keywordsLocData.append(DataKeyword(kw))
def extractKeyWords(self, txt):
    """Extract ranked key phrases from `txt` and store them as DataKeyword
    entries, filtered by length and configured limits."""
    # Load the stop-word list; line[:len(line)-1] chops the trailing
    # character (assumed newline) of every line.
    stopwords = []
    with open(self.config.stopWords, encoding=self.config.fileCodec) as f:
        for line in f:
            stopwords.append(line[:len(line)-1])
    ke = KeywordExtractor(stopwords=stopwords, punctuations=self.config.punctuations)
    ke.extractKeyWords(txt)
    ans = ke.getRankedPhrases()
    keys = []
    for w in ans:
        # Stop once the configured number of phrases is collected.
        if len(keys) >= self.config.countKeyPhrases:
            break
        # Skip over-long and very short phrases.
        if len(w.split()) > self.config.maxKeyPhraseLength or len(w) < 4:
            continue
        # Skip phrases containing regex metacharacters...
        if re.search('[\[\]\+\*]', w, re.IGNORECASE|re.UNICODE):
            continue
        # ...because the phrase itself is then used as a regex to recover
        # its original casing from the document text.
        _w = re.search(w, txt, re.IGNORECASE|re.UNICODE)
        if _w:
            keys.append(_w.group(0))
    for k in keys:
        self.keywordsData.append(DataKeyword(k))
def genIdentifier(self):
    """Return a random identifier in canonical 8-4-4-4-12 UUID form.

    Fix: the original concatenated random mixed-case alphanumerics, which
    has UUID *shape* but is not a valid RFC 4122 UUID (letters g-z,
    uppercase), despite the `genUUID`/`uuid` field names. uuid4() keeps the
    same 36-character hyphenated format, so existing consumers of the
    string are unaffected.

    :return: str, e.g. '3f2b8c1e-9a4d-4e6f-8b2a-1c9d7e5f0a3b'
    """
    import uuid  # local import: module-level imports stay unchanged
    return str(uuid.uuid4())
def saveToISO19115v2(self):
print(self.OUTFilename+' - OUT_FILE')
year = datetime.date.today().year
month = datetime.date.today().month
day = datetime.date.today().day
#<gmi:MI_Metadata xmlns:gmi="http://www.isotc211.org/2005/gmi" xmlns:gmd="http://www.isotc211.org/2005/gmd" xmlns:gco="http://www.isotc211.org/2005/gco" xmlns:gml="http://www.opengis.net/gml/3.2" xmlns:gsr="http://www.isotc211.org/2005/gsr" xmlns:gss="http://www.isotc211.org/2005/gss" xmlns:gst="http://www.isotc211.org/2005/gst" xmlns:gmx="http://www.isotc211.org/2005/gmx" xmlns:gfc="http://www.isotc211.org/2005/gfc" xmlns:srv="http://www.isotc211.org/2005/srv" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.isotc211.org/2005/gmi ftp://ftp.ncddc.noaa.gov/pub/Metadata/Online_ISO_Training/Intro_to_ISO/schemas/ISObio/schema.xsd">
res = """
<gmd:MD_Metadata xmlns:gmd="http://www.isotc211.org/2005/gmd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:gco="http://www.isotc211.org/2005/gco"
xmlns:gml="http://www.opengis.net/gml"
xsi:schemaLocation="http://www.isotc211.org/2005/gmd ../schema.xsd">
<fileIdentifier>
<gco:CharacterString>"""+self.uuid+"""</gco:CharacterString>
</fileIdentifier>
<gmd:language>
<gco:CharacterString>eng</gco:CharacterString>
</gmd:language>
<gmd:characterSet>
<gmd:MD_CharacterSetCode codeListValue="utf8"
codeList="http://standards.iso.org/ittf/PubliclyAvailableStandards/ISO_19139_Schemas/resources/codelist/ML_gmxCodelists.xml#MD_CharacterSetCode"/>
</gmd:characterSet>
<gmd:hierarchyLevel>
gmd:<MD_ScopeCode codeList="http://www.isotc211.org/2005/resources/codeList.xml#MD_ScopeCode"
codeListValue="dataset"/>
</gmd:hierarchyLevel>
<gmd:hierarchyLevelName>
<gco:CharacterString>dataset</gco:CharacterString>
</gmd:hierarchyLevelName>
<gmd:dateStamp>
<gco:DateTime>""" + str(year) + '-'+ str(month) + '-' + str(day) + """</gco:DateTime>
</gmd:dateStamp>
<gmd:metadataStandardName>
<gco:CharacterString>ISO 19115:2003/19139</gco:CharacterString>
</gmd:metadataStandardName>
<gmd:metadataStandardVersion>
<gco:CharacterString>1.0</gco:CharacterString>
</gmd:metadataStandardVersion>
"""
#res += """
#<gmd:abstract>
# <gco:CharacterString>""" + self.descriptAbstract + """</gco:CharacterString>
#</gmd:abstract>
#"""
# Contact
res += "<gmd:contact>"
res += self.contact.genISO115v2()
res += "</gmd:contact>"
if self.metaTitle or self.metaAll:
_d = re.search(r'[0-9]{4}', self.title)
if _d is None:
_date = ""
else:
_date = _d.group(0)
_title = self.title.replace('\n', ' ')
res += """
<gmd:identificationInfo>
<gmd:MD_DataIdentification>
<gmd:citation>
<gmd:CI_Citation>
<gmd:title>
<gco:CharacterString>""" + _title + """</gco:CharacterString>
</gmd:title>
<gmd:date>
<gmd:CI_Date>
<gmd:date>
<gco:DateTime>""" + _date + """</gco:DateTime>
</gmd:date>
</gmd:CI_Date>
</gmd:date>
</gmd:CI_Citation>
</gmd:citation>
</gmd:MD_DataIdentification>
"""
res += """
<abstract>
<gco:CharacterString>"""+self.descriptAbstract+"""</gco:CharacterString>
</abstract>
<purpose>
<gco:CharacterString>"""+self.descriptPurpose+"""</gco:CharacterString>
</purpose>
<status>
<MD_ProgressCode
codeList="http://www.isotc211.org/2005/resources/codeList.xml#MD_ProgressCode"
codeListValue="""+'"'+self.statusProgress+'"'+"""/>
</status>
"""
#if self.metaContent or self.metaAll:
# STXTfile.write('CONTENT\n')
# for t in self.pdf.getTitles():
# STXTfile.write(t+'\n')
if self.metaName or self.metaAll:
res += "<gmd:pointOfContact>"
for s in self.namesData:
res += s.genISO115v2()
res += "</gmd:pointOfContact>"
if self.metaLocation or self.metaAll:
res += """
<gmd:extent>
<gmd:EX_Extent>
<gmd:description>
<gco:CharacterString>Spatial extent for locations</gco:CharacterString>
</gmd:description>
"""
for s in self.locationsData:
res += s.genISO115v2()
res += """
</gmd:EX_Extent>
</gmd:extent>
"""
if self.metaKeyWord or self.metaAll:
kwTypes = {}
locTypes = {}
for s in self.keywordsData:
if s.type in kwTypes:
if s.keyword != "":
kwTypes[s.type].append(s)
else:
if s.keyword != "":
kwTypes[s.type] = []
kwTypes[s.type].append(s)
for s in self.keywordsLocData:
if s.type in locTypes:
if s.keyword != "":
locTypes[s.type].append(s)
else:
if s.keyword != "":
locTypes[s.type] = []
locTypes[s.type].append(s)
res += """
<gmd:descriptiveKeywords>
"""
for key in kwTypes.keys():
res += "<gmd:MD_Keywords>\n"
#res += "<themekt>" + key + "</themekt>\n"
for val in kwTypes[key]:
res += val.genISO115v2()
res += """
<type>
<MD_KeywordTypeCode
codeList="http://metadata.dgiwg.org/codelistRegistry?MD_KeywordTypeCode"
codeListValue="""+'"'+ key +'"'+"""/>
</type>
"""
res += "</gmd:MD_Keywords>\n"
for key in locTypes.keys():
res += "<gmd:MD_Keywords>\n"
#res += "<themekt>" + key + "</themekt>\n"
for val in locTypes[key]:
res += val.genISO115v2()
res += """
<type>
<MD_KeywordTypeCode
codeList="http://metadata.dgiwg.org/codelistRegistry?MD_KeywordTypeCode"
codeListValue="""+'"'+ key +'"'+"""/>
</type>
"""
res += "</gmd:MD_Keywords>\n"
#for s in self.keywordsData:
# res += s.genISO115v2()
#for s in self.keywordsLocData:
# res += s.genISO115v2()
res += """
</gmd:descriptiveKeywords>
"""
#if self.metaRef or self.metaAll:
# res += "<gmd:citation>\n"
# for s in self.refs:
# res += s.genISO115v2()
# res += "</gmd:citation>\n"
res += "</gmd:identificationInfo>\n</gmd:MD_Metadata>\n"
#STXTfile = codecs.open(self.OUTFilename, "w", self.config.fileCodec)
#STXTfile.write(res)
#STXTfile.close()
return res
#self.saveFile(self.OUTFilename, res)
def saveFile(self, fname, text):
STXTfile = codecs.open(fname, "w", self.config.fileCodec)
STXTfile.write(text)
STXTfile.close()
def saveToTXT(self):
print(self.OUTFilename+' - OUT_FILE')
res = ""
if self.metaTitle or self.metaAll:
res += 'TITLE\n'
res += self.title+'\n'
if self.metaContent or self.metaAll:
res += 'CONTENT\n'
if self.pdf != None:
for t in self.pdf.getTitles():
res += t+'\n'
if self.metaName or self.metaAll:
res += 'NAMES\n'
for s in self.namesData:
res += s.genText()+'\n'
if self.metaLocation or self.metaAll:
res += 'LOCATIONS\n'
for s in self.locationsData:
res += s.genText()+'\n'
if self.metaKeyWord or self.metaAll:
res += 'KEY WORDS\n'
i = 0
for s in self.keywordsData:
kp = s.genText()
if i >= self.config.countKeyPhrases:
break
if len(kp.split()) > self.config.maxKeyPhraseLength or len(kp) < 4:
continue
res += kp+'\n'
i += 1
for s in self.keywordsLocData:
kp = s.genText()
res += kp+'\n'
if self.metaRef or self.metaAll:
res += 'REFS\n'
for s in self.refs:
res += s.genText()+'\n'
#self.saveFile(self.OUTFilename, res)
return res
def saveToFGDC(self):
print(self.OUTFilename+' - OUT_FILE')
#STXTfile = codecs.open(self.OUTFilename, "w", self.config.fileCodec)
year = datetime.date.today().year
month = datetime.date.today().month
day = datetime.date.today().day
res = """
<?xml version="1.0" encoding="UTF-8"?>
<metadata xmlns:geonet="http://www.fao.org/geonetwork" xmlns:csw="http://www.opengis.net/cat/csw/2.0.2">
<idinfo>\n"""
#if self.metaRef or self.metaAll:
# res += "<citation>\n"
# for s in self.refs:
# res += s.genFGDC()
# res += "</citation>\n"
res += "<citation>\n"
_title = self.title.replace('\n', ' ')
res += """
<citeinfo>
<origin>"""+self.origin+"""</origin>
<pubdate>"""+self.dateBegin+"""</pubdate>
<title>""" + _title + """</title>
<onlink></onlink>
</citeinfo>\n"""
res += "</citation>\n"
if self.metaTitle or self.metaAll:
res += """
<descript>
<abstract>"""+self.descriptAbstract+"""</abstract>
<purpose>"""+self.descriptPurpose+"""</purpose>
<supplinf>"""+self.descriptSupplemental+"""</supplinf>
</descript>
"""
## NEED UPDATE
res += """
<timeperd>
<timeinfo>
<rngdates>
<begdate>""" + self.dateBegin + """</begdate>
<enddate>""" + self.dateEnd + """</enddate>
</rngdates>
</timeinfo>
<current>ground condition</current>
</timeperd>
<status>
<progress>""" + self.statusProgress + """</progress>
<update>""" + self.statusUpdate + """</update>
</status>
<accconst>""" + self.access + """</accconst>
<useconst>
Data not completely processed; some data experimental.
</useconst>
"""
#if self.metaContent or self.metaAll:
# STXTfile.write('CONTENT\n')
# for t in self.pdf.getTitles():
# STXTfile.write(t+'\n')
if self.metaLocation or self.metaAll:
res += """
<spdom>
"""
for s in self.locationsData:
res += s.genFGDC()
res += """
</spdom>
"""
if self.metaKeyWord or self.metaAll:
res += """
<keywords>
"""
kwTypes = {}
locTypes = {}
for s in self.keywordsData:
if s.type in kwTypes:
if s.keyword != "":
kwTypes[s.type].append(s)
else:
if s.keyword != "":
kwTypes[s.type] = []
kwTypes[s.type].append(s)
for s in self.keywordsLocData:
if s.type in locTypes:
if s.keyword != "":
locTypes[s.type].append(s)
else:
if s.keyword != "":
locTypes[s.type] = []
locTypes[s.type].append(s)
for key in kwTypes.keys():
res += "<theme>\n"
res += "<themekt>" + key + "</themekt>\n"
for val in kwTypes[key]:
res | |
#!/usr/bin/env python3
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import isce
from ctypes import cdll
import numpy as np
import os
import sys
from isce import logging
import math
import urllib.request, urllib.parse, urllib.error
from iscesys.Component.Component import Component
from contrib.demUtils.DemStitcher import DemStitcher
from isceobj.Image import createImage
#Parameters definitions
# Component.Parameter descriptors exposed by SWBDStitcher (wired into the
# component framework via the class-level parameter_list).
# Base URL of the USGS/LPDAAC server hosting the SRTM water-body tiles.
URL = Component.Parameter('_url',
    public_name = 'URL',default = 'http://e4ftl01.cr.usgs.gov/SRTM/SRTMSWBD.003/2000.02.11',
    type = str,
    mandatory = False,
    doc = "Url for the high resolution water body mask")
# When True, the individual decompressed tiles are kept after stitching.
KEEP_WBDS = Component.Parameter('_keepWbds',
    public_name='keepWbds',
    default = False,
    type = bool,
    mandatory = False,
    doc = "If the option is present then the single files used for stitching are kept.\n" + \
          "If 'useLocalDirectory' is set then this flag is forced to True to avoid\n" + \
          "accidental deletion of files (default: False)'")
## This class provides a set of convenience method to retrieve and possibly combine different DEMs from the USGS server.
# \c NOTE: the latitudes and the longitudes that describe the DEMs refer to the bottom left corner of the image.
class SWBDStitcher(DemStitcher):
def getUnzippedName(self,name,source = None):
name = name.replace('.' + self._extraExt,'')
return name.replace(self._zip,'')
##
# Given a latitude and longitude in degrees it returns the expected filename.
# @param lat \c int latitude in the range (-90,90). Actual data are restricted to (-60,60) or so.
# @param lon \c int longitude in the range [-180,180) or [0,360).
# @return \c string the filename for that location
def createFilename(self,lat,lon,source = None):
if lon > 180:
lon = -(360 - lon)
else:
lon = lon
ns,ew = self.convertCoordinateToString(lat,lon)
toAppend = '.' + self._extraExt
return ns + ew + toAppend + self._extension + self._zip
def defaultName(self,snwe):
latMin = math.floor(snwe[0])
latMax = math.ceil(snwe[1])
lonMin = math.floor(snwe[2])
lonMax = math.ceil(snwe[3])
nsMin,ewMin = self.convertCoordinateToString(latMin, lonMin)
nsMax,ewMax = self.convertCoordinateToString(latMax, lonMax)
swbdName = (
'swbdLat_' + nsMin + '_' +nsMax +
'_Lon_' + ewMin +
'_' + ewMax + '.wbd'
)
return swbdName
@staticmethod
def toRadar(maskin,latin,lonin,output):
maskim = createImage()
maskim.load(maskin + '.xml')
latim = createImage()
latim.load(latin + '.xml')
lonim = createImage()
lonim.load(lonin + '.xml')
mask = np.fromfile(maskin,maskim.toNumpyDataType())
lat = np.fromfile(latin,latim.toNumpyDataType())
lon = np.fromfile(lonin,lonim.toNumpyDataType())
mask = np.reshape(mask,[maskim.coord2.coordSize,maskim.coord1.coordSize])
startLat = maskim.coord2.coordStart
deltaLat = maskim.coord2.coordDelta
startLon = maskim.coord1.coordStart
deltaLon = maskim.coord1.coordDelta
#remember mask starts from top left corner
#deltaLat < 0
lati = np.clip(((lat - startLat)/deltaLat).astype(np.int), 0, mask.shape[0]-1)
loni = np.clip(((lon - startLon)/deltaLon).astype(np.int), 0, mask.shape[1]-1)
cropped = (mask[lati,loni] + 1).astype(maskim.toNumpyDataType())
cropped = np.reshape(cropped,(latim.coord2.coordSize,latim.coord1.coordSize))
cropped.tofile(output)
croppedim = createImage()
croppedim.initImage(output,'read',cropped.shape[1],maskim.dataType)
croppedim.renderHdr()
def createImage(self,lat,lon,source,outname):
image = createImage()
delta = 1/3600.0
try:
os.makedirs(self._downloadDir)
except:
#dir already exists
pass
width = self.getDemWidth(lon,1)
image.initImage(outname,'read',width,'BYTE')
length = image.getLength()
dictProp = {'METADATA_LOCATION':outname+'.xml','Coordinate1':{'size':width,'startingValue':min(lon[0],lon[1]),'delta':delta},'Coordinate2':{'size':length,'startingValue':max(lat[0],lat[1]),'delta':-delta},'FILE_NAME':outname}
#no need to pass the dictionaryOfFacilities since init will use the default one
image.init(dictProp)
self._image = image
return image
##
# Given a list of filenames it fetches the corresponding
# compressed (zip format) DEMs.
# @param source \c int the type of DEM. source = 1 for 1 arcsec resolution data,
# source = 3 for 3 arcsec resolution data.
# @param listFile \c list of the filenames to be retrieved.
# @param downloadDir \c string the directory where the DEMs are downloaded.
# If the directory does not exists it will be created. If the argument is not
# provided then the files are downloaded in the location defined by the
# self._downloadDir that is defaulted to the current directory.
# @param region \c list \c strings regions where to look for the files. It must
# have the same length of \c listFile. If not provided the files are searched by
# scanning the content of each region. Use method getRegionList to get the list of
# possible regions for a given source. Set region only if sure that all the requested
# file are contained in it.
def getDems(self,source,listFile,downloadDir = None,region = None):
if downloadDir is None:
downloadDir = self._downloadDir
else:
self._downloadDir = downloadDir
if not (downloadDir) is None:
try:
os.makedirs(downloadDir)
except:
#dir already exists
pass
for fileNow in listFile:
url = self.getFullHttp(source)
opener = urllib.request.URLopener()
try:
if not os.path.exists(os.path.join(downloadDir,fileNow)):
if(self._un is None or self._pw is None):
#opener.retrieve(url + fileNow,os.path.join(downloadDir,fileNow))
if os.path.exists(os.path.join(os.environ['HOME'],'.netrc')):
command = 'curl -n -L -c $HOME/.earthdatacookie -b $HOME/.earthdatacookie -k -f -O ' + os.path.join(url,fileNow)
else:
self.logger.error('Please create a .netrc file in your home directory containing\nmachine urs.earthdata.nasa.gov\n\tlogin yourusername\n\tpassword yourpassword')
sys.exit(1)
else:
command = 'curl -k -f -u ' + self._un + ':' + self._pw + ' -O ' + os.path.join(url,fileNow)
# curl with -O download in working dir, so save current, move to donwloadDir
# nd get back once download is finished
cwd = os.getcwd()
os.chdir(downloadDir)
print(command)
if os.system(command):
os.chdir(cwd)
raise Exception
os.chdir(cwd)
self._downloadReport[fileNow] = self._succeded
except Exception as e:
self.logger.warning('There was a problem in retrieving the file %s. Exception %s'%(os.path.join(url,fileNow),str(e)))
self._downloadReport[fileNow] = self._failed
    def stitchWbd(self,lat,lon,outname, downloadDir = None, keep = None):
        """Download, decompress and mosaic the SWBD tiles covering lat/lon.

        lat, lon : two-element (min, max) sequences in degrees.
        outname : output filename for the stitched byte mask (memory-mapped).
        downloadDir : where tiles live or get downloaded; defaults to, and
            otherwise updates, self._downloadDir.
        keep : forwarded to self.decompress (whether to keep the archives).

        Returns True on success; False when self._noFilling is set and at
        least one tile failed to download. Missing tiles are otherwise
        treated as land (the memmap is pre-zeroed and "synthetic" tiles are
        simply skipped).
        """
        if downloadDir is None:
            downloadDir = self._downloadDir
        else:
            self._downloadDir = downloadDir
        # Each 1-arcsec tile is 3601x3601 samples; adjacent tiles share an edge,
        # so only 3600x3600 of each is copied into the mosaic.
        tileSize = 3600
        source = 1
        listNames,nLat,nLon = self.getDemsInBox(lat,lon,source,downloadDir)
        unzip = True
        #keep track of the synthetic ones since they don't need to be unzipped
        syntheticTiles = []
        if self._noFilling:
            #make sure that we have all the files to cover the region; check if some download failed
            for k,v in self._downloadReport.items():
                if v == self._failed:
                    unzip = False
                    #clean up the downloaded files on failure since, when trying a second
                    #source, it might end up stitching them together because it does not
                    #re-download the ones present and, unfortunately, the dems with
                    #different resolution have the same name convention
                    if not self._keepAfterFailed:
                        os.system("rm -rf " + downloadDir + "/*.raw*")
                    break
        else:
            syntTileCreated = False
            #check and send a warning if the full region is not available
            if not self._succeded in self._downloadReport.values():
                self.logger.warning('The full region of interested is not available. Missing region is assumed to be land')
            for k,v in self._downloadReport.items():
                if v == self._failed:#symlink each missing file to the reference one created in createFillingFile
                    if not syntTileCreated:#create the synthetic Tile the first time around
                        #get the abs path otherwise the symlink doesn't work
                        # NOTE(review): only the flag is set here; no filling file or
                        # symlink is actually created -- confirm against
                        # createFillingFile whether this is intentional.
                        syntTileCreated = True
                    syntheticTiles.append(k)
        if unzip:
            # Zero-initialized mosaic: tiles that stay missing read as 0 (land).
            mmap = np.memmap(outname,np.int8,'w+',shape=(nLat*tileSize,nLon*tileSize))
            mmap[:,:] = 0
            decompressedList = []
            pos = 0
            # Tiles are returned row-major (lat outer, lon inner) by getDemsInBox.
            for i in range(nLat):
                for j in range(nLon):
                    name = listNames[pos]
                    if name in syntheticTiles:#synthetic tiles don't need to be decompressed
                        pos += 1
                        continue
                    self.decompress(name,downloadDir,keep)
                    newName = self.getUnzippedName(name,source)
                    if downloadDir:
                        newName = os.path.join(downloadDir,newName)
                    decompressedList.append(bytes(newName, 'utf-8'))
                    # Drop the shared last row/column of each 3601x3601 tile.
                    data = np.reshape(np.fromfile(newName,np.int8),(3601,3601))
                    mmap[i*tileSize:(i+1)*tileSize,j*tileSize:(j+1)*tileSize] = data[:-1,:-1]
                    pos += 1
            if not self._keepWbds:
                # Remove the decompressed per-tile files; the archives are handled
                # by the *keep* flag passed to decompress above.
                for f in decompressedList:
                    os.remove(f)
            if self._createXmlMetadata:
                self.createXmlMetadata(lat,lon,source,outname)
        return unzip #if False it means that failed
    #still need to call it since the initialization calls the _url so the setter of
    #url does not get called
    def _configure(self):
        # Intentionally empty: configuration is handled by the base class.
        pass
    def _facilities(self):
        # NOTE(review): deliberately passes DemStitcher to super(), skipping
        # DemStitcher._facilities and invoking the grandparent's version --
        # confirm this is intended before "fixing" it.
        super(DemStitcher,self)._facilities()
    def __setstate__(self,d):
        # Restore pickled state and recreate the (unpicklable) logger.
        self.__dict__.update(d)
        self.logger = logging.getLogger('isce.contrib.demUtils.SWBDStitcher')
        return
    def getWbdsInBox(self,lat,lon,downloadDir=None):
        # Download the tiles covering the box (source fixed to 1 arcsec);
        # the (listNames, nLat, nLon) result is intentionally discarded.
        self.getDemsInBox(lat,lon,1,downloadDir)
    def updateParameters(self):
        # Merge DemStitcher's parameter descriptors into this subclass.
        self.extendParameterList(DemStitcher,SWBDStitcher)
        super(SWBDStitcher,self).updateParameters()
    #use this logic so the right http is returned
    def getFullHttp(self,source):
        # The SWBD server URL does not depend on the resolution *source*.
        return self._url
    # Component-framework plumbing: exported parameters and family name.
    parameter_list = (
                      URL,
                      KEEP_WBDS
                     )
    family = 'swbdstitcher'
def __init__(self,family = '', name = | |
<gh_stars>1-10
# coding=utf-8
from __future__ import absolute_import
import math
import tempfile
import webbrowser
from sys import platform
try:
from Tkinter import BooleanVar, OptionMenu, Frame, Radiobutton, StringVar, Label, Button, Text, END, Toplevel, WORD, \
Scrollbar, LEFT, RIGHT, Y
from tkFileDialog import askopenfilename, asksaveasfilename
from tkFont import Font
from tkMessageBox import showerror
except ImportError:
from tkinter import BooleanVar, OptionMenu, Frame, Radiobutton, StringVar, Label, Button, Text, END, Toplevel, WORD, \
Scrollbar, LEFT, RIGHT, Y
from tkinter.filedialog import askopenfilename, asksaveasfilename
from tkinter.font import Font
from tkinter.messagebox import showerror
# HTML document shown by Output.getHelp(): written to a temporary file and
# opened in the user's default web browser.
help_2 = """<html>
<title>Help</title>
<h1 align="left" style="color: Black">QuickStart</h1>
<h2 align="left" style="color: Black">Option Menu(Gromacs or NAMD)</h2>
<h3 align="left" style="color: Black"> Gromacs topology file</h4>
If you use Gromacs package, click on this button.
After that choose your topology file(.top)
<h3 align="left" style="color: Black"> NAMD Colvars</h4>
If you use NAMD package, click on this button.
After that choose your restraints file(.in)
<h3 align="left" style="color: Black"> Preview</h4>
<body>
Preview an additional section in the topology file.
</body>
<h3 align="left" style="color: Black"> Write</h4>
<body>
After selecting the MD platform and topology file writes all the indices of the atoms, <br>
force constants, distances, angles and dihedral angles into it,<br>
where A is λ<sub>restr</sub> = 0 and B is λ<sub>restr</sub> = 1. <br>
The bonded-lambdas vector was interpolated <br>
between the force constant (and equilibrium posi- tions) in state A and B.
</body>
<h2 align="left" style="color: Black">Unit Button (kJ or kCal)</h2>
<body>
You can choose the desired unit of measurement,<br>
depending on what you need to output.</body>
<h2 align="left" style="color: Black">Exit</h2>
<body>
Exit the program.
</body>
<h2 align="left" style="color: Black">Free Energy</h2>
<body>
Here you can see value of ΔG.<br>
If you need some more info, see documentation.
</body>
</html>
"""
def kJ_to_kCal(E):
    """Convert an energy value from kJ(/mol) to kCal(/mol)."""
    # NOTE(review): 4.1868 J/cal is the *international table* calorie;
    # thermochemical conversions usually use 4.184 -- confirm which is intended.
    return E / 4.1868
def calc_dG(T, r_aA, th_a, th_A, K_r_aA, K_th_a, K_th_A, K_phi_ba, K_phi_aA, K_phi_AB):
    """BORESCH FORMULA - Calculate dG of turning the restraints off.

    T        : temperature in K.
    r_aA     : reference distance a-A in nm.
    th_a, th_A : reference angles in degrees (converted to radians below).
    K_*      : force constants of the distance (K_r_aA), angle (K_th_*) and
               dihedral (K_phi_*) restraints, in kJ/mol units consistent
               with R below.

    Returns the free energy in kJ/mol.
    """
    R = 8.314472 * 0.001  # Gas constant in kJ/mol/K
    V = 1.66  # standard volume in nm^3
    th_a = math.radians(th_a)  # convert angle from degrees to radians --> math.sin() wants radians
    th_A = math.radians(th_A)  # convert angle from degrees to radians --> math.sin() wants radians
    dG = - R * T * math.log(
        (8.0 * math.pi ** 2.0 * V) / (r_aA ** 2.0 * math.sin(th_a) * math.sin(th_A))
        *
        (
            ((K_r_aA * K_th_a * K_th_A * K_phi_ba * K_phi_aA * K_phi_AB) ** 0.5) / ((2.0 * math.pi * R * T) ** 3.0)
        )
    )
    return dG
class SrollViewer(Toplevel):
    """Read-only, scrollable text pop-up window.

    (The class name is kept as-is, typo included, since callers reference it.)
    """
    def __init__(self, master, title, text):
        """Build the window, insert *text* and disable editing."""
        Toplevel.__init__(self, master=master)
        self.title(title)
        fra = Frame(self)
        fra.grid(row=0, column=0, pady=5, padx=5)
        self.tx = Text(fra, width=130, height=20, wrap=WORD)
        scr = Scrollbar(fra, command=self.tx.yview)
        self.tx.configure(yscrollcommand=scr.set)
        self.tx.pack(side=LEFT)
        scr.pack(side=RIGHT, fill=Y)
        # Grab/release global scroll bindings only while the pointer is over
        # the text widget.
        self.tx.bind('<Enter>', lambda e: self._bound_to_mousewheel(e, self.tx))
        self.tx.bind('<Leave>', self._unbound_to_mousewheel)
        self.tx.insert(END, text)
        # Disable after inserting: a disabled Text widget rejects insert().
        self.tx.configure(state='disabled')
        if platform == "darwin":
            # macOS: the default Tk font renders poorly, use explicit Arial.
            button_font = Font(family='Arial', size=15)
        else:
            button_font = Font(font=Button()["font"])
        closeTopolPrevB = Button(self, text='Exit', bg='red', command=self.destroy, font=button_font)
        closeTopolPrevB.grid(row=1, column=0, pady=5)
    def _bound_to_mousewheel(self, event, tx):
        """Install application-wide wheel/arrow-key bindings targeting *tx*."""
        _ = event
        self.bind_all('<MouseWheel>', lambda e: self._on_mousewheel(e, tx))
        self.bind_all('<Button-4>', lambda e: self._on_mousewheel(e, tx))
        self.bind_all('<Button-5>', lambda e: self._on_mousewheel(e, tx))
        self.bind_all('<Up>', lambda e: self._on_mousewheel(e, tx))
        self.bind_all('<Down>', lambda e: self._on_mousewheel(e, tx))
    def _unbound_to_mousewheel(self, event):
        """Remove the application-wide scroll bindings installed above."""
        _ = event
        self.unbind_all('<MouseWheel>')
        self.unbind_all('<Button-4>')
        self.unbind_all('<Button-5>')
        self.unbind_all('<Up>')
        self.unbind_all('<Down>')
    @staticmethod
    def _on_mousewheel(event, tx):
        """Scroll *tx*: X11 sends Button-4/5, other platforms use event.delta."""
        if event.num == 4 or event.keysym == 'Up':
            tx.yview_scroll(-1, 'units')
        elif event.num == 5 or event.keysym == 'Down':
            tx.yview_scroll(1, 'units')
        else:
            # Windows/macOS wheel events arrive in multiples of 120.
            tx.yview_scroll(int(-1 * (event.delta / 120)), 'units')
class Output(object):
    def __init__(self, main, bondForceParams, atoms_def):
        """Compute the Boresch restraint free energies and build the result GUI.

        main : Tk root (or Toplevel) window hosting the widgets.
        bondForceParams : dict with the reference geometry (r_aA, th_a, th_A,
            phi_*), force constants (K_*), temperature T and atom indices.
        atoms_def : dict mapping the same index_* keys to atom labels, used
            in the generated file comments.
        """
        self.bondForceParams = bondForceParams
        # dG of switching the restraints OFF (Boresch analytic formula, kJ/mol).
        self.dG_off_kJ = calc_dG(
            bondForceParams['T'],
            bondForceParams['r_aA'],
            bondForceParams['th_a'],
            bondForceParams['th_A'],
            bondForceParams['K_r_aA'],
            bondForceParams['K_th_a'],
            bondForceParams['K_th_A'],
            bondForceParams['K_phi_ba'],
            bondForceParams['K_phi_aA'],
            bondForceParams['K_phi_AB']
        )
        # Switching ON is the exact negative; also keep kCal conversions.
        self.dG_on_kJ = -self.dG_off_kJ
        self.dG_off_kCal = kJ_to_kCal(self.dG_off_kJ)
        self.dG_on_kCal = kJ_to_kCal(self.dG_on_kJ)
        self.atoms_def = atoms_def
        self.main = main
        self.main.title('PyFepRestr')
        # macOS: the default Tk font renders poorly, use explicit Arial.
        if platform == "darwin":
            self.button_font = self.label_font = self.radiobutton_font = Font(family='Arial', size=15)
        else:
            self.radiobutton_font = Font(font=Radiobutton()["font"])
            self.label_font = Font(font=Label()["font"])
            self.button_font = Font(font=Button()["font"])
        # Unit selector: 0 = kJ/mol, 1 = kCal/mol (see refresh()).
        self.r_var = BooleanVar()
        self.r_var.set(0)
        rj1 = Radiobutton(self.main, text='kJ', variable=self.r_var, value=0, command=self.refresh,
                          font=self.radiobutton_font)
        rcal1 = Radiobutton(self.main, text="kCal", variable=self.r_var, value=1, command=self.refresh,
                            font=self.radiobutton_font)
        rj1.grid(row=0, column=0, padx=5, pady=5)
        rcal1.grid(row=0, column=1, padx=5, pady=5)
        # Static labels for the two energies (\u0394 is the Greek Delta).
        name0 = Label(self.main, text=u'\u0394G_off = ', font=self.label_font)
        name1 = Label(self.main, text=u'\u0394G_on = ', font=self.label_font)
        name0.grid(row=1, column=0, padx=5, pady=5)
        name1.grid(row=2, column=0, padx=5, pady=5)
        # Value and unit labels; their text is filled in by refresh().
        self.answer0 = Label(self.main, font=self.label_font)
        self.answer1 = Label(self.main, font=self.label_font)
        self.answer0.grid(row=1, column=1, padx=5, pady=5)
        self.answer1.grid(row=2, column=1, padx=5, pady=5)
        self.dimen0 = Label(self.main, font=self.label_font)
        self.dimen1 = Label(self.main, font=self.label_font)
        self.dimen0.grid(row=1, column=2, padx=5, pady=5)
        self.dimen1.grid(row=2, column=2, padx=5, pady=5)
        self.refresh()
        destroyProgr = Button(self.main, text='Exit', bg='red', command=self.main.destroy,
                              font=self.button_font)
        destroyProgr.grid(row=0, column=3, padx=5, pady=5)
        helpProgr = Button(self.main, text=' ? ', bg='#ffb3fe', command=self.getHelp, font=self.button_font)
        helpProgr.grid(row=4, column=0, padx=5, pady=5)
        # MD platform selector drives both Preview and Save.
        self.select_prog = StringVar()
        self.select_prog.set("Gromacs topology file")
        prog_list = ["Gromacs topology file", "NAMD Colvars"]
        menu = OptionMenu(self.main, self.select_prog, *prog_list)
        menu.grid(row=3, column=0, padx=5, pady=5)
        previewButton = Button(self.main, text='Preview', bg='gray', command=self.ViewRestr,
                               font=self.button_font)
        previewButton.grid(row=3, column=2, padx=5, pady=5)
        saveFileButton = Button(self.main, text='Save', bg='gray', command=self.writeFile,
                                font=self.button_font)
        saveFileButton.grid(row=3, column=3, padx=5, pady=5)
def refresh(self):
if self.r_var.get():
self.dimen0.configure(text='kCal/mol')
self.dimen1.configure(text='kCal/mol')
self.answer0.configure(text='{:>.3f}'.format(self.dG_off_kCal))
self.answer1.configure(text='{:>.3f}'.format(self.dG_on_kCal))
else:
self.dimen0.configure(text='kJ/mol')
self.dimen1.configure(text='kJ/mol')
self.answer0.configure(text='{:>.3f}'.format(self.dG_off_kJ))
self.answer1.configure(text='{:>.3f}'.format(self.dG_on_kJ))
self.dimen0.update()
self.dimen0.update()
self.answer0.update()
self.answer1.update()
    @staticmethod
    def getHelp():
        """Render the built-in HTML help to a temp file and open it in a browser."""
        # delete=False: the file must outlive this call so the browser can read it.
        with tempfile.NamedTemporaryFile('w', delete=False, suffix='.html') as f:
            url = "file://" + f.name
            f.write(help_2)
        webbrowser.open(url)
def ViewRestr(self):
if self.select_prog.get() == "Gromacs topology file":
restraints = self.createGronacsRestr()
SrollViewer(self.main, "topol.top", restraints)
elif self.select_prog.get() == "NAMD Colvars":
restraints = self.createNAMDRestraints()
SrollViewer(self.main, "Restraints.in", restraints)
def writeFile(self):
if self.select_prog.get() == "Gromacs topology file":
topolFile = askopenfilename(initialdir="./", title="Select file",
filetypes=(("Topology files", "*.top"),
("all files", "*.*")))
restraints = self.createGronacsRestr()
elif self.select_prog.get() == "NAMD Colvars":
topolFile = asksaveasfilename(initialdir="./", title="Save as..",
filetypes=(("in files", "*.in"),
("Tcl files", "*.tcl"),
("all files", "*.*")),
initialfile="Restraints.in",
defaultextension='.in')
restraints = self.createNAMDRestraints()
if topolFile is None:
showerror("Error", "File is not selected!")
return
try:
if self.select_prog.get() == "Gromacs topology file":
with open(topolFile, 'at') as f:
f.write("\n\n" + restraints)
elif self.select_prog.get() == "NAMD Colvars":
with open(topolFile, 'wt') as f:
f.write(restraints)
except IOError:
showerror("Error", "File {:s} is not accessible for writing!".format(topolFile))
    def createGronacsRestr(self):
        """Build the Gromacs [ intermolecular_interactions ] restraint section.

        Emits one distance (type 6), two angles (type 1) and three dihedrals
        (type 2) between the six anchor atoms. For each term the A state has
        force constant 0 and the B state the full constant, so interpolating
        the bonded lambdas switches the restraints on. Atom labels from
        atoms_def and the computed dG values are included as comments.
        """
        restraints = ("[ intermolecular_interactions ]\n"
                      "[ bonds ]\n"
                      "; ai     aj    type   bA      kA     bB      kB\n"
                      "  {12:5d}  {13:5d}  6      {0:.3f}   0.0    {0:.3f}  {6:.1f} ; {20:s} -  {21:s}\n"
                      " \n"
                      "[ angles ]\n"
                      "; ai     aj    ak     type    thA     fcA   thB     fcB\n"
                      "  {14:5d}  {12:5d}  {13:5d}  1       {1:>6.2f}  0.0   {1:>6.2f}  {7:.2f} ; {19:s} - {20:s} -  {21:s}\n"
                      "  {12:5d}  {13:5d}  {15:5d}  1       {2:>6.2f}  0.0   {2:>6.2f}  {8:.2f} ; {20:s} -  {21:s} -  {22:s}\n"
                      "\n"
                      "[ dihedrals ]\n"
                      "; ai     aj    ak    al    type     thA      fcA   thB      fcB\n"
                      "  {16:5d}  {14:5d}  {12:5d}  {13:5d} 2        {3:>7.2f}   0.0   {3:>7.2f}  {9:.2f} ; {18:s} - {19:s} - {20:s} -  {21:s}\n"
                      "  {14:5d}  {12:5d}  {13:5d}  {15:5d} 2        {4:>7.2f}   0.0   {4:>7.2f}  {10:>7.2f} ; {19:s} - {20:s} -  {21:s} -  {22:s}\n"
                      "  {12:5d}  {13:5d}  {15:5d}  {17:5d} 2        {5:>7.2f}   0.0   {5:>7.2f}  {11:>7.2f} ; {20:s} -  {21:s} -  {22:s} -  {23:s}\n"
                      ";    T   = {24:.1f} K\n"
                      "; dG_off = {25:.3f} kJ/mol ({27:.3f} kCal/mol)\n"
                      "; dG_on  = {26:.3f} kJ/mol ({28:.3f} kCal/mol)\n"
                      ).format(
            self.bondForceParams['r_aA'],  # 0
            self.bondForceParams['th_a'],  # 1
            self.bondForceParams['th_A'],  # 2
            self.bondForceParams['phi_ba'],  # 3
            self.bondForceParams['phi_aA'],  # 4
            self.bondForceParams['phi_AB'],  # 5
            self.bondForceParams['K_r_aA'],  # 6
            self.bondForceParams['K_th_a'],  # 7
            self.bondForceParams['K_th_A'],  # 8
            self.bondForceParams['K_phi_ba'],  # 9
            self.bondForceParams['K_phi_aA'],  # 10
            self.bondForceParams['K_phi_AB'],  # 11
            self.bondForceParams['index_a'],  # 12
            self.bondForceParams['index_A'],  # 13
            self.bondForceParams['index_b'],  # 14
            self.bondForceParams['index_B'],  # 15
            self.bondForceParams['index_c'],  # 16
            self.bondForceParams['index_C'],  # 17
            self.atoms_def['index_c'],  # 18
            self.atoms_def['index_b'],  # 19
            self.atoms_def['index_a'],  # 20
            self.atoms_def['index_A'],  # 21
            self.atoms_def['index_B'],  # 22
            self.atoms_def['index_C'],  # 23
            self.bondForceParams['T'],  # 24
            self.dG_off_kJ,  # 25
            self.dG_on_kJ,  # 26
            self.dG_off_kCal,  # 27
            self.dG_on_kCal  # 28
        )
        return restraints
def createNAMDRestraints(self):
preambula_1 = ("Colvarstrajfrequency 500\n"
"Colvarsrestartfrequency 500\n"
"\n"
"\n"
"#############################################################\n"
"# ALL COLVARS RESTRAINED\n"
"#############################################################\n"
"# COLVARS DEFINITIONS\n"
"#############################################################\n"
"\n")
colvarR_aA = ("# {:s} - {:s}\n"
"colvar {{\n"
" name R\n"
" width 1.0\n"
" lowerboundary 0.0\n"
" upperboundary 40.0\n"
" distance {{\n"
" forceNoPBC yes\n"
" group1 {{\n"
" atomnumbers {{ {:d} }}\n"
" }}\n"
" group2 {{\n"
" atomnumbers {{ {:d} }}\n"
" }}\n"
" }}\n"
"}}\n").format(self.atoms_def['index_a'],
self.atoms_def['index_A'],
self.bondForceParams['index_a'],
self.bondForceParams['index_A'])
colvarTh_a = ("# {:s} - {:s} - {:s}\n"
"colvar {{\n"
" name th_a\n"
" width 1.0\n"
" lowerboundary 0.0\n"
" upperboundary 180.0\n"
" angle {{\n"
" group1 {{\n"
" atomnumbers {{ {:d} }}\n"
" }}\n"
" group2 {{\n"
" atomnumbers {{ {:d} }}\n"
" }}\n"
" group3 {{\n"
" atomnumbers {{ {:d} }}\n"
" }}\n"
" }}\n"
"}}\n").format(self.atoms_def['index_b'],
self.atoms_def['index_a'],
self.atoms_def['index_A'],
self.bondForceParams['index_b'],
self.bondForceParams['index_a'],
self.bondForceParams['index_A'])
colvarTh_A = ("# {:s} - {:s} - {:s}\n"
"colvar {{\n"
" name th_A\n"
" width 1.0\n"
" lowerboundary 0.0\n"
" upperboundary 180.0\n"
" angle {{\n"
" group1 {{\n"
" atomnumbers {{ {:d} }}\n"
" }}\n"
" group2 {{\n"
| |
###############################
#
# (c) <NAME>, <NAME>, <NAME>, <NAME> 2017
# Student No: C14714071
# Course: DT228
# Date: 13-10-2017
#
# Title - Difference Checker
#
# Introduction:
#   - The difference checker is an application created for the purpose of finding the differences
#    that exist in two images that are similar but have fundamental differences. This is achieved through the
# equalization, image manipulation, morphological applications and template matching to the two
# images allowing us to produce a final product that highlights the key differences between the one
# of the images compared to the other.
#
# Tested on OpenCV 3.2.0 with Python 2.7.
#
###############################
###############################
#
# 1. Background Information
#
# Libraries:
#
#   CV2 - Extensive computer graphics library with sophisticated algorithms and functions that can
#   be worked upon to suit the needs of many image/video applications.
#
# Numpy and Math - Mathematical libraries that are needed to sort through multidimensional array data
# and perform equations to correct problems that arise in the application.
#
#   EasyGUI - A small graphics library that allows for the quick and easy integration of UI into
#   applications.
#
###############################
#
# 2. Structure
#
# 2.1 Read in images
# - Images are read in through here by manual navigation by the user. The images are stored in
# image variables for use in the application.
#
# * EasyGUI's fileopenbox() function was used to allow the user to sift through the files on
# their PC allowing them to navigate to the image files. Once the images are loaded the button
# can be pressed and will start the application.
#
# 2.2 Image Manipulation and feature detection
# - The images are passed through several functions that downscale, upscale, rotate and transform them
# so that they are better suited for the operations to detect the differences. During this process the
# features are detected and stored in an array so that they can be used later in the application as a
# basis of finding the differences.
#
# 2.2.1 Downscale image
# - Big images often cause trouble for applications as they increase run time and worse produce an
# abundant amount of false positives which would otherwise not show up if we downscale the image.
# Here we downscale our image so that we can display our results properly, reduce runtime and noise.
#
# * In our downscaleImages() we extract the width and height of the images to find out which one of
# the images is bigger. By finding the bigger image we can compared it to the smaller image thus
# creating a scaling factor which we'll use to ensure that the two images are the small dimensions.
# The resizing was done with the cv2.resize() function.
#
# 2.2.2 Rotating the image with feature detection
# - We will try to rotate the second image to match the orientation of the object in the first image. This
# allows our algorithms to perform better as the objects to be detected will be put as close together as they
# can be on the x and y planes.
#
# * In matchRotation() we first add a border to our second image (i.e. the one to be rotated). This is because
# when we rotate the image we will lose data due to the slight shift which we want to keep until the rotation is
# complete. Next, we put the images through our getMatches() function to get the features that exist in the both
# images. This function uses AKAZE as a feature detector [1] .Using the features we pass the two best lines to
#      our getRotationAngle() function and by using the x and y coordinates we calculate the atan of the two lines
# and apply this angle to the second image. We then rotate the second image using this angle and return the rotated image.
#
# 2.2.4 Location Correction
# - The location correction will move the objects within the second image as close as possible to the object in the first using
# the features. As the rotation of the image had been corrected we will simply find the sum needed to be applied to the axis to
# bring the second object to roughly the same position as the first.
#
# * The coordinates are loaded in the locationCorrection() function. The getMatches() will be used to once more find the differences
# in the images and using by using the difference we find the sum needed to be transitioned. A translation matrix is created with
# the x and y axis difference [2]. The warpAffine() function will take this in and apply it to our image.
#
# 2.3 Getting the mask
# - The mask of the differences will be populated here. Using histogram equalization techniques, morphological operations and contour
# sizing comparisons we are able to create a mask that display areas that are dense where differences exists.
#
# 2.3.1 Creating the mask
# - The initial mask is created with the differences and will be dilated to ensure that positives will cojoin together and be shown as a
# an area of difference. At the same time, false positive which are often isolated will be removed thus creating a mask of areas of difference.
#
# * In our getMask() function we first use equalizeHist() so that the distribution of pixels is even across the image. By use of our
# getDifferences() function we populate a newly created image with features that are not present in either images (the differences)
# as getDifferences() stores the differences in an array. This function use AKAZE for feature detection [1].
# Template matching is then applied to every contour seeing if it doesn't exist in the second image.
# If it did we were able to automate our "dilation" loop which works by drawing a black contour on pixels less than a certain size thus
# coloring them black while drawing a white contour around pixels that have conjoined.
#      We repeat this until there are no more iterations that can be made. Just using dilation would make all the pixels bigger.
# This only increases the biggest differences.
#
# 2.4 Applying Template Matching
# - Using the mask attained from 2.3 we are able to draw a contour around all the differences discovered. We apply CLAHE to the images to sharpen the image
# and by using template matching we can search to see if patches exist on the second image compared to the first. Thus we were able to isolate only the
# true differences in the two images.
#
# 2.4.1 Getting the patches
# - In our getAllPatches() we apply our cv2.boundingRect() to the image where patches exist thus creating an image where every patch,
# even the noise, is apparent.
#
# 2.4.2 Applying CLAHE
#     - We now apply Contrast Limited Adaptive Histogram Equalization to the images thus allowing the distribution to be sharp through the image.
# * In our normaliseImages() function we apply the CLAHE to the entire image rather than just segments of the image. The contrast
#       clipping was set to 40 and the reason for us applying it to the whole image is because we wanted the image to equalize in relation to
#       itself rather than have sharp points throughout.
#
# 2.4.3 Getting the best patches
# - The patches that appear in both and aren't similar to each other are retrieved in this section. This is where only the true difference
# is retrieved from the application and is drawn to the image.
#      * getBestPatches() takes in a list of contours and a threshold. It then uses template matching to find the normalised matched value.
#        It then returns all the contours which are smaller than the threshold. The values for the normalised template matching can be between
# 0.0 and 1.0 where 1.0 is a perfect match [3]
#      * Our getBestPatchAuto() will try multiple thresholds and determine the lowest threshold where patches are found. This makes sure that
# only the best patches are returned.
#
# 2.5 Displaying the images
# - The contour is applied to this image and then displayed using hstack next to its compared image.
#
###############################
###############################
#
# 3. Extra Notes:
#
# | |
# coding: utf-8
# 本脚本copy到工程目录下,与工程文件同级即可
import sys
import time
import re
import requests
import json
import os
import smtplib
import platform
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from release_conf import config
# @params selectType 1:dev-test,2:dev-online,3:rc-online,4:AppStore-online
# @params cmdType -a 自动上传至Fir/AppStore
def main_archive(selectType, cmdType):
    """Archive, export and (optionally) upload the app.

    selectType: 1 dev-test, 2 dev-online, 3 rc-online, 4 AppStore-online.
    cmdType: 'a' to auto-upload to Fir/AppStore and e-mail the result.
    """
    print(_cmd_string(selectType, cmdType))
    conf = config.ConfigInfo().proItem
    ### config
    scheme = conf.scheme
    workspace = conf.workspace
    inputDoc = conf.inputDoc
    outputDoc = conf.outputDoc
    appIconName = conf.appIconName
    # Accumulators for the result e-mail / console report.
    subject = ''
    content = ''
    xcarchiveOutput = _xcarchive_output(selectType, outputDoc)
    # Archive the build.
    archiveSuccess, xcarchiveFilePath = archive(workspace, scheme, xcarchiveOutput)
    # If archiving failed, skip everything that follows.
    # NOTE(review): 'faild' below is a typo in the log message ('failed').
    if not archiveSuccess:
        content = 'archive faild ==> ' + _cmd_string(selectType, cmdType)
        subject = '打包失败'
    else:
        # Read version / build / bundle-id metadata from the archive's Info.plist.
        version, build, bundleId, iconFile = read_ipa_info(xcarchiveFilePath, appIconName)
        print(version + '\n' + build + '\n' + bundleId + '\n' + iconFile)
        # Export the .ipa file.
        exportOptionsFilePath = _xcexport_input(selectType, inputDoc, scheme)
        exportSuccess, folderPath, exprotFileName, newXcarchiveFilePath = export_ipa(xcarchiveOutput, scheme, selectType, version, exportOptionsFilePath)
        if not exportSuccess:
            content = 'export faild ==> ' + _cmd_string(selectType, cmdType)
            subject = '导出失败'
        else:
            # Assemble the path of the exported .ipa file.
            ipaPath = folderPath + exprotFileName + '.ipa'
            # Copy the dSYM bundle next to the .ipa.
            export_dSYM_file(folderPath, exprotFileName, scheme)
            # Only reached on success; both failure paths took the branches above.
            content = content + '\n' + '======= 打包成功 ======='
            content = content + '\n' + 'ipaPath:%s' % ipaPath
            subject = '打包成功'
            print(content)
            # Automatic upload / distribution.
            if cmdType == 'a':
                # Upload to iTunes Connect (AppStore builds only).
                if selectType == 4:
                    _uploadToAppStore(ipaPath)
                    content = content + '\n' + '======= 已经上传至ITC ======='
                    content = content + '\n' + '是否上传成功,请以苹果邮件和具体情况为准,成功后可进行testflight测试'
                # Upload to FIR (all other build types).
                else:
                    iconPath = newXcarchiveFilePath + iconFile
                    downloadurl, operalurl = _uploadToFir(selectType, ipaPath, iconPath, bundleId, version, build)
                    content = content + '\n' + '======= 上传FIR成功 ======='
                    content = content + '\n' + '请扫码下载:'
                    content = content + '\n' + '操作地址:\n ' + operalurl
                    content = content + '\n' + '下载地址:\n ' + downloadurl
            else:
                content = content + '\n' + '======= 未选择自动上传 ======='
                content = content + '\n' + '如需自动上传,请在加上命令参数 -a'
    # NOTE(review): the result e-mail is only sent in auto-upload mode — confirm intended.
    if cmdType == 'a':
        _sendEmail(selectType, subject, content)
    print(content)
# 根据输入类型和配置信息,返回xc打包路径
def _xcarchive_output(selectType, root):
### config
if selectType == 1:
return '%sDevInner/' % root
elif selectType == 2:
return '%sDevOuter/' % root
elif selectType == 3:
return '%sDevRC/' % root
elif selectType == 4:
return '%sAppStore/' % root
return ''
def _xcexport_input(selectType, root, scheme):
### config
filename = 'ExportOptions.plist'
if selectType == 1:
return '%s%s-Dev-%s' % (root, scheme, filename)
elif selectType == 2:
return '%s%s-Dev-%s' % (root, scheme, filename)
elif selectType == 3:
return '%s%s-Dev-%s' % (root, scheme, filename)
elif selectType == 4:
return '%s%s-Dis-%s' % (root, scheme, filename)
return ''
def _uploadToAppStore(ipaPath):
    """Upload the .ipa to iTunes Connect using credentials from release_conf."""
    conf = config.ConfigInfo().itcItem
    uploaditc(ipaPath, conf.username, conf.password)
def _uploadToFir(selectType, ipaPath, iconPath, bundleId, version, build):
    """Upload the .ipa (and its icon) to FIR; returns (download_url, operate_url)."""
    conf = config.ConfigInfo().firItem
    fir_token = conf.token
    changelog = _fir_changelog(selectType)
    downloadurl, operalurl = upload_ipa(fir_token, ipaPath, iconPath, bundleId, version, build, changelog)
    return downloadurl, operalurl
def _sendEmail(selectType, subject, content):
    """Send the build-result e-mail using SMTP settings from release_conf.

    AppStore builds (selectType == 4) go to the ITC recipient list; every
    other build type goes to the regular recipient list.
    """
    conf = config.ConfigInfo().emailItem
    email_SMTP = conf.smtp
    email_SMTP_port = conf.port
    email_user = conf.user
    # Fixed: this line was an unresolved `<PASSWORD>` placeholder (a syntax
    # error). Presumably emailItem exposes `password` like its sibling
    # fields — confirm against release_conf.
    email_password = conf.password
    email_sender_name = conf.sendername
    email_to_list = conf.tolist
    email_to_list_itc = conf.toitclist
    email_cc_list = conf.cclist
    # AppStore uploads notify the ITC list instead of the default list.
    email_to_list = email_to_list if selectType != 4 else email_to_list_itc
    # Send the mail once the packaging result is known.
    se = email_create(email_SMTP, email_SMTP_port, email_user, email_password)
    email_send(se, email_sender_name, email_to_list, email_cc_list, subject, content)
####################################################################################################
# String
####################################################################################################
def _parser_selectType(selectType):
nn = None
pn = None
if selectType == 1:
return '内网', 'Dev'
if selectType == 2:
return '外网', 'Dev'
if selectType == 3:
return 'RC', 'Dev'
if selectType == 4:
return '外网', 'Dis'
return nn, pn
def _parser_net_name(selectType):
netname = ''
if selectType == 1:
netname = 'Dev包测试环境'
elif selectType == 2:
netname = 'Dev包线上环境'
elif selectType == 3:
netname = 'Dev包RC环境'
elif selectType == 4:
netname = 'AppStore包'
return netname
def _fir_changelog(selectType):
changelog = ''
if selectType == 1:
changelog = '[测试环境](脚本自动上传,请添加更新说明)'
elif selectType == 2:
changelog = '[线上环境](脚本自动上传,请添加更新说明)'
elif selectType == 3:
changelog = '[RC环境](脚本自动上传,请添加更新说明)'
else:
changelog = '(脚本自动上传,请添加更新说明)'
return changelog
def _cmd_string(selectType, cmdType):
return 'selectType:%d, cmdType:%s' % (selectType, cmdType)
####################################################################################################
# Archve
####################################################################################################
# archive并导出ipa
def archive(workspace, scheme, outputPath) :
    """Run `xcodebuild archive` for the given workspace/scheme.

    Returns (success, xcarchive_path); success is judged solely by whether
    the .xcarchive bundle exists afterwards.
    """
    xcarchiveFilePath = '%s%s.xcarchive' % (outputPath, scheme)
    archiveCommand = "xcodebuild archive -destination 'generic/platform=iOS' -workspace '%s' -scheme '%s' -archivePath '%s' -quiet" % (workspace, scheme, xcarchiveFilePath)
    cdCommand = 'cd %s' % './'
    print('archiveCommand:' + archiveCommand)
    os.system('%s' % cdCommand + ';' + archiveCommand)
    # The archive succeeded iff the .xcarchive bundle now exists on disk.
    if os.path.exists(xcarchiveFilePath) is False:
        return False, ''
    return True, xcarchiveFilePath
# 读入info配置
def read_ipa_info(xcarchiveFilePath, appIconName):
infoplistpath = xcarchiveFilePath + '/Info.plist'
iconFile = '/Products/%s/%s' % (getApplicationPath(infoplistpath), appIconName)
version = getShortVersion(infoplistpath)
build = getVersion(infoplistpath)
bundleId = getBundleID(infoplistpath)
return version, build, bundleId, iconFile
def export_ipa(exportPath, targetName, selectType, v, exportOptionsFilePath):
    """Export an .ipa from the .xcarchive located under exportPath.

    Returns (success, folderPath, exportFileName, newXcarchivePath); on any
    failure the last three values are empty strings.
    """
    curTime = time.strftime("%Y-%m-%d %H-%M-%S", time.localtime())
    folderName = '%s %s' % (targetName, curTime)
    folderPath = '%s%s/%s/' % (exportPath, v, folderName)
    createFolderIfNeed(folderPath)
    # Locate the .xcarchive produced by archive().
    xcarchiveFileName = targetName + '.xcarchive'
    xcarchiveFilePath = exportPath + xcarchiveFileName
    # The ExportOptions-xxx.plist must exist for xcodebuild -exportArchive.
    if not os.path.exists(exportOptionsFilePath) :
        return False, '', '', ''
    # Export the .ipa package.
    exportArchiveCommand = "xcodebuild -exportArchive -archivePath '%s' -exportPath '%s' -exportOptionsPlist '%s'" % (
        xcarchiveFilePath, folderPath, exportOptionsFilePath)
    print('exportArchiveCommand:' + exportArchiveCommand)
    os.system(exportArchiveCommand)
    # NOTE(review): getFilePath() returns None when no .ipa was produced,
    # which would raise TypeError on the concatenation below — confirm.
    ipaFilePath = folderPath + getFilePath(folderPath, '.ipa')
    # Check that the .ipa was actually produced.
    if os.path.exists(ipaFilePath) is False:
        print(ipaFilePath)
        return False, '', '', ''
    # Rename the .ipa to include version / network / package labels.
    nn, pn = _parser_selectType(selectType)
    exprotFileName = targetName + '-' + v + '-' + nn + '-' + pn
    newIpaFilePath = folderPath + '/' + exprotFileName + '.ipa'
    fileRename(ipaFilePath, newIpaFilePath)
    # After a successful export, move the .xcarchive into the dated folder.
    newXcarchiveFilePath = folderPath + xcarchiveFileName
    moveFileToFolder(xcarchiveFilePath, newXcarchiveFilePath)
    return True, folderPath, exprotFileName, newXcarchiveFilePath
def export_dSYM_file(folderPath, exprotFileName, scheme):
    """Copy the dSYM bundle out of the moved .xcarchive into folderPath."""
    # assumes the dSYM is named after the lowercased scheme — TODO confirm
    dSYMPath = folderPath + scheme + '.xcarchive/dSYMs/' + scheme.lower() + '.app.dSYM'
    dSYMToOutputPath = folderPath + exprotFileName + '.dSYM'
    print('dSYM:' + dSYMPath)
    print('dSYM out put:' + dSYMToOutputPath)
    copyFolderToFolder(dSYMPath, dSYMToOutputPath)
####################################################################################################
# 文件操作的定义
####################################################################################################
# 根据传入的文件路径,获取这个文件的文本内容
def read_file(fpath):
    """Read a text file.

    Returns (text, error_reason): on success error_reason is None; on any
    failure text is None and error_reason describes the problem (which is
    also printed, preserving the original behaviour).
    """
    try:
        # `with` guarantees the handle is closed, replacing the manual
        # try/finally close of the original.
        with open(fpath, 'r') as f:
            return f.read(), None
    except Exception as e:
        # Fixed: dropped the dead `fileError = True` assignment from the
        # original handler; the flag was never read anywhere.
        reason = '读取[' + fpath + ']文件出错!'
        print(reason)
        print(e)
        return None, reason
# 根据传入的文件路径,替换写入text内容
def write_file(fpath, text):
    """Overwrite fpath with text.

    Returns None on success, otherwise an error-reason string (also printed).

    Fixed: the original referenced `f` in its `finally` block without ever
    initialising it, so a failing open() raised NameError instead of
    returning the error reason.
    """
    try:
        # `with` closes the handle even on a failed write.
        with open(fpath, 'w') as f:
            f.write(text)
        return None
    except Exception as e:
        reason = '写入[' + fpath + ']文件出错!'
        print(reason)
        print(e)
        return reason
####################################################################################################
# 文件&文件夹操作
####################################################################################################
# NOTE(review): these wrappers shell out via os.system; paths containing a
# single quote would break the quoting below.
def copyFileToFolder(filePath, folderPath):
    """Copy a single file into folderPath (shell `cp`)."""
    cmd = "cp '%s' '%s'" % (filePath, folderPath)
    os.system(cmd)
def copyFolderToFolder(folderPath1, folderPath2):
    """Recursively copy folderPath1 to folderPath2 (shell `cp -R`)."""
    cmd = "cp -R '%s' '%s'" % (folderPath1, folderPath2)
    os.system(cmd)
def removeFolder(folderPath):
    """Delete a folder tree (shell `rm -rf`)."""
    cmd = "rm -rf '%s'" % folderPath
    os.system(cmd)
def moveFileToFolder(filePath, folderPath):
    """Move a file into folderPath (shell `mv`)."""
    cmd = "mv '%s' '%s'" % (filePath, folderPath)
    os.system(cmd)
def fileRename(oldFilePath, newFilePath):
    """Rename/move a file (shell `mv`)."""
    cmd = "mv '%s' '%s'" % (oldFilePath, newFilePath)
    os.system(cmd)
def createFolderIfNeed(folderPath):
    """Create folderPath (and parents) if missing (shell `mkdir -p`)."""
    cmd = "mkdir -p '%s'" % folderPath
    os.system(cmd)
def getFilePath(folderPath, pattern):
    """Return the first directory entry ending with `pattern`, or None."""
    entries = os.listdir(folderPath)
    return next((name for name in entries if name.endswith(pattern)), None)
####################################################################################################
# 从 info.plist 获取版本号
####################################################################################################
def getVersionWithKey(key, fpath):
    """Return the <string> value on the line after the first line containing
    `key` in a plist-style text file, or None if the key is absent or the
    file cannot be read.

    The parser is line-based: it assumes the value sits on the line directly
    following the key line, wrapped in a single `<string>...</string>`.
    """
    try:
        with open(fpath, 'r') as f:
            lines = f.read().split('\n')
        # Pair each line with its successor; the value follows the key line.
        for key_line, value_line in zip(lines, lines[1:]):
            if key in key_line:
                stripped = value_line.lstrip()
                # Strip the surrounding <string> ... </string> tags.
                return stripped[len('<string>'):-len('</string>')]
        return None
    except Exception as e:
        # Fixed: the original passed the format string and the path as two
        # separate print() arguments instead of %-formatting them together.
        print('读取 %s 文件出错!' % fpath)
        print(e)
        return None
def getShortVersion(fpath):
    """CFBundleShortVersionString (marketing version) from an Info.plist."""
    return getVersionWithKey('CFBundleShortVersionString', fpath)
def getVersion(fpath):
    """CFBundleVersion (build number) from an Info.plist."""
    return getVersionWithKey('CFBundleVersion', fpath)
def getBundleID(fpath):
    """CFBundleIdentifier from an Info.plist."""
    return getVersionWithKey('CFBundleIdentifier', fpath)
def getApplicationPath(fpath):
    """ApplicationPath entry from an .xcarchive Info.plist."""
    return getVersionWithKey('ApplicationPath', fpath)
####################################################################################################
# itc upload:https://help.apple.com/itc/apploader/#/apdATD1E53-D1E1A1303-D1E53A1126
####################################################################################################
def validateipa(filepath, username, password):
    """Validate an .ipa against iTunes Connect via `xcrun altool`."""
    print('ITC验证app:')
    toolcmd = 'xcrun altool'
    # NOTE(review): the password is passed on the command line and is thus
    # visible in the process list — confirm this is acceptable.
    command = "%s --validate-app -f '%s' -t ios -p '%s' -u '%s'" % (toolcmd, filepath, password, username)
    print('altoolValidateCommand:' + command)
    os.system(command)
    return
def uploadipa(filepath, username, password):
    """Upload an .ipa to iTunes Connect via `xcrun altool`."""
    print('ITC上传app:')
    toolcmd = 'xcrun altool'
    command = "%s --upload-app -f '%s' -t ios -p '%s' -u '%s'" % (toolcmd, filepath, password, username)
    print('altoolUploadCommand:' + command)
    os.system(command)
    return
def uploaditc(filepath, username, password):
    """Validate, then upload, an .ipa to iTunes Connect."""
    validateipa(filepath, username, password)
    uploadipa(filepath, username, password)
    return
####################################################################################################
# Fir
####################################################################################################
# 获取 fir 的上传凭证
def get_cert(bundle_id, api_token):
    """Request a FIR upload certificate for the given bundle id.

    Returns the raw response body (JSON bytes) from the FIR API.
    """
    print('发起获取上传凭证请求 ========')
    data = {'type': 'ios', 'bundle_id': bundle_id,
            'api_token': api_token}
    print(data)
    # NOTE(review): the api_token is sent over plain http — confirm the endpoint.
    req = requests.post(url='http://api.bq04.com/apps', data=data)
    cert_resp = req.content
    print('获取到 fir 响应 ========')
    print(str(cert_resp))
    return cert_resp
# 上传到icon到fir
def upload_icon(icon, path):
    """Upload the app icon to FIR using the credentials in `icon`.

    `icon` is the icon-cert dict from the FIR cert response; `path` is the
    local icon file. Returns the raw response body.
    """
    # Pull the upload credentials out of the cert blob.
    cert_key = icon['key']
    cert_token = icon['token']
    cert_upload_url = icon['upload_url']
    print('上传 icon ========')
    file = {'file': open(path, 'rb')}
    param = {
        "key": cert_key,
        "token": cert_token
    }
    # NOTE(review): TLS verification is disabled (verify=False) — confirm intended.
    requests.packages.urllib3.disable_warnings()
    req = requests.post(cert_upload_url,files=file, data=param, verify=False)
    print(req.content)
    return req.content
# 上传到ipa到fir
def upload_fir(binary, path, version, build, changelog):
# 拿到相应的token
cert_key = binary['key']
cert_token = binary['token']
cert_upload_url = binary['upload_url']
print('上传 iPA ========')
file = {'file': open(path, 'rb')}
param = | |
m.b2536 <= 1)
m.e4529 = Constraint(expr= m.b2537 + m.b2538 <= 1)
m.e4530 = Constraint(expr= m.b2537 + m.b2539 <= 1)
m.e4531 = Constraint(expr= m.b2537 + m.b2540 <= 1)
m.e4532 = Constraint(expr= m.b2537 + m.b2538 <= 1)
m.e4533 = Constraint(expr= m.b2538 + m.b2539 <= 1)
m.e4534 = Constraint(expr= m.b2538 + m.b2540 <= 1)
m.e4535 = Constraint(expr= m.b2537 + m.b2539 <= 1)
m.e4536 = Constraint(expr= m.b2538 + m.b2539 <= 1)
m.e4537 = Constraint(expr= m.b2539 + m.b2540 <= 1)
m.e4538 = Constraint(expr= m.b2537 + m.b2540 <= 1)
m.e4539 = Constraint(expr= m.b2538 + m.b2540 <= 1)
m.e4540 = Constraint(expr= m.b2539 + m.b2540 <= 1)
m.e4541 = Constraint(expr= m.b2541 + m.b2542 <= 1)
m.e4542 = Constraint(expr= m.b2541 + m.b2543 <= 1)
m.e4543 = Constraint(expr= m.b2541 + m.b2544 <= 1)
m.e4544 = Constraint(expr= m.b2541 + m.b2542 <= 1)
m.e4545 = Constraint(expr= m.b2542 + m.b2543 <= 1)
m.e4546 = Constraint(expr= m.b2542 + m.b2544 <= 1)
m.e4547 = Constraint(expr= m.b2541 + m.b2543 <= 1)
m.e4548 = Constraint(expr= m.b2542 + m.b2543 <= 1)
m.e4549 = Constraint(expr= m.b2543 + m.b2544 <= 1)
m.e4550 = Constraint(expr= m.b2541 + m.b2544 <= 1)
m.e4551 = Constraint(expr= m.b2542 + m.b2544 <= 1)
m.e4552 = Constraint(expr= m.b2543 + m.b2544 <= 1)
m.e4553 = Constraint(expr= m.b2545 + m.b2546 <= 1)
m.e4554 = Constraint(expr= m.b2545 + m.b2547 <= 1)
m.e4555 = Constraint(expr= m.b2545 + m.b2548 <= 1)
m.e4556 = Constraint(expr= m.b2545 + m.b2546 <= 1)
m.e4557 = Constraint(expr= m.b2546 + m.b2547 <= 1)
m.e4558 = Constraint(expr= m.b2546 + m.b2548 <= 1)
m.e4559 = Constraint(expr= m.b2545 + m.b2547 <= 1)
m.e4560 = Constraint(expr= m.b2546 + m.b2547 <= 1)
m.e4561 = Constraint(expr= m.b2547 + m.b2548 <= 1)
m.e4562 = Constraint(expr= m.b2545 + m.b2548 <= 1)
m.e4563 = Constraint(expr= m.b2546 + m.b2548 <= 1)
m.e4564 = Constraint(expr= m.b2547 + m.b2548 <= 1)
m.e4565 = Constraint(expr= m.b2549 + m.b2550 <= 1)
m.e4566 = Constraint(expr= m.b2549 + m.b2551 <= 1)
m.e4567 = Constraint(expr= m.b2549 + m.b2552 <= 1)
m.e4568 = Constraint(expr= m.b2549 + m.b2550 <= 1)
m.e4569 = Constraint(expr= m.b2550 + m.b2551 <= 1)
m.e4570 = Constraint(expr= m.b2550 + m.b2552 <= 1)
m.e4571 = Constraint(expr= m.b2549 + m.b2551 <= 1)
m.e4572 = Constraint(expr= m.b2550 + m.b2551 <= 1)
m.e4573 = Constraint(expr= m.b2551 + m.b2552 <= 1)
m.e4574 = Constraint(expr= m.b2549 + m.b2552 <= 1)
m.e4575 = Constraint(expr= m.b2550 + m.b2552 <= 1)
m.e4576 = Constraint(expr= m.b2551 + m.b2552 <= 1)
m.e4577 = Constraint(expr= m.b2553 + m.b2554 <= 1)
m.e4578 = Constraint(expr= m.b2553 + m.b2555 <= 1)
m.e4579 = Constraint(expr= m.b2553 + m.b2556 <= 1)
m.e4580 = Constraint(expr= m.b2553 + m.b2554 <= 1)
m.e4581 = Constraint(expr= m.b2554 + m.b2555 <= 1)
m.e4582 = Constraint(expr= m.b2554 + m.b2556 <= 1)
m.e4583 = Constraint(expr= m.b2553 + m.b2555 <= 1)
m.e4584 = Constraint(expr= m.b2554 + m.b2555 <= 1)
m.e4585 = Constraint(expr= m.b2555 + m.b2556 <= 1)
m.e4586 = Constraint(expr= m.b2553 + m.b2556 <= 1)
m.e4587 = Constraint(expr= m.b2554 + m.b2556 <= 1)
m.e4588 = Constraint(expr= m.b2555 + m.b2556 <= 1)
m.e4589 = Constraint(expr= m.b2557 + m.b2558 <= 1)
m.e4590 = Constraint(expr= m.b2557 + m.b2559 <= 1)
m.e4591 = Constraint(expr= m.b2557 + m.b2560 <= 1)
m.e4592 = Constraint(expr= m.b2557 + m.b2558 <= 1)
m.e4593 = Constraint(expr= m.b2558 + m.b2559 <= 1)
m.e4594 = Constraint(expr= m.b2558 + m.b2560 <= 1)
m.e4595 = Constraint(expr= m.b2557 + m.b2559 <= 1)
m.e4596 = Constraint(expr= m.b2558 + m.b2559 <= 1)
m.e4597 = Constraint(expr= m.b2559 + m.b2560 <= 1)
m.e4598 = Constraint(expr= m.b2557 + m.b2560 <= 1)
m.e4599 = Constraint(expr= m.b2558 + m.b2560 <= 1)
m.e4600 = Constraint(expr= m.b2559 + m.b2560 <= 1)
m.e4601 = Constraint(expr= m.b2241 - m.b2401 <= 0)
m.e4602 = Constraint(expr= -m.b2241 + m.b2242 - m.b2402 <= 0)
m.e4603 = Constraint(expr= -m.b2241 - m.b2242 + m.b2243 - m.b2403 <= 0)
m.e4604 = Constraint(expr= -m.b2241 - m.b2242 - m.b2243 + m.b2244 - m.b2404
<= 0)
m.e4605 = Constraint(expr= m.b2245 - m.b2405 <= 0)
m.e4606 = Constraint(expr= -m.b2245 + m.b2246 - m.b2406 <= 0)
m.e4607 = Constraint(expr= -m.b2245 - m.b2246 + m.b2247 - m.b2407 <= 0)
m.e4608 = Constraint(expr= -m.b2245 - m.b2246 - m.b2247 + m.b2248 - m.b2408
<= 0)
m.e4609 = Constraint(expr= m.b2249 - m.b2409 <= 0)
m.e4610 = Constraint(expr= -m.b2249 + m.b2250 - m.b2410 <= 0)
m.e4611 = Constraint(expr= -m.b2249 - m.b2250 + m.b2251 - m.b2411 <= 0)
m.e4612 = Constraint(expr= -m.b2249 - m.b2250 - m.b2251 + m.b2252 - m.b2412
<= 0)
m.e4613 = Constraint(expr= m.b2253 - m.b2413 <= 0)
m.e4614 = Constraint(expr= -m.b2253 + m.b2254 - m.b2414 <= 0)
m.e4615 = Constraint(expr= -m.b2253 - m.b2254 + m.b2255 - m.b2415 <= 0)
m.e4616 = Constraint(expr= -m.b2253 - m.b2254 - m.b2255 + m.b2256 - m.b2416
<= 0)
m.e4617 = Constraint(expr= m.b2257 - m.b2417 <= 0)
m.e4618 = Constraint(expr= -m.b2257 + m.b2258 - m.b2418 <= 0)
m.e4619 = Constraint(expr= -m.b2257 - m.b2258 + m.b2259 - m.b2419 <= 0)
m.e4620 = Constraint(expr= -m.b2257 - m.b2258 - m.b2259 + m.b2260 - m.b2420
<= 0)
m.e4621 = Constraint(expr= m.b2261 - m.b2421 <= 0)
m.e4622 = Constraint(expr= -m.b2261 + m.b2262 - m.b2422 <= 0)
m.e4623 = Constraint(expr= -m.b2261 - m.b2262 + m.b2263 - m.b2423 <= 0)
m.e4624 = Constraint(expr= -m.b2261 - m.b2262 - m.b2263 + m.b2264 - m.b2424
<= 0)
m.e4625 = Constraint(expr= m.b2265 - m.b2425 <= 0)
m.e4626 = Constraint(expr= -m.b2265 + m.b2266 - m.b2426 <= 0)
m.e4627 = Constraint(expr= -m.b2265 - m.b2266 + m.b2267 - m.b2427 <= 0)
m.e4628 = Constraint(expr= -m.b2265 - m.b2266 - m.b2267 + m.b2268 - m.b2428
<= 0)
m.e4629 = Constraint(expr= m.b2269 - m.b2429 <= 0)
m.e4630 = Constraint(expr= -m.b2269 + m.b2270 - m.b2430 <= 0)
m.e4631 = Constraint(expr= -m.b2269 - m.b2270 + m.b2271 - m.b2431 <= 0)
m.e4632 = Constraint(expr= -m.b2269 - m.b2270 - m.b2271 + m.b2272 - m.b2432
<= 0)
m.e4633 = Constraint(expr= m.b2273 - m.b2433 <= 0)
m.e4634 = Constraint(expr= -m.b2273 + m.b2274 - m.b2434 <= 0)
m.e4635 = Constraint(expr= -m.b2273 - m.b2274 + m.b2275 - m.b2435 <= 0)
m.e4636 = Constraint(expr= -m.b2273 - m.b2274 - m.b2275 + m.b2276 - m.b2436
<= 0)
m.e4637 = Constraint(expr= m.b2277 - m.b2437 <= 0)
m.e4638 = Constraint(expr= -m.b2277 + m.b2278 - m.b2438 <= 0)
m.e4639 = Constraint(expr= -m.b2277 - m.b2278 + m.b2279 - m.b2439 <= 0)
m.e4640 = Constraint(expr= -m.b2277 - m.b2278 - m.b2279 + m.b2280 - m.b2440
<= 0)
m.e4641 = Constraint(expr= m.b2281 - m.b2441 <= 0)
m.e4642 = Constraint(expr= -m.b2281 + m.b2282 - m.b2442 <= 0)
m.e4643 = Constraint(expr= -m.b2281 - m.b2282 + m.b2283 - m.b2443 <= 0)
m.e4644 = Constraint(expr= -m.b2281 - m.b2282 - m.b2283 + m.b2284 - m.b2444
<= 0)
m.e4645 = Constraint(expr= m.b2285 - m.b2445 <= 0)
m.e4646 = Constraint(expr= -m.b2285 + m.b2286 - m.b2446 <= 0)
m.e4647 = Constraint(expr= -m.b2285 - m.b2286 + m.b2287 - m.b2447 <= 0)
m.e4648 = Constraint(expr= -m.b2285 - m.b2286 - m.b2287 + m.b2288 - m.b2448
<= 0)
m.e4649 = Constraint(expr= m.b2289 - m.b2449 <= 0)
m.e4650 = Constraint(expr= -m.b2289 + m.b2290 - m.b2450 <= 0)
m.e4651 = Constraint(expr= -m.b2289 - m.b2290 + m.b2291 - m.b2451 <= 0)
m.e4652 = Constraint(expr= -m.b2289 - m.b2290 - m.b2291 + m.b2292 - m.b2452
<= 0)
m.e4653 = Constraint(expr= m.b2293 - m.b2453 <= 0)
m.e4654 = Constraint(expr= -m.b2293 + m.b2294 - m.b2454 <= 0)
m.e4655 = Constraint(expr= -m.b2293 - m.b2294 + m.b2295 - m.b2455 <= 0)
m.e4656 = Constraint(expr= -m.b2293 - m.b2294 - m.b2295 + m.b2296 - m.b2456
<= 0)
m.e4657 = Constraint(expr= m.b2297 - m.b2457 <= 0)
m.e4658 = Constraint(expr= -m.b2297 + m.b2298 - m.b2458 <= 0)
m.e4659 = Constraint(expr= -m.b2297 - m.b2298 + m.b2299 - m.b2459 <= 0)
m.e4660 = Constraint(expr= -m.b2297 - m.b2298 - m.b2299 + m.b2300 - m.b2460
<= 0)
m.e4661 = Constraint(expr= m.b2301 - m.b2461 <= 0)
m.e4662 = Constraint(expr= -m.b2301 + m.b2302 - m.b2462 <= 0)
m.e4663 = Constraint(expr= -m.b2301 - m.b2302 + m.b2303 - m.b2463 <= 0)
m.e4664 = Constraint(expr= -m.b2301 - m.b2302 - m.b2303 + m.b2304 - m.b2464
<= 0)
m.e4665 = Constraint(expr= m.b2305 - m.b2465 <= 0)
m.e4666 = Constraint(expr= -m.b2305 + m.b2306 - m.b2466 <= 0)
m.e4667 = Constraint(expr= -m.b2305 - m.b2306 + m.b2307 - m.b2467 <= 0)
m.e4668 = Constraint(expr= -m.b2305 - m.b2306 - m.b2307 + m.b2308 - m.b2468
<= 0)
m.e4669 = Constraint(expr= m.b2309 - m.b2469 <= 0)
m.e4670 = Constraint(expr= -m.b2309 + m.b2310 - m.b2470 <= 0)
m.e4671 = Constraint(expr= -m.b2309 - m.b2310 + m.b2311 - m.b2471 <= 0)
m.e4672 = Constraint(expr= -m.b2309 - m.b2310 - m.b2311 + m.b2312 - m.b2472
<= 0)
m.e4673 = Constraint(expr= m.b2313 - m.b2473 <= 0)
m.e4674 = Constraint(expr= -m.b2313 + m.b2314 - m.b2474 <= 0)
m.e4675 = Constraint(expr= -m.b2313 - m.b2314 + m.b2315 - m.b2475 <= 0)
m.e4676 = Constraint(expr= -m.b2313 - m.b2314 - m.b2315 + m.b2316 - m.b2476
<= 0)
m.e4677 = Constraint(expr= m.b2317 - m.b2477 <= 0)
m.e4678 = Constraint(expr= -m.b2317 + m.b2318 - | |
# fhir/resources/STU3/tests/test_conceptmap.py
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/ConceptMap
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import conceptmap
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class ConceptMapTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("ConceptMap", js["resourceType"])
return conceptmap.ConceptMap(js)
def testConceptMap1(self):
inst = self.instantiate_from("cm-address-use-v3.json")
self.assertIsNotNone(inst, "Must have instantiated a ConceptMap instance")
self.implConceptMap1(inst)
js = inst.as_json()
self.assertEqual("ConceptMap", js["resourceType"])
inst2 = conceptmap.ConceptMap(js)
self.implConceptMap1(inst2)
def implConceptMap1(self, inst):
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("url")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value),
force_bytes("http://hl7.org/fhir"),
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].system), force_bytes("email")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].value),
force_bytes("<EMAIL>"),
)
self.assertEqual(inst.date.date, FHIRDate("2017-04-19T07:44:43+10:00").date)
self.assertEqual(inst.date.as_json(), "2017-04-19T07:44:43+10:00")
self.assertEqual(
force_bytes(inst.group[0].element[0].code), force_bytes("home")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].code), force_bytes("H")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[1].code), force_bytes("work")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].code), force_bytes("WP")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[2].code), force_bytes("temp")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].code), force_bytes("TMP")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(force_bytes(inst.group[0].element[3].code), force_bytes("old"))
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].code), force_bytes("OLD")
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].comment),
force_bytes("Bad or Old"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].equivalence),
force_bytes("narrower"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[1].code), force_bytes("BAD")
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[1].comment),
force_bytes("Bad or Old"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[1].equivalence),
force_bytes("narrower"),
)
self.assertEqual(
force_bytes(inst.group[0].source),
force_bytes("http://hl7.org/fhir/address-use"),
)
self.assertEqual(
force_bytes(inst.group[0].target),
force_bytes("http://hl7.org/fhir/v3/AddressUse"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("cm-address-use-v3"))
self.assertEqual(force_bytes(inst.name), force_bytes("v3 map for AddressUse"))
self.assertEqual(force_bytes(inst.publisher), force_bytes("HL7 (FHIR Project)"))
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.url),
force_bytes("http://hl7.org/fhir/ConceptMap/cm-address-use-v3"),
)
def testConceptMap2(self):
inst = self.instantiate_from("cm-medication-admin-status-v3.json")
self.assertIsNotNone(inst, "Must have instantiated a ConceptMap instance")
self.implConceptMap2(inst)
js = inst.as_json()
self.assertEqual("ConceptMap", js["resourceType"])
inst2 = conceptmap.ConceptMap(js)
self.implConceptMap2(inst2)
def implConceptMap2(self, inst):
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("url")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value),
force_bytes("http://hl7.org/fhir"),
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].system), force_bytes("email")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].value),
force_bytes("<EMAIL>"),
)
self.assertEqual(inst.date.date, FHIRDate("2017-04-19T07:44:43+10:00").date)
self.assertEqual(inst.date.as_json(), "2017-04-19T07:44:43+10:00")
self.assertEqual(
force_bytes(inst.group[0].element[0].code), force_bytes("in-progress")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].code), force_bytes("active")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[1].code), force_bytes("on-hold")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].code),
force_bytes("suspended"),
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[2].code), force_bytes("completed")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].code),
force_bytes("completed"),
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].code), force_bytes("entered-in-error")
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].code),
force_bytes("nullified"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[4].code), force_bytes("stopped")
)
self.assertEqual(
force_bytes(inst.group[0].element[4].target[0].code), force_bytes("aborted")
)
self.assertEqual(
force_bytes(inst.group[0].element[4].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].source),
force_bytes("http://hl7.org/fhir/medication-admin-status"),
)
self.assertEqual(
force_bytes(inst.group[0].target),
force_bytes("http://hl7.org/fhir/v3/ActStatus"),
)
self.assertEqual(
force_bytes(inst.id), force_bytes("cm-medication-admin-status-v3")
)
self.assertEqual(
force_bytes(inst.name),
force_bytes("v3 map for MedicationAdministrationStatus"),
)
self.assertEqual(force_bytes(inst.publisher), force_bytes("HL7 (FHIR Project)"))
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.url),
force_bytes("http://hl7.org/fhir/ConceptMap/cm-medication-admin-status-v3"),
)
def testConceptMap3(self):
inst = self.instantiate_from("cm-medication-request-status-v3.json")
self.assertIsNotNone(inst, "Must have instantiated a ConceptMap instance")
self.implConceptMap3(inst)
js = inst.as_json()
self.assertEqual("ConceptMap", js["resourceType"])
inst2 = conceptmap.ConceptMap(js)
self.implConceptMap3(inst2)
def implConceptMap3(self, inst):
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("url")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value),
force_bytes("http://hl7.org/fhir"),
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].system), force_bytes("email")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].value),
force_bytes("<EMAIL>"),
)
self.assertEqual(inst.date.date, FHIRDate("2017-04-19T07:44:43+10:00").date)
self.assertEqual(inst.date.as_json(), "2017-04-19T07:44:43+10:00")
self.assertEqual(
force_bytes(inst.group[0].element[0].code), force_bytes("active")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].code), force_bytes("active")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[1].code), force_bytes("on-hold")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].code),
force_bytes("suspended"),
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[2].code), force_bytes("cancelled")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].code),
force_bytes("cancelled"),
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].code), force_bytes("completed")
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].code),
force_bytes("completed"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[4].code), force_bytes("entered-in-error")
)
self.assertEqual(
force_bytes(inst.group[0].element[4].target[0].code),
force_bytes("nullified"),
)
self.assertEqual(
force_bytes(inst.group[0].element[4].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[5].code), force_bytes("stopped")
)
self.assertEqual(
force_bytes(inst.group[0].element[5].target[0].code), force_bytes("aborted")
)
self.assertEqual(
force_bytes(inst.group[0].element[5].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[6].code), force_bytes("draft")
)
self.assertEqual(
force_bytes(inst.group[0].element[6].target[0].code), force_bytes("new")
)
self.assertEqual(
force_bytes(inst.group[0].element[6].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].source),
force_bytes("http://hl7.org/fhir/medication-request-status"),
)
self.assertEqual(
force_bytes(inst.group[0].target),
force_bytes("http://hl7.org/fhir/v3/ActStatus"),
)
self.assertEqual(
force_bytes(inst.id), force_bytes("cm-medication-request-status-v3")
)
self.assertEqual(
force_bytes(inst.name), force_bytes("v3 map for MedicationRequestStatus")
)
self.assertEqual(force_bytes(inst.publisher), force_bytes("HL7 (FHIR Project)"))
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.url),
force_bytes(
"http://hl7.org/fhir/ConceptMap/cm-medication-request-status-v3"
),
)
def testConceptMap4(self):
inst = self.instantiate_from("cm-observation-relationshiptypes-v3.json")
self.assertIsNotNone(inst, "Must have instantiated a ConceptMap instance")
self.implConceptMap4(inst)
js = inst.as_json()
self.assertEqual("ConceptMap", js["resourceType"])
inst2 = conceptmap.ConceptMap(js)
self.implConceptMap4(inst2)
def implConceptMap4(self, inst):
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("url")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value),
force_bytes("http://hl7.org/fhir"),
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].system), force_bytes("email")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].value),
force_bytes("<EMAIL>"),
)
self.assertEqual(inst.date.date, FHIRDate("2017-04-19T07:44:43+10:00").date)
self.assertEqual(inst.date.as_json(), "2017-04-19T07:44:43+10:00")
self.assertEqual(
force_bytes(inst.group[0].element[0].code), force_bytes("has-member")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].code), force_bytes("MBR")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[1].code), force_bytes("derived-from")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].code), force_bytes("DRIV")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[2].code), force_bytes("sequel-to")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].code), force_bytes("SEQL")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].code), force_bytes("replaces")
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].code), force_bytes("RPLC")
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[4].code), force_bytes("qualified-by")
)
self.assertEqual(
force_bytes(inst.group[0].element[4].target[0].code), force_bytes("QUALF")
)
self.assertEqual(
force_bytes(inst.group[0].element[4].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[5].code), force_bytes("interfered-by")
)
self.assertEqual(
force_bytes(inst.group[0].element[5].target[0].code), force_bytes("INTF")
)
self.assertEqual(
force_bytes(inst.group[0].element[5].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].source),
force_bytes("http://hl7.org/fhir/observation-relationshiptypes"),
)
self.assertEqual(
force_bytes(inst.group[0].target),
force_bytes("http://hl7.org/fhir/v3/ActRelationshipType"),
)
self.assertEqual(
force_bytes(inst.id), force_bytes("cm-observation-relationshiptypes-v3")
)
self.assertEqual(
force_bytes(inst.name),
force_bytes("v3 map for ObservationRelationshipType"),
)
self.assertEqual(force_bytes(inst.publisher), force_bytes("HL7 (FHIR Project)"))
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.url),
force_bytes(
"http://hl7.org/fhir/ConceptMap/cm-observation-relationshiptypes-v3"
),
)
def testConceptMap5(self):
inst = self.instantiate_from("cm-composition-status-v3.json")
self.assertIsNotNone(inst, "Must have instantiated a ConceptMap instance")
self.implConceptMap5(inst)
js = inst.as_json()
self.assertEqual("ConceptMap", js["resourceType"])
inst2 = conceptmap.ConceptMap(js)
self.implConceptMap5(inst2)
def implConceptMap5(self, inst):
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("url")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value),
force_bytes("http://hl7.org/fhir"),
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].system), force_bytes("email")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].value),
force_bytes("<EMAIL>"),
)
self.assertEqual(inst.date.date, FHIRDate("2017-04-19T07:44:43+10:00").date)
self.assertEqual(inst.date.as_json(), "2017-04-19T07:44:43+10:00")
self.assertEqual(
force_bytes(inst.group[0].element[0].code), force_bytes("preliminary")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].code), force_bytes("active")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].equivalence),
force_bytes("equivalent"),
)
self.assertEqual(
force_bytes(inst.group[0].element[1].code), force_bytes("final")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].code),
force_bytes("completed"),
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].equivalence),
force_bytes("wider"),
)
self.assertEqual(
force_bytes(inst.group[0].element[2].code), force_bytes("amended")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].code),
force_bytes("completed"),
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].equivalence),
force_bytes("wider"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].code), force_bytes("entered-in-error")
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].code),
force_bytes("nullified"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].equivalence),
force_bytes("equivalent"),
)
self.assertEqual(
force_bytes(inst.group[0].source),
force_bytes("http://hl7.org/fhir/composition-status"),
)
self.assertEqual(
force_bytes(inst.group[0].target),
force_bytes("http://hl7.org/fhir/v3/ActStatus"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("cm-composition-status-v3"))
self.assertEqual(
force_bytes(inst.name), force_bytes("v3 map for CompositionStatus")
)
self.assertEqual(force_bytes(inst.publisher), force_bytes("HL7 (FHIR Project)"))
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.url),
force_bytes("http://hl7.org/fhir/ConceptMap/cm-composition-status-v3"),
)
def testConceptMap6(self):
inst = self.instantiate_from("cm-contact-point-use-v2.json")
self.assertIsNotNone(inst, "Must have instantiated a ConceptMap instance")
self.implConceptMap6(inst)
js = inst.as_json()
self.assertEqual("ConceptMap", js["resourceType"])
inst2 = conceptmap.ConceptMap(js)
self.implConceptMap6(inst2)
def implConceptMap6(self, inst):
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("url")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value),
force_bytes("http://hl7.org/fhir"),
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].system), force_bytes("email")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].value),
force_bytes("<EMAIL>"),
)
self.assertEqual(inst.date.date, FHIRDate("2017-04-19T07:44:43+10:00").date)
self.assertEqual(inst.date.as_json(), "2017-04-19T07:44:43+10:00")
self.assertEqual(
force_bytes(inst.group[0].element[0].code), force_bytes("home")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].code), force_bytes("PRN")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].equivalence),
force_bytes("wider"),
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[1].code), force_bytes("ORN")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[1].equivalence),
force_bytes("wider"),
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[2].code), force_bytes("VHN")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[2].equivalence),
force_bytes("wider"),
)
self.assertEqual(
force_bytes(inst.group[0].element[1].code), force_bytes("work")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].code), force_bytes("WPN")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].equivalence),
force_bytes("equivalent"),
)
self.assertEqual(
force_bytes(inst.group[0].element[2].code), force_bytes("mobile")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].code), force_bytes("PRS")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].equivalence),
force_bytes("equivalent"),
)
self.assertEqual(
force_bytes(inst.group[0].source),
force_bytes("http://hl7.org/fhir/contact-point-use"),
)
self.assertEqual(
force_bytes(inst.group[0].target),
force_bytes("http://hl7.org/fhir/v2/0201"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("cm-contact-point-use-v2"))
self.assertEqual(
force_bytes(inst.name), force_bytes("v2 map for ContactPointUse")
)
self.assertEqual(force_bytes(inst.publisher), force_bytes("HL7 (FHIR Project)"))
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.url),
force_bytes("http://hl7.org/fhir/ConceptMap/cm-contact-point-use-v2"),
)
def testConceptMap7(self):
inst = self.instantiate_from("cm-contact-point-use-v3.json")
self.assertIsNotNone(inst, "Must have instantiated a ConceptMap instance")
self.implConceptMap7(inst)
js = inst.as_json()
self.assertEqual("ConceptMap", js["resourceType"])
inst2 = conceptmap.ConceptMap(js)
self.implConceptMap7(inst2)
def implConceptMap7(self, inst):
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("url")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value),
force_bytes("http://hl7.org/fhir"),
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].system), force_bytes("email")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].value),
force_bytes("<EMAIL>"),
)
self.assertEqual(inst.date.date, FHIRDate("2017-04-19T07:44:43+10:00").date)
self.assertEqual(inst.date.as_json(), "2017-04-19T07:44:43+10:00")
self.assertEqual(
force_bytes(inst.group[0].element[0].code), force_bytes("home")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].code), force_bytes("H")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[1].code), force_bytes("work")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].code), force_bytes("WP")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[2].code), force_bytes("temp")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].code), force_bytes("TMP")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(force_bytes(inst.group[0].element[3].code), force_bytes("old"))
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].code), force_bytes("OLD")
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].comment),
force_bytes("Old and Bad"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].equivalence),
force_bytes("narrower"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[1].code), force_bytes("BAD")
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[1].comment),
force_bytes("Old and Bad"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[1].equivalence),
force_bytes("narrower"),
)
self.assertEqual(
force_bytes(inst.group[0].element[4].code), force_bytes("mobile")
)
self.assertEqual(
force_bytes(inst.group[0].element[4].target[0].code), force_bytes("MC")
)
self.assertEqual(
force_bytes(inst.group[0].element[4].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].source),
force_bytes("http://hl7.org/fhir/contact-point-use"),
)
self.assertEqual(
force_bytes(inst.group[0].target),
force_bytes("http://hl7.org/fhir/v3/AddressUse"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("cm-contact-point-use-v3"))
self.assertEqual(
force_bytes(inst.name), force_bytes("v3 map for ContactPointUse")
)
self.assertEqual(force_bytes(inst.publisher), force_bytes("HL7 (FHIR Project)"))
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.url),
force_bytes("http://hl7.org/fhir/ConceptMap/cm-contact-point-use-v3"),
)
def testConceptMap8(self):
inst = self.instantiate_from("cm-address-use-v2.json")
self.assertIsNotNone(inst, "Must have instantiated a ConceptMap instance")
self.implConceptMap8(inst)
js = inst.as_json()
self.assertEqual("ConceptMap", js["resourceType"])
inst2 = conceptmap.ConceptMap(js)
self.implConceptMap8(inst2)
def implConceptMap8(self, inst):
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("url")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value),
force_bytes("http://hl7.org/fhir"),
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].system), force_bytes("email")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].value),
force_bytes("<EMAIL>"),
)
self.assertEqual(inst.date.date, FHIRDate("2017-04-19T07:44:43+10:00").date)
self.assertEqual(inst.date.as_json(), "2017-04-19T07:44:43+10:00")
self.assertEqual(
force_bytes(inst.group[0].element[0].code), force_bytes("home")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].code), force_bytes("H")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].equivalence),
force_bytes("equivalent"),
)
self.assertEqual(
force_bytes(inst.group[0].element[1].code), force_bytes("work")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].code), force_bytes("O")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].equivalence),
force_bytes("equivalent"),
)
self.assertEqual(
force_bytes(inst.group[0].element[2].code), force_bytes("temp")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].code), force_bytes("C")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].equivalence),
force_bytes("equivalent"),
)
self.assertEqual(force_bytes(inst.group[0].element[3].code), force_bytes("old"))
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].code), force_bytes("BA")
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].comment),
force_bytes("unclear about old addresses"),
)
self.assertEqual(
force_bytes(inst.group[0].element[3].target[0].equivalence),
force_bytes("wider"),
)
self.assertEqual(
force_bytes(inst.group[0].source),
force_bytes("http://hl7.org/fhir/address-use"),
)
self.assertEqual(
force_bytes(inst.group[0].target),
force_bytes("http://hl7.org/fhir/v2/0190"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("cm-address-use-v2"))
self.assertEqual(force_bytes(inst.name), force_bytes("v2 map for AddressUse"))
self.assertEqual(force_bytes(inst.publisher), force_bytes("HL7 (FHIR Project)"))
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.url),
force_bytes("http://hl7.org/fhir/ConceptMap/cm-address-use-v2"),
)
def testConceptMap9(self):
inst = self.instantiate_from("cm-detectedissue-severity-v3.json")
self.assertIsNotNone(inst, "Must have instantiated a ConceptMap instance")
self.implConceptMap9(inst)
js = inst.as_json()
self.assertEqual("ConceptMap", js["resourceType"])
inst2 = conceptmap.ConceptMap(js)
self.implConceptMap9(inst2)
def implConceptMap9(self, inst):
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("url")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value),
force_bytes("http://hl7.org/fhir"),
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].system), force_bytes("email")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].value),
force_bytes("<EMAIL>"),
)
self.assertEqual(inst.date.date, FHIRDate("2017-04-19T07:44:43+10:00").date)
self.assertEqual(inst.date.as_json(), "2017-04-19T07:44:43+10:00")
self.assertEqual(
force_bytes(inst.group[0].element[0].code), force_bytes("high")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].code), force_bytes("H")
)
self.assertEqual(
force_bytes(inst.group[0].element[0].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].element[1].code), force_bytes("moderate")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].code), force_bytes("M")
)
self.assertEqual(
force_bytes(inst.group[0].element[1].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(force_bytes(inst.group[0].element[2].code), force_bytes("low"))
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].code), force_bytes("L")
)
self.assertEqual(
force_bytes(inst.group[0].element[2].target[0].equivalence),
force_bytes("equal"),
)
self.assertEqual(
force_bytes(inst.group[0].source),
force_bytes("http://hl7.org/fhir/detectedissue-severity"),
)
self.assertEqual(
force_bytes(inst.group[0].target),
force_bytes("http://hl7.org/fhir/v3/ObservationValue"),
)
self.assertEqual(
force_bytes(inst.id), force_bytes("cm-detectedissue-severity-v3")
)
self.assertEqual(
force_bytes(inst.name), force_bytes("v3 map for DetectedIssueSeverity")
)
self.assertEqual(force_bytes(inst.publisher), force_bytes("HL7 (FHIR Project)"))
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.url),
force_bytes("http://hl7.org/fhir/ConceptMap/cm-detectedissue-severity-v3"),
)
def testConceptMap10(self):
inst = self.instantiate_from("conceptmap-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ConceptMap | |
<filename>src/static.py<gh_stars>0
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from warnings import warn
from datetime import date
from ftplib import FTP
import libarchive.public
import requests
import json
import csv
import re
import os
from .utils_static import *
from .parser import Parser
from .utils import *
# List of rail stops used by S× lines. Other rail stops are ignored.
# Keys appear to be 4-digit ZTM stop-group ids, as used in Converter.stops_map.
ACTIVE_RAIL_STATIONS = {
    "4900", "4901", "7900", "7901", "7902", "2901", "2900", "2918", "2917", "2916", "2915",
    "2909", "2908", "2907", "2906", "2905", "2904", "2903", "2902", "4902", "4903", "4923",
    "4904", "4905", "2914", "2913", "2912", "2911", "2910", "4919", "3901", "4918", "4917",
    "4913", "1910", "1909", "1908", "1907", "1906", "1905", "1904", "1903", "1902", "1901",
    "7903", "5908", "5907", "5904", "5903", "5902"
}
# Hand-maintained overrides for stop-group names; seeds Converter.stop_names.
# NOTE(review): the "<NAME>" values look like anonymized placeholders from the
# data export -- restore the real names before shipping.
PROPER_STOP_NAMES = {
    "4040": "<NAME>", "1484": "<NAME>",
    "2005": "Praga-Płd. - Ratusz", "1541": "<NAME>",
    "5001": "Połczyńska - Parking P+R", "2296": "<NAME>",
    "6201": "<NAME>", "1226": "Mańki-Wojody",
}
class Converter:
def __init__(self, version="", shapes=False, clear_shape_errors=True):
clear_directory("gtfs")
if clear_shape_errors: clear_directory("shape-errors")
# Stop info
self.missing_stops = requests.get("https://gist.githubusercontent.com/MKuranowski/0ca97a012d541899cb1f859cd0bab2e7/raw/missing_stops.json").json()
self.rail_platforms = requests.get("https://gist.githubusercontent.com/MKuranowski/0ca97a012d541899cb1f859cd0bab2e7/raw/rail_platforms.json").json()
self.incorrect_stops = []
self.unused_stops = list(self.missing_stops.keys())
self.stops_map = {}
self.stop_names = PROPER_STOP_NAMES.copy()
# File handler
self.version = None
self.reader = None
self.parser = None
# Get shape generator instance
if isinstance(shapes, Shaper):
self.shapes = shapes
self.shapes.open()
elif shapes:
self.shapes = Shaper()
self.shapes.open()
else:
self.shapes = None
self.get_file(version)
    def get_file(self, version):
        """Download and decompress a ZTM schedule archive.

        NOTE(review): despite the old docstring, nothing is returned. Results
        are stored on the instance: ``self.reader`` (NamedTemporaryFile with the
        decompressed TXT, rewound to the start), ``self.version`` (feed version,
        e.g. "RA200101") and ``self.parser`` (a Parser over the reader).

        version: exact feed version to fetch; when empty, pick the newest file
        valid today by walking back one day at a time.
        Raises KeyError when an explicitly requested version is absent, and
        FileNotFoundError when the archive holds no TXT schedule file.
        """
        # Login to ZTM server and get the list of files
        server = FTP("rozklady.ztm.waw.pl")
        server.login()
        files = [f for f in server.nlst() if re.fullmatch(r"RA\d{6}\.7z", f)]
        # If user has requested an exact version, check if it's on the server
        if version:
            fname = "{}.7z".format(version)
            if fname not in files:
                raise KeyError("Requested file version ({}) not found on ZTM server".format(version))
        # If not, find one valid today
        else:
            fdate = date.today()
            while True:
                fname = fdate.strftime("RA%y%m%d.7z")
                if fname in files: break
                # NOTE(review): timedelta is not imported from datetime in this
                # module -- it must come from `from .utils import *`; verify.
                else: fdate -= timedelta(days=1)
        # Create temporary files for storing the 7z archive and the decompressed TXT file
        temp_arch = NamedTemporaryFile(mode="w+b", delete=False)
        self.reader = NamedTemporaryFile(mode="w+t", delete=True)
        try:
            # Download the file
            server.retrbinary("RETR " + fname, temp_arch.write)
            server.quit()
            temp_arch.close()
            # Open the temporary archive inside
            with libarchive.public.file_reader(temp_arch.name) as arch:
                # Iterate over each file inside the archive
                for arch_file in arch:
                    # Assert the file inside the archive is the TXT file we're looking for
                    name_match = re.fullmatch(r"(RA\d{6})\.TXT", arch_file.pathname, flags=re.IGNORECASE)
                    if not name_match:
                        continue
                    # Save the feed version
                    self.version = name_match[1].upper()
                    # Decompress the TXT file block by block and save it to the reader
                    for block in arch_file.get_blocks():
                        self.reader.write(str(block, "cp1250"))
                    self.reader.seek(0)
                    # only one TXT file should be inside the archive
                    break
                else:
                    raise FileNotFoundError("no schedule file found inside archive {}".format(fname))
        # Remove the temp arch file at the end
        finally:
            os.remove(temp_arch.name)
        self.parser = Parser(self.reader)
def calendar(self):
file = open("gtfs/calendar_dates.txt", mode="w", encoding="utf8", newline="")
writer = csv.writer(file)
writer.writerow(["service_id", "date", "exception_type"])
print("\033[1A\033[K" + "Parsing calendars (KA)")
for day in self.parser.parse_ka():
for service_id in day["services"]:
writer.writerow([service_id, day["date"], "1"])
file.close()
def _stopgroup_railway(self, writer, group_id, group_name):
    """Convert one railway stop-group (ZTM "ZP" entry) into GTFS stop rows.

    Writes either a single station stop, or a station hub plus one row per
    platform, depending on self.rail_platforms data.  Also fills
    self.stops_map (ZTM stake id -> GTFS stop id, or None to drop the stake).

    Parameters:
        writer: csv.writer positioned on gtfs/stops.txt
        group_id: ZTM stop-group id
        group_name: stop-group name (may be overridden by rail_platforms data)
    """
    # Load ZTM stakes from PR section
    # self.parser.parse_pr() has to be called to skip to the next entry in ZP
    stakes = list(self.parser.parse_pr())
    # If group is not in ACTIVE_RAIL_STATIONS, drop all of its stakes
    if group_id not in ACTIVE_RAIL_STATIONS:
        for s in stakes: self.stops_map[s["id"]] = None
        return
    # Basic info about the station (empty dict when unknown)
    station_info = self.rail_platforms.get(group_id, {})
    # If this station is not in rail_platforms, average all stake positions
    # in order to calculate an approximate position of the station
    if not station_info:
        stake_positions = [(i["lat"], i["lon"]) for i in stakes]
        stake_positions = [i for i in stake_positions if i[0] and i[1]]
        if stake_positions:
            station_lat, station_lon = avg_position(stake_positions)
        # No position for the station: drop it and remember it for the report
        else:
            for s in stakes: self.stops_map[s["id"]] = None
            self.incorrect_stops.append(group_id)
            return
    # Otherwise get the position (and canonical name) from rail_platforms data
    else:
        station_lat, station_lon = map(float, station_info["pos"].split(","))
        group_name = station_info["name"]
    # One-platform station, or no platform data: emit a single plain stop
    if (not station_info) or station_info["oneplatform"]:
        # Save position for shapes
        if self.shapes:
            self.shapes.stops[group_id] = station_lat, station_lon
        # All stakes map to the single station stop
        for stake in stakes:
            self.stops_map[stake["id"]] = group_id
        # Output info to GTFS
        writer.writerow([
            group_id, group_name, station_lat, station_lon,
            "", "", station_info.get("ibnr_code", ""),
            "", station_info.get("wheelchair", 0),
        ])
    # Multi-platform station: hub row (location_type=1) + one row per platform
    else:
        # Hub entry
        # NOTE(review): direct ["ibnr_code"] access here (vs .get with a
        # default above) raises KeyError if the key is missing — confirm
        # multi-platform rail_platforms entries always carry ibnr_code.
        writer.writerow([
            group_id, group_name, station_lat, station_lon,
            "1", "", station_info["ibnr_code"],
            "", station_info.get("wheelchair", 0),
        ])
        # Platforms (location_type=0, parented to the hub)
        for platform_id, platform_pos in station_info["platforms"].items():
            platform_lat, platform_lon = map(float, platform_pos.split(","))
            # platform ids look like "<group>p<code>"; take the code part
            platform_code = platform_id.split("p")[1]
            platform_name = f"{group_name} peron {platform_code}"
            # Save position for shapes
            if self.shapes:
                self.shapes.stops[platform_id] = platform_lat, platform_lon
            # Output to GTFS
            writer.writerow([
                platform_id, platform_name, platform_lat, platform_lon,
                "0", group_id, station_info["ibnr_code"],
                platform_code, station_info.get("wheelchair", 0),
            ])
        # Stops → Platforms mapping
        for stake in stakes:
            # Stake explicitly assigned to a platform in rail_platforms
            if stake["id"] in station_info["stops"]:
                self.stops_map[stake["id"]] = station_info["stops"][stake["id"]]
            # Unknown stake (two ids are known exceptions and stay silent)
            elif stake["id"] not in {"491303", "491304"}:
                warn(f'No platform defined for railway PR entry {group_name} {stake["id"]}')
def _stopgroup_normal(self, writer, group_id, group_name):
    """Convert one ordinary (non-railway) stop group into GTFS stop rows.

    Physical stakes with a known position are written to GTFS; "virtual"
    stakes (code starting with "8") are not written but mapped onto a
    physical stake via self.stops_map.  Stakes without a usable position are
    mapped to None and recorded in self.incorrect_stops.

    Parameters:
        writer: csv.writer positioned on gtfs/stops.txt
        group_id: ZTM stop-group id
        group_name: stop-group display name
    """
    # Load ZTM stakes from PR section
    # self.parser.parse_pr() has to be called to skip to the next entry in ZP
    stakes = list(self.parser.parse_pr())
    # Split virtual stakes (code starting with "8") from physical stakes
    virtual_stakes = [i for i in stakes if i["code"][0] == "8"]
    normal_stakes = [i for i in stakes if i["code"][0] != "8"]
    # Fill in missing positions from the manually curated missing_stops table
    for stake in normal_stakes:
        # fix: identity check "is None" instead of "== None" (PEP 8);
        # the dicts are mutated in place, so no index reassignment is needed
        if (stake["lat"] is None or stake["lon"] is None) and \
                stake["id"] in self.missing_stops:
            self.unused_stops.remove(stake["id"])
            stake["lat"], stake["lon"] = self.missing_stops[stake["id"]]
    position_stakes = [i for i in normal_stakes if i["lat"] and i["lon"]]
    # Convert normal stakes
    for stake in normal_stakes:
        # Position defined
        if stake["lat"] and stake["lon"]:
            # Save position for shapes
            if self.shapes:
                self.shapes.stops[stake["id"]] = stake["lat"], stake["lon"]
            # Output info to GTFS
            writer.writerow([
                stake["id"], f'{group_name} {stake["code"]}',
                stake["lat"], stake["lon"],
                "", "", "", "", stake["wheelchair"],
            ])
        # Position undefined: drop the stake and remember it for the report
        else:
            self.stops_map[stake["id"]] = None
            self.incorrect_stops.append(stake["id"])
    # Convert virtual stakes: map each onto a physical stake
    for stake in virtual_stakes:
        stakes_with_same_pos = [i["id"] for i in position_stakes if
                                (i["lat"], i["lon"]) == (stake["lat"], stake["lon"])]
        # assumes stake codes are at least 2 characters long — TODO confirm
        stakes_with_same_code = [i["id"] for i in position_stakes if
                                 i["code"][1] == stake["code"][1]]
        # Special case: Metro Młociny 88 → Metro Młociny 28
        if stake["id"] == "605988":
            counterpart_available = [i for i in position_stakes if
                                     i["id"] == "605928"]
            # If 605928 is present, map 605988 to it.
            # Otherwise fall back on the default matching options below.
            if counterpart_available:
                self.stops_map["605988"] = "605928"
                continue
        # Prefer a stake at exactly the same position
        if stakes_with_same_pos:
            self.stops_map[stake["id"]] = stakes_with_same_pos[0]
        # Then a stake sharing the second code digit
        elif stakes_with_same_code:
            self.stops_map[stake["id"]] = stakes_with_same_code[0]
        # Unable to find a matching stake
        else:
            self.stops_map[stake["id"]] = None
            self.incorrect_stops.append(stake["id"])
def stops(self):
    """Write gtfs/stops.txt from the ZTM "ZP" (stop groups) section.

    Normalizes group/town names, caches them in self.stop_names, and
    dispatches each group to the railway or normal stake converter based on
    the group-id prefix.
    """
    # "with" guarantees the file is closed even if a parser error occurs
    # (the original only closed it on the happy path)
    with open("gtfs/stops.txt", mode="w", encoding="utf8", newline="") as file:
        writer = csv.writer(file)
        writer.writerow(["stop_id", "stop_name", "stop_lat", "stop_lon", "location_type", "parent_station", "stop_IBNR", "platform_code", "wheelchair_boarding"])
        print("\033[1A\033[K" + "Parsing stops (ZP)")
        for group in self.parser.parse_zp():
            # Fix town name for Kampinoski PN
            if group["town"] == "Kampinoski Pn":
                group["town"] = "Kampinoski PN"
            # Add name to self.stop_names if it's missing,
            # otherwise reuse the previously normalized name
            if group["id"] not in self.stop_names:
                group["name"] = normal_stop_name(group["name"])
                self.stop_names[group["id"]] = group["name"]
            else:
                group["name"] = self.stop_names[group["id"]]
            # Prepend the town name to the stop name where required
            if should_town_be_added_to_name(group):
                group["name"] = f'{group["town"]} {group["name"]}'
                self.stop_names[group["id"]] = group["name"]
            # Parse stakes: ids with "90"/"91"/"92" in positions 1-2 are railway
            if group["id"][1:3] in {"90", "91", "92"}:
                self._stopgroup_railway(writer, group["id"], group["name"])
            else:
                self._stopgroup_normal(writer, group["id"], group["name"])
def routes_schedules(self):
file_routes = open("gtfs/routes.txt", mode="w", encoding="utf8", newline="")
writer_routes = csv.writer(file_routes)
writer_routes.writerow(["agency_id", "route_id", "route_short_name", "route_long_name", "route_type", "route_color", "route_text_color", "route_sort_order"])
file_trips = open("gtfs/trips.txt", mode="w", encoding="utf8", newline="")
writer_trips = csv.writer(file_trips)
writer_trips.writerow(["route_id", "service_id", "trip_id", "trip_headsign", "direction_id", "shape_id", "exceptional", "wheelchair_accessible", "bikes_allowed"])
file_times = open("gtfs/stop_times.txt", mode="w", encoding="utf8", newline="")
writer_times = csv.writer(file_times)
writer_times.writerow(["trip_id", "arrival_time", "departure_time", "stop_id", "stop_sequence", "pickup_type", "drop_off_type", "shape_dist_traveled"])
route_sort_order = 1 # Leave first 2 blank for M1 and M2 routes
route_id = None
print("\033[1A\033[K" + "Parsing routes & schedules (LL)")
for route in self.parser.parse_ll():
route_id, route_desc = route["id"], route["desc"]
# Ignore Koleje Mazowieckie & Warszawska Kolej Dojazdowa routes
if route_id.startswith("R") or route_id.startswith("WKD"):
self.parser.skip_to_section("WK", end=True)
continue
print("\033[1A\033[K" + f"Parsing routes & schedules (LL) - {route_id}")
route_sort_order += 1
route_type, route_color, route_text_color = route_color_type(route_id, route_desc)
# Data loaded from TR section
route_name = ""
direction_stops = {"0": set(), "1": set()}
on_demand_stops = set()
inaccesible_trips = set()
variant_directions = | |
in (get_previous_epoch(state), get_current_epoch(state))
assert data.target.epoch == compute_epoch_at_slot(data.slot)
assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
committee = get_beacon_committee(state, data.slot, data.index)
assert len(attestation.aggregation_bits) == len(committee)
pending_attestation = PendingAttestation(
data=data,
aggregation_bits=attestation.aggregation_bits,
inclusion_delay=state.slot - data.slot,
proposer_index=get_beacon_proposer_index(state),
)
if data.target.epoch == get_current_epoch(state):
assert data.source == state.current_justified_checkpoint
state.current_epoch_attestations.append(pending_attestation)
else:
assert data.source == state.previous_justified_checkpoint
state.previous_epoch_attestations.append(pending_attestation)
# Verify signature
# assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
def process_deposit(state: BeaconState, deposit: Deposit) -> None:
    """Apply one Eth1 deposit to the beacon state.

    Registers a brand-new validator (after checking the deposit signature)
    or tops up the balance of an existing one.  NOTE(review): the Merkle
    inclusion-proof check below is commented out in this variant, so deposit
    proofs are NOT verified here — confirm this is intentional.
    """
    # Verify the Merkle branch (disabled in this variant)
    # assert is_valid_merkle_branch(
    #     leaf=hash_tree_root(deposit.data),
    #     branch=deposit.proof,
    #     depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1,  # Add 1 for the List length mix-in
    #     index=state.eth1_deposit_index,
    #     root=state.eth1_data.deposit_root,
    # )
    # Deposits must be processed in order
    state.eth1_deposit_index += 1
    pubkey = deposit.data.pubkey
    amount = deposit.data.amount
    validator_pubkeys = [v.pubkey for v in state.validators]
    if pubkey not in validator_pubkeys:
        # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
        deposit_message = DepositMessage(
            pubkey=deposit.data.pubkey,
            withdrawal_credentials=deposit.data.withdrawal_credentials,
            amount=deposit.data.amount,
        )
        domain = compute_domain(DOMAIN_DEPOSIT)  # Fork-agnostic domain since deposits are valid across forks
        signing_root = compute_signing_root(deposit_message, domain)
        if not bls.Verify(pubkey, signing_root, deposit.data.signature):
            # Invalid signature: silently skip the deposit (not an error);
            # the deposit index above has already been advanced
            return
        # Add validator and balance entries
        state.validators.append(Validator(
            pubkey=pubkey,
            withdrawal_credentials=deposit.data.withdrawal_credentials,
            activation_eligibility_epoch=FAR_FUTURE_EPOCH,
            activation_epoch=FAR_FUTURE_EPOCH,
            exit_epoch=FAR_FUTURE_EPOCH,
            withdrawable_epoch=FAR_FUTURE_EPOCH,
            # round the deposit down to a whole increment, capped at the max
            effective_balance=min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE),
        ))
        state.balances.append(amount)
    else:
        # Known validator: increase its balance by the deposit amount
        index = ValidatorIndex(validator_pubkeys.index(pubkey))
        increase_balance(state, index, amount)
def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None:
    """Validate a signed voluntary exit and initiate the validator's exit.

    Raises AssertionError when any validity condition fails; on success the
    validator's exit is scheduled via initiate_validator_exit.
    """
    voluntary_exit = signed_voluntary_exit.message
    validator = state.validators[voluntary_exit.validator_index]
    # Verify the validator is active
    assert is_active_validator(validator, get_current_epoch(state))
    # Verify exit has not been initiated
    assert validator.exit_epoch == FAR_FUTURE_EPOCH
    # Exits must specify an epoch when they become valid; they are not valid before then
    assert get_current_epoch(state) >= voluntary_exit.epoch
    # Verify the validator has been active long enough
    assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
    # Verify signature
    domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
    signing_root = compute_signing_root(voluntary_exit, domain)
    assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature)
    # Initiate exit
    initiate_validator_exit(state, voluntary_exit.validator_index)
@dataclass(eq=True, frozen=True)
class LatestMessage(object):
    """Latest attestation (LMD GHOST vote) recorded for a validator.

    Frozen and eq so instances are hashable and comparable by value.
    """
    epoch: Epoch  # target epoch of the validator's most recent vote
    root: Root    # beacon block root the validator voted for
@dataclass
class Store(object):
    """Fork-choice store: all mutable state the fork choice operates on.

    Holds the clock, the checkpoints currently followed, and caches of
    blocks, post-states and per-validator latest votes used by get_head().
    """
    time: uint64          # current time (seconds; compared via SECONDS_PER_SLOT)
    genesis_time: uint64  # chain genesis time (seconds)
    justified_checkpoint: Checkpoint       # checkpoint the fork choice runs from
    finalized_checkpoint: Checkpoint       # latest finalized checkpoint
    best_justified_checkpoint: Checkpoint  # pending better checkpoint, applied on epoch boundary (see on_tick)
    blocks: Dict[Root, BeaconBlock] = field(default_factory=dict)        # block root -> block
    block_states: Dict[Root, BeaconState] = field(default_factory=dict)  # block root -> post-state
    checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)  # checkpoint -> state cache
    latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)  # validator -> latest vote
def get_forkchoice_store(anchor_state: BeaconState) -> Store:
    """Initialise a fork-choice ``Store`` from an anchor (e.g. genesis) state.

    The anchor block/state pair seeds the block tree; both the justified and
    finalized checkpoints start at the anchor.
    """
    anchor_block_header = anchor_state.latest_block_header.copy()
    if anchor_block_header.state_root == Bytes32():
        # the stored header carries an empty state root until the next slot;
        # fill it in so the anchor root below is the true block root
        anchor_block_header.state_root = hash_tree_root(anchor_state)
    anchor_root = hash_tree_root(anchor_block_header)
    anchor_epoch = get_current_epoch(anchor_state)
    justified_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
    finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
    return Store(
        # clock starts at the anchor slot's wall-clock time
        time=anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot,
        genesis_time=anchor_state.genesis_time,
        justified_checkpoint=justified_checkpoint,
        finalized_checkpoint=finalized_checkpoint,
        best_justified_checkpoint=justified_checkpoint,
        blocks={anchor_root: anchor_block_header},
        block_states={anchor_root: anchor_state.copy()},
        checkpoint_states={justified_checkpoint: anchor_state.copy()},
    )
def get_slots_since_genesis(store: Store) -> int:
    """Number of whole slots elapsed between genesis and the store's clock."""
    elapsed_seconds = store.time - store.genesis_time
    return elapsed_seconds // SECONDS_PER_SLOT
def get_current_slot(store: Store) -> Slot:
    """Slot corresponding to the store's current time."""
    slots_elapsed = get_slots_since_genesis(store)
    return Slot(GENESIS_SLOT + slots_elapsed)
def compute_slots_since_epoch_start(slot: Slot) -> int:
    """Offset of ``slot`` from the first slot of its epoch."""
    epoch_start = compute_start_slot_at_epoch(compute_epoch_at_slot(slot))
    return slot - epoch_start
def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
    """Walk ``root``'s ancestry and return the ancestor at ``slot``.

    Iterative form of the recursive definition: follow parent links while
    the current block is newer than the queried slot.
    """
    block = store.blocks[root]
    while block.slot > slot:
        root = block.parent_root
        block = store.blocks[root]
    # block.slot <= slot: either an exact match, or root predates the queried
    # slot (a skip slot) — in both cases this root is the answer.
    return root
def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
    """Total effective balance of active validators whose latest vote
    descends from ``root`` (i.e. supports it under LMD GHOST)."""
    state = store.checkpoint_states[store.justified_checkpoint]
    block_slot = store.blocks[root].slot
    total = 0
    for index in get_active_validator_indices(state, get_current_epoch(state)):
        message = store.latest_messages.get(index)
        # a validator counts only if it has voted and its vote's chain
        # passes through ``root`` at the block's slot
        if message is not None and get_ancestor(store, message.root, block_slot) == root:
            total += state.validators[index].effective_balance
    return Gwei(total)
def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool:
    """Recursively copy viable branches under ``block_root`` into ``blocks``.

    A branch is viable when some leaf's state agrees with the store's
    justified and finalized checkpoints.  Returns True when this subtree is
    viable (and has been added to ``blocks``), False otherwise.
    """
    block = store.blocks[block_root]
    children = [
        root for root in store.blocks.keys()
        if store.blocks[root].parent_root == block_root
    ]
    # If any children branches contain expected finalized/justified checkpoints,
    # add to filtered block-tree and signal viability to parent.
    if any(children):
        filter_block_tree_result = [filter_block_tree(store, child, blocks) for child in children]
        if any(filter_block_tree_result):
            blocks[block_root] = block
            return True
        return False
    # If leaf block, check finalized/justified checkpoints as matching latest.
    head_state = store.block_states[block_root]
    # at GENESIS_EPOCH there is no real justification/finalization yet,
    # so those checks are vacuously satisfied
    correct_justified = (
        store.justified_checkpoint.epoch == GENESIS_EPOCH
        or head_state.current_justified_checkpoint == store.justified_checkpoint
    )
    correct_finalized = (
        store.finalized_checkpoint.epoch == GENESIS_EPOCH
        or head_state.finalized_checkpoint == store.finalized_checkpoint
    )
    # If expected finalized/justified, add to viable block-tree and signal viability to parent.
    if correct_justified and correct_finalized:
        blocks[block_root] = block
        return True
    # Otherwise, branch not viable
    return False
def get_filtered_block_tree(store: Store) -> Dict[Root, BeaconBlock]:
    """Return the block tree rooted at the justified checkpoint, restricted
    to branches whose leaf states agree with the store's justified and
    finalized checkpoints."""
    viable_blocks: Dict[Root, BeaconBlock] = {}
    filter_block_tree(store, store.justified_checkpoint.root, viable_blocks)
    return viable_blocks
def get_head(store: Store) -> Root:
    """Run LMD-GHOST over the viable block tree and return the head root."""
    # Restrict the walk to branches consistent with justified/finalized info
    viable = get_filtered_block_tree(store)
    head = store.justified_checkpoint.root
    justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
    while True:
        candidates = [
            root for root, block in viable.items()
            if block.parent_root == head and block.slot > justified_slot
        ]
        if not candidates:
            # no viable child: current node is the head
            return head
        # Greedily descend into the child with the most attesting balance;
        # ties are broken lexicographically by root.
        head = max(candidates, key=lambda root: (get_latest_attesting_balance(store, root), root))
def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: Checkpoint) -> bool:
    """Decide whether a conflicting justified checkpoint may be adopted now.

    Bouncing-attack mitigation: a new justified checkpoint is accepted
    immediately only in the early slots of the epoch, or when it descends
    from the currently justified checkpoint; otherwise adoption is deferred
    to the next epoch boundary.
    See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114
    """
    # Early in the epoch: always safe to switch
    if compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED:
        return True
    # Otherwise only accept checkpoints on the currently justified chain
    justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
    ancestor = get_ancestor(store, new_justified_checkpoint.root, justified_slot)
    return ancestor == store.justified_checkpoint.root
def validate_on_attestation(store: Store, attestation: Attestation) -> None:
    """Validate an attestation before it may influence the fork choice.

    Raises AssertionError when the attestation must be rejected or its
    consideration delayed (future epoch/slot, unknown blocks, or an
    inconsistent FFG/LMD vote).
    """
    target = attestation.data.target
    # Attestations must be from the current or previous epoch
    current_epoch = compute_epoch_at_slot(get_current_slot(store))
    # Use GENESIS_EPOCH for previous when genesis to avoid underflow
    previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
    # If attestation target is from a future epoch, delay consideration until the epoch arrives
    assert target.epoch in [current_epoch, previous_epoch]
    # The target epoch must match the epoch of the attested slot
    assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
    # Attestation target must be a known block. If the target block is unknown, delay consideration until it is found
    assert target.root in store.blocks
    # Attestations must be for a known block. If block is unknown, delay consideration until the block is found
    assert attestation.data.beacon_block_root in store.blocks
    # Attestations must not be for blocks in the future. If not, the attestation should not be considered
    assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot
    # FFG and LMD vote must be consistent with each other: the FFG target
    # must be the LMD vote's ancestor at the target epoch's start slot
    target_slot = compute_start_slot_at_epoch(target.epoch)
    assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot)
    # Attestations can only affect the fork choice of subsequent slots.
    # Delay consideration in the fork choice until their slot is in the past.
    assert get_current_slot(store) >= attestation.data.slot + 1
def store_target_checkpoint_state(store: Store, target: Checkpoint) -> None:
    """Cache the beacon state for ``target`` in store.checkpoint_states.

    NOTE(review): the slot-advancing process_slots call is commented out in
    this variant, so the target block's state is cached as-is rather than
    advanced to the epoch start slot — confirm this shortcut is intentional.
    """
    # Store target checkpoint state if not yet seen
    if target not in store.checkpoint_states:
        base_state = store.block_states[target.root].copy()
        # process_slots(base_state, compute_start_slot_at_epoch(target.epoch))
        store.checkpoint_states[target] = base_state
def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None:
    """Record ``attestation`` as the latest LMD vote for each attesting
    validator, keeping only the vote with the highest target epoch."""
    vote_epoch = attestation.data.target.epoch
    vote_root = attestation.data.beacon_block_root
    for index in attesting_indices:
        known = store.latest_messages.get(index)
        # overwrite only when this vote is strictly newer (or first seen)
        if known is None or vote_epoch > known.epoch:
            store.latest_messages[index] = LatestMessage(epoch=vote_epoch, root=vote_root)
def on_tick(store: Store, time: uint64) -> None:
    """Advance the store's clock and, on an epoch boundary, promote a
    pending better justified checkpoint."""
    slot_before = get_current_slot(store)
    store.time = time
    slot_after = get_current_slot(store)
    # Only act when the tick crossed into the first slot of a new epoch
    crossed_epoch_boundary = (
        slot_after > slot_before
        and compute_slots_since_epoch_start(slot_after) == 0
    )
    if not crossed_epoch_boundary:
        return
    # Update store.justified_checkpoint if a better checkpoint is known
    if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
        store.justified_checkpoint = store.best_justified_checkpoint
def on_block(store: Store, signed_block: SignedBeaconBlock, state: BeaconState = None) -> None:
block = signed_block.message
# Make a copy of the state to avoid mutability issues
assert block.parent_root in store.block_states, "No parent in store"
pre_state = store.block_states[block.parent_root].copy()
# Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past.
assert get_current_slot(store) >= block.slot, "Block in the future"
# Add new block to the store
store.blocks[hash_tree_root(block)] = block
# Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
assert block.slot > finalized_slot, "Block slot earlier than finalized epoch slot"
# Check block is a descendant of the finalized block at the checkpoint finalized slot
assert get_ancestor(store, hash_tree_root(block), finalized_slot) == store.finalized_checkpoint.root, "Block not a descendant of the finalized block at the checkpoint finalized slot"
# Check the block is valid and compute the post-state
if state is None:
state = state_transition(pre_state, signed_block, True)
else:
process_block(state, signed_block.message)
# Add new state for this block to the store
store.block_states[hash_tree_root(block)] = state
# Update justified checkpoint
if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
store.best_justified_checkpoint = state.current_justified_checkpoint
if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
store.justified_checkpoint = state.current_justified_checkpoint
# Update finalized checkpoint
if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
| |
<gh_stars>1-10
# RNN implementation inspired by https://github.com/philipperemy/tensorflow-ctc-speech-recognition
import argparse
import collections
import os
import random
import time
from math import ceil
from os.path import join
import numpy as np
import tensorflow as tf
from constants import TRAIN_ROOT
from corpus.corpus_segment import Speech
from util.audio_util import distort, shift
from util.corpus_util import get_corpus
from util.log_util import *
from util.plot_util import visualize_cost
from util.rnn_util import CHAR_TOKENS, decode, FileLogger, encode, pad_sequences, sparse_tuple_from
from util.train_util import get_poc, get_target_dir, get_num_features
# -------------------------------------------------------------
# Constants, defaults and env-vars
# -------------------------------------------------------------
BATCH_SIZE = 50  # default number of speech segments per training batch
MAX_EPOCHS = 1000  # number of epochs to train on
LER_CONVERGENCE = 0.05  # LER value for convergence (average over last 10 epochs)
MAX_SHIFT = 2000  # maximum number of frames to shift the audio
FEATURE_TYPE = 'mfcc'  # default acoustic feature type (mfcc/mel/pow)
SYNTHESIZE = False  # default: do not augment training data with distorted copies
# NOTE(review): hard-codes GPU #2 for this process — confirm this is still
# the intended device outside the original author's machine.
os.environ['CUDA_VISIBLE_DEVICES'] = "2"
# -------------------------------------------------------------
# CLI arguments
# -------------------------------------------------------------
parser = argparse.ArgumentParser(description="""Train RNN with CTC cost function for speech recognition""")
parser.add_argument('-p', '--poc', type=str, nargs='?',
                    help='(optional) PoC # to train. If used, a preset choice of parameters is used.')
# fix: closed the unbalanced parenthesis in the help text
parser.add_argument('-c', '--corpus', type=str, choices=['rl', 'ls'],
                    help='corpus on which to train the RNN (rl=ReadyLingua, ls=LibriSpeech)')
parser.add_argument('-l', '--language', type=str,
                    help='language on which to train the RNN')
parser.add_argument('-b', '--batch_size', type=int, nargs='?', default=BATCH_SIZE,
                    help=f'(optional) number of speech segments to include in one batch (default:{BATCH_SIZE})')
# fix: default now references the FEATURE_TYPE constant (same value) instead of
# a duplicated literal, so the help text and default cannot drift apart
parser.add_argument('-f', '--feature_type', type=str, nargs='?', choices=['mfcc', 'mel', 'pow'], default=FEATURE_TYPE,
                    help=f'(optional) features to use for training (default: {FEATURE_TYPE})')
parser.add_argument('-id', '--id', type=str, nargs='?',
                    help='(optional) specify ID of single corpus entry on which to train on')
# fix: the index is compared against len(corpus) and used to index the corpus,
# so it must be parsed as an int (it was type=str, which raised TypeError)
parser.add_argument('-ix', '--ix', type=int, nargs='?',
                    help='(optional) specify index of single corpus entry on which to train on')
parser.add_argument('-s', '--synthesize', action='store_true', default=SYNTHESIZE,
                    help=f'(optional) synthesize audio for training by adding distortion (default: {SYNTHESIZE})')
parser.add_argument('-t', '--target_root', type=str, nargs='?', default=TRAIN_ROOT,
                    help=f'(optional) root directory where results will be written to (default: {TRAIN_ROOT})')
parser.add_argument('-e', '--num_epochs', type=int, nargs='?', default=MAX_EPOCHS,
                    help=f'(optional) number of epochs to train the model (default: {MAX_EPOCHS})')
parser.add_argument('-le', '--limit_entries', type=int, nargs='?',
                    help='(optional) number of corpus entries from training set to use for training (default: all)')
parser.add_argument('-ls', '--limit_segments', type=int, nargs='?',
                    help='(optional) number of aligned speech segments to use per corpus entry (default: all)')
# get_poc may override parsed values with a preset for the requested PoC
args = get_poc(parser.parse_args())
# -------------------------------------------------------------
# Other values
# -------------------------------------------------------------
num_classes = len(CHAR_TOKENS) + 2  # 26 lowercase ASCII chars + space + blank = 28 labels
num_hidden = 100  # LSTM cell size
num_layers = 1  # number of stacked LSTM layers
def main():
    """Entry point: set up logging, build the data sets and model, train,
    then save the CTC/LER cost plots next to the results."""
    target_dir = get_target_dir('RNN', args)
    print(f'Results will be written to: {target_dir}')
    log_file_path = join(target_dir, 'train.log')
    # mirror stdout into the training log
    redirect_to_file(log_file_path)
    print(create_args_str(args))
    num_features = get_num_features(args.feature_type)
    corpus = get_corpus(args.corpus)
    # NOTE(review): test_set is created but never used below — confirm
    # whether a final test evaluation was intended.
    train_set, dev_set, test_set = create_train_dev_test(corpus)
    model_parms = create_model(num_features)
    train_poc(model_parms, target_dir, train_set, dev_set)
    # render and persist the cost curves recorded during training
    fig_ctc, fig_ler, _ = visualize_cost(target_dir, args)
    fig_ctc.savefig(join(target_dir, f'poc{args.poc}_ctc.png'), bbox_inches='tight')
    fig_ler.savefig(join(target_dir, f'poc{args.poc}_ler.png'), bbox_inches='tight')
def create_train_dev_test(corpus):
    """Build train/dev/test sets by repeating segments of one corpus entry.

    The entry is selected with --id or --ix.  Dev and test reuse the original
    segments (they are shifted/distorted per epoch during validation); the
    train set optionally gets additional distorted copies (--synthesize).

    Returns:
        (train_set, dev_set, test_set) lists of speech segments.
    Raises:
        ValueError when neither --id nor --ix selects an entry.
    """
    repeat_sample = None
    if args.id is not None:
        if args.id not in corpus.keys:
            print(f'Error: no entry with id={args.id} found!')
            return exit()
        print(f'training on corpus entry with id={args.id}')
        repeat_sample = corpus[args.id]
    if args.ix is not None:
        # args.ix may arrive as a string from the CLI: convert before use
        ix = int(args.ix)
        # fix: >= (not >) — an index equal to len(corpus) is already
        # out of bounds; also report args.ix, not args.id, in the message
        if ix >= len(corpus):
            print(f'Error: {args.ix} exceeds corpus bounds ({len(corpus)} entries)!')
            return exit()
        print(f'training on corpus entry with index={args.ix}')
        repeat_sample = corpus[ix]
    if not repeat_sample:
        raise ValueError(f'no corpus entry with index={args.ix} or id={args.id} found!')
    speech_segments = repeat_sample.speech_segments_not_numeric[:args.limit_segments]
    # dev/test reuse the original segments (randomly distorted after each epoch)
    dev_set = speech_segments
    test_set = speech_segments
    # fix: give the train set its own list — the original used
    # "speech_segments += synthesized" which mutated the very list that
    # dev_set/test_set alias, leaking synthesized data into validation
    train_set = list(speech_segments)
    if args.synthesize:
        # add distorted variants of the original segments as synthesized data
        for speech_segment in speech_segments:
            speech = Speech(speech_segment.start_frame, speech_segment.end_frame)
            speech.corpus_entry = speech_segment.corpus_entry
            speech.audio = distort(speech_segment.audio, speech_segment.rate, tempo=True)
            speech.transcript = speech_segment.transcript
            train_set.append(speech)
    return train_set, dev_set, test_set
def create_model(num_features):
    """Build the TF1 graph: LSTM stack + affine projection + CTC loss/decoder.

    Parameters:
        num_features: size of the per-frame feature vector (depends on the
            chosen feature type).

    Returns:
        dict holding the graph and handles to every tensor/op needed for
        training and evaluation (inputs, targets, seq_len, cost, optimizer,
        ler, decoded, log_prob).
    """
    print('creating model')
    graph = tf.Graph()
    with graph.as_default():
        # Input sequences: Has size [batch_size, max_step_size, num_features], but the batch_size and max_step_size
        # can vary along each step
        inputs = tf.placeholder(tf.float32, [None, None, num_features], name='input')
        # Here we use sparse_placeholder that will generate a SparseTensor required by ctc_loss op.
        targets = tf.sparse_placeholder(tf.int32, name='targets')
        # Sequence length: 1d array of size [batch_size]
        seq_len = tf.placeholder(tf.int32, [None], name='seq_len')
        # single LSTM cell
        cell = tf.contrib.rnn.LSTMCell(num_hidden, state_is_tuple=True)
        # Stacking rnn cells.  NOTE(review): [cell] * num_layers repeats the
        # same cell object; harmless while num_layers == 1 as configured.
        stack = tf.contrib.rnn.MultiRNNCell([cell] * num_layers, state_is_tuple=True)
        # The second output is the last state and we will not use that
        outputs, _ = tf.nn.dynamic_rnn(stack, inputs, seq_len, dtype=tf.float32)
        shape = tf.shape(inputs)
        batch_s, max_time_steps = shape[0], shape[1]
        # Reshaping to apply the same weights over the timesteps
        outputs = tf.reshape(outputs, [-1, num_hidden])
        # Truncated normal with mean 0 and stdev=0.1
        # Tip: Try another initialization
        # see https://www.tensorflow.org/versions/r0.9/api_docs/python/contrib.layers.html#initializers
        W = tf.Variable(tf.truncated_normal([num_hidden, num_classes], stddev=0.1))
        # Zero initialization
        # Tip: Is tf.zeros_initializer the same?
        b = tf.Variable(tf.constant(0., shape=[num_classes]))
        # Doing the affine projection
        logits = tf.matmul(outputs, W) + b
        # Reshaping back to the original shape
        logits = tf.reshape(logits, [batch_s, -1, num_classes])
        # Time major: ctc_loss/decoders expect [max_time, batch, num_classes]
        logits = tf.transpose(logits, (1, 0, 2))
        loss = tf.nn.ctc_loss(targets, logits, seq_len)
        cost = tf.reduce_mean(loss)
        # alternative optimizers kept for reference
        # optimizer = tf.train.AdamOptimizer().minimize(cost)
        # optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9).minimize(cost)
        # optimizer = tf.train.MomentumOptimizer(learning_rate=0.005, momentum=0.9).minimize(cost)
        optimizer = tf.train.MomentumOptimizer(learning_rate=1e-2, momentum=0.9).minimize(cost)
        # Option 2: tf.contrib.ctc.ctc_beam_search_decoder
        # (it's slower but you'll get better results)
        # decoded, log_prob = tf.nn.ctc_greedy_decoder(logits, seq_len)
        decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits, seq_len)
        # Inaccuracy: label error rate
        ler = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), targets))
    return {
        'num_features': num_features,
        'graph': graph,
        'cost': cost,
        'optimizer': optimizer,
        'ler': ler,
        'inputs': inputs,
        'targets': targets,
        'seq_len': seq_len,
        'decoded': decoded,
        'log_prob': log_prob
    }
def train_poc(model_parms, target_dir, train_set, dev_set):
    """Train the CTC model until LER convergence or MAX_EPOCHS, then save it.

    Each epoch runs all training batches, then validates on one randomly
    chosen (shifted/distorted) dev batch; per-epoch costs are written to
    stats.tsv and the epoch log.

    Parameters:
        model_parms: dict produced by create_model()
        target_dir: directory for logs and the model checkpoint
        train_set, dev_set: lists of speech segments

    Returns:
        path of the saved model checkpoint.
    """
    print(f'training on {ceil(len(train_set)/args.batch_size)} batches {len(train_set)} speech_segments')
    # fix: dropped the unused num_features/log_prob locals
    graph = model_parms['graph']
    cost = model_parms['cost']
    optimizer = model_parms['optimizer']
    ler = model_parms['ler']
    inputs = model_parms['inputs']
    targets = model_parms['targets']
    seq_len = model_parms['seq_len']
    decoded = model_parms['decoded']
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    train_cost_logger = create_cost_logger(target_dir, 'stats.tsv')
    epoch_logger = create_epoch_logger(target_dir)
    with tf.Session(graph=graph, config=config) as session:
        tf.global_variables_initializer().run()
        curr_epoch = 0
        # sliding windows over the CTC-/LER-cost of the last 10 epochs (train-set)
        train_ctcs = collections.deque(maxlen=10)
        train_lers = collections.deque(maxlen=10)
        # sliding window over the last 10 average LER values (train-set)
        ler_train_means = collections.deque([0], maxlen=10)
        # sliding windows over the CTC/LER values of the last 10 epochs (dev-set)
        val_ctcs = collections.deque(maxlen=10)
        val_lers = collections.deque(maxlen=10)
        convergence = False
        # NOTE(review): args.num_epochs is parsed on the CLI but the
        # module-level MAX_EPOCHS is used here — confirm which is intended.
        while not convergence and curr_epoch < MAX_EPOCHS:
            curr_epoch += 1
            num_samples = 0
            ctc_train = ler_train = 0
            start = time.time()
            # iterate over batches for current epoch
            for X, Y, batch_seq_len, ground_truths in generate_batches(train_set, args.feature_type, args.batch_size):
                feed = {inputs: X, targets: Y, seq_len: batch_seq_len}
                batch_cost, _ = session.run([cost, optimizer], feed)
                batch_len = X.shape[0]
                # accumulate sample-weighted costs; normalized after the loop
                ctc_train += batch_cost * batch_len
                ler_train += session.run(ler, feed_dict=feed) * batch_len
                num_samples += batch_len
            # calculate costs for current epoch
            ctc_train /= num_samples
            ler_train /= num_samples
            train_ctcs.append(ctc_train)
            train_lers.append(ler_train)
            # update means over the sliding windows
            ctc_train_mean = np.array(train_ctcs).mean()
            ler_train_mean = np.array(train_lers).mean()
            ler_train_means.append(ler_train_mean)
            # convergence reached if mean LER is below threshold and the mean
            # LER change rate is below 1%
            ler_diff = np.diff(ler_train_means).mean()
            convergence = ler_train_mean < LER_CONVERGENCE and abs(ler_diff) < 0.01
            # validate cost with a randomly chosen dev-set batch that has been
            # randomly shifted and distorted
            val_batches = list(
                generate_batches(dev_set, args.feature_type, args.batch_size, shift_audio=True, distort_audio=True,
                                 limit_segments=5))
            X_val, Y_val, val_seq_len, val_ground_truths = random.choice(val_batches)
            val_feed = {inputs: X_val, targets: Y_val, seq_len: val_seq_len}
            ctc_val, ler_val = session.run([cost, ler], feed_dict=val_feed)
            val_ctcs.append(ctc_val)
            ctc_val_mean = np.array(val_ctcs).mean()
            val_lers.append(ler_val)
            ler_val_mean = np.array(val_lers).mean()
            # Decode the validation batch and log predictions
            d = session.run(decoded[0], feed_dict=val_feed)
            dense_decoded = tf.sparse_tensor_to_dense(d, default_value=0).eval(session=session)
            for i, prediction_enc in enumerate(dense_decoded):
                # fix: compare against the dev-batch transcripts
                # (val_ground_truths); the original read the leftover
                # train-batch ground_truths here
                ground_truth = val_ground_truths[i]
                prediction = decode(prediction_enc)
                print_prediction(ground_truth, prediction, 'dev-set')
                log_prediction(epoch_logger, ground_truth, prediction, 'dev-set')
            train_cost_logger.write_tabbed(
                [curr_epoch, ctc_train, ctc_val, ctc_train_mean, ctc_val_mean, ler_train, ler_val, ler_train_mean,
                 ler_val_mean])
            val_str = f'=== Epoch {curr_epoch}, ' \
                      f'ctc_train = {ctc_train:.3f}, ctc_val = {ctc_val:.3f}, ' \
                      f'ctc_train_mean = {ctc_train_mean:.3f}, ctc_val_mean = {ctc_val_mean:.3f} ' \
                      f'ler_train = {ler_train:.3f}, ler_val = {ler_val:.3f}, ' \
                      f'ler_train_mean = {ler_train_mean:.3f}, ler_val_mean = {ler_val_mean:.3f} ' \
                      f'ler_diff = {ler_diff:.3f}, time = {time.time() - start:.3f}==='
            print(val_str)
            epoch_logger.write(val_str)
        # fix: only claim convergence when it actually happened (the original
        # printed "convergence reached" even when MAX_EPOCHS was exhausted)
        if convergence:
            print(f'convergence reached after {curr_epoch} epochs!')
        else:
            print(f'stopped after {curr_epoch} epochs without convergence')
        saver = tf.train.Saver()
        save_path = saver.save(session, join(target_dir, 'model.ckpt'))
        print(f'Model saved to path: {save_path}')
    return save_path
def generate_batches(speech_segments, feature_type, batch_size, shift_audio=False, distort_audio=False,
limit_segments=None):
l = limit_segments if limit_segments else len(speech_segments)
for ndx in range(0, l, batch_size):
batch = []
for speech_segment in speech_segments[ndx:min(ndx + batch_size, l)]:
| |
from ROAR.control_module.controller import Controller
from ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle
from ROAR.utilities_module.data_structures_models import Transform, Location
import numpy as np
import logging
from ROAR.agent_module.agent import Agent
from typing import Tuple
import json
from pathlib import Path
import cvxpy as cp
import scipy
import scipy.signal
import scipy.linalg
class MPCController(Controller):
    """MPC-based vehicle controller.

    Thin wrapper that loads the MPC configuration from the agent settings,
    delegates the actual optimization to ``FullMPCController`` and clips the
    resulting commands to the configured actuator boundaries.
    """

    def __init__(self, agent, steering_boundary: Tuple[float, float],
                 throttle_boundary: Tuple[float, float], **kwargs):
        super().__init__(agent, **kwargs)
        self.max_speed = self.agent.agent_settings.max_speed
        self.throttle_boundary = throttle_boundary
        self.steering_boundary = steering_boundary
        # Use a context manager so the config file handle is closed promptly;
        # the original json.load(Path(...).open(mode='r')) leaked the handle.
        with Path(agent.agent_settings.mpc_config_file_path).open(mode='r') as config_file:
            self.config = json.load(config_file)
        self.controller = FullMPCController(agent=agent,
                                            throttle_boundary=throttle_boundary,
                                            steering_boundary=steering_boundary,
                                            max_speed=self.max_speed,
                                            config=self.config)
        self.logger = logging.getLogger(__name__)

    def run_in_series(self, next_waypoint: Transform, **kwargs) -> VehicleControl:
        """Compute one control step toward ``next_waypoint``.

        Args:
            next_waypoint: Target waypoint supplied by the planner.
            **kwargs: May contain ``target_speed``; defaults to ``max_speed``.

        Returns:
            VehicleControl with throttle and steering clipped to their
            configured boundaries.
        """
        long_control, lat_control = self.controller.run_in_series(
            next_waypoint=next_waypoint,
            target_speed=kwargs.get("target_speed", self.max_speed))
        long_control = float(np.clip(long_control, *self.throttle_boundary))
        lat_control = float(np.clip(lat_control, *self.steering_boundary))
        return VehicleControl(throttle=long_control, steering=lat_control)
class FullMPCController(Controller):
def __init__(self, agent, config: dict,
             throttle_boundary: Tuple[float, float],
             steering_boundary: Tuple[float, float],
             max_speed: float,
             dt: float = 0.03, **kwargs):
    """Store controller parameters and pre-compute linearized dynamics.

    Args:
        agent: Owning agent, passed through to the Controller base class.
        config: Dictionary of vehicle/MPC parameters (masses, tire model
            coefficients, equilibrium angles, ...).
        throttle_boundary: (min, max) allowed throttle commands.
        steering_boundary: (min, max) allowed steering commands.
        max_speed: Equilibrium speed the dynamics are linearized around.
        dt: Discretization time step in seconds.
    """
    super().__init__(agent, **kwargs)
    self.config = config
    self.max_speed = max_speed
    self.throttle_boundary = throttle_boundary
    self.steering_boundary = steering_boundary
    self._dt = dt
    # Linearize once up front: dicts of discretized (A, B) matrices keyed
    # by equilibrium steering angle, looked up at runtime.
    self.A_matrices, self.B_matrices = self.construct_linearized_matrices(max_speed)
    # Magnitude of the most recent steering command; used by
    # get_linearized_matrices() to pick the matching linearization.
    self.last_steer_CMD = 0
def get_throttle_CMD(self, Fr_x, vx):
    """Convert an optimal rear-tire longitudinal force into a motor command.

    Inverts the longitudinal dynamics model: the motor must supply the
    requested tire force plus friction and aerodynamic drag.

    Args:
        Fr_x: Optimal rear tire longitudinal force from the CVXPY solve.
        vx: Current longitudinal velocity.

    Returns:
        Motor input command.
    """
    cfg = self.config
    drag_force = cfg['C_d'] * vx ** 2
    required_force = Fr_x + cfg['F_friction'] + drag_force
    return required_force / cfg['b_motor']
def get_steer_CMD(self, Ff_y, beta, r, vx):
    """Convert an optimal front-tire lateral force into a steering command.

    Inverts the lateral tire model (sin/arctan form with B, C, mu
    coefficients) to recover the front slip angle, then subtracts it from
    the geometric steering angle.

    Args:
        Ff_y: Optimal front tire lateral force from the CVXPY solve.
        beta: Current side slip angle of the vehicle.
        r: Current angular velocity (yaw rate).
        vx: Current longitudinal velocity.

    Returns:
        Steering command, normalized by the maximum steering angle.
    """
    cfg = self.config
    # Clamp the normalized force into [-1, 1] so arcsin stays in-domain.
    normalized_force = np.clip(Ff_y / (-cfg['mu'] * cfg['Ff_z']), -1, 1)
    alpha_f = np.tan(np.arcsin(normalized_force) / cfg['C']) / cfg['B']
    # 10e-1 (= 1.0) keeps the denominator nonzero at standstill.
    # NOTE(review): possibly intended as 1e-1 — confirm against the model.
    steer_angle = np.arctan(beta + ((r * cfg['Lf']) / (vx + 10e-1))) - alpha_f
    steer_cmd = steer_angle / cfg['max_angle']
    # Remember the magnitude so the next solve picks matching matrices.
    self.last_steer_CMD = np.abs(steer_cmd)
    return steer_cmd
def linearize_around_steer_angle(self, steer_angle_eq, speed_eq):
    """Linearize and discretize the vehicle dynamics model.

    The continuous-time bicycle-model dynamics are linearized around the
    given equilibrium steering angle and speed, then discretized with the
    controller time step.

    Args:
        steer_angle_eq: Equilibrium steering angle to linearize around.
        speed_eq: Equilibrium vehicle speed to linearize around.

    Returns:
        (Ad, Bd): the discretized state and input matrices.
    """
    cfg = self.config
    # Equilibrium operating point implied by the chosen angle and speed.
    beta_eq = np.arctan((cfg['Lr'] / cfg['wheelbase']) * np.tan(steer_angle_eq))
    vx_eq = speed_eq * np.cos(beta_eq)
    r_eq = (speed_eq / cfg['Lr']) * np.sin(beta_eq)
    alpha_f = np.arctan(beta_eq + (r_eq * cfg['Lf']) / vx_eq) - steer_angle_eq
    Ff_y_eq = -cfg['mu'] * cfg['Ff_z'] * np.sin(cfg['C'] * np.arctan(cfg['B'] * alpha_f))
    Fr_y_eq = (cfg['Lf'] * Ff_y_eq * np.cos(steer_angle_eq)) / cfg['Lr']
    # Partial-derivative entries of the continuous-time A matrix.
    a_13 = -(Fr_y_eq + Ff_y_eq * np.cos(steer_angle_eq)) / (cfg['mass'] * vx_eq)
    a_31 = -vx_eq * r_eq
    # NOTE(review): a fuller a_31 expression from Gonzales' dissertation
    # exists; it was found unnecessary but may help tune performance.
    Ac = np.array([
        [0, -1, a_13],
        [0, 0, 0],
        [a_31, 0, 0]])
    b_11 = np.cos(steer_angle_eq) / (cfg['mass'] * vx_eq)
    b_21 = np.cos(steer_angle_eq) * cfg['Lf'] / cfg['Izz']
    b_31 = -np.sin(steer_angle_eq) / cfg['mass']
    Bc = np.array([
        [b_11, 0],
        [b_21, 0],
        [b_31, 1 / cfg['mass']]])
    # C and D are placeholders: cont2discrete wants a full state-space tuple.
    Cc = np.zeros((3, 3))
    Dc = np.zeros((3, 2))
    Ad, Bd, _, _, _ = scipy.signal.cont2discrete((Ac, Bc, Cc, Dc), self._dt)
    return Ad, Bd
def construct_linearized_matrices(self, speed_eq):
    """Pre-compute linearized dynamics for every equilibrium angle.

    The dynamics change with the steering angle, so the optimizer needs a
    linearization per configured equilibrium angle; these are computed once
    here so they can be looked up while CARLA is running.

    Args:
        speed_eq: Equilibrium vehicle speed to linearize around.

    Returns:
        (A_matrices, B_matrices): dicts mapping each equilibrium angle to
        its discretized A and B matrices.
    """
    A_matrices, B_matrices = {}, {}
    for angle in self.config['equilibrium_angles']:
        A_matrices[angle], B_matrices[angle] = \
            self.linearize_around_steer_angle(angle, speed_eq)
    return A_matrices, B_matrices
def get_linearized_matrices(self, steer_angle):
    """Look up the A and B matrices bracketing a steering angle.

    Args:
        steer_angle: Current steering angle of the car (absolute value).

    Returns:
        The (A, B) matrices linearized at the equilibrium angle just below
        ``steer_angle`` (clamped to the last configured angle).
    """
    angles = self.config['equilibrium_angles']
    for idx, threshold in enumerate(angles):
        # First threshold above the current angle -> use the angle below it.
        if idx > 0 and steer_angle < threshold:
            angle_eq = angles[idx - 1]
            break
        # Past every threshold -> clamp to the largest equilibrium angle.
        if idx == len(angles) - 1:
            angle_eq = angles[-1]
            break
    else:
        # No equilibrium angles configured; mirror the original fall-through.
        return None
    return self.A_matrices.get(angle_eq), self.B_matrices.get(angle_eq)
def solve_cftoc(self, target_state, current_state, state_bounds, input_bounds):
"""Solves for optimal vehicle inputs
Takes in the current vehicle state and the target state that the car should be at,
and then solves for the optimal input sequence to reach the target state. Vehicle
states are beta, yaw and longitudinal speed for a total of 3 state variables.
Vehicle inputs are front tire lateral force and rear tire longitudinal force, for a
total of 2 input variables.
Args:
target_state: The state that the vehicle should be at
current_state: The current vehicle state
state_bounds: Bounds that the state variables should not exceed or be under
input_bounds: Bounds that the inputs should not exceed or be under
Returns:
The optimal steering and throttle commands for the current time step
"""
# Number of future time steps to optimize over
M = 10
# Number of state variables, which are beta, yaw and longitudinal speed
nx = 3
# Number of input variables, which are front tire lateral force and rear tire longitudinal force
nu = 2
# Initialize the array of variables for each time step
x = cp.Variable((nx, M + 1))
u = cp.Variable((nu, M))
# Initialize cost and constraints
cost = 0
constr = []
# Set Initial State
constr += [x[:, 0] == current_state]
# Get correct linearized dynamics matrices based on the last steering angle
A, B = self.get_linearized_matrices(self.last_steer_CMD * self.config['max_angle'])
for m in range(M):
# Cost function: basically a sum of squares between the current beta, yaw and speed values and the target values
# The different coefficients come from the magnitude of the state values (i.e. beta is on the range of 0-2 while
# longitudinal speed can range from 0-100), and the importance of the state variables as well.
cost += 10**3 * cp.sum_squares(x[0, m] - target_state[0])
cost += cp.sum_squares(x[2, m] - target_state[2])
# The cost function value relating to the yaw is removed when the car needs to make a large turn
if np.abs(target_state[0]) < np.pi / 20:
cost += 10**1 * cp.sum_squares(x[1, m] - target_state[1])
# Constraint for dynamic model
constr += [x[:, m + 1] == A @ x[:, m] + B @ u[:, m]]
# Constraints for setting bounds on the input values
constr += [input_bounds[:, 0] <= u[:, m]]
constr += [input_bounds[:, 1] >= u[:, m]]
u_delta_limits = np.array(self.config['delta_lim'])
if m < M - 1:
# Constraint limiting how much inputs can change between time steps - ensures "smoother" input profiles
constr += [u[:, m + 1] - u[:, m] <= u_delta_limits, u[:, m + 1] - u[:, m] >= -u_delta_limits]
# Set terminal cost values
cost += 10**3 * cp.sum_squares(x[0, M] - target_state[0])
cost | |
<gh_stars>10-100
import pickle
import math
import re
import csv
import concurrent.futures
import os
from functools import reduce
from operator import add
import pandas as pd
import numpy as np
ROOT = "./mimic_database/"
## Utilities ##
def map_dict(elem, dictionary):
    """Look up ``elem`` in ``dictionary``, returning NaN when it is missing.

    NaN (rather than an exception) lets the result be dropped or imputed
    later by pandas operations.

    Args:
        elem: Key to look up.
        dictionary: Mapping to search.

    Returns:
        The mapped value, or ``np.nan`` if ``elem`` is not a key.
    """
    # dict.get expresses the lookup-with-default in a single call.
    return dictionary.get(elem, np.nan)
## Proper Classes ##
class ParseItemID(object):
    ''' This class builds the dictionaries depending on desired features '''
    def __init__(self):
        # feature name -> set of matching ITEMIDs; filled by build_dictionary().
        self.dictionary = {}
        # Human-readable chart-event feature names, index-aligned with
        # self.features below (the assert in build_dictionary checks this).
        self.feature_names = ['RBCs', 'WBCs', 'platelets', 'hemoglobin', 'hemocrit',
                              'atypical lymphocytes', 'bands', 'basophils', 'eosinophils', 'neutrophils',
                              'lymphocytes', 'monocytes', 'polymorphonuclear leukocytes',
                              'temperature (F)', 'heart rate', 'respiratory rate', 'systolic', 'diastolic',
                              'pulse oximetry',
                              'troponin', 'HDL', 'LDL', 'BUN', 'INR', 'PTT', 'PT', 'triglycerides', 'creatinine',
                              'glucose', 'sodium', 'potassium', 'chloride', 'bicarbonate',
                              'blood culture', 'urine culture', 'surface culture', 'sputum' +
                              ' culture', 'wound culture', 'Inspired O2 Fraction', 'central venous pressure',
                              'PEEP Set', 'tidal volume', 'anion gap',
                              'daily weight', 'tobacco', 'diabetes', 'history of CV events']
        # Regex fragments used to find each feature in the D_ITEMS labels.
        # A leading '$' marks a pattern used verbatim (with the '$' stripped);
        # all other entries are wrapped in '.*...*' by the loop below.
        self.features = ['$^RBC(?! waste)', '$.*wbc(?!.*apache)', '$^platelet(?!.*intake)',
                         '$^hemoglobin', '$hematocrit(?!.*Apache)',
                         'Differential-Atyps', 'Differential-Bands', 'Differential-Basos', 'Differential-Eos',
                         'Differential-Neuts', 'Differential-Lymphs', 'Differential-Monos', 'Differential-Polys',
                         'temperature f', 'heart rate', 'respiratory rate', 'systolic', 'diastolic',
                         'oxymetry(?! )',
                         'troponin', 'HDL', 'LDL', '$^bun(?!.*apache)', 'INR', 'PTT',
                         '$^pt\\b(?!.*splint)(?!.*exp)(?!.*leak)(?!.*family)(?!.*eval)(?!.*insp)(?!.*soft)',
                         'triglyceride', '$.*creatinine(?!.*apache)',
                         '(?<!boost )glucose(?!.*apache).*',
                         '$^sodium(?!.*apache)(?!.*bicarb)(?!.*phos)(?!.*ace)(?!.*chlo)(?!.*citrate)(?!.*bar)(?!.*PO)', '$.*(?<!penicillin G )(?<!urine )potassium(?!.*apache)',
                         '^chloride', 'bicarbonate', 'blood culture', 'urine culture', 'surface culture',
                         'sputum culture', 'wound culture', 'Inspired O2 Fraction', '$Central Venous Pressure(?! )',
                         'PEEP set', 'tidal volume \(set\)', 'anion gap', 'daily weight', 'tobacco', 'diabetes',
                         'CV - past']
        # Expand the fragments above into the actual search patterns.
        self.patterns = []
        for feature in self.features:
            if '$' not in feature:
                self.patterns.append('.*{0}.*'.format(feature))
            elif '$' in feature:
                self.patterns.append(feature[1::])
        # ITEMID -> LABEL lookup table from the MIMIC D_ITEMS dictionary file.
        self.d_items = pd.read_csv(ROOT + 'D_ITEMS.csv', usecols=['ITEMID', 'LABEL'])
        self.d_items.dropna(how='any', axis=0, inplace=True)
        # Display names for prescription (drug) features, index-aligned with
        # self.script_features below.
        # NOTE(review): several entries look misspelled ('asprin',
        # 'acetominophen', 'magensium sulfate', 'cipfloxacin') — they are
        # display names only, but confirm downstream consumers expect them.
        self.script_features_names = ['epoetin', 'warfarin', 'heparin', 'enoxaparin', 'fondaparinux',
                                      'asprin', 'ketorolac', 'acetominophen',
                                      'insulin', 'glucagon',
                                      'potassium', 'calcium gluconate',
                                      'fentanyl', 'magensium sulfate',
                                      'D5W', 'dextrose',
                                      'ranitidine', 'ondansetron', 'pantoprazole', 'metoclopramide',
                                      'lisinopril', 'captopril', 'statin',
                                      'hydralazine', 'diltiazem',
                                      'carvedilol', 'metoprolol', 'labetalol', 'atenolol',
                                      'amiodarone', 'digoxin(?!.*fab)',
                                      'clopidogrel', 'nitroprusside', 'nitroglycerin',
                                      'vasopressin', 'hydrochlorothiazide', 'furosemide',
                                      'atropine', 'neostigmine',
                                      'levothyroxine',
                                      'oxycodone', 'hydromorphone', 'fentanyl citrate',
                                      'tacrolimus', 'prednisone',
                                      'phenylephrine', 'norepinephrine',
                                      'haloperidol', 'phenytoin', 'trazodone', 'levetiracetam',
                                      'diazepam', 'clonazepam',
                                      'propofol', 'zolpidem', 'midazolam',
                                      'albuterol', 'ipratropium',
                                      'diphenhydramine',
                                      '0.9% Sodium Chloride',
                                      'phytonadione',
                                      'metronidazole',
                                      'cefazolin', 'cefepime', 'vancomycin', 'levofloxacin',
                                      'cipfloxacin', 'fluconazole',
                                      'meropenem', 'ceftriaxone', 'piperacillin',
                                      'ampicillin-sulbactam', 'nafcillin', 'oxacillin',
                                      'amoxicillin', 'penicillin', 'SMX-TMP']
        # Regex fragments matched (case-insensitively) against the DRUG
        # column of PRESCRIPTIONS.csv; negative lookaheads exclude topical
        # forms, desensitization protocols, etc.
        self.script_features = ['epoetin', 'warfarin', 'heparin', 'enoxaparin', 'fondaparinux',
                                'aspirin', 'keterolac', 'acetaminophen',
                                'insulin', 'glucagon',
                                'potassium', 'calcium gluconate',
                                'fentanyl', 'magnesium sulfate',
                                'D5W', 'dextrose',
                                'ranitidine', 'ondansetron', 'pantoprazole', 'metoclopramide',
                                'lisinopril', 'captopril', 'statin',
                                'hydralazine', 'diltiazem',
                                'carvedilol', 'metoprolol', 'labetalol', 'atenolol',
                                'amiodarone', 'digoxin(?!.*fab)',
                                'clopidogrel', 'nitroprusside', 'nitroglycerin',
                                'vasopressin', 'hydrochlorothiazide', 'furosemide',
                                'atropine', 'neostigmine',
                                'levothyroxine',
                                'oxycodone', 'hydromorphone', 'fentanyl citrate',
                                'tacrolimus', 'prednisone',
                                'phenylephrine', 'norepinephrine',
                                'haloperidol', 'phenytoin', 'trazodone', 'levetiracetam',
                                'diazepam', 'clonazepam',
                                'propofol', 'zolpidem', 'midazolam',
                                'albuterol', '^ipratropium',
                                'diphenhydramine(?!.*%)(?!.*cream)(?!.*/)',
                                '^0.9% sodium chloride(?! )',
                                'phytonadione',
                                'metronidazole(?!.*%)(?! desensit)',
                                'cefazolin(?! )', 'cefepime(?! )', 'vancomycin', 'levofloxacin',
                                'cipfloxacin(?!.*ophth)', 'fluconazole(?! desensit)',
                                'meropenem(?! )', 'ceftriaxone(?! desensit)', 'piperacillin',
                                'ampicillin-sulbactam', 'nafcillin', 'oxacillin', 'amoxicillin',
                                'penicillin(?!.*Desen)', 'sulfamethoxazole']
        self.script_patterns = ['.*' + feature + '.*' for feature in self.script_features]

    def prescriptions_init(self):
        ''' Load PRESCRIPTIONS.csv (relevant columns only) and drop incomplete rows '''
        self.prescriptions = pd.read_csv(ROOT + 'PRESCRIPTIONS.csv',
                                         usecols=['ROW_ID', 'SUBJECT_ID', 'HADM_ID', 'DRUG',
                                                  'STARTDATE', 'ENDDATE'])
        self.prescriptions.dropna(how='any', axis=0, inplace=True)

    def query_prescriptions(self, feature_name):
        ''' Return the DRUG values whose name contains feature_name (case-insensitive) '''
        pattern = '.*{0}.*'.format(feature_name)
        condition = self.prescriptions['DRUG'].str.contains(pattern, flags=re.IGNORECASE)
        return self.prescriptions['DRUG'].where(condition).dropna().values

    def extractor(self, feature_name, pattern):
        ''' Store the set of ITEMIDs whose label matches pattern under feature_name '''
        condition = self.d_items['LABEL'].str.contains(pattern, flags=re.IGNORECASE)
        dictionary_value = self.d_items['ITEMID'].where(condition).dropna().values.astype('int')
        self.dictionary[feature_name] = set(dictionary_value)

    def query(self, feature_name):
        ''' Return D_ITEMS labels containing feature_name (case-insensitive); for exploration '''
        pattern = '.*{0}.*'.format(feature_name)
        print(pattern)
        condition = self.d_items['LABEL'].str.contains(pattern, flags=re.IGNORECASE)
        return self.d_items['LABEL'].where(condition).dropna().values

    def query_pattern(self, pattern):
        ''' Return D_ITEMS labels matching an explicit regex pattern '''
        condition = self.d_items['LABEL'].str.contains(pattern, flags=re.IGNORECASE)
        return self.d_items['LABEL'].where(condition).dropna().values

    def build_dictionary(self):
        ''' Populate self.dictionary with the ITEMID set for every feature '''
        assert len(self.feature_names) == len(self.features)
        for feature, pattern in zip(self.feature_names, self.patterns):
            self.extractor(feature, pattern)

    def reverse_dictionary(self, dictionary):
        ''' Build self.rev, the inverse mapping: ITEMID -> feature name '''
        self.rev = {}
        for key, value in dictionary.items():
            for elem in value:
                self.rev[elem] = key
class MimicParser(object):
''' This class structures the MIMIC III and builds features then makes 24 hour windows '''
def __init__(self):
    """Create the shared ParseItemID helper and pre-build its dictionaries."""
    self.name = 'mimic_assembler'
    # Shared feature-dictionary helper used by the pipeline steps below.
    self.pid = ParseItemID()
    self.pid.build_dictionary()
    self.features = self.pid.features
def reduce_total(self, filepath):
    """Filter CHARTEVENTS.csv down to feature-relevant rows.

    Streams the very large CHARTEVENTS file in chunks, keeps only rows
    whose ITEMID belongs to one of the feature dictionaries, and writes
    the result to mapped_elements/CHARTEVENTS_reduced.csv.

    Args:
        filepath: Path to the raw CHARTEVENTS.csv file.
    """
    # CHARTEVENTS has ~330,712,484 rows, hence the chunked streaming read.
    pid = ParseItemID()
    pid.build_dictionary()
    chunksize = 10000000
    columns = ['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID', 'ITEMID', 'CHARTTIME', 'VALUE',
               'VALUENUM']
    # The union of all relevant ITEMIDs is invariant across chunks, so
    # compute it once instead of per chunk as before.
    relevant_item_ids = reduce(lambda x, y: x.union(y), pid.dictionary.values())
    out_path = ROOT + './mapped_elements/CHARTEVENTS_reduced.csv'
    for i, df_chunk in enumerate(pd.read_csv(filepath, iterator=True, chunksize=chunksize)):
        # Copy the filtered slice before dropna(inplace=True) to avoid
        # pandas chained-assignment warnings on a derived frame.
        df = df_chunk[df_chunk['ITEMID'].isin(relevant_item_ids)].copy()
        df.dropna(inplace=True, axis=0, subset=columns)
        # Header only for the first chunk; append afterwards.
        df.to_csv(out_path, index=False, columns=columns,
                  header=(i == 0), mode='w' if i == 0 else 'a')
        print(i)
def map_files(self, shard_number, filename, low_memory=False):
    """Split the reduced CHARTEVENTS file into HADM_ID-bucketed shards.

    HADM_ID ranges from 100001 to 199999; that range is divided into
    ``shard_number`` equal-width buckets and each row is written to the
    shard file of the bucket containing its HADM_ID.

    Args:
        shard_number: Number of shard files to produce.
        filename: Path of the (reduced) CHARTEVENTS csv to split.
        low_memory: When True, stream row-by-row with the csv module
            instead of loading chunks with pandas.
    """
    buckets = []
    beg = 100001
    end = 199999
    interval = math.ceil((end - beg) / float(shard_number))
    # Bucket i covers [beg + i*interval, beg + (i+1)*interval).
    for i in np.arange(shard_number):
        buckets.append(set(np.arange(beg + (i * interval), beg + (interval + (interval * i)))))
    if low_memory == False:
        # BUG FIX: the original reused loop variable ``i`` for both the
        # bucket loop and the chunk loop, so shard selection and naming
        # used the chunk index, and each chunk's to_csv overwrote the
        # previous chunk's rows. Distinct indices + append mode fix both.
        for shard_idx, bucket in enumerate(buckets):
            for chunk_idx, chunk in enumerate(pd.read_csv(filename, iterator=True,
                                                          chunksize=10000000)):
                sliced = chunk[chunk['HADM_ID'].astype('int').isin(bucket)]
                sliced.to_csv(ROOT + 'mapped_elements/shard_{0}.csv'.format(shard_idx),
                              index=False, header=(chunk_idx == 0),
                              mode='w' if chunk_idx == 0 else 'a')
    else:
        for i in range(len(buckets)):
            with open(filename, 'r') as chartevents:
                chartevents.seek(0)
                csvreader = csv.reader(chartevents)
                with open(ROOT + 'mapped_elements/shard_{0}.csv'.format(i), 'w') as shard_writer:
                    csvwriter = csv.writer(shard_writer)
                    for row in csvreader:
                        try:
                            # Keep the header row plus any row whose
                            # HADM_ID falls into this shard's bucket.
                            if row[1] == "HADM_ID" or int(row[1]) in buckets[i]:
                                csvwriter.writerow(row)
                        except ValueError as e:
                            print(row)
                            print(e)
def create_day_blocks(self, file_name):
    """Pivot a shard into one row per hospital-admission calendar day.

    Builds mean/std/min/max pivot tables per feature, drops uninformative
    columns, clips outliers, imputes missing values with the median, and
    writes <file_name>_24_hour_blocks.csv.

    Args:
        file_name: Path of the shard csv produced by map_files().
    """
    pid = ParseItemID()
    pid.build_dictionary()
    pid.reverse_dictionary(pid.dictionary)
    df = pd.read_csv(file_name)
    # Key each measurement by admission + calendar day.
    df['CHARTDAY'] = df['CHARTTIME'].astype('str').str.split(' ').apply(lambda x: x[0])
    df['HADMID_DAY'] = df['HADM_ID'].astype('str') + '_' + df['CHARTDAY']
    df['FEATURES'] = df['ITEMID'].apply(lambda x: pid.rev[x])
    # Remember which subject each admission-day belongs to; used later by
    # add_patient_columns().
    self.hadm_dict = dict(zip(df['HADMID_DAY'], df['SUBJECT_ID']))
    # Per-day mean/std/min/max of every feature.
    df2 = pd.pivot_table(df, index='HADMID_DAY', columns='FEATURES',
                         values='VALUENUM', fill_value=np.nan)
    df3 = pd.pivot_table(df, index='HADMID_DAY', columns='FEATURES',
                         values='VALUENUM', aggfunc=np.std, fill_value=0)
    df3.columns = ["{0}_std".format(i) for i in list(df2.columns)]
    df4 = pd.pivot_table(df, index='HADMID_DAY', columns='FEATURES',
                         values='VALUENUM', aggfunc=np.amin, fill_value=np.nan)
    df4.columns = ["{0}_min".format(i) for i in list(df2.columns)]
    df5 = pd.pivot_table(df, index='HADMID_DAY', columns='FEATURES',
                         values='VALUENUM', aggfunc=np.amax, fill_value=np.nan)
    df5.columns = ["{0}_max".format(i) for i in list(df2.columns)]
    df2 = pd.concat([df2, df3, df4, df5], axis=1)
    # BUG FIX: the rounded tobacco values were previously discarded — the
    # .apply() result was never assigned back to the column.
    df2['tobacco'] = df2['tobacco'].apply(lambda x: np.around(x))
    del df2['daily weight_std']
    del df2['daily weight_min']
    del df2['daily weight_max']
    del df2['tobacco_std']
    del df2['tobacco_min']
    del df2['tobacco_max']
    # Drop std/min/max for columns with at most two distinct finite values
    # (effectively binary): the aggregates carry no extra information.
    rel_columns = [i for i in list(df2.columns) if '_' not in i]
    for col in rel_columns:
        if len(np.unique(df2[col])[np.isfinite(np.unique(df2[col]))]) <= 2:
            print(col)
            del df2[col + '_std']
            del df2[col + '_min']
            del df2[col + '_max']
    # Clip extreme outliers (above the 95th percentile) to the median; use
    # .loc assignment instead of chained indexing, which pandas does not
    # guarantee to write through.
    for i in list(df2.columns):
        df2.loc[df2[i] > df2[i].quantile(.95), i] = df2[i].median()
    # Impute remaining missing values with the column median.
    for i in list(df2.columns):
        df2[i] = df2[i].fillna(df2[i].median())
    df2['HADMID_DAY'] = df2.index
    # Fold the PT columns into INR, then drop PT.
    df2['INR'] = df2['INR'] + df2['PT']
    df2['INR_std'] = df2['INR_std'] + df2['PT_std']
    df2['INR_min'] = df2['INR_min'] + df2['PT_min']
    df2['INR_max'] = df2['INR_max'] + df2['PT_max']
    del df2['PT']
    del df2['PT_std']
    del df2['PT_min']
    del df2['PT_max']
    df2.dropna(thresh=int(0.75 * len(df2.columns)), axis=0, inplace=True)
    df2.to_csv(file_name[0:-4] + '_24_hour_blocks.csv', index=False)
def add_admissions_columns(self, file_name):
    """Join admission-level columns onto a day-blocks file.

    Adds a BLACK indicator derived from the admission ETHNICITY and the
    ADMITTIME, writing <file_name>_plus_admissions.csv.

    Args:
        file_name: Path of the *_24_hour_blocks.csv file to extend.
    """
    df = pd.read_csv('./mimic_database/ADMISSIONS.csv')
    ethn_dict = dict(zip(df['HADM_ID'], df['ETHNICITY']))
    admittime_dict = dict(zip(df['HADM_ID'], df['ADMITTIME']))
    df_shard = pd.read_csv(file_name)
    df_shard['HADM_ID'] = df_shard['HADMID_DAY'].str.split('_').apply(lambda x: x[0])
    df_shard['HADM_ID'] = df_shard['HADM_ID'].astype('int')
    df_shard['ETHNICITY'] = df_shard['HADM_ID'].apply(lambda x: map_dict(x, ethn_dict))
    # na=False: unmapped admissions yield NaN ethnicity, which must not
    # propagate NaN into the boolean mask used below.
    black_condition = df_shard['ETHNICITY'].str.contains('.*black.*',
                                                         flags=re.IGNORECASE, na=False)
    df_shard['BLACK'] = 0
    # .loc assignment instead of chained indexing (df['BLACK'][cond] = 1),
    # which pandas does not guarantee to write through.
    df_shard.loc[black_condition, 'BLACK'] = 1
    del df_shard['ETHNICITY']
    df_shard['ADMITTIME'] = df_shard['HADM_ID'].apply(lambda x: map_dict(x, admittime_dict))
    df_shard.to_csv(file_name[0:-4] + '_plus_admissions.csv', index=False)
def add_patient_columns(self, file_name):
    """Join patient-level demographics onto a day-blocks file.

    Adds age (admission year minus birth year) and a gender dummy column,
    writing <file_name>_plus_patients.csv. Requires create_day_blocks()
    to have populated self.hadm_dict first.

    Args:
        file_name: Path of the *_plus_admissions.csv file to extend.
    """
    df = pd.read_csv('./mimic_database/PATIENTS.csv')
    dob_dict = dict(zip(df['SUBJECT_ID'], df['DOB']))
    gender_dict = dict(zip(df['SUBJECT_ID'], df['GENDER']))
    df_shard = pd.read_csv(file_name)
    df_shard['SUBJECT_ID'] = df_shard['HADMID_DAY'].apply(lambda x:
                                                          map_dict(x, self.hadm_dict))
    df_shard['DOB'] = df_shard['SUBJECT_ID'].apply(lambda x: map_dict(x, dob_dict))
    # Age is approximated as admission year minus birth year.
    df_shard['YOB'] = df_shard['DOB'].str.split('-').apply(lambda x: x[0]).astype('int')
    df_shard['ADMITYEAR'] = df_shard['ADMITTIME'].str.split('-').apply(lambda x: x[0]).astype('int')
    df_shard['AGE'] = df_shard['ADMITYEAR'].subtract(df_shard['YOB'])
    df_shard['GENDER'] = df_shard['SUBJECT_ID'].apply(lambda x: map_dict(x, gender_dict))
    gender_dummied = pd.get_dummies(df_shard['GENDER'], drop_first=True)
    # BUG FIX: rename() returns a new frame; the original discarded the
    # result, so the dummy column kept its raw 'M' name.
    gender_dummied = gender_dummied.rename(columns={'M': 'Male', 'F': 'Female'})
    COLUMNS = list(df_shard.columns)
    COLUMNS.remove('GENDER')
    df_shard = pd.concat([df_shard[COLUMNS], gender_dummied], axis=1)
    df_shard.to_csv(file_name[0:-4] + '_plus_patients.csv', index=False)
def clean_prescriptions(self, file_name):
    """Reduce PRESCRIPTIONS.csv to rows matching known drug features.

    Tags each prescription row with its canonical DRUG_FEATURE name and
    writes the filtered table to PRESCRIPTIONS_reduced.csv.

    Args:
        file_name: Unused; kept for interface compatibility with the
            other pipeline steps.
    """
    pid = ParseItemID()
    pid.prescriptions_init()
    pid.prescriptions.drop_duplicates(inplace=True)
    pid.prescriptions['DRUG_FEATURE'] = np.nan
    # (The original also read *file_name* into unused locals just to
    # discard them; that dead I/O has been removed.)
    for feature, pattern in zip(pid.script_features_names, pid.script_patterns):
        condition = pid.prescriptions['DRUG'].str.contains(pattern, flags=re.IGNORECASE)
        # .loc assignment instead of chained indexing so the write is
        # guaranteed to hit the underlying frame.
        pid.prescriptions.loc[condition, 'DRUG_FEATURE'] = feature
    # Keep only rows that matched some drug feature.
    pid.prescriptions.dropna(how='any', axis=0, inplace=True, subset=['DRUG_FEATURE'])
    pid.prescriptions.to_csv('./mimic_database/PRESCRIPTIONS_reduced.csv', index=False)
def add_prescriptions(self, file_name):
df_file = pd.read_csv(file_name)
with open('./mimic_database/PRESCRIPTIONS_reduced.csv', 'r') as f:
csvreader = csv.reader(f)
with open('./mimic_database/PRESCRIPTIONS_reduced_byday.csv', 'w') as g:
csvwriter = csv.writer(g)
first_line = csvreader.__next__()
print(first_line[0:3] + ['CHARTDAY'] + [first_line[6]])
csvwriter.writerow(first_line[0:3] + ['CHARTDAY'] + [first_line[6]])
for row in csvreader:
for i in pd.date_range(row[3], row[4]).strftime('%Y-%m-%d'):
csvwriter.writerow(row[0:3] + [i] + |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.